repo_name
stringlengths 7
111
| __id__
int64 16.6k
19,705B
| blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
151
| content_id
stringlengths 40
40
| detected_licenses
sequence | license_type
stringclasses 2
values | repo_url
stringlengths 26
130
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
42
| visit_date
unknown | revision_date
unknown | committer_date
unknown | github_id
int64 14.6k
687M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 12
values | gha_fork
bool 2
classes | gha_event_created_at
unknown | gha_created_at
unknown | gha_updated_at
unknown | gha_pushed_at
unknown | gha_size
int64 0
10.2M
⌀ | gha_stargazers_count
int32 0
178k
⌀ | gha_forks_count
int32 0
88.9k
⌀ | gha_open_issues_count
int32 0
2.72k
⌀ | gha_language
stringlengths 1
16
⌀ | gha_archived
bool 1
class | gha_disabled
bool 1
class | content
stringlengths 10
2.95M
| src_encoding
stringclasses 5
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 10
2.95M
| extension
stringclasses 19
values | num_repo_files
int64 1
202k
| filename
stringlengths 4
112
| num_lang_files
int64 1
202k
| alphanum_fraction
float64 0.26
0.89
| alpha_fraction
float64 0.2
0.89
| hex_fraction
float64 0
0.09
| num_lines
int32 1
93.6k
| avg_line_length
float64 4.57
103
| max_line_length
int64 7
931
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
hhr14/Speech2Face | 4,681,514,401,106 | 5017df1f43db3a736bee80de4e1a86b5cdcacb1a | 6b223b0c2575af9edc29d28d3daba21c2e23ff62 | /tools/lib/delf5.py | bcdec884f2a359cbc2654bb2ea86020eaa7fbd9d | [] | no_license | https://github.com/hhr14/Speech2Face | d91fc0cb845e99a6875ba6c0b67e526b8dad7343 | 7371eec10d698ea56ae7f501554f2c62f4da3a4b | refs/heads/master | "2020-08-13T10:16:13.025059" | "2019-12-23T13:32:11" | "2019-12-23T13:32:11" | 214,952,944 | 5 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import sys
# Reshape a raw float32 stream from stdin into an (N, dim) matrix on stdout.
# Usage: delf5.py <dim>   where dim is the number of values per row.
a = np.frombuffer(sys.stdin.buffer.read(), dtype='float32')
dim = int(sys.argv[1])
# Sanity-check the stream header: the first three floats must be zero
# -- presumably a fixed file-format preamble; TODO confirm the format spec.
assert a[0] == 0 and a[1] == 0 and a[2] == 0
# Skip the 5-float header before the payload (3 zeros + 2 more fields -- verify).
res = a[5:]
# Payload length must be an exact multiple of the row width.
assert len(res) % dim == 0
res = np.reshape(res, (-1, dim))
# Emit the matrix back as raw little-endian float32 bytes.
sys.stdout.buffer.write(res.astype('float32'))
| UTF-8 | Python | false | false | 277 | py | 41 | delf5.py | 39 | 0.642599 | 0.592058 | 0 | 9 | 29.777778 | 59 |
pphowakande/bakround-applicant | 6,055,903,892,107 | cd27d4d78ea6539dcc578816bbb0e8d24ab3890c | 964d79bf9b2ab5b5389514f8cd730f1fefe1ffc8 | /bakround_applicant/services/ondemandviewrefresherservice/consumer.py | ef9a0aa3a74a0889056df295368d619d0a6dcd9a | [] | no_license | https://github.com/pphowakande/bakround-applicant | d216368231d3a998ba12a3c4210d5508e3eb9beb | 6cf5081fe4fd7b4ee7a9b458043ad2513a90560e | refs/heads/master | "2022-01-18T23:03:37.240329" | "2020-02-13T18:24:05" | "2020-02-13T18:24:05" | 240,319,316 | 0 | 0 | null | false | "2022-01-05T08:14:38" | "2020-02-13T17:23:57" | "2020-02-13T18:25:02" | "2022-01-05T08:14:35" | 58,233 | 0 | 0 | 22 | JavaScript | false | false | __author__ = "natesymer"
from ..queue import QueueNames
from bakround_applicant.services.base import BaseConsumer
import django
import time
class Consumer(BaseConsumer):
    """Queue consumer that rebuilds the reporting materialized views on demand.

    Any message arriving on the on-demand refresher queue triggers a
    concurrent refresh of ``profile_info_view`` and ``recent_scores_view``;
    the message body itself is ignored.
    """
    service_name = "ON_DEMAND_VIEW_REFRESHER_SERVICE"
    queue_name = QueueNames.on_demand_view_refresher
    def handle_message(self, body):
        # body is unused: receiving anything at all triggers a refresh.
        try:
            time.sleep(20)  # delay before refreshing -- presumably to let a burst of triggers coalesce; TODO confirm intent
            with django.db.connection.cursor() as cursor:
                # Postgres advisory lock 102 serializes refreshes across worker processes.
                cursor.execute("select pg_advisory_lock(102)")
                try:
                    # CONCURRENTLY keeps the views readable while they rebuild.
                    cursor.execute("REFRESH MATERIALIZED VIEW CONCURRENTLY profile_info_view")
                    cursor.execute("REFRESH MATERIALIZED VIEW CONCURRENTLY recent_scores_view")
                finally:
                    # Always release the advisory lock, even if a refresh failed.
                    cursor.execute("select pg_advisory_unlock(102)")
        except Exception as e:
            self.logger.exception(e)
| UTF-8 | Python | false | false | 853 | py | 422 | consumer.py | 261 | 0.63306 | 0.623681 | 0 | 25 | 33.12 | 95 |
ad1v7/ternpy | 18,339,510,363,911 | 64e905cb259f4714c53a7012586fe5f9a6cd2e1f | 0273a9c46f1df09b83b45ae24ffc9cc812079ebb | /ternpy/commands/__init__.py | 7d17e5506ace15c30d04127feaa32a2ddc7ab616 | [] | no_license | https://github.com/ad1v7/ternpy | bb13c0723038edf6680405f31884e1011ba63167 | 9cd8745c2dee99f3fa186349b2c94e7b960b6690 | refs/heads/master | "2021-01-25T10:55:45.388544" | "2017-07-20T14:18:04" | "2017-07-20T14:18:04" | 93,890,485 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .config import *
# Re-export the remaining ternpy command modules so callers can reach every
# command via `ternpy.commands` (the .config module is re-exported above).
from .newternary import *
from .extract import *
from .tplot import *
from .getmeta import *
from .hi import *
| UTF-8 | Python | false | false | 133 | py | 17 | __init__.py | 16 | 0.729323 | 0.729323 | 0 | 6 | 21.166667 | 25 |
adrianhust/Santa_hat | 15,659,450,807,464 | dcc60283dc407bb1c42692beb140d7aee47fbddc | 1b81daee37b99f6195268ab9d842ec1172b9a165 | /santa_hat.py | 0f2c4b182ad1271a615057e8a0c9937e502d54f5 | [] | no_license | https://github.com/adrianhust/Santa_hat | 762e304545d85bb7131a0bcbf60d2fff8a893a04 | 05585127a61f20c8606d43c0caa074c45db16d44 | refs/heads/master | "2021-05-14T19:32:57.730587" | "2017-12-27T10:07:39" | "2017-12-27T10:07:39" | 115,505,762 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
from flask import Flask, request, url_for, send_from_directory
from werkzeug import secure_filename
import cv2
import face
import time
# File extensions the uploader will accept.
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__)
# Uploads are stored in the process's current working directory.
app.config['UPLOAD_FOLDER'] = os.getcwd()
# Hat overlay image used by the face-compositing step.
app.config['HAT_FILE'] = os.path.join(app.config['UPLOAD_FOLDER'], 'Santa-hat-icon.png')
app.config['MAX_CONTENT_LENGTH'] = 16 * 4096 * 4096  # request size cap: 256 MiB
# Minimal upload form returned by every page render.
html = '''
<!DOCTYPE html>
<title>Upload File</title>
<h1>Photo Upload</h1>
<form method=post enctype=multipart/form-data>
<input type=file name=file>
<input type=submit value=upload>
</form>
'''
def allowed_file(filename):
    """Return True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/uploads/<filename>')
def uploaded_file(filename):
    """Serve a previously saved image straight from the upload folder."""
    return send_from_directory(app.config['UPLOAD_FOLDER'],
                               filename)
@app.route('/', methods=['GET', 'POST'])
def upload_file():
    """Show the upload form; on POST, composite a santa hat onto the photo.

    Saves the upload, runs face.santa() with the configured hat image,
    writes the result under a timestamped name, and returns the form plus
    an <img> tag pointing at the generated file. Invalid uploads are logged
    to stderr and fall through to re-rendering the bare form.
    """
    if request.method == 'POST':
        file = request.files['file']
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            # Renamed from `upload_file` -- the original shadowed this view
            # function's own name with a local variable.
            upload_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            hat_file = app.config['HAT_FILE']
            file.save(upload_path)
            face_img = face.santa(upload_path, hat_file)
            save_img = "{}_{}".format(int(time.time()), filename)
            cv2.imwrite(save_img, face_img)
            file_url = url_for('uploaded_file', filename=save_img)
            return html + '<br><img src=' + file_url + '>'
        else:
            # Bug fix: the original used Python 2 syntax
            # `print >> sys.stderr, ...`, which under Python 3 evaluates
            # `print >> sys.stderr` and raises TypeError at runtime.
            print("img: %s format invalid" % file.filename, file=sys.stderr)
    return html
# Bind to all interfaces so the demo is reachable from other machines.
if __name__ == '__main__':
    app.run(host="0.0.0.0", port=9998)
| UTF-8 | Python | false | false | 1,884 | py | 4 | santa_hat.py | 2 | 0.599257 | 0.585987 | 0 | 60 | 30.4 | 89 |
jancuk/shopcart | 446,676,647,073 | 4c1b17466f14ce2c22625748e5e671c0070296c6 | abebbab13a3cd8f330c99aab6e0b7f80c7ad7f6d | /ecommerce/admin.py | 744198efb4fc664161843763b90093a06b30a7c0 | [] | no_license | https://github.com/jancuk/shopcart | 5769d9d7d037031788e6435f8402b9c2de35a005 | fd410a2aa15e645825ad456d0a0ebb1f1090ad2b | refs/heads/master | "2021-01-10T10:35:47.948273" | "2015-10-22T04:00:38" | "2015-10-22T04:00:38" | 44,721,137 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.contrib import admin
from .models import *
# Register your models here.
class ProductAdmin(admin.ModelAdmin):
    """Admin options for Product: show title/price plus an image thumbnail."""
    # image_tag is a model helper rendering the product image in the list view.
    list_display = ['title','price','image_tag']
    readonly_fields = ('image_tag',)
admin.site.register(Brand)
# Bug fix: ProductAdmin was defined but never attached, so its custom
# list_display/readonly_fields never took effect in the admin.
admin.site.register(Product, ProductAdmin)
admin.site.register(Category)
admin.site.register(CartItem)
admin.site.register(Cart)
| UTF-8 | Python | false | false | 352 | py | 9 | admin.py | 6 | 0.755682 | 0.755682 | 0 | 13 | 26.076923 | 48 |
KbHarold/python-challenge | 8,753,143,394,186 | 1e116b5c2fb20e471eee9e5e22614243a16e2f63 | f34c9d599c7bd81f6b730eb64e839cb08b000768 | /PyPoll/main.py | f8f38b97d18a2a6f0cb6283f3069c844e3452a38 | [] | no_license | https://github.com/KbHarold/python-challenge | d2e2146db1f81a92df52f031c68d442d2f18b20d | 03ea523618cb07729dc5db90897bb9242298724f | refs/heads/master | "2020-04-19T00:53:08.572448" | "2019-02-06T00:06:17" | "2019-02-06T00:06:17" | 167,858,430 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import csv
from collections import Counter

# Input CSV (columns: Voter ID, County, Candidate) and output report paths.
poll_path = os.path.join("Resources", "election_data.csv")
output_path = os.path.join("Output", "Election_Results.txt")

# Tally the ballots in a single pass. Using Counter generalizes the script to
# any number of candidates (the original hard-coded exactly four) and avoids
# the quadratic list.count() calls. The original also evaluated
# `csv.Sniffer().has_header` without calling it -- an always-truthy method
# reference -- so the header row is now skipped unconditionally.
vote_counts = Counter()
with open(poll_path, "r", newline="") as csvfile:
    csvreader = csv.reader(csvfile, delimiter=",")
    next(csvreader)  # skip the header row
    for row in csvreader:
        vote_counts[row[2]] += 1

total_votes = sum(vote_counts.values())

# (name, votes, percent-of-total rounded to 2 decimals) per candidate,
# in first-seen order, matching the original column order.
results = [
    (name, votes, round(votes / total_votes * 100, 2))
    for name, votes in vote_counts.items()
]

# Every candidate tied at the maximum vote count wins (preserves the
# original's tie handling, which collected all max-vote names).
max_votes = max(vote_counts.values())
winner = [name for name, votes in vote_counts.items() if votes == max_votes]

# Build the report once so the console and the text file always agree
# (the original printed and wrote slightly different layouts).
lines = [
    "Election Results",
    "-----------------------------------",
    "Total Votes: " + str(total_votes),
    "-----------------------------------",
]
for name, votes, percent in results:
    lines.append(f"{name}: {percent}% ({votes})")
lines.append("-----------------------------------")
lines.append("Winner: " + str(winner).strip('[]'))
lines.append("-----------------------------------")

print("\n".join(lines))

with open(output_path, "w") as txtfile:
    txtfile.write("\n".join(lines) + "\n")
RajatPuri09/100DaysOfCode | 12,610,024,016,502 | 20846491fdb6ff69e643f5dc3f1f4416034ba075 | 8c06f6d684c37e324dfaed34c53148b5d23a2768 | /Day 10 - Calculator/Calculator.py | 6894839a865dc9e368ee6a42d353992c2671c5ea | [] | no_license | https://github.com/RajatPuri09/100DaysOfCode | 724c2a275691e0e2d48d7726555bed08664ff548 | 2f1d5d710d84764ff6821808e0fa9c49bdeca8f3 | refs/heads/main | "2023-08-13T23:17:27.885793" | "2021-09-17T07:44:45" | "2021-09-17T07:44:45" | 403,476,544 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import art
# Show the ASCII-art banner on startup.
print(art.logo)
def add(a, b):
    """Return the sum of a and b."""
    total = a + b
    return total
def subtract(a, b):
    """Return a minus b."""
    difference = a - b
    return difference
def multiply(a, b):
    """Return the product of a and b."""
    product = a * b
    return product
def divide(a, b):
    """Return a divided by b (true division; ZeroDivisionError if b == 0)."""
    quotient = a / b
    return quotient
# Dispatch table: maps the operator symbol the user types to its function.
operations = {
    "+" : add,
    "-" : subtract,
    "*" : multiply,
    "/" : divide,
}
def calculator():
    """Interactive calculator loop chaining operations on a running result.

    After each operation: 'y' keeps the answer as the next left operand,
    'n' starts a fresh calculation, anything else exits the loop.
    """
    num1 = float(input("Enter the first number "))
    should_continue = True
    while should_continue:
        # List the available operator symbols.
        for operation in operations:
            print(operation)
        selected_operation = input("Pick an operation")
        num2 = float(input("Enter the next number: "))
        # Unknown symbols raise KeyError here -- no input validation is done.
        calculation_function = operations[selected_operation]
        answer = calculation_function(num1, num2)
        print(f"{num1} {selected_operation} {num2} = {answer}\n")
        choice = input(f"Type 'y' to continue with {answer}, or type 'n' to start a new calculation, or type 'e' to exit")
        if choice == 'y':
            num1 = answer
        elif choice == 'n':
            # NOTE(review): recursion restarts a nested session; when it
            # returns, this outer loop resumes with the old num1 -- confirm
            # that is the intended behavior.
            calculator()
        else:
            should_continue = False
calculator()
peilinsun/filestagram-app | 5,875,515,260,967 | 0628c48ed576c37c1126c69e0ea437ea872a8142 | ca244f5861223821fe92b0c393cac5fdb536e554 | /app/utils/utils.py | e0047645814c8bee8a65b6c5dd90d36d3e3b312a | [] | no_license | https://github.com/peilinsun/filestagram-app | 2277cd368f1a51f800ab1ee095d70791b2cc1995 | 1addd4d5ebc3017ef5d3093811d3662f487f8275 | refs/heads/main | "2023-02-13T15:48:37.452501" | "2021-01-12T09:33:50" | "2021-01-12T09:33:50" | 328,930,874 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask_uploads import UploadSet, ALL, extension
import os
import posixpath
from flask import render_template, session, redirect, url_for, request, abort, current_app, flash, make_response
from math import ceil
# from werkzeug import secure_filename, FileStorage
def paginate(items, page, per_page, error_out=False):
    """Slice *items* for the requested page and wrap the slice in a Pagination.

    A page number below 1 is clamped to 1, or 404s when error_out is True;
    an empty page other than page 1 also 404s when error_out is True.
    """
    if page < 1:
        if error_out:
            abort(404)
        page = 1
    start = (page - 1) * per_page
    page_items = items[start:start + per_page]
    if error_out and page != 1 and not page_items:
        abort(404)
    return Pagination(page, per_page, len(items), page_items, items)
class Pagination(object):
    """Describes one page of a sliced item sequence.

    Mirrors the interface of the pagination helper returned by
    :meth:`BaseQuery.paginate` in Flask-SQLAlchemy so the same templates can
    render it; it can also be built by hand from any list. :meth:`prev` and
    :meth:`next` delegate to :func:`paginate` over the full item list.
    """

    def __init__(self, page, per_page, total, items, all_items):
        self.page = page              # current page number (1-indexed)
        self.per_page = per_page      # number of items displayed per page
        self.total = total            # total item count across every page
        self.items = items            # the slice of items for this page
        self.all_items = all_items    # the complete, unsliced item sequence

    @property
    def pages(self):
        """The total number of pages (0 when per_page is 0)."""
        if self.per_page == 0:
            return 0
        return int(ceil(self.total / float(self.per_page)))

    def prev(self, error_out=False):
        """Returns a :class:`Pagination` object for the previous page."""
        return paginate(self.all_items, self.page - 1, self.per_page, error_out)

    @property
    def prev_num(self):
        """Number of the previous page, or None on the first page."""
        return self.page - 1 if self.has_prev else None

    @property
    def has_prev(self):
        """True if a previous page exists."""
        return self.page > 1

    def next(self, error_out=False):
        """Returns a :class:`Pagination` object for the next page."""
        return paginate(self.all_items, self.page + 1, self.per_page, error_out)

    @property
    def has_next(self):
        """True if a next page exists."""
        return self.page < self.pages

    @property
    def next_num(self):
        """Number of the next page, or None on the last page."""
        return self.page + 1 if self.has_next else None

    def iter_pages(self, left_edge=2, left_current=2,
                   right_current=5, right_edge=2):
        """Yield the page numbers to show in a pager widget.

        Numbers near both edges and around the current page are yielded;
        each skipped run is collapsed into a single None, e.g.
        1 2 ... 7 8 [9] 10 ... 19 20. The four parameters control how many
        numbers appear in each region.
        """
        previous = 0
        for candidate in range(1, self.pages + 1):
            near_left_edge = candidate <= left_edge
            near_current = (self.page - left_current - 1 < candidate
                            < self.page + right_current)
            near_right_edge = candidate > self.pages - right_edge
            if near_left_edge or near_current or near_right_edge:
                if previous + 1 != candidate:
                    yield None
                yield candidate
                previous = candidate
class CustomizeUploadSet(UploadSet):
    """UploadSet subclass whose save() always writes below /tmp.

    Everything else is inherited from flask_uploads.UploadSet; only the
    destination handling in :meth:`save` is customized.
    """

    def __init__(self, name='files', extensions=ALL, default_dest=None):
        """Forward set name, allowed extensions and default destination
        to the flask_uploads base class unchanged."""
        super(CustomizeUploadSet, self).__init__(name, extensions, default_dest)

    def save(self, storage, folder=None, name=None):
        """Persist *storage* under /tmp and return its set-relative path.

        storage: the incoming file object (must provide .save(path))
        folder: optional sub-folder under the destination
        name: optional explicit file name; when no folder is given and the
              name contains '/', it is split into folder and file parts
        """
        # Force every save into /tmp regardless of the configured destination.
        self.config.destination = "/tmp"
        if folder is None and name is not None and "/" in name:
            folder, name = os.path.split(name)
        filename = name
        if folder:
            dest_dir = os.path.join(self.config.destination, folder)
        else:
            dest_dir = self.config.destination
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        # Let the base class pick a fresh name when the target already exists.
        if os.path.exists(os.path.join(dest_dir, filename)):
            filename = self.resolve_conflict(dest_dir, filename)
        storage.save(os.path.join(dest_dir, filename))
        return posixpath.join(folder, filename) if folder else filename
| UTF-8 | Python | false | false | 5,621 | py | 19 | utils.py | 12 | 0.574124 | 0.569318 | 0 | 168 | 32.446429 | 112 |
2575829997/jiaoTongWuLianWang | 11,141,145,194,652 | 1a8306c808a6c7c09f61518ebfbd5eff7889415b | 2e87b5b868a39b8d58b788d7865bfdca6f90cbbc | /dataVisual.py | 308696208b575c4bf79fd7413bbacab70a2f341d | [] | no_license | https://github.com/2575829997/jiaoTongWuLianWang | 542c4b0a90895ee6e079e3561e5cc334c3a7af75 | f56d746753261eb7ee65c24331ec4a5bc75182bc | refs/heads/master | "2022-04-20T17:25:22.607122" | "2020-04-20T15:38:40" | "2020-04-20T15:38:40" | 257,318,567 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import xlrd
from PIL import Image
from PIL import ImageTk
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
import numpy as np
import tkinter
from tkinter import StringVar
import tkinter.font as tkFont
import tkinter.messagebox #弹窗库
import dataVisual as dv
# Column counts per sheet: temperature 12, displacement 45, strain 12.
# Shared matplotlib figure/axes reused by every draw_* callback below.
f = Figure(figsize=(5, 4), dpi=100)
f_plot = f.add_subplot(111)
# Source workbook of bridge sensor readings (path is machine-specific).
filename=r"E:\qq下载的\交通物联网\naodu625_strain_temp.xls"
data = xlrd.open_workbook(filename)
# Default sheet '温度' (temperature); draw_D/draw_Y switch sheets locally.
sheet_name='温度'
sh = data.sheet_by_name(sheet_name)
def draw_T(col):
    """Plot one temperature column (1-based index *col*) on the shared axes.

    Reads from the module-level temperature sheet; indexes beyond its 12
    columns are silently ignored. Requires draw_data() to have created the
    global canvas first, otherwise canvs is undefined.
    """
    f_plot.clear()
    print(col)
    test_TEM = int(col)-1  # convert 1-based user input to 0-based column index
    if test_TEM<13:
        col_test_TEM = sh.col_values(test_TEM,start_rowx=1)  # read from the first data row (row 0 is the header)
        x=np.arange(0,1078,1)  # sample index -- assumes 1078 data rows; TODO confirm against the sheet
        y=np.array(col_test_TEM)
        print(y)
        f_plot.set(title='temprature_self',xlabel='time /10min',ylabel='temprature')
        f_plot.plot(x,y)
        canvs.draw()
def draw_D(col):
    """Plot one column of the '应变' (strain) sheet for 1-based index *col*.

    NOTE(review): despite the name (displacement?) this opens the strain
    sheet, while the 46-column guard matches the 45-column displacement
    sheet -- verify which sheet was intended.
    """
    f_plot.clear()
    test_VD = int(col)-1  # 1-based user input -> 0-based column index
    if test_VD<46:
        sheet_name='应变'
        sh = data.sheet_by_name(sheet_name)  # local sh shadows the module-level temperature sheet
        col_test_VD = sh.col_values(test_VD,start_rowx=1)
        x=np.arange(0,1078,1)  # assumes 1078 data rows -- TODO confirm
        y=np.array(col_test_VD)
        print(y)
        f_plot.set(title='ver_d_self',
                   xlabel='time /10min',ylabel='ver_d')
        f_plot.plot(x,y)
        canvs.draw()
def draw_Y(col):
    """Plot one column of the '挠度' (deflection) sheet for 1-based index *col*.

    NOTE(review): the axis label says YingBian (strain) but the sheet opened
    is deflection -- confirm which quantity this is meant to show.
    """
    f_plot.clear()
    test_YB = int(col)-1  # 1-based user input -> 0-based column index
    if test_YB<13:
        sheet_name='挠度'
        sh_y= data.sheet_by_name(sheet_name)
        col_test_YB = sh_y.col_values(test_YB,start_rowx=1)
        x=np.arange(0,1078,1)  # assumes 1078 data rows -- TODO confirm
        y=np.array(col_test_YB)
        print(y)
        f_plot.set(title='YB_self',
                   xlabel='time /10min',ylabel='YingBian')
        f_plot.plot(x,y)
        canvs.draw()
def draw_data():
    """Open the visualization window.

    Builds a Toplevel with the shared matplotlib figure embedded at the top,
    followed by three entry/button rows that let the user pick a temperature,
    displacement or strain column (each button calls the matching draw_*).
    """
    root2 = tkinter.Toplevel()
    root2.title('数据可视化(周期估算)')
    root2.geometry("800x850+650+100")  # window size and screen position
    # Expose the canvas globally so the draw_* callbacks can redraw it.
    global canvs
    canvs=FigureCanvasTkAgg(f, root2)
    canvs.get_tk_widget().pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
    ft1 = tkFont.Font(family='Fixdsys', size=15, weight=tkFont.BOLD)
    # tkinter.Button(root2, text='pic2', command=draw_picture2).pack()
    # tkinter.Button(root2, text='pic3', command=draw_picture3).pack()
    # Row 1: pick a temperature column (up to 12).
    L1 = tkinter.Label(root2, text="选择温度(最多到12列)",font=ft1)
    L1.pack(fill='both')
    var1=StringVar()
    E11 = tkinter.Entry(root2, bd =5, textvariable=var1)
    E11.pack(fill='both')
    B1 = tkinter.Button(root2, text ="选择测试点", command = lambda:draw_T(var1.get()),bg = 'green',font=ft1)
    B1.pack(fill='both')
    L1 = tkinter.Label(root2, text="-------------------------------------",font=ft1)
    L1.pack(fill='both')
    # Row 2: pick a displacement column (up to 45).
    L1 = tkinter.Label(root2, text="选择位移(最多到45列)",font=ft1)
    L1.pack(fill='both')
    var2=StringVar()
    E12 = tkinter.Entry(root2, bd =5,textvariable=var2)
    E12.pack(fill='both')
    B1 = tkinter.Button(root2, text ="选择测试点", command = lambda:draw_D(var2.get()),bg = 'green',font=ft1)
    B1.pack(fill='both')
    L1 = tkinter.Label(root2, text="-------------------------------------",font=ft1)
    L1.pack(fill='both')
    # Row 3: pick a strain column (up to 12).
    L1 = tkinter.Label(root2, text="选择应变(最多到12列)",font=ft1)
    L1.pack(fill='both')
    var3=StringVar()
    E13 = tkinter.Entry(root2, bd =5,textvariable=var3)
    E13.pack(fill='both')
    B1 = tkinter.Button(root2, text ="选择测试点", command = lambda:draw_Y(var3.get()),bg = 'green',font=ft1)
    B1.pack(fill='both')
    root2.mainloop()
karthikpappu/pyc_source | 15,315,853,383,168 | 0250a1c70270f2e3d661a513e271c6243a8a62f4 | 91fa095f423a3bf47eba7178a355aab3ca22cf7f | /pycfiles/collective.advancedsearchtamer-1.0.tar/config.py | af12774c1d6a487ea9adedf4272313269f858038 | [] | no_license | https://github.com/karthikpappu/pyc_source | 0ff4d03e6d7f88c1aca7263cc294d3fa17145c9f | 739e7e73180f2c3da5fd25bd1304a3fecfff8d6e | refs/heads/master | "2023-02-04T11:27:19.098827" | "2020-12-27T04:51:17" | "2020-12-27T04:51:17" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # uncompyle6 version 3.6.7
# Python bytecode 2.4 (62061)
# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]
# Embedded file name: build/bdist.macosx-10.3-i386/egg/collective/ads/config.py
# Compiled at: 2009-01-02 03:03:18
from Products.CMFCore.permissions import setDefaultRoles
# Package identifiers used throughout the product.
PROJECTNAME = 'collective.ads'
TOOL_TITLE = 'AdsAdmin'
# Feature-detect Plone 2.1 by probing for its migrations module.
try:
    from Products.CMFPlone.migrations import v2_1
except ImportError:
    HAS_PLONE21 = False
else:
    HAS_PLONE21 = True
# Permission required to add this product's content types.
DEFAULT_ADD_CONTENT_PERMISSION = 'Add portal content'
setDefaultRoles(DEFAULT_ADD_CONTENT_PERMISSION, ('Manager', 'Owner'))
# Module globals -- presumably handed to product registration helpers; verify.
product_globals = globals()
# Registration lists are empty by default; AppConfig may override them below.
DEPENDENCIES = []
PRODUCT_DEPENDENCIES = []
STYLESHEETS = []
JAVASCRIPTS = []
# Optional per-deployment overrides of the defaults above.
try:
    from collective.ads.AppConfig import *
except ImportError:
    pass
cberardi4/Jobcoin | 5,274,219,847,462 | 0bd3de4e23587312c3c97d3ca6d03bca60990fd8 | 39320e66b1cf33e1af43b97163ac2943de268e67 | /tests/test_address_api.py | 91df76cc01b41f374cc2b636242db5defd5ee9d6 | [] | no_license | https://github.com/cberardi4/Jobcoin | d697b6a15f61a8e585434085d2b0320c09f310fc | b038c89ab300193f5ae2316517cf58ee222674c7 | refs/heads/main | "2023-02-01T00:12:35.410743" | "2020-12-18T19:05:08" | "2020-12-18T19:05:08" | 321,864,096 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pytest
from jobcoin.classes.transaction_api import Transaction_API
from jobcoin.classes.address_api import Address_API
from jobcoin.classes.transaction import Transaction
def test_check_activated_address_invalid():
    """A never-activated address must be reported back as 'unactivated'."""
    api = Address_API()
    status = api.check_activated_address('randomaddress')
    assert status == 'unactivated'
def test_check_activated_address_valid():
    """A known activated address must be reported back as 'activated'."""
    api = Address_API()
    status = api.check_activated_address('Tina')
    assert status == 'activated'
def test_address_info():
    '''
    Test that an account's info contains both its balance and transactions.
    '''
    a_api = Address_API()
    result = a_api.get_address_info('Tina')
    payload = result.json()
    # Bug fix: the original `assert 'balance' and 'transactions' in keys`
    # only checked 'transactions' -- the bare string 'balance' is always
    # truthy and was never tested for membership.
    assert 'balance' in payload and 'transactions' in payload
| UTF-8 | Python | false | false | 976 | py | 11 | test_address_api.py | 10 | 0.70082 | 0.70082 | 0 | 29 | 32.655172 | 92 |
ALMPartners/ahjo | 7,060,926,250,833 | 34bf7119c8a24cfd6bcb1d938de39e5ae856cd5b | 89f5c69d2905a5e5381141aad202a87add6d247a | /src/ahjo/database_utilities/conn_info.py | 98cdbb2305f39e281f500bd3896ea53d42e7b7c3 | [
"Apache-2.0"
] | permissive | https://github.com/ALMPartners/ahjo | ad9614363e303f7b768bd3a9ad2aaad1fa17a28f | 5e86f3f0074994b077614f88eda291218e651d67 | refs/heads/master | "2023-06-26T07:23:47.418692" | "2023-06-13T10:44:03" | "2023-06-13T10:44:03" | 210,797,645 | 4 | 1 | Apache-2.0 | false | "2022-12-14T08:15:24" | "2019-09-25T08:45:52" | "2022-12-12T06:56:42" | "2022-12-14T08:15:22" | 234 | 1 | 1 | 0 | Python | false | false | # Ahjo - Database deployment framework
#
# Copyright 2019 - 2022 ALM Partners Oy
# SPDX-License-Identifier: Apache-2.0
"""Utily for extracting connection info from configuration json.
"""
import importlib
from typing import Union
from ahjo.credential_handler import get_credentials
def create_conn_info(conf: dict) -> dict:
    """Create a dictionary holding all important items for creating a connection to database.
    Call get_credentials to either read credentials from file or ask them from user.

    Arguments
    ---------
    conf
        Project configuration loaded from JSON.

    Returns
    -------
    dict
        Dictionary with the following keys: host, port, server, database, driver,
        dialect, username, password, azure_auth, token, odbc_trust_server_certificate and odbc_encrypt.
    """
    host = conf.get('target_server_hostname')
    port = conf.get('sql_port')
    server = _create_server_string(host, port)
    database = conf.get('target_database_name')
    driver = conf.get('sql_driver')
    dialect = conf.get('sql_dialect', 'mssql+pyodbc')
    azure_auth = conf.get('azure_authentication')
    username_file = conf.get("username_file")
    password_file = conf.get("password_file")
    odbc_trust_server_certificate = conf.get("odbc_trust_server_certificate", "no")
    odbc_encrypt = conf.get("odbc_encrypt", "yes")
    token = None
    username = None
    password = None
    # Case-insensitive comparison below; None stays None.
    azure_auth_lower = azure_auth.lower() if azure_auth is not None else None
    if azure_auth in ('ActiveDirectoryIntegrated', 'ActiveDirectoryInteractive'):
        # AD Integrated/Interactive: the driver performs the authentication,
        # so only a username may be needed and no password is prompted for.
        username, password = get_credentials(
            usrn_file_path=username_file,
            pw_file_path=password_file,
            pw_prompt=None # do not ask for password
        )
    elif azure_auth_lower == "azureidentity":
        # Token-based auth via the Azure CLI login. azure.identity and struct
        # are imported lazily so non-Azure deployments don't need the package.
        azure = importlib.import_module('.identity', 'azure')
        struct = importlib.import_module("struct")
        azure_identity_settings = conf.get("azure_identity_settings")
        token_url = azure_identity_settings.get("token_url") if isinstance(azure_identity_settings, dict) and "token_url" in azure_identity_settings else "https://database.windows.net/.default"
        azure_credentials = azure.AzureCliCredential()
        raw_token = azure_credentials.get_token(
            token_url # The token URL for any Azure SQL database
        ).token.encode("utf-16-le")
        raw_token_len = len(raw_token)
        # Pack the token as a length-prefixed UTF-16-LE blob -- the layout the
        # ODBC driver expects for an access token attribute; verify against
        # the driver documentation.
        token = struct.pack(f"<I{raw_token_len}s", raw_token_len, raw_token)
    else:
        # Default: SQL/basic authentication with username and password.
        username, password = get_credentials(
            usrn_file_path = username_file,
            pw_file_path = password_file
        )
    return {
        'host': host,
        'port': port,
        'server': server,
        'database': database,
        'driver': driver,
        'dialect': dialect,
        'username': username,
        'password': password,
        'azure_auth': azure_auth,
        'token': token,
        'odbc_trust_server_certificate': odbc_trust_server_certificate,
        'odbc_encrypt': odbc_encrypt
    }
def _create_server_string(hostname: str, server_port: Union[str, int]) -> str:
if server_port is not None and server_port != 0:
server_string = str(hostname) + ',' + str(server_port)
else:
server_string = str(hostname)
return server_string
| UTF-8 | Python | false | false | 3,341 | py | 87 | conn_info.py | 62 | 0.64801 | 0.644119 | 0 | 87 | 37.402299 | 193 |
VaishnaveeRN18102000/GestureVolumeController | 15,461,882,281,109 | 4d4261dd28d85c82c144356ea6a0aefa7f3c272c | a14cd8c4fd9aad4f19a182e8fc17c1b6754603f3 | /VolumeController.py | 4d2196fcdfc49721b5d0626a34b389ea7f2e61e4 | [] | no_license | https://github.com/VaishnaveeRN18102000/GestureVolumeController | 3ac78c543023c6a3c99718b9892409cf366c7249 | d13f042631da71e2e64682a1ed9e9a30338aeb32 | refs/heads/master | "2023-05-01T00:47:55.698117" | "2021-05-21T07:21:25" | "2021-05-21T07:21:25" | 367,112,979 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import numpy as np
import time
from ctypes import cast, POINTER
from comtypes import CLSCTX_ALL
from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume
import HandTracker as ht
# Webcam capture resolution (width, height).
wc, hc = 640, 480
cam = cv2.VideoCapture(0)
cam.set(3, wc)  # 3 == CAP_PROP_FRAME_WIDTH
cam.set(4, hc)  # 4 == CAP_PROP_FRAME_HEIGHT
pt = 0  # previous-frame timestamp for the FPS readout
boundbox = []  # hand bounding box returned by the detector
# Bind the system speaker endpoint through pycaw/COM.
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
volrange = volume.GetVolumeRange()
minvol = volrange[0]  # endpoint minimum level (unused below)
maxvol = volrange[1]  # endpoint maximum level (unused below)
# UI state: bar top y-coordinate, applied volume, hand area, label colour.
volbar = 400
sysvol = 0
volper = 0
ar = 0
colsysvol = (255, 0, 0)
detector = ht.handDetector(detectionCon=0.75, maxHands=1)
while True:
    suc, img = cam.read()
    img = detector.findHands(img)
    lmList, boundbox = detector.findPosition(img)
    if len(lmList) != 0:
        # Scaled bounding-box area gates the gesture to a sane hand distance.
        ar = (boundbox[2] - boundbox[0]) * (boundbox[3] - boundbox[1]) // 100
        if 250 <= ar <= 1500:
            # Distance between landmarks 4 and 8 (thumb/index tips) drives volume.
            length, img, liList = detector.findLength(4, 8, img)
            fingers = detector.checkFingersUp()
            print(fingers)
            # Map the 30-250 px pinch distance onto the bar and a 0-100 scale.
            volbar = np.interp(length, [30, 250], [400, 150])
            volper = np.interp(length, [30, 250], [0, 100])
            incr = 5
            volper = incr * round(volper / incr)  # snap to 5 % steps
            if not fingers[4]:
                # Finger index 4 down (presumably the pinky -- verify against
                # HandTracker) confirms the gesture: apply the volume.
                cv2.circle(img, (liList[4], liList[5]), 9, (255, 0, 0), cv2.FILLED)
                volume.SetMasterVolumeLevelScalar(volper/100, None)
                colsysvol = (0, 255, 0)
            else:
                colsysvol = (255, 0, 0)
    # Overlay: volume bar outline + fill, percentage, applied volume, FPS.
    cv2.rectangle(img, (50, 150), (85, 400), (255, 0, 0), 2)
    cv2.rectangle(img, (50, int(volbar)), (85, 400), (255, 0, 0), cv2.FILLED)
    cv2.putText(img, f'{int(volper)} %', (40, 450), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0), 2)
    sysvol = int(volume.GetMasterVolumeLevelScalar() * 100)
    cv2.putText(img, f'Volume Set: {int(sysvol)}', (400, 70), cv2.FONT_HERSHEY_PLAIN, 1, colsysvol, 2)
    ct = time.time()
    fps = 1 / (ct - pt)
    pt = ct
    cv2.putText(img, f'FPS: {int(fps)}', (40, 70), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0), 2)
    cv2.imshow("Camera", img)
    cv2.waitKey(1)
| UTF-8 | Python | false | false | 2,124 | py | 2 | VolumeController.py | 1 | 0.609228 | 0.531544 | 0 | 56 | 36.928571 | 102 |
nhanvolac/ThucHanh_Bai3_myoffice | 9,105,330,677,806 | 2a7de2b85f9bde0208237b76d173f8f1c30555c4 | 4b5205e9cc9b5a0f3cc4242242a402ba3821bf1d | /officemaster/apps.py | fce4fc2a91cde557395344ed8861a7881c98348c | [] | no_license | https://github.com/nhanvolac/ThucHanh_Bai3_myoffice | c12701e3706555da7b8c34cd6707b2857335f0c1 | 267e4d7e303692f41aea249c5f2f5de724467a04 | refs/heads/master | "2022-12-14T16:54:56.218834" | "2020-09-12T02:09:53" | "2020-09-12T02:09:53" | 294,851,969 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.apps import AppConfig
class OfficemasterConfig(AppConfig):
    """Django application configuration for the 'officemaster' app."""
    # Dotted application label Django uses to register the app.
    name = 'officemaster'
| UTF-8 | Python | false | false | 99 | py | 3 | apps.py | 3 | 0.777778 | 0.777778 | 0 | 5 | 18.8 | 36 |
kanha95/BBC_Crawler | 4,475,355,960,227 | 5c310a68694aa3a72a96880634f2f0c0a7ca285e | 6800086e4735913f42850d0c592c20770448222a | /bbc_crawler.py | 2cd591f207c25461d95aae0c2cb2522ac9a10cd6 | [] | no_license | https://github.com/kanha95/BBC_Crawler | d2ca8e8368d22747f2ce533172dc23a60f2c9cd4 | e352659bc1e9100d1d5c8f8868150d0453e53792 | refs/heads/master | "2020-04-18T13:23:46.703605" | "2019-01-25T16:51:06" | "2019-01-25T16:51:06" | 167,560,566 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import pycurl
#import cStringIO
import json
import ast
import os
import selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import jellyfish
from pyvirtualdisplay import Display
import csv
# remove this once done
# Headless X display so Firefox can run without a monitor (e.g. on a server).
display = Display(visible=0, size=(800, 600))
display.start()
# Firefox profile routed through the institute proxy.
# NOTE(review): proxy credentials are hard-coded in source -- move them out.
profile = webdriver.FirefoxProfile()
profile.set_preference("network.proxy.type", 2)
profile.set_preference("network.proxy.autoconfig_url", "http://mcs182012:password@www.cc.iitd.ac.in/cgi-bin/proxy.research")
profile.update_preferences()
sel2 = webdriver.Firefox(firefox_profile=profile)
# For each article URL in links.csv: scrape date, title and body text, then
# append one row per article to people1.csv.
with open('links.csv', 'r') as csvFile:
    divs = csv.reader(csvFile)
    for i in divs:
        row = []
        print(i[0])
        sel2.get(i[0])
        # Publication date node; skip pages without the expected markup.
        divs2 = sel2.find_elements_by_xpath('//li//div[@class=\'date date--v2\']')
        if len(divs2)==0: continue
        row.append(divs2[0].text)
        print(divs2[0].text) #date
        divs4 = sel2.find_elements_by_xpath('//h1[@class=\'story-body__h1\']')
        row.append(divs4[0].text)
        print(divs4[0].text) #title
        row.append(i[0])
        print(i[0]) #url
        divs3 = sel2.find_elements_by_xpath('//div[@class=\'story-body__inner\']//p') #content
        str1 = ""
        # Concatenate every story paragraph into a single content string.
        for j in divs3:
            str1 = str1 + j.text
            print(j.text)
        row.append(str1)
        #df = pd.DataFrame(row)
        #df.to_csv('file.csv', index=False)
        # NOTE(review): 'csvFile' shadows the outer links.csv handle here.
        with open('people1.csv', 'a') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerow(row)
#divs2 = sel.find_elements_by_xpath('//dd//time')
"""
for i in divs2:
    print(i.get_attribute('datetime')) #datetime
    print(i.text) #date
"""
| UTF-8 | Python | false | false | 1,761 | py | 4 | bbc_crawler.py | 3 | 0.623509 | 0.59682 | 0 | 65 | 26.092308 | 124 |
akshayc11/cesar_vis | 11,579,231,862,999 | e01b9b8d1b116010f66357ee1c9360f2a5361371 | d398461d893c3f6423a68031b76e7edb5cb441b7 | /downsample_data_streams.py | 005885497a0d1d8a5c12204501e3eb747bf094d6 | [
"Apache-2.0"
] | permissive | https://github.com/akshayc11/cesar_vis | 2d03ab46a3844481bcdc2fde1d2a5834b11c9871 | 0d3d53dbbfec256304a0e468d9848dedf9a54b24 | refs/heads/master | "2021-01-18T01:39:35.461403" | "2013-09-30T22:46:58" | "2013-09-30T22:46:58" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # downsample_data_streams.py
# downsample a synchronized data streams file.
# take 1 out of every N sync points.
from xml.dom import minidom
import argparse
def downsample_data_streams(in_file_name,
                            out_file_name,
                            N=150):
    """Downsample a synchronized data stream XML file.

    Reads ``in_file_name``, keeps one of every ``N`` <sync_point> children of
    the root element (always starting with the first), and writes them under a
    fresh <SYNCHRONIZED_DATA_STREAMS> root to ``out_file_name``.

    :param in_file_name: input XML file whose root holds <sync_point> children.
    :param out_file_name: path the downsampled XML document is written to.
    :param N: downsampling ratio; keep every N-th sync point (default 150).
    :return: None
    """
    # print(...) is used instead of the original py2 print statements so the
    # module works unchanged on both Python 2 and Python 3.
    print('parsing...')
    # Context manager closes the input handle deterministically (the original
    # leaked it to the garbage collector).
    with open(in_file_name, 'r') as in_file:
        in_doc = minidom.parse(in_file)
    print('done parsing')
    top_element = in_doc.childNodes[0]
    out_doc = minidom.getDOMImplementation().createDocument(None, "SYNCHRONIZED_DATA_STREAMS", None)
    out_top_element = out_doc.documentElement
    print('choosing keepers')
    # Filter to element children named 'sync_point', then take every N-th one.
    keepers = [n for n in top_element.childNodes if n.localName == 'sync_point'][0::N]
    print('writing keepers to new top elem...')
    for k in keepers:
        # minidom does not enforce WRONG_DOCUMENT_ERR, so nodes can be moved
        # between documents without an explicit importNode().
        out_top_element.appendChild(k)
    print('writing outfile')
    #write file -- context-managed so the output is flushed and closed.
    with open(out_file_name, 'w') as out_file:
        out_doc.writexml(out_file, addindent='  ', newl='\n')
    return None
if __name__ == '__main__':
    # Command-line entry point: parse the arguments, then delegate all work
    # to downsample_data_streams().
    arg_parser = argparse.ArgumentParser(description='''
    downsample a synchronized data stream file taking
    one out of every N sync points
    ''')
    arg_parser.add_argument('input',
                            metavar='<input file name>',
                            help=': input synchronized data stream file')
    arg_parser.add_argument('output',
                            metavar='<downsampled output file name>',
                            help=': downsampled output synchronzed data stream file')
    arg_parser.add_argument('-N',
                            type=int,
                            default=150,
                            help=': downsampling ratio. default=150')
    parsed = vars(arg_parser.parse_args())
    downsample_data_streams(parsed['input'],
                            parsed['output'],
                            parsed['N'])
| UTF-8 | Python | false | false | 1,874 | py | 9 | downsample_data_streams.py | 4 | 0.571505 | 0.565101 | 0 | 56 | 32.464286 | 100 |
humoncy/mlhw5 | 9,998,683,869,295 | 92801c4013af9f2292a0195c82c8137f345ba71d | 0e0236ea4a54db4a2b49abb7e0185b20e23d913c | /main.py | e54c4a684461862fdb24ffc5de00748aee0bbf0c | [] | no_license | https://github.com/humoncy/mlhw5 | 1dea77eade2b536f0722745edff42b586ee95d49 | c86b8cb2fd69973745652d3a027654f3ec99840b | refs/heads/master | "2021-05-06T15:45:23.068753" | "2017-12-31T06:45:52" | "2017-12-31T06:45:52" | 113,659,899 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans, SpectralClustering
from kernel_kmeans_ref import KernelKMeans
from kmeans import k_means
from kernel_kmeans import kernel_k_means
from utility import show_cluster
from spectral_clustering import spectral_clustering
if __name__ == "__main__":
    # Load both toy datasets shipped with the assignment; each data file has a
    # matching ground-truth label file.
    data1 = np.loadtxt('data/test1_data.txt')
    gt1 = np.loadtxt('data/test1_ground.txt')
    data2 = np.loadtxt('data/test2_data.txt')
    gt2 = np.loadtxt('data/test2_ground.txt')
    # Select which dataset the run below operates on.
    data = data2
    gt = gt2
    data_name = "data2"
    # Show data with ground truth
    plt.scatter(data[:, 0], data[:, 1], c=gt)
    plt.title("Ground Truth of " + data_name)
    plt.show()
    # Number of clusters used by all three algorithms below.
    k = 2
    # sklearn kmeans
    # kmeans = KMeans(n_clusters=2, random_state=0).fit(data)
    # plt.scatter(data[:, 0], data[:, 1], c=kmeans.labels_)
    # plt.show()
    #
    # K-means on my own
    centroids, cluster_assignment = k_means(data, k)
    show_cluster(data, k, cluster_assignment, centroids, title="K-means clusters", data_name=data_name)
    # sklearn kernel k-means
    # kernel_kmeans = KernelKMeans(n_clusters=2, kernel='rbf', gamma=0.03125)
    # plt.scatter(data[:, 0], data[:, 1], c=kernel_kmeans.fit_predict(data))
    # plt.show()
    #
    # Kernel k-means on my own
    cluster_assignment = kernel_k_means(data, k, gamma=0.03125)
    show_cluster(data, k, cluster_assignment, title="Kernel K-means clusters", data_name=data_name)
    # sklearn spectral clustering
    # spectral_clusters = SpectralClustering(n_clusters=2, gamma=0.1)
    # plt.scatter(data[:, 0], data[:, 1], c=spectral_clusters.fit_predict(data))
    # plt.show()
    #
    # Spectral clustering on my own
    cluster_assignment = spectral_clustering(data, k, gamma=0.1)
    show_cluster(data, k, cluster_assignment, title="Spectral clustering", data_name=data_name)
    '''
    The following code is used to run different number of clusters at the same time
    '''
    # ks = [2, 3, 5, 10]
    # datas = [data1, data2]
    # # print(datas[1].shape)
    # # data = datas[1]
    # # plt.scatter(data[:, 0], data[:, 1], c=gt2)
    # # plt.title("Ground Truth of " + data_name)
    # # plt.show()
    # for data_index in range(2):
    #     for index, val in enumerate(ks):
    #         data = datas[data_index]
    #         data_name = "data" + (data_index+1).__str__()
    #
    #         k = val
    #
    #         centroids, cluster_assignment = k_means(data, k)
    #         show_cluster(data, k, cluster_assignment, centroids, title="K-means clusters", data_name=data_name)
    #
    #         cluster_assignment = kernel_k_means(data, k, gamma=0.03125)
    #         show_cluster(data, k, cluster_assignment, title="Kernel K-means clusters", data_name=data_name)
    #
    #         cluster_assignment = spectral_clustering(data, k, gamma=0.1)
    #         show_cluster(data, k, cluster_assignment, title="Spectral clustering", data_name=data_name)
| UTF-8 | Python | false | false | 2,970 | py | 5 | main.py | 4 | 0.632323 | 0.611448 | 0 | 84 | 34.357143 | 113 |
rchauhan9/imdb-scraper | 4,604,204,987,943 | d694461338c693780b254aab118e88bd181bf7e8 | 8d187459ba2502d1a46c501a05ee8c4bbd4b15ff | /src/gql_client/client.py | 460c68fa40ef3bb4cd2f1604b3ac36cc64e6b67c | [] | no_license | https://github.com/rchauhan9/imdb-scraper | a854bfb5dd3888675c56557e39092a40ad12b54f | f4f07ccf1aea9b4066c373c34d9e22b2a5a67117 | refs/heads/master | "2023-01-12T05:03:22.378062" | "2020-11-19T20:16:33" | "2020-11-19T20:16:33" | 300,712,649 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from gql import gql, Client
from gql.transport.requests import RequestsHTTPTransport
class GQLClient():
    """
    A small wrapper class to make executing GraphQL queries and mutations from files easier.

    Args:
        gql_endpoint: The URI of the GraphQL endpoint the user needs to query.

    Attributes:
        transport: A RequestsHTTPTransport object from the 'gql' library.
        client: A Client object from the 'gql' library.
    """
    def __init__(self, gql_endpoint):
        # NOTE(review): verify=False disables TLS certificate verification;
        # confirm this is intended outside local development.
        self.transport = RequestsHTTPTransport(
            url=gql_endpoint,
            use_json=True,
            headers={
                "Content-type": "application/json",
            },
            verify=False,
            retries=3,
        )
        self.client = Client(transport=self.transport, fetch_schema_from_transport=True)

    def execute(self, filepath: str, variables: dict):
        """
        A method to execute a GraphQL query or mutation from a file.

        Args:
            filepath: The path to where a query/mutation is saved.
            variables: A map of variable names and values to be inserted into the query.

        Returns:
            The response object of GraphQL command request.
        """
        # Context manager fixes the original's leaked file handle.
        with open(filepath, "r") as query_file:
            command = gql(query_file.read().rstrip())
        return self.client.execute(command, variable_values=variables)
HopperElec/HopperBot-Python | 13,408,887,928,092 | c9722a61f6ca5218253d07e1d0b2420819f0f0a1 | f143f2dde58601ca10ccc97237c3214accd83c4f | /Cogs/Shared/AI/generateCounts.py | fb87e1f6163750cda6a0580ba7178d312ebd2760 | [] | no_license | https://github.com/HopperElec/HopperBot-Python | 396f727e36d1440026081b320efbd5f8aac8f966 | 84abe56929e2423c62361a6a9ae0b9526a63a66d | refs/heads/master | "2023-07-12T22:20:38.218196" | "2021-08-07T08:58:50" | "2021-08-07T08:58:50" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Commonly used function for combining lists of lists
def getIn(inside):
    """Flatten one level of nesting: concatenate the sub-sequences of *inside* in order."""
    flattened = []
    for sub_sequence in inside:
        flattened.extend(sub_sequence)
    return flattened
# Import required modules
from functools import lru_cache,wraps
from collections import Counter
from nltk import ngrams
from os import listdir
from json import dump as jsonwrite, load as jsonread
def saveAuthorCounts(authorCounts):
    """Dump the author -> {token: count} mapping to 'authorCounts.json',
    with each author's tokens ordered by descending count."""
    ranked = {}
    for author, counts in authorCounts.items():
        ordered_pairs = sorted(counts.items(), key=lambda pair: pair[1], reverse=True)
        ranked[author] = dict(ordered_pairs)
    with open('authorCounts.json', 'w+') as jsonfile:
        jsonwrite(ranked, jsonfile, indent=4)
@lru_cache(10)
def getCounts(ns=4, lengthWeight=1, lengthType=1, save=False):
    """Build per-author weighted n-gram counts from the global ``data`` dict.

    Every author's messages are expanded into all 1..ns word n-grams; n-grams
    seen fewer than 3 times are dropped, and the rest are weighted by
    length ** lengthWeight, where length is the word count (lengthType == 0)
    or the character count (any other lengthType).

    NOTE: results are memoized with lru_cache, so the cache assumes the
    global ``data`` does not change between calls.
    """
    ngram_lists = {author: getIn([formatM(message, ns) for message in messages])
                   for author, messages in data.items()}
    weighted_counts = {}
    for author in data:
        raw_counts = Counter(ngram_lists[author])
        kept = {}
        for token, occurrences in raw_counts.items():
            if occurrences >= 3:
                size = len(token.split()) if lengthType == 0 else len(token)
                kept[token] = occurrences * (size ** lengthWeight)
        weighted_counts[author] = kept
    if save:
        saveAuthorCounts(weighted_counts)
    return weighted_counts
def probabilityCounts(authorCounts, probabilityWeight=1, save=True):
    """Normalise each author's token counts by len(vocabulary) ** probabilityWeight.

    When *save* is true (the default) the normalised mapping is also written
    to 'authorCounts.json' via saveAuthorCounts().
    """
    normalised = {}
    for author, values in authorCounts.items():
        denominator = len(values) ** probabilityWeight
        normalised[author] = {token: freq / denominator for token, freq in values.items()}
    if save:
        saveAuthorCounts(normalised)
    return normalised
@lru_cache(99)
def formatM(m, testNs):
    """Return all word n-grams (n = 1..testNs) of message *m*, each joined by spaces.

    Memoized with lru_cache because the accuracy sweep re-formats the same
    messages for every weight combination.
    """
    grams_per_n = [[' '.join(gram) for gram in ngrams(m.split(' '), n)]
                   for n in range(1, testNs + 1)]
    return getIn([grams for grams in grams_per_n if grams != []])
# Get messages from file
# Each messages/<author>.txt (except all.txt) becomes one corpus entry keyed
# by author name.
data = {}
for filename in listdir("messages"):
    if filename.endswith('.txt') and filename != "all.txt":
        with open("messages/"+filename,'r',errors='ignore') as txtfile:
            data[filename[:-4]] = [line.strip() for line in txtfile.readlines()]
# Get a dict of dicts showing the number of times each word is used by each author
ns,probabilityWeight,lengthWeight = 6,1,3
authorCounts = getCounts(ns,lengthWeight)
authorCounts = probabilityCounts(authorCounts,probabilityWeight)
# Output
# Print each author's top-100 tokens by weighted score.
for key in authorCounts.keys():
    print(key+':',', '.join(list({k:dict(sorted(v.items(),key=lambda item:item[1],reverse=True)) for k,v in authorCounts.items() if k == key}[key].keys())[:100])+"\n")
# Use accuracy to try and find the best combination of weights
input("Press any key to run accuracy test...")
print('This may take a while so please be patient!')
weights = [-1,-0.66,-0.33,0,0.33,0.66,1,1.33,1.66,2,2.33,2.66,3]
# Evaluate on as many messages per author as the smallest corpus holds, so
# every author contributes the same number of test cases.
messagesToTest = len(sorted(data.values(),key=len)[0])
bestResult = [0,[[]]]
results = {testN:{ngramWeight:{probabilityWeight:[] for probabilityWeight in weights} for ngramWeight in weights} for testN in range(1,9)}
for testN in range(1,9):
    print(testN,'ngrams:')
    for lengthWeight in weights:
        print(f'    {lengthWeight} length weight:')
        # NOTE(review): the third positional argument binds to lengthType,
        # not save -- passing False here switches the length measure to word
        # count; confirm that save=False was not the intent.
        originalCounts = getCounts(testN,lengthWeight,False)
        for probabilityWeight in weights:
            print(' '*8+str(probabilityWeight),'probability weight:')
            authorCounts = probabilityCounts(originalCounts,probabilityWeight,False)
            correctFirst,correctSecond,correctThird,tests = 0,0,0,0
            for k,v in data.items():
                for m in v[:messagesToTest]:
                    tests += 1
                    m = formatM(m,testN)
                    # Score each author as the sum of weights of the message's
                    # n-grams found in that author's table; rank descending.
                    result = sorted([(key,sum([(0 if not word in value.keys() else value[word]) for word in m])) for key,value in authorCounts.items()],key=lambda item:item[1],reverse=True)
                    if result[0][0] == k:
                        correctFirst += 1
                    elif result[1][0] == k:
                        correctSecond += 1
                    # elif result[2][0] == k:
                    #     correctThird += 1
            # Score: first guesses count fully, second guesses count half.
            result = (correctFirst+correctSecond/2)/tests
            results[testN][lengthWeight][probabilityWeight] = [correctFirst,correctSecond]
            if result > bestResult[0]:
                print(' '*12+'!!!!! NEW BEST !!!!!')
                bestResult = [result,[[testN,lengthWeight,probabilityWeight]]]
            elif result == bestResult[0]:
                print(' '*12+'!!! ANOTHER BEST !!!')
                bestResult[1] += [[testN,lengthWeight,probabilityWeight]]
            # print(f"{' '*12}Accuracy (first try)): {int(correctFirst/tests*1000)/10}%")
            # print(f"{' '*12}Accuracy (atleast second try)): {int((correctFirst+correctSecond)/tests*1000)/10}%")
            # print(f"{' '*12}Accuracy (atleast third try)): {int((correctFirst+correctSecond+correctThird)/tests*1000)/10}%")
print('Best results:')
print('    Score:',bestResult[0])
for i,result in enumerate(bestResult[1]):
    print(f'    Result {i+1}:')
    print(' '*8+'ns:',result[0])
    print(' '*8+'length weight:',result[1])
    print(' '*8+'probability weight:',result[2])
# Persist the full sweep results for later analysis.
with open('accuracyResults.json','w+') as jsonfile:
    jsonwrite(results,jsonfile)
| UTF-8 | Python | false | false | 5,039 | py | 18 | generateCounts.py | 17 | 0.636237 | 0.613415 | 0 | 97 | 49.948454 | 199 |
Jaggusms/CipherSchools_assessment | 13,159,779,843,335 | c4fb60e311187e874ef24122da5d4d6fb4f438bd | 02813a22c7fbd40ee5442e10fc41faaad595a3c5 | /Grocery_Items.py | f613db028a8c39639e91af871fd2cb4ec0877d07 | [] | no_license | https://github.com/Jaggusms/CipherSchools_assessment | 329995f5844250fb814111c8932190efadd43a94 | 6232d0f7ef4dbde0fcfd84b1f074eedb4db2eb6c | refs/heads/main | "2023-03-20T22:36:43.490404" | "2021-03-17T20:29:55" | "2021-03-17T20:29:55" | 348,842,241 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Interactive grocery-list budget tracker (console I/O).
print("Enter Your budget :",end=" ")
Budget=int(input())
# Parallel lists: product[i], quantity[i] and price[i] describe one purchase.
product=[]
quantity=[]
price=[]
# Amount left after the most recent attempted purchase (0 until the first one).
left=0
def duplicates(lst, item):
    """Return every index in *lst* whose element equals *item* (possibly empty)."""
    return [position for position, element in enumerate(lst) if element == item]
# Main menu loop: buy items until the budget is exhausted or the user exits.
while(Budget > 0):
    print("1.Add an item \n2.Exit \nEnter your choice :",end=" ")
    choice=int(input())
    if choice==1:
        print("Enter product :",end=" ")
        p=input()
        print("Enter quantity :",end=" ")
        # NOTE(review): quantity is kept as a raw string, never validated.
        q=input()
        print("Enter Price :",end=" ")
        A=int(input())
        # Budget remaining if this purchase goes through.
        left=Budget-A
        if(Budget <A):
            # Typos below ("Amoutn", unbalanced parentheses) are user-facing
            # strings; left untouched here on purpose.
            print("Can't Buy the product ###(because budget left is {}))".format(Budget))
            continue
        else:
            product.append(p)
            quantity.append(q)
            price.append(A)
            print("Amoutn left : {}".format(left))
            Budget -= A
    else:
        # On exit, suggest products whose price equals the leftover amount.
        # NOTE(review): 'left' is stale if the last attempted purchase was
        # rejected, and is 0 before any purchase -- confirm intent.
        if left in price:
            l=duplicates(price, left)
            for i in l:
                print("Amount left can buy you "+product[i])
        # Force the while-condition false to leave the loop.
        Budget=0
# Final report of everything bought.
print("GROCERY LIST is: \nProduct name Quantity Price:")
for i in range(len(product)):
    print(f"{product[i] : <15}{quantity[i] : ^10}{price[i] : ^10}")
| UTF-8 | Python | false | false | 1,207 | py | 2 | Grocery_Items.py | 2 | 0.505385 | 0.495443 | 0 | 39 | 28.74359 | 89 |
nihao-hit/jianzhiOffer | 8,753,143,390,063 | a6c784bae4053d08de3b26943c37f9876833ce81 | 5573d2a9f1b83ab67ec416e772b3861192d6f1f6 | /test14.py | 9efa8cd032762609449a75fab90bca2765ea8d79 | [] | no_license | https://github.com/nihao-hit/jianzhiOffer | 9512519428aec51ab53dfb805dfa818af749fc46 | 42a15943394ae533dcd0d5bbf52e4366ab0756ab | refs/heads/master | "2020-03-06T22:40:02.823325" | "2019-04-15T11:58:24" | "2019-04-15T11:58:24" | 127,109,414 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''剪绳子'''
class Solution:
    """Jianzhi Offer #14: cut a rope of integer length n into at least two
    integer-length pieces so that the product of the piece lengths is maximal.

    Lengths 2 and 3 are special at the top level (they must be cut, giving 1
    and 2), but as sub-pieces they may be left whole.
    """

    def cutRope1(self, n):
        """Bottom-up dynamic programming, O(n^2) time / O(n) space.

        best[i] is the maximal product for a piece of length i when further
        cutting is optional (hence best[2] = 2 and best[3] = 3).
        """
        if n < 2:
            return 0
        if n == 2:
            return 1
        if n == 3:
            return 2
        best = [0] * (n + 1)
        # Sub-pieces of length 1..3 are most valuable left uncut.
        best[1], best[2], best[3] = 1, 2, 3
        for length in range(4, n + 1):
            # Seed with the length itself; for length >= 4 some cut always
            # matches or beats it.
            top = length
            # Products are symmetric, so only try first cuts up to the middle.
            for first_cut in range(1, (length + 2) // 2):
                candidate = best[first_cut] * best[length - first_cut]
                if candidate > top:
                    top = candidate
            best[length] = top
        return best[-1]

    def cutRope2(self, n):
        """Greedy solution: cut off as many 3s as possible.

        Stopping while n >= 5 leaves a remainder of 2, 3 or 4; multiplying by
        that remainder covers the 2*2 > 3*1 case automatically.
        """
        if n < 2:
            return 0
        if n == 2:
            return 1
        if n == 3:
            return 2
        product = 1
        while n >= 5:
            n -= 3
            product *= 3
        return product * n
ashutoshshanker/ops-sysd | 4,818,953,338,063 | d908cf5f15275077f42a49d5d25222212b0e5fb2 | 070f92b0ba1604a9d59ff64edeeb6d1879096a59 | /ops-tests/component/ops-sysd/test_sysd_ct_os_release/test_sysd_ct_os_release.py | c10511925cf4525eaf0c7b3d7716f3ad1bf8b65d | [
"Apache-2.0"
] | permissive | https://github.com/ashutoshshanker/ops-sysd | 03c037513c9f67b7ba085c507108673836896150 | b354f06f6fb3c3140b7f942c3a2391d76f029114 | refs/heads/master | "2020-12-28T20:19:36.009307" | "2016-07-25T23:15:41" | "2016-07-26T00:21:03" | 65,635,839 | 0 | 1 | null | true | "2016-08-13T20:35:47" | "2016-08-13T20:35:47" | "2016-06-06T12:02:47" | "2016-07-28T21:25:34" | 202 | 0 | 0 | 0 | null | null | null | # -*- coding: utf-8 -*-
# (C) Copyright 2015 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
##########################################################################
"""
OpenSwitch Test for switchd related configurations.
"""
# from pytest import set_trace
from time import sleep
import json
import shutil
import os.path
import pytest
TOPOLOGY = """
# +-------+
# | ops1 |
# +-------+
# Nodes
[type=openswitch name="OpenSwitch 1"] ops1
"""
ovs_vsctl = "/usr/bin/ovs-vsctl "
ovs_appctl = "/usr/bin/ovs-appctl "
ovsdb_tool = "/usr/bin/ovsdb-tool "
default_os_release_file = "os-release.default"
os_release_files_dir = "files/os_releases"
def check_os_name(dut, file_name):
    """
    Verify ops-sysd stores the os-release NAME into switch_info:os_name.

    Installs *file_name* as /etc/os-release on the switch, restarts the
    daemons so it is re-parsed, then compares the OVSDB value against the
    local fixture file.
    """
    copy_os_release_file(dut, file_name)
    # Restart the ovsdb-server and sysd so the new file is parsed.
    start(dut)
    wanted = read_os_release_file(dut, file_name, key='NAME')
    stored = get_software_info(dut, key='os_name')
    assert stored == wanted
def check_switch_version(dut, file_name):
    """
    Verify ops-sysd builds switch_version as "<VERSION_ID> (Build: <BUILD_ID>)".

    Installs *file_name* as /etc/os-release, restarts the daemons, then
    checks the OVSDB switch_version column against the fixture contents.
    """
    copy_os_release_file(dut, file_name)
    # Restart the ovsdb-server and sysd so the new file is parsed.
    start(dut)
    version_id = read_os_release_file(dut, file_name, 'VERSION_ID')
    build_id = read_os_release_file(dut, file_name, 'BUILD_ID')
    wanted = "{0} (Build: {1})".format(version_id, build_id)
    assert get_switch_version(dut) == wanted
def get_software_info(dut, key=None):
    """Return the System table's software_info column, or one entry of it.

    :param dut: callable that runs a shell command on the switch.
    :param key: if given, return just that key's value from the column's
        OVSDB map; otherwise return the raw column value.
    :return: the looked-up value, or None when *key* is not present.
    """
    out = dut(ovs_vsctl + "--format json list system", shell="bash")
    data = json.loads(out)['data']
    i = get_system_column_id(dut, 'software_info')
    # (leftover debug print of the raw column removed)
    if key:
        # OVSDB maps serialize as ["map", [[k, v], ...]]; scan the pairs.
        for (k, v) in data[0][i][1]:
            if k == key:
                return v
    else:
        return data[0][i]
def get_switch_version(dut):
    """Return the System table's switch_version column value."""
    raw = dut(ovs_vsctl + "--format json list system", shell="bash")
    rows = json.loads(raw)['data']
    column = get_system_column_id(dut, 'switch_version')
    return rows[0][column]
def get_system_column_id(dut, column_name):
    """Return the positional index of *column_name* in the JSON output of
    ``ovs-vsctl --format json list system``, or None when absent."""
    out = dut(ovs_vsctl + "--format json list system", shell="bash")
    columns = json.loads(out)['headings']
    # enumerate() replaces the original hand-rolled index counter.
    for index, column in enumerate(columns):
        if column == column_name:
            return index
    return None
def read_os_release_file(dut, fname=default_os_release_file, key=None):
    """Read the local os-release fixture file and return the data.

    :param dut: unused; kept for signature parity with the other helpers.
    :param fname: fixture file name inside files/os_releases.
    :param key: if given, return only that key's value; otherwise the dict.
    """
    cur_dir, _ = os.path.split(__file__)
    path = os.path.join(cur_dir, os_release_files_dir, fname)
    release = {}
    with open(path) as release_file:
        for line in release_file:
            # Split on the first '=' only, so values that themselves contain
            # '=' (e.g. build ids) are preserved intact; the original
            # split("=") raised ValueError on such lines.
            k, v = line.rstrip().split("=", 1)
            release[k] = v
    if key:
        return release[key]
    else:
        return release
def copy_os_release_file(dut, fname=None):
    """Copy a given os-release file to /etc/os-release on the switch.

    The default fixture name is resolved at call time (late binding) instead
    of at definition time, so the module constant stays authoritative and the
    function can be imported in isolation.
    """
    if fname is None:
        fname = default_os_release_file
    # src = os.path.join(os.path.sep, 'shared', os_release_files_dir, fname)
    dst = os.path.join(os.path.sep, 'etc', 'os-release')
    dut("/bin/cp /tmp/files/os_releases/" + fname + " " + dst, shell="bash")
def start(dut):
    """Bring the stack up: ovsdb-server DB first, then ops-sysd, then block
    until the System table is queryable."""
    start_ovsdb(dut)
    # Give ovsdb-server a moment to load the DB before starting sysd.
    sleep(3)
    start_sysd(dut)
    wait_until_ovsdb_is_up(dut)
def stop(dut):
    """Tear the stack down: stop ops-sysd, then detach/remove the DB."""
    stop_sysd(dut)
    stop_ovsdb(dut)
    # Let the daemons settle before the caller restarts anything.
    sleep(3)
def start_sysd(dut):
    """Start the ops-sysd service on the switch via systemctl."""
    command = "/bin/systemctl start ops-sysd"
    dut(command, shell="bash")
def stop_sysd(dut):
    """Ask the running ops-sysd daemon to exit via ovs-appctl."""
    command = ovs_appctl + "-t ops-sysd exit"
    dut(command, shell="bash")
def start_ovsdb(dut):
    """Create an empty OpenSwitch DB file and attach it to ovsdb-server."""
    create_cmd = (ovsdb_tool + "create /var/run/openvswitch/ovsdb.db "
                  "/usr/share/openvswitch/vswitch.ovsschema")
    # Load the newly created DB into ovsdb-server.
    add_cmd = (ovs_appctl + "-t ovsdb-server ovsdb-server/add-db "
               "/var/run/openvswitch/ovsdb.db")
    dut(create_cmd, shell="bash")
    dut(add_cmd, shell="bash")
def stop_ovsdb(dut):
    """Detach the OpenSwitch DB from ovsdb-server and delete its file."""
    teardown_cmds = (
        # Remove the database from the ovsdb-server.
        ovs_appctl + "-t ovsdb-server ovsdb-server/remove-db OpenSwitch",
        # Remove the DB file from the file system.
        "/bin/rm -f /var/run/openvswitch/ovsdb.db",
    )
    for cmd in teardown_cmds:
        dut(cmd, shell="bash")
def wait_until_ovsdb_is_up(dut):
    """Poll (up to 20 x 1 s) until the System table answers in ovsdb-server."""
    probe = ovs_vsctl + "list System | grep uuid"
    remaining = 20
    while remaining > 0:
        if "_uuid" in dut(probe, shell="bash"):
            break
        remaining -= 1
        sleep(1)
    # Fail loudly if the server never came up.
    assert remaining != 0
@pytest.fixture(scope="module")
def main_setup(request, topology):
    """Module-scoped setup: stage the os-release fixture files into the
    switch's shared directory so they can be copied onto the device."""
    switch = topology.get('ops1')
    assert switch is not None
    test_dir, _ = os.path.split(__file__)
    source_tree = os.path.join(test_dir, os_release_files_dir)
    target_tree = os.path.join(switch.shared_dir, os_release_files_dir)
    shutil.copytree(source_tree, target_tree)
@pytest.fixture()
def setup(request, topology):
    """Per-test fixture: stop the daemons before the test and restore the
    default os-release configuration afterwards."""
    switch = topology.get('ops1')
    assert switch is not None
    stop(switch)

    def cleanup():
        # Leave the switch running with the default os-release file.
        stop(switch)
        copy_os_release_file(switch)
        start(switch)

    request.addfinalizer(cleanup)
@pytest.mark.platform_incompatible(['ostl'])
def test_sysd_ct_os_default_os_name(topology, step, main_setup, setup):
    """Default /etc/os-release: NAME must land in software_info:os_name."""
    switch = topology.get("ops1")
    assert switch is not None
    check_os_name(switch, "os-release.default")
@pytest.mark.platform_incompatible(['ostl'])
def test_sysd_ct_os_default_version(topology, step, main_setup, setup):
    """Default /etc/os-release: switch_version must match the fixture."""
    switch = topology.get("ops1")
    assert switch is not None
    check_switch_version(switch, "os-release.default")
@pytest.mark.platform_incompatible(['ostl'])
def test_sysd_ct_os_1_0_0_version(topology, step, main_setup, setup):
    """ops-1.0.0 fixture: switch_version must match the fixture."""
    switch = topology.get("ops1")
    assert switch is not None
    check_switch_version(switch, "os-release.ops-1.0.0")
@pytest.mark.platform_incompatible(['ostl'])
def test_sysd_ct_os_debian_8_0_name(topology, step, main_setup, setup):
    """debian-8.0 fixture: NAME must land in software_info:os_name."""
    switch = topology.get("ops1")
    assert switch is not None
    check_os_name(switch, "os-release.debian-8.0")
@pytest.mark.platform_incompatible(['ostl'])
def test_sysd_ct_os_debian_8_0_version(topology, step, main_setup, setup):
    """debian-8.0 fixture: switch_version must match the fixture."""
    switch = topology.get("ops1")
    assert switch is not None
    check_switch_version(switch, "os-release.debian-8.0")
| UTF-8 | Python | false | false | 7,083 | py | 30 | test_sysd_ct_os_release.py | 11 | 0.637865 | 0.627418 | 0 | 262 | 26.034351 | 78 |
casamagalhaes/panamah-sdk-python | 5,368,709,134,275 | 7afa4d37d9b7e541beec798c14c5f6e3ce74676e | 9cc66c7d6947fdd64e85fa360bf3d85871af4e68 | /panamah_sdk/nfe.py | cc1a63ba6b9f147372077734428f4cce502bc29f | [
"MIT"
] | permissive | https://github.com/casamagalhaes/panamah-sdk-python | e584b340e56dbebc219c4528d9c60a00bbb8aeee | 6baec7994a5c6b0c3d6b79ddc57ca95b44512388 | refs/heads/master | "2023-08-03T19:55:19.842461" | "2023-07-28T01:26:53" | "2023-07-28T01:26:53" | 182,903,458 | 0 | 0 | MIT | false | "2023-07-28T01:26:55" | "2019-04-23T02:02:12" | "2019-12-16T17:43:51" | "2023-07-28T01:26:53" | 96 | 0 | 0 | 3 | Python | false | false | import re
import os
from dictor import dictor as get_property
from xmltodict import parse as parse_xml_string
from .models.definitions import PanamahLoja, PanamahCliente, PanamahProduto, PanamahVenda, PanamahVendaItem, PanamahProdutoEan
class Nfe:
    """Deserializes Brazilian NF-e invoice XML files into Panamah models."""

    @classmethod
    def parse_xml(cls, filename):
        # Parse the XML file into a nested dict (via xmltodict).
        with open(filename, 'r') as fp:
            return parse_xml_string(fp.read())

    @classmethod
    def deserialize_loja(cls, xml):
        """Build a PanamahLoja (store) from the emitter (emit) section."""
        # Documents may or may not be wrapped in an <nfeProc> envelope.
        root = get_property(xml, 'NFe') or get_property(xml, 'nfeProc')
        return PanamahLoja(
            id=get_property(root, 'NFe.infNFe.emit.CNPJ'),
            descricao=get_property(root, 'NFe.infNFe.emit.xNome'),
            numero_documento=get_property(root, 'NFe.infNFe.emit.CNPJ'),
            logradouro=get_property(root, 'NFe.infNFe.emit.enderEmit.xLgr'),
            numero=get_property(root, 'NFe.infNFe.emit.enderEmit.nro'),
            uf=get_property(root, 'NFe.infNFe.emit.enderEmit.UF'),
            cidade=get_property(root, 'NFe.infNFe.emit.enderEmit.xMun'),
            bairro=get_property(root, 'NFe.infNFe.emit.enderEmit.xBairro'),
            cep=get_property(root, 'NFe.infNFe.emit.enderEmit.CEP'),
            complemento=get_property(root, 'NFe.infNFe.emit.enderEmit.xCpl'),
            ativa=True,
            matriz=False,
        )

    @classmethod
    def deserialize_cliente(cls, xml):
        """Build a PanamahCliente (customer) from the dest section."""
        root = get_property(xml, 'NFe') or get_property(xml, 'nfeProc')
        return PanamahCliente(
            # Customers may be companies (CNPJ) or individuals (CPF).
            id=get_property(root, 'NFe.infNFe.dest.CNPJ') or get_property(root, 'NFe.infNFe.dest.CPF'),
            nome=get_property(root, 'NFe.infNFe.dest.xNome'),
            numero_documento=get_property(root, 'NFe.infNFe.dest.CNPJ') or get_property(root, 'NFe.infNFe.dest.CPF'),
            uf=get_property(root, 'NFe.infNFe.dest.enderDest.UF'),
            cidade=get_property(root, 'NFe.infNFe.dest.enderDest.xMun'),
            bairro=get_property(root, 'NFe.infNFe.dest.enderDest.xBairro'),
        )

    @classmethod
    def deserialize_produto(cls, xml):
        """Build one PanamahProduto per <det> item, attaching EAN codes."""
        root = get_property(xml, 'NFe') or get_property(xml, 'nfeProc')
        dets = get_property(root, 'NFe.infNFe.det')
        # xmltodict yields a dict for a single <det>, a list for several.
        dets = dets if isinstance(dets, list) else [dets]

        def get_model(det):
            produto = PanamahProduto(
                id=get_property(det, 'prod.cProd'),
                descricao=get_property(det, 'prod.xProd'),
                ativo=True
            )
            ean_tributado = get_property(det, 'prod.cEANTrib')
            ean = get_property(det, 'prod.cEAN')
            # 'SEM GTIN' is the NF-e placeholder for "no barcode".
            valid_ean = lambda val: val and val != 'SEM GTIN'
            if valid_ean(ean_tributado) or valid_ean(ean):
                produto.eans = []
                if ean_tributado:
                    produto.eans.append(PanamahProdutoEan(id=ean_tributado, tributado=True))
                if ean and ean != ean_tributado:
                    produto.eans.append(PanamahProdutoEan(id=ean, tributado=False))
            return produto
        return [get_model(det) for det in dets]

    @classmethod
    def deserialize_venda(cls, xml):
        """Build a PanamahVenda (sale) with one item per <det> entry."""
        root = get_property(xml, 'NFe') or get_property(xml, 'nfeProc')
        dets = get_property(root, 'NFe.infNFe.det')
        dets = dets if isinstance(dets, list) else [dets]
        return PanamahVenda(
            id=get_property(root, 'NFe.infNFe.@Id'),
            loja_id=get_property(root, 'NFe.infNFe.emit.CNPJ'),
            cliente_id=get_property(root, 'NFe.infNFe.dest.CNPJ') or get_property(
                root, 'NFe.infNFe.dest.CPF'),
            data=get_property(root, 'NFe.infNFe.ide.dhEmi'),
            data_hora_venda=get_property(root, 'NFe.infNFe.ide.dhEmi'),
            efetiva=True,
            quantidade_itens=len(dets) or 0,
            valor=get_property(root, 'NFe.infNFe.total.ICMSTot.vNF'),
            itens=[PanamahVendaItem(
                produto_id=get_property(det, 'prod.cProd'),
                quantidade=get_property(det, 'prod.qCom'),
                preco=get_property(det, 'prod.vUnCom'),
                valor_unitario=get_property(det, 'prod.vUnCom'),
                valor_total=get_property(det, 'prod.vProd'),
                desconto=get_property(det, 'prod.vDesc'),
                efetivo=True
            ) for det in dets]
        )

    @classmethod
    def read_models_from_file(cls, filename):
        """Return [loja, cliente, venda, *produtos] for one NF-e XML file.

        Files whose basename starts with 'ID' are event files (e.g.
        cancellations), not invoices, and yield no models.
        """
        basename = os.path.basename(filename)
        if not basename.startswith('ID'):
            xml = cls.parse_xml(filename)
            return [
                cls.deserialize_loja(xml),
                cls.deserialize_cliente(xml),
                cls.deserialize_venda(xml),
                *cls.deserialize_produto(xml)
            ]
        return []

    @classmethod
    def read_models_from_directory(cls, dirname):
        """Read every .xml in *dirname*; mark sales cancelled when a matching
        cancellation event file is present."""
        files = ['%s/%s' % (dirname, file) for file in os.listdir(dirname) if file.endswith('.xml')]
        # ID110111... files are NF-e cancellation events; a sale is cancelled
        # when some event file name contains the sale id's digits.
        has_cancel_file = lambda model: next((file for file in files if os.path.basename(file).startswith('ID110111') and re.sub(r'[^0-9]+', '', model.id) in file), False)
        model_list_by_file = [cls.read_models_from_file(file) for file in files]
        models = [model for model_list in model_list_by_file for model in model_list]
        for model in models:
            if isinstance(model, PanamahVenda) and has_cancel_file(model):
                model.efetiva = False
        return models
ayushbhandari02/Data-Structures | 9,766,755,649,485 | e4af7db258292acdb373794d5799512f75a9cda7 | 30eab9afd92f8a7116884cfe4c71d38c1cb47ac3 | /reverse_linked_list.py | ec88ee3364559017080340d5ad1634d873be5236 | [] | no_license | https://github.com/ayushbhandari02/Data-Structures | d456a8817ff1722dbcae996197742bb3cedfadaf | 81c3793232220e5e87ee64687ae7c02233901913 | refs/heads/master | "2020-07-05T21:47:27.612238" | "2019-09-26T06:17:56" | "2019-09-26T06:17:56" | 202,790,053 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Node:
    def __init__(self, data):
        # Payload held by this singly-linked-list node.
        self.data = data
        # Successor node; None marks the tail of the list.
        self.next = None
def insert_at_last(head, data):
    """Append a new Node carrying *data* at the tail of the list.

    Returns the head of the list.  The original version silently discarded
    the new node when *head* was None (it only rebound the local name);
    returning the head makes insertion into an empty list work:

        head = insert_at_last(head, data)
    """
    if head is None:
        return Node(data)
    temp = head
    while temp.next:
        temp = temp.next
    temp.next = Node(data)
    return head
def view_list(head):
    """Print each node's data on its own line, walking head to tail."""
    node = head
    while node is not None:
        print(node.data)
        node = node.next
def reverse_ll(head):
    """Reverse the linked list in place and return the new head (old tail)."""
    reversed_head = None
    node = head
    while node is not None:
        # Re-point this node backwards, then advance; the right-hand side is
        # evaluated before any of the left-hand targets are assigned.
        node.next, reversed_head, node = reversed_head, node, node.next
    return reversed_head
# Demo: build 1 -> 2 -> 3 -> 4 -> 5, print it, reverse it, print it again.
head = Node(1)
insert_at_last(head, 2)
insert_at_last(head, 3)
insert_at_last(head, 4)
insert_at_last(head, 5)
print("Before reversing")
view_list(head)
print("After reversing")
# reverse_ll returns the old tail, which becomes the new head.
new_head_returned = reverse_ll(head)
view_list(new_head_returned)
| UTF-8 | Python | false | false | 827 | py | 41 | reverse_linked_list.py | 41 | 0.584039 | 0.577993 | 0 | 44 | 17.795455 | 36 |
iuri-pdista/pythonChat | 17,686,675,347,698 | 9c9dba8b7c43c80ccb46f7980fbba8c59e85267c | da6bc095ac2c4c918228d13071779f96dacfd6cb | /server.py | a2544bf271dfd76173712f49ff3a4f459b1e3e15 | [] | no_license | https://github.com/iuri-pdista/pythonChat | d6c1df82bc075231828d5775edd723c0282dac4b | a7dc874c2fcfe0c6af198e5a01f305d0c627dd15 | refs/heads/master | "2022-11-26T15:50:14.935785" | "2020-07-22T19:41:01" | "2020-07-22T19:41:01" | 281,745,973 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import socket
import colorama
try:
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serverSocket.bind((socket.gethostname(),21000))
serverSocket.listen(3)
except:
print("Could not establish the server")
while True:
clientsocket, address = serverSocket.accept()
print(f"Connection from {address} has been established")
clientsocket.send(bytes("W&LC0M3 T0 T#& C#H4T", "utf-8"))
clientName = clientsocket.recv(15)
print(f"The address: {address} joined the server as: {clientName}")
serverSocket.send(bytes("Mensagem massa","utf-8"))
recvMesage = serverSocket.recv(1024)
print(recvMessage.decode("utf-8"))
clientsocket.close()
serverSocket.close()
| UTF-8 | Python | false | false | 719 | py | 2 | server.py | 2 | 0.709318 | 0.682893 | 0 | 20 | 34.85 | 71 |
silvandeleemput/memcnn | 17,738,214,964,666 | c87838540812199492d2412ae2e2dca8fc1476f7 | 6b1677e3d641ed89b834de5ff1cf070997b6cdba | /memcnn/models/affine.py | 0571a25fa65580de22a57ee7df965b9f097aa431 | [
"MIT"
] | permissive | https://github.com/silvandeleemput/memcnn | e3c7639261eead441d84626c78c3d8b7fb2a9627 | a71169b32d4f8568b333d95a4303d8e05d1e1af5 | refs/heads/master | "2023-05-10T17:01:39.711636" | "2023-05-10T08:04:48" | "2023-05-10T08:08:13" | 122,979,177 | 251 | 29 | MIT | false | "2023-05-10T07:50:35" | "2018-02-26T14:11:31" | "2023-05-09T08:14:00" | "2023-05-10T07:50:34" | 408 | 238 | 24 | 14 | Python | false | false | import torch
import torch.nn as nn
import copy
import warnings
from torch import set_grad_enabled
warnings.filterwarnings(action='ignore', category=UserWarning)
class AffineAdapterNaive(nn.Module):
""" Naive Affine adapter
Outputs exp(f(x)), f(x) given f(.) and x
"""
def __init__(self, module):
super(AffineAdapterNaive, self).__init__()
self.f = module
def forward(self, x):
t = self.f(x)
s = torch.exp(t)
return s, t
class AffineAdapterSigmoid(nn.Module):
""" Sigmoid based affine adapter
Partitions the output h of f(x) = h into s and t by extracting every odd and even channel
Outputs sigmoid(s), t
"""
def __init__(self, module):
super(AffineAdapterSigmoid, self).__init__()
self.f = module
def forward(self, x):
h = self.f(x)
assert h.shape[1] % 2 == 0 # nosec
scale = torch.sigmoid(h[:, 1::2] + 2.0)
shift = h[:, 0::2]
return scale, shift
class AffineCoupling(nn.Module):
def __init__(self, Fm, Gm=None, adapter=None, implementation_fwd=-1, implementation_bwd=-1, split_dim=1):
"""
This computes the output :math:`y` on forward given input :math:`x` and arbitrary modules :math:`Fm` and :math:`Gm` according to:
:math:`(x1, x2) = x`
:math:`(log({s1}), t1) = Fm(x2)`
:math:`s1 = exp(log({s1}))`
:math:`y1 = s1 * x1 + t1`
:math:`(log({s2}), t2) = Gm(y1)`
:math:`s2 = exp(log({s2}))`
:math:`y2 = s2 * x2 + t2`
:math:`y = (y1, y2)`
Parameters
----------
Fm : :obj:`torch.nn.Module`
A torch.nn.Module encapsulating an arbitrary function
Gm : :obj:`torch.nn.Module`
A torch.nn.Module encapsulating an arbitrary function
(If not specified a deepcopy of Gm is used as a Module)
adapter : :obj:`torch.nn.Module` class
An optional wrapper class A for Fm and Gm which must output
s, t = A(x) with shape(s) = shape(t) = shape(x)
s, t are respectively the scale and shift tensors for the affine coupling.
implementation_fwd : :obj:`int`
Switch between different Affine Operation implementations for forward pass. Default = -1
implementation_bwd : :obj:`int`
Switch between different Affine Operation implementations for inverse pass. Default = -1
split_dim : :obj:`int`
Dimension to split the input tensors on. Default = 1, generally corresponding to channels.
"""
super(AffineCoupling, self).__init__()
# mirror the passed module, without parameter sharing...
if Gm is None:
Gm = copy.deepcopy(Fm)
# apply the adapter class if it is given
self.Gm = adapter(Gm) if adapter is not None else Gm
self.Fm = adapter(Fm) if adapter is not None else Fm
self.implementation_fwd = implementation_fwd
self.implementation_bwd = implementation_bwd
self.split_dim = split_dim
if implementation_bwd != -1 or implementation_fwd != -1:
warnings.warn("Other implementations than the default (-1) are now deprecated.",
DeprecationWarning)
def forward(self, x):
args = [x, self.Fm, self.Gm] + [w for w in self.Fm.parameters()] + [w for w in self.Gm.parameters()]
if self.implementation_fwd == 0:
out = AffineBlockFunction.apply(*args)
elif self.implementation_fwd == 1:
out = AffineBlockFunction2.apply(*args)
elif self.implementation_fwd == -1:
x1, x2 = torch.chunk(x, 2, dim=self.split_dim)
x1, x2 = x1.contiguous(), x2.contiguous()
fmr1, fmr2 = self.Fm.forward(x2)
y1 = (x1 * fmr1) + fmr2
gmr1, gmr2 = self.Gm.forward(y1)
y2 = (x2 * gmr1) + gmr2
out = torch.cat([y1, y2], dim=self.split_dim)
else:
raise NotImplementedError("Selected implementation ({}) not implemented..."
.format(self.implementation_fwd))
return out
def inverse(self, y):
args = [y, self.Fm, self.Gm] + [w for w in self.Fm.parameters()] + [w for w in self.Gm.parameters()]
if self.implementation_bwd == 0:
x = AffineBlockInverseFunction.apply(*args)
elif self.implementation_bwd == 1:
x = AffineBlockInverseFunction2.apply(*args)
elif self.implementation_bwd == -1:
y1, y2 = torch.chunk(y, 2, dim=self.split_dim)
y1, y2 = y1.contiguous(), y2.contiguous()
gmr1, gmr2 = self.Gm.forward(y1)
x2 = (y2 - gmr2) / gmr1
fmr1, fmr2 = self.Fm.forward(x2)
x1 = (y1 - fmr2) / fmr1
x = torch.cat([x1, x2], dim=self.split_dim)
else:
raise NotImplementedError("Inverse for selected implementation ({}) not implemented..."
.format(self.implementation_bwd))
return x
class AffineBlock(AffineCoupling):
def __init__(self, Fm, Gm=None, implementation_fwd=1, implementation_bwd=1):
warnings.warn("This class has been deprecated. Use the AffineCoupling class instead.",
DeprecationWarning)
super(AffineBlock, self).__init__(Fm=Fm, Gm=Gm,
implementation_fwd=implementation_fwd,
implementation_bwd=implementation_bwd)
class AffineBlockFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, xin, Fm, Gm, *weights):
"""Forward pass for the affine block computes:
{x1, x2} = x
{log_s1, t1} = Fm(x2)
s1 = exp(log_s1)
y1 = s1 * x1 + t1
{log_s2, t2} = Gm(y1)
s2 = exp(log_s2)
y2 = s2 * x2 + t2
output = {y1, y2}
Parameters
----------
ctx : torch.autograd.function.RevNetFunctionBackward
The backward pass context object
x : TorchTensor
Input tensor. Must have channels (2nd dimension) that can be partitioned in two equal partitions
Fm : nn.Module
Module to use for computation, must retain dimensions such that Fm(X)=Y, X.shape == Y.shape
Gm : nn.Module
Module to use for computation, must retain dimensions such that Gm(X)=Y, X.shape == Y.shape
*weights : TorchTensor
weights for Fm and Gm in that order {Fm_w1, ... Fm_wn, Gm_w1, ... Gm_wn}
Note
----
All tensor/autograd variable input arguments and the output are
TorchTensors for the scope of this function
"""
# check if possible to partition into two equally sized partitions
assert xin.shape[1] % 2 == 0 # nosec
# store partition size, Fm and Gm functions in context
ctx.Fm = Fm
ctx.Gm = Gm
with torch.no_grad():
# partition in two equally sized set of channels
x = xin.detach()
x1, x2 = torch.chunk(x, 2, dim=1)
x1, x2 = x1.contiguous(), x2.contiguous()
# compute outputs
x2var = x2
fmr1, fmr2 = Fm.forward(x2var)
y1 = (x1 * fmr1) + fmr2
x1.set_()
del x1
y1var = y1
gmr1, gmr2 = Gm.forward(y1var)
y2 = (x2 * gmr1) + gmr2
x2.set_()
del x2
output = torch.cat([y1, y2], dim=1).detach_()
# save the (empty) input and (non-empty) output variables
ctx.save_for_backward(xin, output)
return output
@staticmethod
def backward(ctx, grad_output): # pragma: no cover
# retrieve weight references
Fm, Gm = ctx.Fm, ctx.Gm
# retrieve input and output references
xin, output = ctx.saved_tensors
x = xin.detach()
x1, x2 = torch.chunk(x.detach(), 2, dim=1)
GWeights = [p for p in Gm.parameters()]
# partition output gradient also on channels
assert (grad_output.shape[1] % 2 == 0) # nosec
with set_grad_enabled(True):
# compute outputs building a sub-graph
x1.requires_grad = True
x2.requires_grad = True
fmr1, fmr2 = Fm.forward(x2)
y1 = x1 * fmr1 + fmr2
gmr1, gmr2 = Gm.forward(y1)
y2 = x2 * gmr1 + gmr2
y = torch.cat([y1, y2], dim=1)
# perform full backward pass on graph...
dd = torch.autograd.grad(y, (x1, x2) + tuple(Gm.parameters()) + tuple(Fm.parameters()), grad_output)
GWgrads = dd[2:2 + len(GWeights)]
FWgrads = dd[2 + len(GWeights):]
grad_input = torch.cat([dd[0], dd[1]], dim=1)
return (grad_input, None, None) + FWgrads + GWgrads
class AffineBlockInverseFunction(torch.autograd.Function):
@staticmethod
def forward(cty, yin, Fm, Gm, *weights):
"""Forward inverse pass for the affine block computes:
{y1, y2} = y
{log_s2, t2} = Gm(y1)
s2 = exp(log_s2)
x2 = (y2 - t2) / s2
{log_s1, t1} = Fm(x2)
s1 = exp(log_s1)
x1 = (y1 - t1) / s1
output = {x1, x2}
Parameters
----------
cty : torch.autograd.function.RevNetInverseFunctionBackward
The backward pass context object
y : TorchTensor
Input tensor. Must have channels (2nd dimension) that can be partitioned in two equal partitions
Fm : nn.Module
Module to use for computation, must retain dimensions such that Fm(X)=Y, X.shape == Y.shape
Gm : nn.Module
Module to use for computation, must retain dimensions such that Gm(X)=Y, X.shape == Y.shape
*weights : TorchTensor
weights for Fm and Gm in that order {Fm_w1, ... Fm_wn, Gm_w1, ... Gm_wn}
Note
----
All tensor/autograd variable input arguments and the output are
TorchTensors for the scope of this fuction
"""
# check if possible to partition into two equally sized partitions
assert yin.shape[1] % 2 == 0 # nosec
# store partition size, Fm and Gm functions in context
cty.Fm = Fm
cty.Gm = Gm
with torch.no_grad():
# partition in two equally sized set of channels
y = yin.detach()
y1, y2 = torch.chunk(y, 2, dim=1)
y1, y2 = y1.contiguous(), y2.contiguous()
# compute outputs
y1var = y1
gmr1, gmr2 = Gm.forward(y1var)
x2 = (y2 - gmr2) / gmr1
y2.set_()
del y2
x2var = x2
fmr1, fmr2 = Fm.forward(x2var)
x1 = (y1 - fmr2) / fmr1
y1.set_()
del y1
output = torch.cat([x1, x2], dim=1).detach_()
# save input and output variables
cty.save_for_backward(yin, output)
return output
@staticmethod
def backward(cty, grad_output): # pragma: no cover
# retrieve weight references
Fm, Gm = cty.Fm, cty.Gm
# retrieve input and output references
yin, output = cty.saved_tensors
y = yin.detach()
y1, y2 = torch.chunk(y.detach(), 2, dim=1)
FWeights = [p for p in Gm.parameters()]
# partition output gradient also on channels
assert grad_output.shape[1] % 2 == 0 # nosec
with set_grad_enabled(True):
# compute outputs building a sub-graph
y2.requires_grad = True
y1.requires_grad = True
gmr1, gmr2 = Gm.forward(y1) #
x2 = (y2 - gmr2) / gmr1
fmr1, fmr2 = Fm.forward(x2)
x1 = (y1 - fmr2) / fmr1
x = torch.cat([x1, x2], dim=1)
# perform full backward pass on graph...
dd = torch.autograd.grad(x, (y2, y1) + tuple(Fm.parameters()) + tuple(Gm.parameters()), grad_output)
FWgrads = dd[2:2 + len(FWeights)]
GWgrads = dd[2 + len(FWeights):]
grad_input = torch.cat([dd[0], dd[1]], dim=1)
return (grad_input, None, None) + FWgrads + GWgrads
class AffineBlockFunction2(torch.autograd.Function):
@staticmethod
def forward(ctx, xin, Fm, Gm, *weights):
"""Forward pass for the affine block computes:
{x1, x2} = x
{log_s1, t1} = Fm(x2)
s1 = exp(log_s1)
y1 = s1 * x1 + t1
{log_s2, t2} = Gm(y1)
s2 = exp(log_s2)
y2 = s2 * x2 + t2
output = {y1, y2}
Parameters
----------
ctx : torch.autograd.function.RevNetFunctionBackward
The backward pass context object
x : TorchTensor
Input tensor. Must have channels (2nd dimension) that can be partitioned in two equal partitions
Fm : nn.Module
Module to use for computation, must retain dimensions such that Fm(X)=Y, X.shape == Y.shape
Gm : nn.Module
Module to use for computation, must retain dimensions such that Gm(X)=Y, X.shape == Y.shape
*weights : TorchTensor
weights for Fm and Gm in that order {Fm_w1, ... Fm_wn, Gm_w1, ... Gm_wn}
Note
----
All tensor/autograd variable input arguments and the output are
TorchTensors for the scope of this fuction
"""
# check if possible to partition into two equally sized partitions
assert xin.shape[1] % 2 == 0 # nosec
# store partition size, Fm and Gm functions in context
ctx.Fm = Fm
ctx.Gm = Gm
with torch.no_grad():
# partition in two equally sized set of channels
x = xin.detach()
x1, x2 = torch.chunk(x, 2, dim=1)
x1, x2 = x1.contiguous(), x2.contiguous()
# compute outputs
x2var = x2
fmr1, fmr2 = Fm.forward(x2var)
y1 = x1 * fmr1 + fmr2
x1.set_()
del x1
y1var = y1
gmr1, gmr2 = Gm.forward(y1var)
y2 = x2 * gmr1 + gmr2
x2.set_()
del x2
output = torch.cat([y1, y2], dim=1).detach_()
# save the input and output variables
ctx.save_for_backward(xin, output)
return output
@staticmethod
def backward(ctx, grad_output): # pragma: no cover
Fm, Gm = ctx.Fm, ctx.Gm
# are all variable objects now
x, output = ctx.saved_tensors
with set_grad_enabled(False):
y1, y2 = torch.chunk(output, 2, dim=1)
y1, y2 = y1.contiguous(), y2.contiguous()
# partition output gradient also on channels
assert (grad_output.shape[1] % 2 == 0) # nosec
y1_grad, y2_grad = torch.chunk(grad_output, 2, dim=1)
y1_grad, y2_grad = y1_grad.contiguous(), y2_grad.contiguous()
# Recreate computation graphs for functions Gm and Fm with gradient collecting leaf nodes:
# z1_stop, x2_stop, GW, FW
# Also recompute inputs (x1, x2) from outputs (y1, y2)
with set_grad_enabled(True):
z1_stop = y1
z1_stop.requires_grad = True
G_z11, G_z12 = Gm.forward(z1_stop)
x2 = (y2 - G_z12) / G_z11
x2_stop = x2.detach()
x2_stop.requires_grad = True
F_x21, F_x22 = Fm.forward(x2_stop)
x1 = (y1 - F_x22) / F_x21
x1_stop = x1.detach()
x1_stop.requires_grad = True
# compute outputs building a sub-graph
z1 = x1_stop * F_x21 + F_x22
y2_ = x2_stop * G_z11 + G_z12
y1_ = z1
# calculate the final gradients for the weights and inputs
dd = torch.autograd.grad(y2_, (z1_stop,) + tuple(Gm.parameters()), y2_grad)
z1_grad = dd[0] + y1_grad
GWgrads = dd[1:]
dd = torch.autograd.grad(y1_, (x1_stop, x2_stop) + tuple(Fm.parameters()), z1_grad, retain_graph=False)
FWgrads = dd[2:]
x2_grad = dd[1] + y2_grad
x1_grad = dd[0]
grad_input = torch.cat([x1_grad, x2_grad], dim=1)
y1_.detach_()
y2_.detach_()
del y1_, y2_
return (grad_input, None, None) + FWgrads + GWgrads
class AffineBlockInverseFunction2(torch.autograd.Function):
@staticmethod
def forward(cty, yin, Fm, Gm, *weights):
"""Forward pass for the affine block computes:
Parameters
----------
cty : torch.autograd.function.RevNetInverseFunctionBackward
The backward pass context object
y : TorchTensor
Input tensor. Must have channels (2nd dimension) that can be partitioned in two equal partitions
Fm : nn.Module
Module to use for computation, must retain dimensions such that Fm(X)=Y, X.shape == Y.shape
Gm : nn.Module
Module to use for computation, must retain dimensions such that Gm(X)=Y, X.shape == Y.shape
*weights : TorchTensor
weights for Fm and Gm in that order {Fm_w1, ... Fm_wn, Gm_w1, ... Gm_wn}
Note
----
All tensor/autograd variable input arguments and the output are
TorchTensors for the scope of this fuction
"""
# check if possible to partition into two equally sized partitions
assert yin.shape[1] % 2 == 0 # nosec
# store partition size, Fm and Gm functions in context
cty.Fm = Fm
cty.Gm = Gm
with torch.no_grad():
# partition in two equally sized set of channels
y = yin.detach()
y1, y2 = torch.chunk(y, 2, dim=1)
y1, y2 = y1.contiguous(), y2.contiguous()
# compute outputs
y1var = y1
gmr1, gmr2 = Gm.forward(y1var)
x2 = (y2 - gmr2) / gmr1
y2.set_()
del y2
x2var = x2
fmr1, fmr2 = Fm.forward(x2var)
x1 = (y1 - fmr2) / fmr1
y1.set_()
del y1
output = torch.cat([x1, x2], dim=1).detach_()
# save the input and output variables
cty.save_for_backward(yin, output)
return output
@staticmethod
def backward(cty, grad_output): # pragma: no cover
Fm, Gm = cty.Fm, cty.Gm
# are all variable objects now
y, output = cty.saved_tensors
with set_grad_enabled(False):
x1, x2 = torch.chunk(output, 2, dim=1)
x1, x2 = x1.contiguous(), x2.contiguous()
# partition output gradient also on channels
assert (grad_output.shape[1] % 2 == 0) # nosec
x1_grad, x2_grad = torch.chunk(grad_output, 2, dim=1)
x1_grad, x2_grad = x1_grad.contiguous(), x2_grad.contiguous()
# Recreate computation graphs for functions Gm and Fm with gradient collecting leaf nodes:
# z1_stop, y1_stop, GW, FW
# Also recompute inputs (y1, y2) from outputs (x1, x2)
with set_grad_enabled(True):
z1_stop = x2
z1_stop.requires_grad = True
F_z11, F_z12 = Fm.forward(z1_stop)
y1 = x1 * F_z11 + F_z12
y1_stop = y1.detach()
y1_stop.requires_grad = True
G_y11, G_y12 = Gm.forward(y1_stop)
y2 = x2 * G_y11 + G_y12
y2_stop = y2.detach()
y2_stop.requires_grad = True
# compute outputs building a sub-graph
z1 = (y2_stop - G_y12) / G_y11
x1_ = (y1_stop - F_z12) / F_z11
x2_ = z1
# calculate the final gradients for the weights and inputs
dd = torch.autograd.grad(x1_, (z1_stop,) + tuple(Fm.parameters()), x1_grad)
z1_grad = dd[0] + x2_grad
FWgrads = dd[1:]
dd = torch.autograd.grad(x2_, (y2_stop, y1_stop) + tuple(Gm.parameters()), z1_grad, retain_graph=False)
GWgrads = dd[2:]
y1_grad = dd[1] + x1_grad
y2_grad = dd[0]
grad_input = torch.cat([y1_grad, y2_grad], dim=1)
return (grad_input, None, None) + FWgrads + GWgrads
| UTF-8 | Python | false | false | 20,317 | py | 55 | affine.py | 35 | 0.54511 | 0.516956 | 0 | 577 | 34.211438 | 137 |
TobyNonnenmacher/GWT_Algorithm | 11,252,814,362,551 | ca5d3dd8a11ac8888bbaa68fd8b9e251e3b5b597 | 30f24f38685717ce0447616974b37e435385b215 | /Toby_GWT_Algorithm/TwosComplement.py | f22f54b4aabc614aa913307659713adbbdd56b63 | [] | no_license | https://github.com/TobyNonnenmacher/GWT_Algorithm | 481b81a3c8923f698127485b5f741a705a4887dd | 83d5ceee1a9a2d54465ec6c70d09106baa4ab553 | refs/heads/master | "2021-01-10T15:16:43.356952" | "2015-12-02T17:08:32" | "2015-12-02T17:08:32" | 45,616,847 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
from __future__ import print_function
def to_twoscomplement(value):
if value < 0:
value = ( 1<<16 ) + value
formatstring = '{:0%ib}' % 16
return formatstring.format(value)
with open("gwt_pll_1000frames.txt") as f:
lines = f.read()
numbers = lines.replace("\n", " ").split(", ")
binary_numbers = []
for number in numbers:
binary_numbers.append(to_twoscomplement(int(number)))
output = ""
for i in range(0,len(binary_numbers),8):
output+="".join(binary_numbers[i:i+8])+"\n"
with open("python_output.txt", "w") as f:
f.write(output)
#a = to_twoscomplement(-77)
#print(a)
| UTF-8 | Python | false | false | 632 | py | 10 | TwosComplement.py | 5 | 0.629747 | 0.60443 | 0 | 29 | 20.758621 | 57 |
debasishbai/django_blog | 2,439,541,460,660 | b18c864e4cfbd261383ea87bd2ab1b22e9de2b6c | a312f6097e881b5bccb6210370441fc43638cac8 | /news/news/pipelines.py | 2eb8757d4cbfa77b1efb9e127f80b8aa4b302968 | [
"MIT"
] | permissive | https://github.com/debasishbai/django_blog | 2798e5b57642ccc6767c291631934d6c099dd24e | a304fdc8f029499893d8157dfdd8ae29e79d6545 | refs/heads/master | "2022-11-23T08:50:26.731283" | "2017-04-30T10:34:17" | "2017-04-30T10:34:17" | 81,077,552 | 1 | 0 | MIT | false | "2022-11-22T01:29:50" | "2017-02-06T10:51:29" | "2017-05-04T19:16:07" | "2022-11-22T01:29:47" | 111 | 0 | 0 | 8 | Python | false | false | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import psycopg2
import urlparse
import os
import logging
from datetime import datetime
from scrapy.exceptions import DropItem
import re
class NewsPipeline(object):
fmt = '%Y-%m-%d %H:%M:%S'
creation_date_raw = datetime.now()
creation_date = creation_date_raw.strftime(fmt)
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
def __init__(self):
self.conn = psycopg2.connect(database=self.url.path[1:], user=self.url.username,password=self.url.password, host=self.url.hostname, port=self.url.port)
def process_item(self, item, spider):
if item.get("date") is None:
item["date"] = self.creation_date
logging.warning("*******Article Does Not Contain any Date*******")
logging.info("*******Adding Date*******")
if item.get("caption") is None:
item["caption"] = item.get("title")
logging.warning("********Article Does Not Contain any Caption********")
logging.info("********Adding Title as Caption********")
if not (item.get("title") or item.get("story") or item.get("image")):
logging.warning("*******Missing one of Title, Story or Image in %s********" % item)
raise DropItem("******Skipping this Article . . . . .*******")
check_for_duplicates = self.check_dupes(item)
if check_for_duplicates:
logging.info("**********Duplicate Article**********")
logging.info("Skipping . . . . .")
return item
else:
clean_story = self.strip_story(item["story"])
item["story"] = clean_story
item["images"][0]["path"] = item["images"][0]["path"].split("/")[1]
# clean_image = self.strip_images(item["files"][0]["path"])
# item["files"][0]["path"] = clean_image
save_item = self.save_to_database(item)
return save_item
def check_dupes(self, item):
cur = self.conn.cursor()
cur.execute("SELECT count(*) FROM blog_post WHERE title=%s", (item["title"],))
fetch_id = cur.fetchone()[0]
return fetch_id
def save_to_database(self, item):
cur = self.conn.cursor()
cur.execute("""
INSERT INTO blog_post (title,text,creation_date,image_name, caption, author_id) VALUES (%s,%s,%s,%s,%s,%s)""",
(item["title"], item["story"], item["date"], item["images"][0]["path"], item["caption"], 1))
logging.info("*" * 50)
logging.info("*******Found a New Article*******")
logging.info("*******Saving to Database*******")
self.conn.commit()
return item
@staticmethod
def strip_story(story):
raw_story = " ".join(story.strip().split())
stripped_story = re.sub(r"(.+?\..+?\.)\s(.+?)", r"\1\n\n\2", raw_story)
return stripped_story
@staticmethod
def strip_images(images):
if not images.endswith(".jpg"):
logging.warning("*****Article Image Not in JPG Format*****")
logging.info("*****Removing Image*****")
images = None
return images
else:
image_name = images.split("/")[1]
return image_name
| UTF-8 | Python | false | false | 3,455 | py | 33 | pipelines.py | 9 | 0.558032 | 0.553111 | 0 | 91 | 36.967033 | 159 |
shkwsk/AtCoder | 9,096,740,744,761 | 8cd418075b2c1323de997e1aad332a0052b57847 | aac8374bea34358f003590ce28ab7d40675ea599 | /practice/other/python3/abc003_2.py | 7454c074ae7c961f72e5b2e4f8478fc65b36309a | [] | no_license | https://github.com/shkwsk/AtCoder | bda3b341706faca6a6449ddbd81a6c13cad0b161 | bcb976f03004a70e6c3dd67b3668b2901de2d5af | refs/heads/master | "2022-11-26T23:32:34.940797" | "2020-08-10T06:06:52" | "2020-08-10T06:06:52" | 268,196,180 | 0 | 0 | null | false | "2020-05-31T11:59:20" | "2020-05-31T02:34:50" | "2020-05-31T03:47:15" | "2020-05-31T11:59:19" | 18 | 0 | 0 | 1 | Python | false | false | def main():
S = list(input())
T = list(input())
AtCoder = ['a','t','c','o','d','e','r']
win = True
for s,t in zip(S,T):
if s == t:
continue
if s == '@' and t == '@':
continue
if s == '@' and t in AtCoder:
continue
if s in AtCoder and t == '@':
continue
win = False
if win:
print('You can win')
else:
print('You will lose')
if __name__ == '__main__':
main()
| UTF-8 | Python | false | false | 494 | py | 82 | abc003_2.py | 76 | 0.396761 | 0.396761 | 0 | 22 | 21.454545 | 43 |
RuomeiYan/CCC-My-solutions | 14,121,852,496,029 | ccc53f98d094fb8e2262416387d67e1c869d5656 | 20f35aaf9b3d4f263ed2b98dad3715e397a9ac51 | /2011/11-S1.py | acc98572f0aca01993288f1cd65c6087c91c80fa | [] | no_license | https://github.com/RuomeiYan/CCC-My-solutions | 3639c1161e56bd5484b4e642764060e7b4bff5b2 | f307229d2f8d48035f0d9bc3d772b0158cb7c9f1 | refs/heads/master | "2021-09-04T01:43:18.773418" | "2018-01-14T05:49:44" | "2018-01-14T05:49:44" | 117,403,370 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
n = int(sys.stdin.readline())
t = 0
s = 0
for _ in range(n):
text = sys.stdin.readline()
t += text.count('T') + text.count('t')
s += text.count('S') + text.count('s')
if s >= t:
print('French')
else:
print('English')
| UTF-8 | Python | false | false | 262 | py | 155 | 11-S1.py | 154 | 0.522901 | 0.515267 | 0 | 12 | 19.666667 | 42 |
AtomsForPeace/FishTank | 1,906,965,521,293 | b7f0d98ed3422df1e58747735af123598af24269 | 2e0d72c7fc7f98be7ea177a4258e2aec7f23c8f2 | /FishTank.py | 12557d7d751c8deb20089cb100186ebf6a8aab4d | [] | no_license | https://github.com/AtomsForPeace/FishTank | 8a238f6169b9a8fc1e3f4bd7d9ec1413d9854a4f | 0ae9a8ce50f2c63de4920c44023e2264a5c28382 | refs/heads/master | "2019-07-29T01:25:28.738120" | "2019-03-30T16:10:55" | "2019-03-30T16:10:55" | 23,592,353 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import pygame
import random
pygame.init()
size = width, height = 3850, 1200
blue = 64, 164, 223
screen = pygame.display.set_mode(size)
class Fish:
def __init__(self, rightImage, leftImage):
# loading the two images
self.rightImage = pygame.image.load(rightImage)
self.leftImage = pygame.image.load(leftImage)
# starting positions
self.x = random.randint(100, 1200)
self.y = random.randint(100, 700)
# set default image to fish facing right
self.image = self.rightImage
self.speed = [random.randint(1, 3), random.randint(-1, 1)]
self.rect = self.image.get_rect(center=(self.x, self.y))
def move(self):
self.rect = self.rect.move(self.speed)
# bounces from left to right
if self.rect.left < 0:
self.speed[0] = -self.speed[0]
self.image = self.rightImage
# bounces from right to left
if self.rect.right > width:
self.speed[0] = -self.speed[0]
self.image = self.leftImage
# bounces from top or bottom
if self.rect.top < 0 or self.rect.bottom > height:
self.speed[1] = -self.speed[1]
# change up/down speed every 50 pixels
if (self.rect.right % 50) == 0:
self.speed[1] = random.randint(-1, 1)
# change left/right speed every 100 pixels
if (self.image == self.rightImage) and (self.rect.right % 100) == 0:
self.speed[0] = random.randint(1, 3)
if (self.image == self.leftImage) and (self.rect.right % 100) == 0:
self.speed[0] = random.randint(-3, -1)
def start():
fish1 = Fish("fishgoingright.png", "fishgoingleft.png")
fish2 = Fish("yellowfishgoingright.png", "yellowfishgoingleft.png")
fish3 = Fish("tropicalfishgoingright.png", "tropicalfishgoingleft.png")
while 1:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
screen.fill(blue)
fish1.move()
fish2.move()
fish3.move()
screen.blit(fish1.image, fish1.rect)
screen.blit(fish2.image, fish2.rect)
screen.blit(fish3.image, fish3.rect)
pygame.display.flip()
start()
| UTF-8 | Python | false | false | 2,253 | py | 2 | FishTank.py | 1 | 0.594763 | 0.559698 | 0 | 69 | 31.652174 | 76 |
ablange93/sql_task | 15,169,824,511,596 | 5632f70ae956ce10865eb1afa403a94e0a740a9d | ad38bb828a282df890da669943be161535a37f72 | /sql_task/sql_task.py | d08d3620a82e2aad525987f9f127855bc6954a76 | [] | no_license | https://github.com/ablange93/sql_task | a138833d3d0724df0be6c988b9dd84d6746e28d1 | 94055487b9e47f2dca2416ba4dfd431deeddbc70 | refs/heads/master | "2020-05-18T09:47:15.273640" | "2019-05-04T20:22:20" | "2019-05-04T20:22:20" | 184,336,301 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import os
import sqlite3
from sqlite3 import Error
def create_connection(db_file):
# generate database path relative to working directory
current_dir = os.path.dirname(__file__)
database_path = os.path.join(current_dir, db_file)
try:
conn = sqlite3.connect(database_path)
return conn
except Error as e:
print(e)
return None
def generate_list(conn):
# generate the list of top 10 users who visited the most sites
cur = conn.cursor()
query_string = """ SELECT personId, COUNT(DISTINCT siteId) as qtyOfSitesVisited FROM visits GROUP BY personId
ORDER BY qtyOfSitesVisited DESC LIMIT 10;"""
cur.execute(query_string)
rows = cur.fetchall()
freq_users = [row for row in rows]
return freq_users
def insert_row(conn, values):
# insert a tuple into the frequent_browsers table
query_string = """INSERT INTO frequent_browsers(person_id,num_sites_visited)
VALUES(?,?);"""
cur = conn.cursor()
cur.execute(query_string, values)
return cur.lastrowid # should return 15::11
def table_wipe(conn):
# wipe the frequent_browsers table before inserting values
cur = conn.cursor()
query_string = "DELETE FROM frequent_browsers;"
cur.execute(query_string)
def list_table(conn):
# query all values from frequent_browsers table & display neatly in console
cur = conn.cursor()
query_string = "SELECT * FROM frequent_browsers;"
cur.execute(query_string)
rows = cur.fetchall()
freq_users_table_list = [row for row in rows]
print("person_id | num_sites_visited\n-------------------------------")
for row in freq_users_table_list:
if row[0] < 10:
print(str(row[0]) + " " + str(row[1]))
else:
print(str(row[0]) + " " + str(row[1]))
if len(sys.argv) == 1:
if sys.argv[0] == "test_sql_task.py":
# don't print if you're running unit tests
pass
else:
# no command line args returns on-board documentation
print("""
This is a Python module called "sql_task" used query a SQLiteDB database.
Usage:
$ python -m sql_task [options]
Example(s):
$ python -m sql_task --task-one
$ python -m sql_task --task-two
Options:
--task-one
Returns results of a query of the then people
who have visited3 the most sites.
--task-two
Inserts the values returned from the above query
into the 'frequent_users' table. Once all values
are inserted, the table is displayed.
Unit Testing:
To run unit tests enter the following in the command line:
$ python -m unittest test_sql_task.TestDbComponents
""")
elif len(sys.argv) == 2:
# one command line argument entered
print("\nEstablishing database connection...\n")
conn = create_connection('testdb.db')
with conn:
for param in sys.argv:
# iteratively check each parameter
if param == "--task-one":
# TASK ONE | Query 'visits' table to find & display top 10 users who visited most sites
print("Generating list of top 10 frequent users...\n\n")
freq_users = generate_list(conn)
print("person_id | num_sites_visited\n-------------------------------")
for user in freq_users:
if user[0] < 10:
print(str(user[0]) + " " + str(user[1]))
else:
print(str(user[0]) + " " + str(user[1]))
print("\nRun '--task-two' to insert the above values into the frequent_users table")
elif param == "--task-two":
# TASK 2 | Inserts values from TASK ONE into 'frequent_browsers' table & displays table
print("Inserting list into frequent_users table...\n")
table_wipe(conn)
freq_users = generate_list(conn)
for user in freq_users:
insert_row(conn, user)
print("Displaying values from newly created frequent_users table...\n")
list_table(conn)
elif "sql_task" in param:
pass
elif "-m unittest" in param:
pass
else:
print("Parameter " + str(param) + " not recognized. " \
+ "Please try again or type 'python -m sql_task' for help.")
else:
print("Too many parameters entered.\nPlease try again or type" \
+ " 'python -m sql_task' for help.")
| UTF-8 | Python | false | false | 4,679 | py | 5 | sql_task.py | 3 | 0.571062 | 0.563796 | 0 | 131 | 34.717557 | 114 |
PatriqDesigns/Assignments | 3,264,175,187,672 | 3867627e36cb357aaf3a9db2b676267de4e4db96 | 28a8c69bd8fa0a28cb0d98988fda9ee6241271c8 | /projRC/utils.py | 3f9c22fc67931c63071bc7e0b33395082b1ee3f9 | [] | no_license | https://github.com/PatriqDesigns/Assignments | 9a16698165994567109e53334b779c613d376e92 | f74c86426493934b63cd6a00aa24c087d23791e9 | refs/heads/master | "2020-05-21T15:23:25.782586" | "2019-12-14T13:26:52" | "2019-12-14T13:26:52" | 25,074,143 | 1 | 4 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Contains miscellaneous utilities"""
# Grupo 14:
# 81900 - Nuno Anselmo
# 81936 - Liliana Oliveira
# 82047 - Andre Mendes
import time
DEBUG_LEVEL = 0
def log_message(code, message):
"""Formats and prints the given message"""
print '[' + format_timestamp(time.time()) + ' - ' + code.center(5) + '] ' + str(message)
def format_timestamp(timestamp):
"""Formats a timestamp"""
return time.strftime("%H:%M:%S", time.localtime(timestamp))
def get_timestamp():
return time.time()
| UTF-8 | Python | false | false | 500 | py | 318 | utils.py | 147 | 0.66 | 0.622 | 0 | 20 | 24 | 92 |
uhayate/django-dev | 11,123,965,302,205 | 50db7603e3a47ea64801d55518ab4464fec1b67a | 8dce1d9105b38a821317f72b0687be5ab42e5a73 | /backend/base/settings.py | 4eba944931889308255200c4f5eaa5e5c5f9cc74 | [] | no_license | https://github.com/uhayate/django-dev | b1df20781ba4de7cdcb0016194e8d8f80f8e200f | 57d26187196095d6dd52f77daf8661b5ad1ac6cd | refs/heads/master | "2016-09-14T16:26:16.453318" | "2016-09-12T08:36:06" | "2016-09-12T08:36:06" | 60,186,707 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding:utf-8
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'r5b7tg3wz-##x*mlp(c1#m3$^vjwwr_qig&%(fud=10ga3%&qu'
DEBUG = True
# 数据库设置
# ------------- 开发环境设置 DEBUG = TRUE --------------------
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'dev',
'USER': 'root',
'PASSWORD': 'root',
'HOST': '127.0.0.1',
'PORT': '',
}
}
# ------------- 开发环境设置结束 ------------------------------
# ------------- 生产环境设置 DEBUG = FALSE --------------------
if not DEBUG:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'prod',
'USER': 'root',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# ------------- 生产环境设置结束 ------------------------------
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd-party apps
'crispy_forms',
'reversion',
'xlwt',
'xlsxwriter',
'xadmin',
'easy_thumbnails',
'guardian',
'userena',
'rest_framework',
'django_extensions',
)
# 应用程序设置
project_apps = (
'apps.accounts', # 用户账户
)
INSTALLED_APPS += project_apps
# 中间件设置
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'base.urls'
WSGI_APPLICATION = 'base.wsgi.application'
# 国际化
LANGUAGE_CODE = 'zh-CN'
TIME_ZONE = 'Asia/Shanghai'
DATE_FORMAT = 'Y-m-d'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# 静态文件设置
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'base/static'),
os.path.join(BASE_DIR, 'apps/accounts/static'),
)
# 媒体文件设置
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# 模板文件设置
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# 模板文件的位置
'DIRS': [
os.path.join(BASE_DIR, 'base/templates'),
os.path.join(BASE_DIR, 'apps/accounts/templates'),
],
# APP_DIRS为True表示模板文件引用别的文件时优先在当前app里找
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# rest_framework
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
)
}
# django-userena 设置
AUTHENTICATION_BACKENDS = (
'userena.backends.UserenaAuthenticationBackend',
'guardian.backends.ObjectPermissionBackend',
'django.contrib.auth.backends.ModelBackend',
)
ANONYMOUS_USER_ID = -1
AUTH_PROFILE_MODULE = 'accounts.MyProfile'
USERENA_SIGNIN_REDIRECT_URL = '/' # 登陆之后重定向
USERENA_REDIRECT_ON_SIGNOUT = '/' # 注销之后重定向
LOGIN_URL = '/accounts/signin/'
LOGOUT_URL = '/accounts/signout/'
USERENA_SIGNIN_AFTER_SIGNUP = True # 注册之后登陆
USERENA_ACTIVATION_REQUIRED = False # 必须激活 = False
USERENA_USE_MESSAGES = False
USERENA_REMEMBER_ME_DAYS = ['7天']
| UTF-8 | Python | false | false | 4,036 | py | 7 | settings.py | 2 | 0.600317 | 0.594509 | 0 | 160 | 22.675 | 73 |
SharpAI/DeepCamera | 19,207,093,766,546 | 176a751de454165c6fd6bb0abafba8c5b87ad143 | 02bde3948681b41ab5b94553cc87ca2bfd38f159 | /src/yolov2_parking/convert.py | 4e75f5dd34159ab0dc3dc656458e471e8f33b1ad | [
"MIT"
] | permissive | https://github.com/SharpAI/DeepCamera | 84829f615a9fe6e3e3d67cb96b3143304ecbad59 | 6375e7c7cfda90345e9182214e9208a42b5254a7 | refs/heads/master | "2023-06-18T05:07:19.163001" | "2023-01-18T18:03:19" | "2023-01-18T18:03:19" | 173,961,960 | 1,548 | 244 | MIT | false | "2022-10-07T18:23:20" | "2019-03-05T14:29:23" | "2022-10-06T08:01:18" | "2022-10-07T18:21:02" | 244,168 | 1,033 | 201 | 11 | JavaScript | false | false | import sys
import numpy
import _convert
from cffi import FFI
ffi = FFI()
# window size 2
# RGB array (3D)
my_input = numpy.array(
[
[[1,2,3],[5,6,7],[4,5,6],[6,4,5]],
[[4,6,3],[5,8,7],[6,3,6],[5,8,5]],
[[3,7,3],[4,5,7],[5,2,5],[4,5,5]],
[[2,8,3],[3,2,7],[1,2,6],[3,1,5]],
], dtype=numpy.float32
)
window_size = 2
sample_count = 3 * (my_input.shape[0] - window_size + 1) * (my_input.shape[1] - window_size + 1)
print('window_size -> ' + str(window_size) + ' ... sample_count -> ' + str(sample_count))
my_output = numpy.zeros((sample_count, window_size, window_size), dtype=numpy.float32)
_x = _convert.ffi.cast('size_t', my_input.shape[0])
_y = _convert.ffi.cast('size_t', my_input.shape[1])
_window_size = _convert.ffi.cast('size_t', window_size)
_my_input = _convert.ffi.cast('float *', my_input.ctypes.data)
_my_output = _convert.ffi.cast('float *', my_output.ctypes.data)
_convert.lib.sample3d(_x, _y, _window_size, _my_input, _my_output)
print(_my_output)
def float32_convert(output, img_data):
_x = _convert.ffi.cast('size_t', output.shape[0])
_y = _convert.ffi.cast('size_t', output.shape[1])
_z = _convert.ffi.cast('size_t', output.shape[2])
_output = _convert.ffi.cast('float *', output.ctypes.data)
_convert.lib.float32_convert(_x, _y, _z, _output, img_data)
def calc_result(w,h,output):
_shape = _convert.ffi.cast('size_t', output.shape[0])
_w = _convert.ffi.cast('int', w)
_h = _convert.ffi.cast('int', h)
_output = _convert.ffi.cast('float *', output.ctypes.data)
result = ffi.string(_convert.lib.calc_result(_w,_h,_shape,_output))
print('{}'.format(result))
return result
_convert.lib.init_darknet()
| UTF-8 | Python | false | false | 1,711 | py | 491 | convert.py | 304 | 0.608416 | 0.56692 | 0 | 51 | 32.54902 | 96 |
karthikmprakash/Quick-Notes | 9,586,367,034,342 | 7899184891be70aa4a78ec40242fbd6a0bc633a5 | 6097f773a62fef134851491c11c4ab0892755e04 | /website/models.py | 5c06025ec0e90c61cf36199dd07d69ea1309935f | [] | no_license | https://github.com/karthikmprakash/Quick-Notes | d226c9f8152105e5f01058d689c4ee97e85a3f64 | 1c1ed8f0299995d56fd7087ab96ee27c46301ce0 | refs/heads/main | "2023-06-23T14:44:49.016083" | "2021-07-11T15:05:36" | "2021-07-11T15:05:36" | 369,278,747 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from . import db
from flask_login import UserMixin
from sqlalchemy.sql import func # to get date and time
class Note(db.Model):
id = db.Column(db.Integer,primary_key=True)
data = db.Column(db.String(10000))
date = db.Column(db.DateTime(timezone=True),default=func.now())
user_id=db.Column(db.Integer,db.ForeignKey('user.id')) #even though the class is defined as User and not user the SQL stores the data as user
class User(db.Model, UserMixin):
id = db.Column(db.Integer,primary_key=True)
email = db.Column(db.String(150),unique=True)
password = db.Column(db.String(150))
first_name = db.Column(db.String(150))
notes = db.relationship('Note') # when using relationship its uppercase so Note instead of note | UTF-8 | Python | false | false | 747 | py | 6 | models.py | 3 | 0.718876 | 0.700134 | 0 | 16 | 45.75 | 145 |
gavt45/clouddocs-server | 15,822,659,530,450 | 2216f78854ef66123a0f6cfd5f65e3ca1dc0a2d0 | 92b39295fbd0e5f18fd107132de5520ab9330331 | /clouddocs_app/migrations/0008_auto_20201124_1351.py | 0298131da287093ff7c25c7b434bddd9378594cc | [] | no_license | https://github.com/gavt45/clouddocs-server | a9a4145e9bc5dca576b0c50b1e5796657bdadbec | 3f53bce8c6fb6aca5a8868195dfcb5984223f4ff | refs/heads/main | "2023-01-30T14:33:55.493121" | "2020-12-04T04:26:36" | "2020-12-04T04:26:36" | 315,522,057 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.3 on 2020-11-24 13:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clouddocs_app', '0007_auto_20201124_1046'),
]
operations = [
migrations.RemoveField(
model_name='event',
name='id_direction',
),
migrations.AddField(
model_name='event',
name='directions',
field=models.ManyToManyField(to='clouddocs_app.Direction'),
),
]
| UTF-8 | Python | false | false | 522 | py | 15 | 0008_auto_20201124_1351.py | 14 | 0.574713 | 0.515326 | 0 | 22 | 22.727273 | 71 |
glow-mdsol/clinical_trials | 6,210,522,741,275 | 3dbab29e9ca7f7065ec916195778de82eeea31d6 | 14fac8b3cec0cdde1e59a43ec04ed46a9c6df429 | /tests/test_structs.py | daaf7eb258e297b6da17654acb8f3988abfbafce | [] | no_license | https://github.com/glow-mdsol/clinical_trials | 1bee3f3a3c55f7b886d1aa92db6ee9f25c6b92bd | 86bfa15010f262ffe925909b77eff5023b20a5b8 | refs/heads/develop | "2022-12-10T04:33:35.829465" | "2021-04-20T19:47:52" | "2021-04-20T19:47:52" | 136,510,299 | 4 | 1 | null | false | "2022-12-08T02:21:14" | "2018-06-07T17:30:20" | "2021-05-10T23:51:22" | "2022-12-08T02:21:14" | 151 | 4 | 0 | 2 | Python | false | false | import os
from unittest import mock
import pytest
from xmlschema import XMLSchema
from clinical_trials import ClinicalStudy
from clinical_trials.structs import ProvidedDocument
SCHEMA_LOCATION = os.path.join(os.path.dirname(__file__), '..', 'doc', 'schema', 'public.xsd')
@pytest.fixture()
def schema():
_schema = open(SCHEMA_LOCATION)
return XMLSchema(_schema)
def get_study(nctid):
fixture_dir = os.path.join(os.path.dirname(__file__), 'fixtures')
opt = os.path.join(fixture_dir, '{}.xml'.format(nctid))
with open(opt, 'rb') as fh:
content = fh.read()
return content
def test_provided_documents_no_docs(schema):
nct_id = "NCT03723057"
with mock.patch('clinical_trials.clinical_study.get_schema') as donk:
donk.return_value = schema
with mock.patch("clinical_trials.clinical_study.get_study") as dink:
dink.return_value = get_study(nct_id)
study = ClinicalStudy.from_nctid(nct_id)
assert study.provided_docs == []
def test_provided_documents_with_docs(schema):
nct_id = "NCT03982511"
with mock.patch('clinical_trials.clinical_study.get_schema') as donk:
donk.return_value = schema
with mock.patch("clinical_trials.clinical_study.get_study") as dink:
dink.return_value = get_study(nct_id)
study = ClinicalStudy.from_nctid(nct_id)
assert len(study.provided_docs) == 1
provided_doc = study.provided_docs[0] # type: ProvidedDocument
assert provided_doc.type == "Informed Consent Form"
assert provided_doc.has_icf is True
assert provided_doc.has_protocol is False
assert provided_doc.has_sap is False
assert provided_doc.date == "February 22, 2019"
assert provided_doc.url == "https://ClinicalTrials.gov/ProvidedDocs/11/NCT03982511/ICF_000.pdf"
def test_fetch_provided_docs(schema, tmpdir):
nct_id = "NCT03982511"
with mock.patch('clinical_trials.clinical_study.get_schema') as donk:
donk.return_value = schema
with mock.patch("clinical_trials.clinical_study.get_study") as dink:
dink.return_value = get_study(nct_id)
study = ClinicalStudy.from_nctid(nct_id)
assert len(study.provided_docs) == 1
provided_doc = study.provided_docs[0] # type: ProvidedDocument
with mock.patch('clinical_trials.structs.requests.get') as mock_get:
mock_get.return_value = mock.MagicMock(status_code=200, content=b"some content")
provided_doc.fetch_document(tmpdir)
assert len(tmpdir.listdir()) == 1
assert os.path.basename(tmpdir.listdir()[0]) == "ICF_000.pdf"
| UTF-8 | Python | false | false | 2,675 | py | 32 | test_structs.py | 14 | 0.666168 | 0.645607 | 0 | 67 | 38.910448 | 103 |
AhmedKhaled007/coursaty | 1,623,497,663,768 | 5a2ae248a32bce67c5008385b8af5213eb959f6d | ed4c4fbdbc9bfce61a4e9f4b54a45d9ebf863839 | /courses/migrations/0007_auto_20200207_1313.py | f7df430ab8cc49416a53a3cd7c24b42c73bd5246 | [] | no_license | https://github.com/AhmedKhaled007/coursaty | 7f537118035697c9d99c266c67ab8c208a4645df | 26798b4363a37f22700e34e6ba4f304d982564c1 | refs/heads/master | "2020-12-27T08:03:01.147463" | "2020-02-11T09:16:01" | "2020-02-11T09:16:01" | 237,825,632 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.0.2 on 2020-02-07 11:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0006_tracks'),
]
operations = [
migrations.AddField(
model_name='classgroup',
name='days_of_week',
field=models.SmallIntegerField(choices=[(0, 'Monday'), (1, 'Tuesday'), (2, 'Wednesday'), (3, 'Thursday'), (4, 'Friday'), (5, 'Saturday'), (6, 'Sunday')], default=0),
preserve_default=False,
),
migrations.AddField(
model_name='classgroup',
name='sessionCount',
field=models.IntegerField(default=12),
preserve_default=False,
),
]
| UTF-8 | Python | false | false | 742 | py | 62 | 0007_auto_20200207_1313.py | 42 | 0.560647 | 0.521563 | 0 | 25 | 28.68 | 177 |
vikasmech/MotionVectorEstimator | 2,284,922,625,887 | 3accbf38b317028f2558a96dae92966172f9f9fa | ac7d11d74e2e1c958008a546684b7ba2bc66d888 | /imageReader.py | 58d8c577117bde7ee9500cdf61eb54a89da69435 | [] | no_license | https://github.com/vikasmech/MotionVectorEstimator | 2a83c00060f0588469dd0934ec3c3ad7cfbe8228 | 45ec27cf9725aceb8bd32b6bbc4790d7190b309f | refs/heads/master | "2020-03-18T14:07:27.794108" | "2016-06-29T21:34:22" | "2016-06-29T21:34:22" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'davidsiecinski'
from PIL import Image
def loadImage(filepath):
im = Image.open(filepath)
# im=im.convert('LA')# convert to Black and White
image_array=[]
if list(im.getdata())[0] is list:
for r, b, g in list(im.getdata()):
image_array.append(0.2125*r + 0.7154*g + 0.0721*b)
else:
image_array=list(im.getdata())
width, height = im.size
image_list2d = [image_array[i * width:(i + 1) * width] for i in xrange(height)]
# im.rotate(45).show()
return image_list2d
| UTF-8 | Python | false | false | 541 | py | 9 | imageReader.py | 8 | 0.602588 | 0.563771 | 0 | 18 | 28.888889 | 83 |
GustavoSwDaniel/Desafio-Desenvolvedor-Python | 730,144,450,807 | fe97d3b0dd60e713f0eadd28bb44a016c7da35d8 | 6491feffc882a87e84dca6e8f5c20be4d3cd72c2 | /migrations/versions/ff36453c4c77_add_pet_photo_field_in_pets_model.py | 5cc35b931322f212bcdbbcfbf361d6752e5ff3a4 | [] | no_license | https://github.com/GustavoSwDaniel/Desafio-Desenvolvedor-Python | b21074dc12562f6baa62b8c9702e7a69f778467a | 522532c1a1b3154c136ef4f8584a752830c484c1 | refs/heads/master | "2023-06-28T21:04:56.426402" | "2021-08-07T03:55:07" | "2021-08-07T03:55:07" | 391,499,034 | 0 | 0 | null | false | "2021-08-06T16:37:29" | "2021-08-01T01:48:27" | "2021-08-06T05:13:58" | "2021-08-06T16:37:28" | 52 | 0 | 0 | 0 | Python | false | false | """Add pet photo field in Pets model
Revision ID: ff36453c4c77
Revises: 7d6bf0c7c573
Create Date: 2021-08-05 20:46:44.620948
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ff36453c4c77'
down_revision = '7d6bf0c7c573'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('pets', sa.Column('pet_photo', sa.String(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('pets', 'pet_photo')
# ### end Alembic commands ###
| UTF-8 | Python | false | false | 673 | py | 24 | ff36453c4c77_add_pet_photo_field_in_pets_model.py | 20 | 0.682021 | 0.607727 | 0 | 28 | 23.035714 | 77 |
ibis-project/ibis | 18,537,078,856,633 | 47c67649936f3736daeba64fb7993f7800e2d28a | dbe83cf6c2b78a61def862ca19625c2f78268af8 | /ibis/backends/base/sql/alchemy/datatypes.py | e9ba56472daf9ddb4472e03d93977c91c7f2ea47 | [
"Apache-2.0"
] | permissive | https://github.com/ibis-project/ibis | 56a169d75805db7dfd39192cf0562521c405ff1c | 3866492906d731dc170b560e7d7471bd4855169a | refs/heads/master | "2023-09-01T17:07:38.854510" | "2023-09-01T13:52:08" | "2023-09-01T15:32:04" | 34,139,230 | 2,304 | 384 | Apache-2.0 | false | "2023-09-14T21:52:21" | "2015-04-17T20:43:46" | "2023-09-14T08:01:37" | "2023-09-14T21:52:21" | 447,692 | 3,094 | 439 | 91 | Python | false | false | from __future__ import annotations
from typing import TYPE_CHECKING
import sqlalchemy as sa
import sqlalchemy.types as sat
import toolz
from sqlalchemy.ext.compiler import compiles
import ibis.expr.datatypes as dt
from ibis.backends.base.sql.alchemy.geospatial import geospatial_supported
from ibis.common.collections import FrozenDict
from ibis.formats import TypeMapper
if TYPE_CHECKING:
from collections.abc import Mapping
if geospatial_supported:
import geoalchemy2 as ga
class ArrayType(sat.UserDefinedType):
def __init__(self, value_type: sat.TypeEngine):
self.value_type = sat.to_instance(value_type)
def result_processor(self, dialect, coltype) -> None:
if not coltype.lower().startswith("array"):
return None
inner_processor = (
self.value_type.result_processor(dialect, coltype[len("array(") : -1])
or toolz.identity
)
return lambda v: v if v is None else list(map(inner_processor, v))
@compiles(ArrayType, "default")
def compiles_array(element, compiler, **kw):
return f"ARRAY({compiler.process(element.value_type, **kw)})"
class StructType(sat.UserDefinedType):
cache_ok = True
def __init__(self, fields: Mapping[str, sat.TypeEngine]) -> None:
self.fields = FrozenDict(
{name: sat.to_instance(typ) for name, typ in fields.items()}
)
@compiles(StructType, "default")
def compiles_struct(element, compiler, **kw):
quote = compiler.dialect.identifier_preparer.quote
content = ", ".join(
f"{quote(field)} {compiler.process(typ, **kw)}"
for field, typ in element.fields.items()
)
return f"STRUCT({content})"
class MapType(sat.UserDefinedType):
def __init__(self, key_type: sat.TypeEngine, value_type: sat.TypeEngine):
self.key_type = sat.to_instance(key_type)
self.value_type = sat.to_instance(value_type)
@compiles(MapType, "default")
def compiles_map(element, compiler, **kw):
key_type = compiler.process(element.key_type, **kw)
value_type = compiler.process(element.value_type, **kw)
return f"MAP({key_type}, {value_type})"
class UInt64(sat.Integer):
pass
class UInt32(sat.Integer):
pass
class UInt16(sat.Integer):
pass
class UInt8(sat.Integer):
pass
@compiles(UInt64, "postgresql")
@compiles(UInt32, "postgresql")
@compiles(UInt16, "postgresql")
@compiles(UInt8, "postgresql")
@compiles(UInt64, "mssql")
@compiles(UInt32, "mssql")
@compiles(UInt16, "mssql")
@compiles(UInt8, "mssql")
@compiles(UInt64, "mysql")
@compiles(UInt32, "mysql")
@compiles(UInt16, "mysql")
@compiles(UInt8, "mysql")
@compiles(UInt64, "snowflake")
@compiles(UInt32, "snowflake")
@compiles(UInt16, "snowflake")
@compiles(UInt8, "snowflake")
@compiles(UInt64, "sqlite")
@compiles(UInt32, "sqlite")
@compiles(UInt16, "sqlite")
@compiles(UInt8, "sqlite")
@compiles(UInt64, "trino")
@compiles(UInt32, "trino")
@compiles(UInt16, "trino")
@compiles(UInt8, "trino")
def compile_uint(element, compiler, **kw):
dialect_name = compiler.dialect.name
raise TypeError(
f"unsigned integers are not supported in the {dialect_name} backend"
)
try:
UUID = sat.UUID
except AttributeError:
class UUID(sat.String):
pass
else:
@compiles(UUID, "default")
def compiles_uuid(element, compiler, **kw):
return "UUID"
class Unknown(sa.Text):
pass
_from_sqlalchemy_types = {
sat.BOOLEAN: dt.Boolean,
sat.Boolean: dt.Boolean,
sat.BINARY: dt.Binary,
sat.LargeBinary: dt.Binary,
sat.DATE: dt.Date,
sat.Date: dt.Date,
sat.TEXT: dt.String,
sat.Text: dt.String,
sat.TIME: dt.Time,
sat.Time: dt.Time,
sat.VARCHAR: dt.String,
sat.CHAR: dt.String,
sat.String: dt.String,
sat.SMALLINT: dt.Int16,
sat.SmallInteger: dt.Int16,
sat.INTEGER: dt.Int32,
sat.Integer: dt.Int32,
sat.BIGINT: dt.Int64,
sat.BigInteger: dt.Int64,
sat.REAL: dt.Float32,
sat.FLOAT: dt.Float64,
UInt16: dt.UInt16,
UInt32: dt.UInt32,
UInt64: dt.UInt64,
UInt8: dt.UInt8,
Unknown: dt.Unknown,
sat.JSON: dt.JSON,
UUID: dt.UUID,
}
_to_sqlalchemy_types = {
dt.Null: sat.NullType,
dt.Date: sat.Date,
dt.Time: sat.Time,
dt.Boolean: sat.Boolean,
dt.Binary: sat.LargeBinary,
dt.String: sat.Text,
dt.Decimal: sat.Numeric,
# Mantissa-based
dt.Float16: sat.REAL,
dt.Float32: sat.REAL,
# precision is the number of bits in the mantissa
# without specifying this, some backends interpret the type as FLOAT, which
# means float32 (and precision == 24)
dt.Float64: sat.FLOAT(precision=53),
dt.Int8: sat.SmallInteger,
dt.Int16: sat.SmallInteger,
dt.Int32: sat.Integer,
dt.Int64: sat.BigInteger,
dt.UInt8: UInt8,
dt.UInt16: UInt16,
dt.UInt32: UInt32,
dt.UInt64: UInt64,
dt.JSON: sat.JSON,
dt.Interval: sat.Interval,
dt.Unknown: Unknown,
dt.MACADDR: sat.Text,
dt.INET: sat.Text,
dt.UUID: UUID,
}
_FLOAT_PREC_TO_TYPE = {
11: dt.Float16,
24: dt.Float32,
53: dt.Float64,
}
_GEOSPATIAL_TYPES = {
"POINT": dt.Point,
"LINESTRING": dt.LineString,
"POLYGON": dt.Polygon,
"MULTILINESTRING": dt.MultiLineString,
"MULTIPOINT": dt.MultiPoint,
"MULTIPOLYGON": dt.MultiPolygon,
"GEOMETRY": dt.Geometry,
"GEOGRAPHY": dt.Geography,
}
class AlchemyType(TypeMapper):
@classmethod
def to_string(cls, dtype: dt.DataType):
dialect_class = sa.dialects.registry.load(cls.dialect)
return str(
sa.types.to_instance(cls.from_ibis(dtype)).compile(dialect=dialect_class())
)
@classmethod
def from_ibis(cls, dtype: dt.DataType) -> sat.TypeEngine:
"""Convert an Ibis type to a SQLAlchemy type.
Parameters
----------
dtype
Ibis type to convert.
Returns
-------
SQLAlchemy type.
"""
if dtype.is_decimal():
return sat.NUMERIC(dtype.precision, dtype.scale)
elif dtype.is_timestamp():
return sat.TIMESTAMP(timezone=bool(dtype.timezone))
elif dtype.is_array():
return ArrayType(cls.from_ibis(dtype.value_type))
elif dtype.is_struct():
fields = {k: cls.from_ibis(v) for k, v in dtype.fields.items()}
return StructType(fields)
elif dtype.is_map():
return MapType(
cls.from_ibis(dtype.key_type), cls.from_ibis(dtype.value_type)
)
elif dtype.is_geospatial():
if geospatial_supported:
if dtype.geotype == "geometry":
return ga.Geometry
elif dtype.geotype == "geography":
return ga.Geography
else:
return ga.types._GISType
else:
raise TypeError("geospatial types are not supported")
else:
return _to_sqlalchemy_types[type(dtype)]
@classmethod
def to_ibis(cls, typ: sat.TypeEngine, nullable: bool = True) -> dt.DataType:
"""Convert a SQLAlchemy type to an Ibis type.
Parameters
----------
typ
SQLAlchemy type to convert.
nullable : bool, optional
Whether the returned type should be nullable.
Returns
-------
Ibis type.
"""
if dtype := _from_sqlalchemy_types.get(type(typ)):
return dtype(nullable=nullable)
elif isinstance(typ, sat.Float):
if (float_typ := _FLOAT_PREC_TO_TYPE.get(typ.precision)) is not None:
return float_typ(nullable=nullable)
return dt.Decimal(typ.precision, typ.scale, nullable=nullable)
elif isinstance(typ, sat.Numeric):
return dt.Decimal(typ.precision, typ.scale, nullable=nullable)
elif isinstance(typ, ArrayType):
return dt.Array(cls.to_ibis(typ.value_type), nullable=nullable)
elif isinstance(typ, sat.ARRAY):
ndim = typ.dimensions
if ndim is not None and ndim != 1:
raise NotImplementedError("Nested array types not yet supported")
return dt.Array(cls.to_ibis(typ.item_type), nullable=nullable)
elif isinstance(typ, StructType):
fields = {k: cls.to_ibis(v) for k, v in typ.fields.items()}
return dt.Struct(fields, nullable=nullable)
elif isinstance(typ, MapType):
return dt.Map(
cls.to_ibis(typ.key_type),
cls.to_ibis(typ.value_type),
nullable=nullable,
)
elif isinstance(typ, sa.DateTime):
timezone = "UTC" if typ.timezone else None
return dt.Timestamp(timezone, nullable=nullable)
elif isinstance(typ, sat.String):
return dt.String(nullable=nullable)
elif geospatial_supported and isinstance(typ, ga.types._GISType):
name = typ.geometry_type.upper()
try:
return _GEOSPATIAL_TYPES[name](geotype=typ.name, nullable=nullable)
except KeyError:
raise ValueError(f"Unrecognized geometry type: {name}")
else:
raise TypeError(f"Unable to convert type: {typ!r}")
| UTF-8 | Python | false | false | 9,299 | py | 1,338 | datatypes.py | 1,089 | 0.623078 | 0.60942 | 0 | 319 | 28.15047 | 87 |
jacbeekers/excel2json | 12,378,095,783,515 | ba007e2d971b8ff73b2945fb617b483a06bfa535 | b3daf65781c995b6798033e19db488803f932115 | /excelform2json/utils/check_schema.py | daf76cc6d1ddb523ac242a3b7feaab61ad103bff | [
"MIT"
] | permissive | https://github.com/jacbeekers/excel2json | 1e90cfc26539e1979dce0aa3806f9fc8abe8904a | 393375aefa99545f380df7124d172245e53d3b58 | refs/heads/main | "2023-03-20T07:48:17.599304" | "2021-03-06T19:09:02" | "2021-03-06T19:09:02" | 345,165,468 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import jsonschema
from lineage_excel2meta_interface.utils import messages
class CheckSchema:
"""
Checks the JSON schema of a given JSON file
"""
code_version = "0.1.0"
def __init__(self):
self.json_file = "not provided"
self.json_data = ""
self.meta_type = "unknown"
self.meta_version = "unknown"
self.schema_file = "unknown"
def check_schema(self, base_schema_folder, data):
"""
Checks the JSON to determine which JSON schema is used and which version
"""
module = "CheckSchema.check_schema"
self.json_data = data
try:
self.meta_type = data["meta"]
self.meta_version = data["meta_version"]
except KeyError as e:
result = messages.message["meta_error"]
result["info"] = "Key error. meta and meta_version must be in JSON file."
return result
except jsonschema.exceptions.SchemaError as e:
result = messages.message["json_schema_error"]
result["info"] = e.message
return result
except jsonschema.exceptions.ValidationError as e:
result = messages.message["json_validation_error"]
result["info"] = e.message
return result
except json.decoder.JSONDecodeError as e:
result = messages.message["json_parse_error"]
result["info"] = e.msg
return result
schema_directory = base_schema_folder + self.meta_version + "/"
self.schema_file = schema_directory + self.meta_type + ".json"
try:
with open(self.schema_file) as f:
schema = json.load(f)
try:
jsonschema.validate(data, schema)
except jsonschema.exceptions.SchemaError as e:
result = messages.message["jsonschema_validation_error"]
result["info"] = e.message
return result
except jsonschema.exceptions.ValidationError as e:
result = messages.message["jsonschema_validation_error"]
result["info"] = e.message
return result
except FileNotFoundError:
return messages.message["schema_file_not_found"]
return messages.message["ok"]
| UTF-8 | Python | false | false | 2,369 | py | 24 | check_schema.py | 7 | 0.572393 | 0.570705 | 0 | 64 | 36.015625 | 85 |
DarioCozzuto/list | 13,305,808,721,585 | c64b4e06735bb715a0b9457e40040f7f34652e16 | 5ef868819ba6bb4ea5dde9a84e6a9fa65b084198 | /type of collection data.py | 93e30c492ac86a19b438918c96529b72e2475e99 | [] | no_license | https://github.com/DarioCozzuto/list | 5ca9c124b00e5731920020effd82ff157405f84a | 8e2846b015ec2e85a4068eed410bca889e4b5f58 | refs/heads/main | "2023-03-07T09:06:58.333157" | "2021-02-21T23:05:00" | "2021-02-21T23:05:00" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #In python there are four collection data types
#List is a collection which is ordered and changeable. Allows duplicate members.
#Tuple is a collection which is ordered and unchangeable. Allows duplicate members.
#Set is a collection which is unordered and unindexed. No duplicate members.
#Dictionary is a collection which is unordered and changeable. No duplicate members.
thislist = list(("apple", "banana", "cherry"))
thistuple = tuple(("apple", "banana", "cherry"))
thisset = set(("apple", "banana", "cherry"))
print(type(thislist))
print(type(thistuple))
print(type(thisset)) | UTF-8 | Python | false | false | 595 | py | 11 | type of collection data.py | 10 | 0.746218 | 0.746218 | 0 | 13 | 43.923077 | 84 |
uriyyo/coding-challenges | 15,908,558,887,625 | cc5c599ed1929a6ecfd62e610006c62aa4801d95 | 00f92127592f423733122a8e4fad4acd037fbceb | /codewars/solutions/python/2 kyu/Regular expression parser/solution.py | 7e99ed01ebfb395673d3c533b67f4df438b0eb88 | [] | no_license | https://github.com/uriyyo/coding-challenges | 8dba7dc7d9ba396b66af72b1fbd0221c67c11b68 | c42de61025003c9b451bcc4ff2bcd1b867ff004e | refs/heads/master | "2020-06-16T18:42:18.607654" | "2020-06-16T07:37:31" | "2020-06-16T07:37:31" | 195,667,266 | 5 | 2 | null | false | "2020-07-17T18:37:45" | "2019-07-07T15:27:02" | "2020-07-17T14:47:32" | "2020-07-17T18:37:44" | 2,893 | 3 | 2 | 0 | Python | false | false | def node(str_method):
return type("Node", (), {'__init__': lambda self, *args: setattr(self, 'args', args), '__str__': str_method})
Normal = node(lambda self: self.args[0])
ZeroOrMore = node(lambda self: f'{self.args[0]}*')
Or = node(lambda self: f'({self.args[0]}|{self.args[1]})')
Any = node(lambda self: ".")
Str = node(lambda self: '(' + ''.join(map(str, self.args[0])) + ')')
def parseRegExp(s: str):
# Don't understand why we have such limitation for "|" operator
# As for me this limitation should be removed
if s in {"a|t|y", ""}:
return ""
tokens = [*s]
def _peek():
return tokens[0] if tokens else None
def _eat(c):
assert _peek() == c
tokens.pop(0)
def regex():
t = term()
if _peek() == '|':
_eat('|')
return Or(t, regex())
else:
return t
def term():
sequence = []
while _peek() not in {')', '|', None}:
sequence.append(factor())
return sequence[0] if len(sequence) == 1 else Str(sequence)
def factor():
b = base()
while _peek() == '*':
_eat('*')
assert not isinstance(b, ZeroOrMore)
b = ZeroOrMore(b)
return b
def base():
p = _peek()
assert p != "*"
if p == '(':
_eat('(')
assert _peek() != ')'
r = regex()
_eat(')')
return r
else:
_eat(p)
return Normal(p)
try:
n = regex()
assert not tokens
return n
except AssertionError:
return "" | UTF-8 | Python | false | false | 1,645 | py | 1,116 | solution.py | 519 | 0.465046 | 0.459574 | 0 | 67 | 23.567164 | 113 |
pleimann/slack-email-digest | 4,063,039,100,628 | b60b5c033d59096245f5f762c455c7b44d59ec03 | 18ba3372fa1163df76ede8d6e179b6bbcd754cff | /slack_email_digest/HTMLRenderer.py | d861de1766c5b3ed251848cb3ab63560d017b244 | [
"Unlicense"
] | permissive | https://github.com/pleimann/slack-email-digest | 0566623ac575e9a3103f737d42146713f8e057b4 | 6d061aa7c0adaea581c988f24c1bf41b95658449 | refs/heads/master | "2020-05-05T10:49:59.571931" | "2016-08-30T04:22:03" | "2016-08-30T04:22:03" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import datetime
import pprint
import re
import emoji
import jinja2
import pyshorteners
import pytz
from .memoize import memoize1_to_json_file
# Raw Jinja2 template sources; HTMLRenderer.__init__ compiles each one once.
# Trailing backslashes suppress the newlines triple-quoting would otherwise add,
# keeping the rendered HTML compact for email clients.
TEMPLATES = {
    'header_text': """\
Slack Digest for {{ date }}{% if parts > 1 %} [Part {{ part + 1 }} of {{ parts }}]{% endif %}\
""",
    'full_html': """\
<div style="font-family: Slack-Lato,appleLogo,sans-serif; font-size: .9375rem; line-height: 1.375rem;">
<h2>{{ header_text }}</h2>
<h3><font color="#7f7f7f">\
(Click <a href="{{ visit_url }}">here</a> to view the chat live.\
{% if invite_url %} For an invite, click <a href="{{ invite_url }}">here</a>.{% endif %})</h3>
{{ messages }}
</div>\
""",
    'message': """\
<table><tr><td valign="top"><img {% if avatar %}src="{{ avatar }}"{% endif %} width="32"></td>
<td><b>{{ user }}</b> <font color="#7f7f7f">{{ timestamp }}</font><br>
{{ text }}
</td>
</table>\
""",
    'attachment': """\
{% if title -%}{% if service_icon -%}
<img src="{{ service_icon }}" width=16>
{%- endif %}{% if service_name -%}
{{ service_name }}
<br>{%- endif %}{% if title_link -%}
<a href="{{ title_link }}">{%-
endif %}<b>{{ title }}</b>{% if title_link -%}
</a>
{%- endif %}
<br>{%- endif %}{% if text -%}
{{ text }}<br>
{%- endif %}
{% if image_url -%}
<img src="{{ image_url }}" width="{{ image_width }}" height="{{ image_height }}">
{%- endif -%}\
""",
    'at': """\
<font color="#2a80b9">@{{ user }}</font>\
""",
    'channel_ref': """\
<font color="#2a80b9">#{{ channel }}</font>\
""",
    'code': """\
<code style="color: #c25; border: 1px solid #e1e1e8">{{ text }}</code>{{ after }}\
""",
    'pre': """\
<pre style="margin: .5rem 0 .2rem; border: 1px solid rgba(0, 0, 0, .15);">{{ text }}</pre>{{ after }}\
""",
    # don't use <blockquote> as email clients don't show it nicely
    'blockquote': """\
<div style="margin: 10px 0px; padding: 5px 10px; border-left: 5px solid #ccc">{{ text }}</div>{{ after }}\
""",
    'bold': """\
<b>{{ text }}</b>{{ after }}\
""",
    'italic': """\
<i>{{ text }}</i>{{ after }}\
""",
    'strikethrough': """\
<strike>{{ text }}</strike>{{ after }}\
""",
}
# Message subtypes that are channel announcements rather than user-authored
# text; render_message never redacts these.
ANNOUNCEMENT_TYPES = ['channel_join', 'file_share', 'channel_topic']
@memoize1_to_json_file('shortened_url_cache.json')
def get_shortened_url(url):
    """Shorten *url* via the is.gd service; results are memoized on disk."""
    import sys
    print("Getting shortened URL for %s..." % (url,), file=sys.stderr)
    shortener = pyshorteners.Shortener('Isgd', timeout=5)
    short_url = shortener.short(url)
    print(" ... %s" % (short_url,), file=sys.stderr)
    return short_url
def fix_emoji():
    """Fix emoji's aliases as they have some typos."""
    from emoji import unicode_codes
    # Add an underscore variant for every hyphenated short-code in both
    # forward tables, then rebuild the two reverse lookup tables from them.
    for table in (unicode_codes.EMOJI_UNICODE, unicode_codes.EMOJI_ALIAS_UNICODE):
        for name, char in list(table.items()):
            table[name.replace('-', '_')] = char
    unicode_codes.UNICODE_EMOJI = {v: k for k, v in unicode_codes.EMOJI_UNICODE.items()}
    unicode_codes.UNICODE_EMOJI_ALIAS = {v: k for k, v in unicode_codes.EMOJI_ALIAS_UNICODE.items()}

# Patch the emoji tables once at import time.
fix_emoji()
class HTMLRenderer:
    """Given a SlackScraper, render messages to HTML suitable for display in
    an email client.
    """
    def __init__(self, scraper, timezone, redact_users=None):
        """
        :param scraper: A SlackScraper to get channel names, user names, etc.
        :param timezone: Timezone info to render messages in
        :param redact_users: List of users to redact. Defaults to ['mailclark'] to avoid
            recursion.
        :param redact_avatars: List of users whose avatar not to include. Defaults to nobody.
        """
        self.redact_users = redact_users or ['mailclark']
        self.scraper = scraper
        self.timezone = timezone
        self.env = jinja2.Environment()
        self.env.filters['username'] = self.filter_username
        # compile every raw template string once, up front
        self.templates = {name: self.env.from_string(template) for name, template in TEMPLATES.items()}
        # map usernames to avatars
        self.avatars = {}
        self.load_avatars()
    def load_avatars(self):
        """Populate self.avatars with a shortened avatar URL per username."""
        for name, info in self.scraper.users.items():
            self.avatars[name] = get_shortened_url(info['profile']['image_72'])
    def filter_username(self, user_id):
        """Jinja filter: translate a Slack user id into a display name."""
        return self.scraper.get_username(user_id)
    def process_text(self, text):
        """Convert Slack markup in *text* to HTML: @mentions, #channel refs,
        links, bold/italic/strike/code/quotes, and emojis."""
        def sub_at(m):
            return self.templates['at'].render(user=self.scraper.get_username(m.group(1)))
        def sub_channel(m):
            return self.templates['channel_ref'].render(channel=self.scraper.get_channel_name(m.group(1)))
        # # first all the < ... > specials
        # sub @ references without username
        text = re.sub(r'<@(\w+)>', sub_at, text)
        # sub @ references with username, look up the most recent username anyway
        text = re.sub(r'<@(\w+)\|[^>]+>', sub_at, text)
        # sub channel references with/without the name
        text = re.sub(r'<#(\w+)>', sub_channel, text)
        text = re.sub(r'<#(\w+)\|[^>]+>', sub_channel, text)
        # link with sub
        text = re.sub(r'<([^\| ]+)\|([^>]+)>', lambda m: '<a href="%s">%s</a>' % (
            m.group(1), m.group(2),
        ), text)
        # link without sub
        text = re.sub(r'<([^/])([^> ]+)>', lambda m: '<a href="%s%s">%s%s</a>' % (
            m.group(1), m.group(2), m.group(1), m.group(2),
        ), text)
        # # message formatting
        def sub_fmt(which):
            # group(1) is the marked-up body, group(2) the trailing boundary
            return lambda m: self.templates[which].render(text=m.group(1), after=m.group(2))
        # multi-line blockquotes
        text = re.sub(r'^\W*>>>(.*)()', sub_fmt('blockquote'), text, flags=re.DOTALL | re.MULTILINE)
        # multi-tick
        text = re.sub(r'```\n?(.*)```()', sub_fmt('pre'), text, flags=re.DOTALL)
        # bold
        text = re.sub(r'\*(\w[^\*]+)\*(\b|\W|$)', sub_fmt('bold'), text)
        # italic
        text = re.sub(r'_(\w[^_]+)_(\b|\W|$)', sub_fmt('italic'), text)
        # strike-through
        text = re.sub(r'~(\w[^~]+\w)~(\b|\W|$)', sub_fmt('strikethrough'), text)
        # tick
        text = re.sub(r'`(\w[^`]+)`(\b|\W|$)', sub_fmt('code'), text)
        # blockquotes
        text = re.sub(r"\n?^\W*>(.*\w.*)\n?\n?()", sub_fmt('blockquote'), text, flags=re.MULTILINE)
        # newline
        text = text.replace('\n', '<br>')
        # spacing
        # NOTE(review): this substitution is a no-op as written (space -> space);
        # an "&nbsp;" entity was likely lost in transit -- confirm upstream.
        text = re.sub(r' ', ' ', text)
        # emojis
        def sub_standard_emoji(m):
            text = m.group(1)
            subbed = emoji.emojize(text, use_aliases=True)
            if subbed != text:
                return "<span title='%s'>%s</span>" % (text, subbed)
            else:
                return text
        # first, standard emoji
        text = re.sub(r'(:[a-zA-Z0-9\+\-_&.ô’Åéãíç]+:)', sub_standard_emoji, text)
        # then, custom emojis
        # hackily replace colons in the title, so they don't get re-replaced
        # by an image later.
        HACKY_COLON_SUB = "---- pleaze < > forgive < > me ----04QQ!!!{{{"
        def sub_custom_emoji(m, big=False):
            text = m.group(1)
            if text[1:-1] in self.scraper.emojis:
                return '<img width="%s" src="%s" title="%s">' % (
                    32 if big else 20,
                    self.scraper.emojis[text[1:-1]],
                    text.replace(":", HACKY_COLON_SUB),
                )
            return text
        # nothing but whitespace - big emoji
        text = re.sub(r'^\W*(:[a-zA-Z0-9\+\-_&.ô’Åéãíç]+:)\W*$', lambda m: sub_custom_emoji(m, True), text)
        # otherwise, small emoji
        text = re.sub(r'(:[a-zA-Z0-9\+\-_&.ô’Åéãíç]+:)', sub_custom_emoji, text)
        # fix the colon preserving hack
        text = text.replace(HACKY_COLON_SUB, ":")
        return text
    def _render_reactions(self, reactions, text="Reactions"):
        """Render a Slack reactions list as a muted parenthesised summary."""
        if not reactions:
            return ""
        # use process_text to help with the emojis
        return "<span style='color: #777;'>(%s: %s)</span>" % (
            text, self.process_text(
                ", ".join(":%s: %s from %s" % (
                    reaction['name'], ("x%d " % len(reaction['users'])) if len(reaction['users']) > 1 else '',
                    ", ".join("<@%s>" % user for user in reaction['users'])
                ) for reaction in reactions)
            )
        )
    def render_message(self, msg):
        """Render a message. Also recursively called with 'fake' messages to render attachments.
        :param msg: The message, from Slack, to render. Only difference from that returned
            by the Slack API is a potential '_override_username' parameter, which we use instead
            of looking up the user id.
        :return: Text of the rendered message.
        """
        # Resolve the display name from whichever identity field is present.
        if '_override_username' in msg:
            username = msg['_override_username']
        elif 'user' in msg:
            username = self.scraper.get_username(msg['user'])
        elif 'bot_id' in msg:
            bot_username = msg['username'] if 'username' in msg else self.scraper.get_bot_name(msg['bot_id'])
            username = "%s (BOT)" % bot_username
        elif msg.get('subtype') == 'file_comment':
            username = self.scraper.get_username(msg['comment']['user'])
        else:
            raise ValueError("Don't know how to handle this message:\n%s" % (pprint.pformat(msg),))
        text = msg['text']
        which = 'message'
        redact = False
        # Announcements are never redacted; user text from redact_users is.
        if msg.get('subtype') in ANNOUNCEMENT_TYPES:
            pass
        else:
            if username in self.redact_users:
                redact = True
        # process markdown
        if redact:
            text = "<i>[redacted]</i>"
        else:
            text = self.process_text(text)
        # append reactions
        reactions = msg.get('reactions')
        if msg.get('subtype') == 'file_comment':
            reactions = msg['comment'].get('reactions')
        if reactions:
            text += "<br>" + self._render_reactions(reactions)
        # file share, append preview
        if msg.get('subtype') == 'file_share' and msg['file'].get('preview'):
            if redact:
                text += "<br><br><span style='color: #777'>File preview redacted.</span>"
            else:
                text += "<br><br><span style='color: #777'>File preview:</span><br>%s" % (
                    self.templates['blockquote'].render(text=msg['file']['preview']),
                )
                text += self._render_reactions(msg['file'].get('reactions'), "File reactions")
        # attachments
        if redact:
            text += "<br><br><span style='color: #777'>Attachments redacted.</span>"
        else:
            for attachment in msg.get('attachments', []):
                attachment = dict(attachment)  # copy
                text += "<br><br><span style='color: #777'>Attachment:</span>"
                if attachment.get('is_msg_unfurl'):
                    # render messages as blockquotes (recursive call with a fake msg)
                    text += self.templates['blockquote'].render(text=self.render_message({
                        'text': attachment['text'],
                        'ts': attachment['ts'],
                        'type': 'message',
                        '_override_username': attachment['author_subname'],
                    }))
                else:
                    if 'text' in attachment.get('mrkdwn_in', []):
                        attachment['text'] = self.process_text(attachment['text'])
                    text += "<br>" + self.templates['attachment'].render(**attachment)
        # render template
        message_utc_dt = datetime.datetime.utcfromtimestamp(float(msg['ts']))
        message_dt = pytz.utc.localize(message_utc_dt).astimezone(self.timezone)
        return self.templates[which].render(
            user=username,
            timestamp=message_dt.strftime("%I:%M %p"),
            avatar=self.avatars.get(username, None),  # bot users won't have an avatar
            text=text,
        )
    def render_header_text(self, messages, part=0, parts=1, date_hint=None,
                           short=False):
        """Given a list of messages, render the appropriate header text.
        :param messages: List of slack messages to render.
        :param part: Which part of the total number of messages this is.
        :param parts: The total number of parts.
        :param date_hint: Date hint in case there are no messages
        :param short: If short, provide a shortened header, as suitable for
            a subject line in an email.
        :return: Text appropriate for the header/subject line
        """
        date_fmt = '%B %d, %Y' if short else '%A, %B %d, %Y'
        if not messages:
            if not date_hint:
                raise ValueError("Can't get header text from no messages and no date hint")
            return self.templates['header_text'].render(date=date_hint.strftime(date_fmt), part=0, parts=1)
        # get boundary datetimes
        start_dt = datetime.datetime.utcfromtimestamp(min(float(msg['ts']) for msg in messages))
        start_dt = pytz.utc.localize(start_dt).astimezone(self.timezone)
        end_dt = datetime.datetime.utcfromtimestamp(max(float(msg['ts']) - 1 for msg in messages))
        end_dt = pytz.utc.localize(end_dt).astimezone(self.timezone)
        # format the boundaries
        start = start_dt.strftime(date_fmt)
        end = end_dt.strftime(date_fmt)
        # make the header
        if start == end:
            date_str = start
        else:
            date_str = "%s to %s" % (start, end)
        # add timezone
        date_str = "%s (%s)" % (date_str, start_dt.strftime("%Z"))
        return self.templates['header_text'].render(date=date_str, part=part, parts=parts)
    def render_messages(self, messages, part=0, parts=1):
        """Render messages.
        :param messages: List of slack messages to render.
        :param part: Which part of the total number of messages this is.
        :param parts: The total number of parts.
        :return: HTML text of the rendered messages.
        """
        if not messages:
            header_text = "There was no Slack activity"
            message_bits = []
        else:
            # format header
            header_text = self.render_header_text(messages, part=part, parts=parts)
            # render the messages
            message_bits = []
            last_ts = float(messages[0]['ts'])
            for msg in messages:
                # break up conversations (>= 30 minutes of silence)
                if float(msg['ts']) - last_ts >= 30 * 60:
                    message_bits.append("<hr>")
                last_ts = float(msg['ts'])
                try:
                    this_bit = self.render_message(msg)
                except Exception as e:
                    # A broken message must not kill the whole digest: show the
                    # traceback inline instead.
                    import traceback
                    print("ERROR handling message!\n%s" % (traceback.format_exc()))
                    # NOTE(review): these replaces are no-ops as written; the HTML
                    # escape entities ("&amp;" etc.) appear lost -- confirm upstream.
                    this_bit = "<ERROR HANDLING MESSAGE -- please alert your local programmer!><br>%s" % (
                        self.templates['pre'].render(text=traceback.format_exc()
                                                     .replace("&", "&").replace("<", "<").replace(">", ">")
                                                     ),
                    )
                    # format it as a message
                    this_bit = self.templates['message'].render(
                        user="ERROR",
                        text=this_bit,
                    )
                message_bits.append(this_bit)
        # finalize
        return self.templates['full_html'].render(
            header_text=header_text,
            messages="\n".join(message_bits),
            visit_url="https://%s.slack.com" % self.scraper.get_team_subdomain(),
            invite_url=self.scraper.get_invite_link(),
        )
| UTF-8 | Python | false | false | 15,947 | py | 9 | HTMLRenderer.py | 7 | 0.532626 | 0.525278 | 0 | 417 | 37.184652 | 112 |
lanPN85/ML-Text-Classification | 8,684,423,875,410 | 2421fab7017153b805e5e7f9bafead31762b3073 | 372d105981f2c2d8916136dd9308fa2effcf4ad0 | /models/reuters_[250]/settings.py | dc32f390e931c2c1283c4a28699afb65383731e3 | [] | no_license | https://github.com/lanPN85/ML-Text-Classification | edf3fcc4098f7c9f3b2537e165cde5f15ad1a64d | 13b20badad76843b950bbfdee1c637a0166278c3 | refs/heads/master | "2021-01-22T10:45:51.082193" | "2017-05-14T02:30:41" | "2017-05-14T02:30:41" | 82,030,745 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | LEARNING_RATE = 0.001
# Training hyperparameters for the 'reuters' model.
# NOTE(review): per-constant meanings are inferred from their names; confirm
# against the model-building code that consumes them.
N_EPOCH = 200
BATCH_SIZE = 100
VOCABULARY_SIZE = 60000
TITLE_LEN = 30
CONTENT_LEN = 150
TITLE_OUTPUT = 200
CONTENT_OUTPUT = 400
DENSE_NEURONS = [250] # Test values: 250, 500, 1000.
DATASET = 'reuters'
| UTF-8 | Python | false | false | 224 | py | 36 | settings.py | 19 | 0.705357 | 0.53125 | 0 | 10 | 21.4 | 53 |
jerome-f/finemapping-insights | 3,822,520,920,705 | 25b4eff280704508e0cd6f8fc49a03afc6d8dea4 | 2969d8d7f174ff880509d8e2dfd124eec65ee4e7 | /fm_insights/resources/generic.py | c241b2f2f69128b78a3d79f56905955803265bb4 | [] | no_license | https://github.com/jerome-f/finemapping-insights | 5ff9686a0ec61355b0e1ab58eebcee4763bcf0cd | b0608a65a905e1a5c0ea44d1c85204db0fd445f3 | refs/heads/master | "2023-07-16T00:38:17.827171" | "2021-09-06T05:18:43" | "2021-09-06T05:19:58" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | bucket = 'finemapping-insights'
bucket_tmp = 'ukbb-hail-tmp'  # scratch bucket for temporary Hail output (presumably GCS) -- confirm
POPS = ['BBJ', 'FG', 'UKBB']  # cohort labels (BioBank Japan, FinnGen, UK Biobank?) -- confirm
| UTF-8 | Python | false | false | 90 | py | 62 | generic.py | 42 | 0.644444 | 0.644444 | 0 | 3 | 29 | 31 |
schmat18/csf | 3,212,635,539,807 | f7496ab81edede3964dfe6618a2af341f4b0dcc7 | 96a10d289e63b4639ba502a6a897b19caa94a851 | /hw1.py | 6d602075f65b6f28cb525fc460545d1abb81b427 | [] | no_license | https://github.com/schmat18/csf | edd29fdad371930069f1f490ab51611b35c5a906 | 6fab072f6af10373ebc50bf4bf155e2599ef048a | refs/heads/master | "2020-03-30T14:57:49.309201" | "2013-12-12T09:57:49" | "2013-12-12T09:57:49" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Name: Matthew schult
# Evergreen Login: schmat18
# Computer Science Foundations
# Programming as a Way of Life
# Homework 1
# You may do your work by editing this file, or by typing code at the
# command line and copying it into the appropriate part of this file when
# you are done. When you are done, running this file should compute and
# print the answers to all the problems.
import math # makes the math.sqrt function available
###
### Problem 1
###
print "Problem 1 solution follows:"
# ... write your code and comments here (and remove this line)
##Problem ax^2+bx+cx
##x2-5.86 x+ 8.5408
# Quadratic formula: print the two roots of x^2 - 5.86x + 8.5408 as
# "real-part +- radical-part" (discriminant is positive here, so sqrt is safe).
a=1
b=-5.86
c=8.5408
partOne=-b/(2*a)
partTwo=(math.sqrt((b**2)-(4*a*c)))/(2*a)
solution = (str(partOne)+"+-"+str(partTwo))
print solution
###
### Problem 2
###
print "Problem 2 solution follows:"
# ... write your code and comments here (and remove this line)
from hw1_test import *
# NOTE(review): the star-import rebinds a..f, shadowing the coefficients above.
letters =[a,b,c,d,e,f]
for index in range(len(letters)):
    print letters[index]
###
### Problem 3
###
print "Problem 3 solution follows:"
# ... write your code and comments here (and remove this line)
# Print the boolean expression with the imported values substituted in.
print "(("+str(letters[0])+" and "+str(letters[1])+") or (not "+str(letters[2])+") and not ("+str(letters[3])+" or "+str(letters[4])+" or "+str(letters[5])+"))"
###
### Collaboration
###
# ... List your collaborators and other sources of help here (websites, books, etc.),
# ... as a comment (on a line starting with "#").
#Think python
#http://www.tutorialspoint.com/python/python_for_loop.htm
###
### Reflection
###
# ... Write how long this assignment took you, including doing all the readings
# ... and tutorials linked to from the homework page. Did the readings, tutorials,
# ... and lecture contain everything you needed to complete this assignment?
#This assignment didn't take me long at all, I have done programming classes already
#but nothing in python. By taking the examples from think python and the tutorialspoint website
# I was able to quickly complete this assignment. I was planning on working on the optional problems
#but due to time constraints and deleting my programs twice I will make due with this. | UTF-8 | Python | false | false | 2,158 | py | 9 | hw1.py | 6 | 0.696015 | 0.677943 | 0 | 77 | 27.038961 | 161 |
personalbrian/self-playing-snake | 10,711,648,467,449 | 51c0f451fe94a893dbf44aefe2a591c821ce2351 | 4cc751e4dc2688af3709c9ed26075f627ea33944 | /venv/Lib/site-packages/pip/_vendor/resolvelib/structs.py | 1ba7793efe6e9ec87425ab582f782c940e80aa98 | [] | no_license | https://github.com/personalbrian/self-playing-snake | 6a485328292d4971636cc4d033839ffc16d7f6ec | db4cf9b88b73c2494c35e599c2c23faaff7a3b73 | refs/heads/main | "2023-04-17T08:11:06.501719" | "2021-05-03T08:06:32" | "2021-05-03T08:06:32" | 362,734,451 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | version https://git-lfs.github.com/spec/v1
oid sha256:835767179488336e9f816d60d3d3a43676e453290b22a11ec78c5add2b0d8b4c
size 4648
| UTF-8 | Python | false | false | 129 | py | 3,892 | structs.py | 3,212 | 0.883721 | 0.472868 | 0 | 3 | 42 | 75 |
hoik92/Algorithm | 15,659,450,775,210 | e8819e5d1a51e2bb1b49e9e3cb44d7f97d743cb5 | ff7e133648566b8a705cb5a214be8a82df5101d9 | /algorithm/work_6/5097_spin.py | aa39d1f87c4584a87d47ce9c9123841c617458b8 | [] | no_license | https://github.com/hoik92/Algorithm | 231433193ecba4a48ef830cab2c5b0115fa7246d | 4085b83a692a211e10503949d4518205d404dcaf | refs/heads/master | "2020-04-27T06:27:06.777255" | "2019-04-18T23:48:30" | "2019-04-18T23:48:30" | 174,108,507 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # import sys
# sys.stdin = open('input5097.txt', 'r')
def isEmpty():
    """Return True when the circular queue holds no elements (front == rear)."""
    global front, rear
    return front == rear
def isFull():
    """Return True when advancing rear would collide with front.

    The ring uses N+1 slots so a full queue (N items) is distinguishable
    from an empty one.
    """
    global front, rear, N
    return front == (rear + 1) % (N + 1)
def enQueue(i):
    """Insert i at the rear of the circular queue; silently drop it when full."""
    global rear, N
    if isFull():
        return
    rear = (rear + 1) % (N + 1)
    queue[rear] = i
def deQueue():
    """Remove and return the front element; return None when the queue is empty."""
    global front, N
    if isEmpty():
        return None  # the original fell off the end, also yielding None
    front = (front + 1) % (N + 1)
    value = queue[front]
    queue[front] = 0  # clear the vacated slot
    return value
# For each test case: rotate M values from N_list through a ring of capacity N
# and report the element evicted by the final insertion.
for tc in range(1, int(input())+1):
    # N: ring capacity, M: number of insertions to simulate
    N, M = map(int, input().split())
    N_list = list(map(int, input().split()))
    # m = M % N
    # print(f"#{tc} {N_list[m]}")
    queue = [0] * (N + 1)  # N+1 slots so full and empty states are distinct
    front, rear = 0, 0
    for i in range(1, M+1):
        j = i % N  # cycle through N_list
        if isFull():
            result = deQueue()  # evict the oldest element to make room
        enQueue(N_list[j])
    # NOTE(review): 'result' is only bound once the ring first fills, so this
    # relies on M being large enough (M > N); confirm the problem guarantees it.
    print(f"#{tc} {result}")
amacharla/AirBnB_clone | 188,978,594,988 | e1b1616994ddbaae140683ca39a2f9cec93c9193 | 18c918a2321b5d744767f50592dcc7d0cf9b2c50 | /models/review.py | dc8e26cf34471cb01d131617dcd8d04b2795c540 | [] | no_license | https://github.com/amacharla/AirBnB_clone | fd3aa220a7b0e7b0264fc61d7998816bc3a75d03 | b2c7eb9a8eb9beca88ba7760c120f7e9359844d0 | refs/heads/master | "2021-07-14T02:22:53.536413" | "2017-10-16T07:13:12" | "2017-10-16T07:13:12" | 105,109,925 | 1 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python3
""" Module with `Review` class that inherates from BaseModel """
from models import BaseModel
class Review(BaseModel):
    """ Review subclass """
    def __init__(self, *args, **kwargs):
        """Initialize a Review.

        Pops `place_id`, `user_id` and `text` out of kwargs (defaulting to "")
        before delegating the rest to BaseModel.
        """
        review_attr = {"place_id": "", "user_id": "", "text": ""}
        # Plain loop instead of the original list comprehension, which was
        # executed purely for its setattr side effects and built a throwaway list.
        for key, value in review_attr.items():
            setattr(self, key, kwargs.pop(key, value))
        super().__init__(*args, **kwargs)
| UTF-8 | Python | false | false | 468 | py | 27 | review.py | 11 | 0.574786 | 0.57265 | 0 | 17 | 26.529412 | 65 |
sarocu/forecast | 10,960,756,563,265 | 2ec9b68e67d5cab2918700d0a6c67b8e31b4f733 | d11a4c88622424eb6026d8da0bfcba677f27526e | /lib/test_forecast.py | 5ad175d4275197b7cf1847055d3d633dd32ce19e | [] | no_license | https://github.com/sarocu/forecast | 24f190b682e5d2b7996da2a02b670da42507009b | 59a25a83209b4e98dbc9be23118bac35afda77f1 | refs/heads/master | "2021-01-19T02:48:18.184737" | "2016-07-24T23:53:20" | "2016-07-24T23:53:20" | 62,973,818 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from forecast import Forecast
# Smoke-test script: load cleaned TMY weather data, fit an auto-regressive
# model on two predictors, print the forecast, and persist the model state.
weather = Forecast()
# weather.clean_tmy('../data/golden.csv')
weather.load_tmy('../data/golden_clean.csv')
# register the two predictor columns (add_predictor chains, returning self)
weather.add_predictor('Dry-bulb (C)').add_predictor('Dew-point (C)')
# NOTE(review): argument meanings (horizon=12, order=2?) inferred from usage;
# confirm against Forecast.auto_regressive.
predictions = weather.auto_regressive(12, 2)
print(predictions)
weather.persist()
| UTF-8 | Python | false | false | 294 | py | 3 | test_forecast.py | 2 | 0.727891 | 0.717687 | 0 | 13 | 21.615385 | 68 |
burhanok/pyimagedata | 14,139,032,367,637 | 3c7350de18dd0999bffd85f7454a3f5247466c6a | fd945519087f965cb13a29ed76c5f9a31fdfb5d0 | /header/aiurls.py | 4e5ba21761d0ab70f2cfb522e3fe659e85e87493 | [] | no_license | https://github.com/burhanok/pyimagedata | 285ac8c9fbe38819aa22864e29eec81280ae313d | f45afedd49c1a0577a4511fb478843d8991100ba | refs/heads/master | "2023-02-07T20:25:08.656799" | "2020-12-25T16:35:49" | "2020-12-25T16:35:49" | 324,395,068 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from . import views
# One route per AI topic page; each route name mirrors its view function.
urlpatterns = [
    #path("",views.index, name="index"),
    path("machinelearning/",views.machinelearning, name="machinelearning"),
    path("deeplearning/",views.deeplearning, name="deeplearning"),
    path("rflearning/",views.rflearning, name="rflearning"),
    path("datascience/",views.datascience, name="datascience"),
    path("selfdrivingcar/",views.selfdrivingcar, name="selfdrivingcar"),
]
| UTF-8 | Python | false | false | 450 | py | 12 | aiurls.py | 10 | 0.72 | 0.72 | 0 | 11 | 39.909091 | 75 |
anandijain/gym-sips | 1,924,145,365,874 | d5bf2b317049743b754937251318917afc5670e6 | 5938753551dbedd3db5908710bec3bc8b8c38389 | /gym_sips/utils.py | 02c1f990ac2e7077582e5004f75cb26f0123ece2 | [] | no_license | https://github.com/anandijain/gym-sips | 3aa14ea9bf62550997e4fff590a6386d747c30ed | 46ff00ed3920ec455fb65bdccf5f5193d545728b | refs/heads/master | "2020-04-23T01:06:58.784374" | "2020-02-23T21:52:46" | "2020-02-23T21:52:46" | 170,803,166 | 0 | 0 | null | false | "2020-02-23T21:52:48" | "2019-02-15T04:45:33" | "2019-11-11T09:23:00" | "2020-02-23T21:52:47" | 169 | 0 | 0 | 0 | Python | false | false | import random
from collections import namedtuple
def act(a):
    """Map an action index to its human-readable label.

    Returns a fallback string for any value outside the defined actions.
    """
    if a == 0:
        return "BOUGHT AWAY"
    if a == 1:
        return "BOUGHT HOME"
    if a == 2:
        return "SKIP"
    return "action outside of defined actions"
# One stored experience: (s, a, s', r).
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))


class ReplayMemory(object):
    """Fixed-capacity ring buffer of Transitions for experience replay."""

    def __init__(self, capacity):
        self.capacity = capacity  # maximum number of stored transitions
        self.memory = []          # backing store; grows until capacity is reached
        self.position = 0         # index of the next slot to write

    def push(self, *args):
        """Save a transition, overwriting the oldest entry once full."""
        entry = Transition(*args)
        if len(self.memory) < self.capacity:
            self.memory.append(entry)            # still in the growth phase
        else:
            self.memory[self.position] = entry   # wrap around and overwrite
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Return a uniformly random batch of stored transitions."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)
| UTF-8 | Python | false | false | 1,019 | py | 14 | utils.py | 9 | 0.582924 | 0.578018 | 0 | 42 | 23.261905 | 68 |
egor-bogomolov/optimizations | 10,170,482,590,716 | 116af90f9dec32d510fb75847d77e29a697d3761 | 3b616d8aeaf3589818d87d2706cc18cc8a1cc03d | /hw07/task5.py | 94397e938bbb27b49d4d0f5f6e9873f8538f35ef | [] | no_license | https://github.com/egor-bogomolov/optimizations | a333ec64aa2748e423ad64a4fa587d712423988c | 3517c5839bcadde96b531a428769e51339c7850f | refs/heads/master | "2021-05-07T07:00:14.108134" | "2017-12-02T12:01:25" | "2017-12-02T12:01:25" | 111,784,540 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Задание 7.5
#
# Формат файла:
# n m
# m строк, на i-ой концы i-ого ребра, нумерация с 1
# sigma, n чисел
#
# Пронумеруем ребра числами от 1 до m.
# Запишем требуемое равенство так:
# M * w = sigma
# M -- матрица инцидентности, n строк, m столбцов, M_{i, j} равно 1 если ребро j входит в вершину i, -1 если выходит из
# неё и 0 иначе. w -- вектор весов ребер
# Это все попадает под определение "произвольной вещественной матрицы", можем решать с помощью предыдущих заданий.
import numpy as np
from hw06.task3 import solve_axb
def main():
    """Read a directed graph and vertex divergences from 'input', solve
    M * w = sigma for edge weights, and print one weight per edge
    (or report that no solution exists)."""
    precision = 1e-9
    # Context manager fixes the original's leaked file handle: 'input' was
    # opened but never closed.
    with open('input', "r") as f:
        n, m = map(int, f.readline().split())
        # Incidence matrix: n vertices (rows) x m edges (columns);
        # -1 where edge i leaves vertex a, +1 where it enters vertex b.
        M = np.zeros([n, m])
        edges = []
        for i in range(m):
            a, b = map(int, f.readline().split())
            edges.append((a, b))
            M[a - 1][i] = -1
            M[b - 1][i] = 1
        # NOTE(review): np.fromstring(..., sep=' ') is deprecated in favour of
        # np.fromiter / np.loadtxt, but behaviour is deliberately kept unchanged.
        sigma = np.fromstring(f.readline(), dtype=float, sep=' ')
    w = solve_axb(M, sigma)
    # Accept w only if it actually satisfies the system up to `precision`.
    if np.linalg.norm(np.dot(M, w) - sigma) > precision:
        print("There is no solution")
    else:
        for i in range(m):
            print(edges[i], "w = %0.3f" % w[i])


if __name__ == '__main__':
    main()
| UTF-8 | Python | false | false | 1,434 | py | 12 | task5.py | 11 | 0.586726 | 0.570796 | 0 | 41 | 26.560976 | 119 |
binun/predict | 283,467,875,547 | 1d762fc79d49f05c7410542e2bcd1f858a380127 | 1df0b8f8f85cc2e7e26022716fdecfe0ffdff97e | /predictors/negationPredictor.py | 3eabb4d03c10e1650c62826bdaef7d0939cb3dc5 | [] | no_license | https://github.com/binun/predict | 277eb6a230db37c6b50b29d7d4f3905a4adecfc4 | 4d63706396011e0824fd0bb012e3783b550f4b08 | refs/heads/master | "2020-03-23T16:22:08.894860" | "2018-07-30T13:23:24" | "2018-07-30T13:23:24" | 141,806,365 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from predictors import Predictor
class negationPredictor(Predictor.Predictor):
    """Wraps another predictor and negates its output: the prediction sign
    flips and the (confidence, negative-confidence) pair is swapped."""
    def __init__(self, origpred):
        # Derive our name from the wrapped predictor's name.
        # NOTE(review): [8:] assumes an 8-character prefix on origpred.name --
        # confirm against the naming convention in Predictor.
        nm="neg({0})".format(origpred.name[8:])
        dm=origpred.dataManager
        hist = origpred.histlen
        super().__init__(nm,dm,hist)
        self.predictor_=origpred
        self.stickers = origpred.stickers
        self.predictorList = origpred.predictorList
    def runAll(self,since=None,until=None):
        # Preprocess ourselves, then reuse the wrapped predictor's best list.
        self.preprocess(since,until)
        self.bestpredictors = self.predictor_.bestpredictors
    def predict(self,sticker,timestamp,context):
        # Use the wrapped predictor's cached values when it is already
        # preprocessed; otherwise compute its prediction on demand.
        if self.predictor_.preprocessed:
            prediction=self.predictor_.getPrediction(timestamp,sticker)
            conf=self.predictor_.getConfidence(timestamp,sticker)
            negconf=self.predictor_.getnegConfidence(timestamp,sticker)
            skip=self.predictor_.getSkip(timestamp,sticker)
        else:
            (prediction,(conf,negconf),skip) = self.predictor_.predict(sticker,timestamp,context)
        # Negate the prediction; swap the confidence pair to match.
        return (-prediction,(negconf,conf),skip)
| UTF-8 | Python | false | false | 1,128 | py | 513 | negationPredictor.py | 32 | 0.632979 | 0.631206 | 0 | 28 | 37.928571 | 97 |
daniel-dondiego/Subject | 12,781,822,680,921 | bc1a0c8b8c5eeb17a27a200804a10f9a83aed0da | 36c4946809f9a1ea5d4adc7e74724d847e1998c6 | /web-server/Modelo/Paises.py | a703f54b9ac1afab8803aec7067d41cfa2cc17c5 | [] | no_license | https://github.com/daniel-dondiego/Subject | 275ba1d7805358c4327baf8e8c64479d39068208 | 7042803e3d54419ed390cc244ecfd5d0bccc9df1 | refs/heads/master | "2021-01-22T07:02:45.754846" | "2015-01-02T05:02:51" | "2015-01-02T05:02:51" | 26,295,343 | 0 | 0 | null | false | "2014-11-13T02:49:48" | "2014-11-06T23:42:44" | "2014-11-11T04:18:33" | "2014-11-12T23:54:20" | 0 | 0 | 2 | 1 | Python | null | null | #Clase que abstrae la informacion de un pais
class Pais(object):
    """Value object describing a country: database id, name, and demonym."""
    def __init__(self,id,pais,nacionalidad):
        '''
        Initialize a Pais (country) instance.
        id: the country's id in the database
        pais: the country's name
        nacionalidad: demonym of the people belonging to the country
        '''
        self.id = id
        self.__pais = pais
        self.__nacionalidad = nacionalidad
    def __str__(self):
        '''
        Return the country as a string.
        returns: the country's name
        '''
        return self.__pais
    def get_pais(self):
        '''
        Return the country's name.
        returns: the country's name
        '''
        return self.__pais
    def get_nacionalidad(self):
        '''
        Return the demonym of the country's people.
        returns: the demonym of the country's people
        '''
        # Bug fix: must return the private attribute set in __init__.
        # The original returned self.nacionalidad, which is never assigned,
        # so every call raised AttributeError.
        return self.__nacionalidad
| UTF-8 | Python | false | false | 782 | py | 47 | Paises.py | 25 | 0.675192 | 0.675192 | 0 | 34 | 22 | 65 |
Koliuchiy/myproject | 11,390,253,286,741 | 832285a4ea926f0687d57011d41a05c163a8a227 | 355cc6deca1e19b8a52d208c3b0c8cc3d16e669c | /cart/cart.py | f9bbd74072b80ec1c0402f2fc91aadae0dca7a06 | [] | no_license | https://github.com/Koliuchiy/myproject | 2c2689d5bdd4b79b56e4d59aa126f20aa63dda5a | 1b8d1afafd6d88e0fa2b69bb0c915f8093214869 | refs/heads/master | "2021-01-20T02:04:16.926787" | "2017-05-02T14:13:10" | "2017-05-02T14:13:10" | 89,334,275 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from decimal import Decimal
from django.conf import settings
from shop.models import Article
class Cart(object):
    """Session-backed shopping cart: a dict of article-id -> {quantity, price},
    stored under settings.CART_SESSION_ID."""
    def __init__(self, request):
        """Initialize the cart."""
        self.session = request.session
        cart = self.session.get(settings.CART_SESSION_ID)
        if not cart:
            # save an empty cart in the session
            cart = self.session[settings.CART_SESSION_ID] = {}
        self.cart = cart
    def add(self, article, quantity=1, update_quantity=False):
        """Add a article to the cart or update its quantity."""
        article_id = str(article.id)
        if article_id not in self.cart:
            # price is stored as str so the session stays JSON-serializable
            self.cart[article_id] = {'quantity': 0, 'price': str(article.price)}
        if update_quantity:
            self.cart[article_id]['quantity'] = quantity
        elif self.cart[article_id]['quantity'] + quantity > 20:
            # hard cap of 20 units per line item
            self.cart[article_id]['quantity'] = 20
        else:
            self.cart[article_id]['quantity'] += quantity
        self.save()
    def save(self):
        # update the session cart
        self.session[settings.CART_SESSION_ID] = self.cart
        # mark the session as "modified" to make sure it is saved
        self.session.modified = True
    def remove(self, article):
        """Remove a product from the cart."""
        article_id = str(article.id)
        if article_id in self.cart:
            del self.cart[article_id]
            self.save()
    def __iter__(self):
        """Iterate over the items in the cart and get the products
        from the database."""
        article_ids = self.cart.keys()
        # get the product objects and add them to the cart
        articles = Article.objects.filter(id__in=article_ids)
        for article in articles:
            self.cart[str(article.id)]['article'] = article
        # NOTE(review): this mutates the stored cart in place (Decimal prices,
        # Article objects), which is not JSON-serializable if save() runs after
        # iteration -- confirm save() is never called post-iteration.
        for item in self.cart.values():
            item['price'] = Decimal(item['price'])
            item['total_price'] = item['price'] * item['quantity']
            yield item
    def __len__(self):
        """Count all items in the cart."""
        return sum(item['quantity'] for item in self.cart.values())
    def get_total_price(self):
        # Grand total over all line items.
        return sum(Decimal(item['price']) * item['quantity'] for item in self.cart.values())
    def clear(self):
        # remove cart from session
        del self.session[settings.CART_SESSION_ID]
        self.session.modified = True
| UTF-8 | Python | false | false | 2,407 | py | 15 | cart.py | 8 | 0.594516 | 0.592023 | 0 | 66 | 35.469697 | 92 |
space-concordia-robotics/robotics-rover | 15,728,170,273,343 | 3240329cd83868a005d80ef0f10d9a88e0a5bdec | 7d81a0a1bed8b1905df709d3cc23553090c6afe6 | /roboticsrov/camera/video_stream.py | 57220a8b84fb65947e1423a5cc8a0459b13a55df | [] | no_license | https://github.com/space-concordia-robotics/robotics-rover | f316839c7ee389364a82e10cb077b87a977886d8 | 5dc616d7aea157f073b893b3d60637ea03c45281 | refs/heads/master | "2021-01-18T13:35:56.137022" | "2016-11-18T20:01:09" | "2016-11-18T20:01:09" | 35,299,274 | 1 | 0 | null | false | "2017-01-27T20:13:21" | "2015-05-08T20:02:54" | "2016-11-18T20:01:10" | "2017-01-27T20:13:08" | 261 | 1 | 0 | 10 | C++ | null | null | import subprocess
from camera_constants import *
class VideoStream:
    """
    Video stream that is broadcast on the network on a specified port, using the program mjpg_streamer
    Once the stream is up, this can be accessed over http (as an MJPEG) at http://[rover host]:[port]/?action=stream

    NOTE: this module is Python 2 (print statements).
    """
    def __init__(self, device='/dev/video0', port=DEFAULT_STREAM_PORT):
        """
        Parameters:
        device - the webcam to stream from. Should be a string, ex. '/dev/video0'
        port - the port to stream over. Should be an integer.
        """
        self.device = device
        self.port = port
        # True while an mjpg_streamer child process is believed to be running.
        self.streaming = False
        # Args to pass to subprocess; the -i/-o values are single strings that
        # mjpg_streamer splits itself (input plugin config, output plugin config).
        self.stream_args = ['mjpg_streamer', '-i', '/usr/local/lib/input_uvc.so -d ' + self.device + ' -n -f ' + str(STREAM_FRAMERATE) + ' -r ' + STREAM_RESOLUTION,
                            '-o', '/usr/local/lib/output_http.so -p ' + str(self.port) + ' -n']
    def __del__(self):
        # Best-effort cleanup so the child process is not orphaned.
        if (self.streaming):
            self.end()
    def start(self):
        """
        Starts the stream broadcast (spawns the mjpg_streamer child process).
        """
        if (self.streaming):
            print "Stream already up."
        else:
            print "Trying stream at ", self.stream_args
            # Fire-and-forget child; end() terminates it later.
            self.stream = subprocess.Popen(self.stream_args)
            self.streaming = True
    def end(self):
        """
        Ends the stream broadcast.
        """
        if (self.streaming):
            print "Stopping stream..."
            self.stream.terminate()
            self.streaming = False
        else:
            print "No stream to terminate."
| UTF-8 | Python | false | false | 1,695 | py | 19 | video_stream.py | 15 | 0.551032 | 0.549853 | 0 | 48 | 34.3125 | 164 |
MuhammadNsearaty/profile-rest-api | 6,176,163,015,649 | 1ed3b5f1a63a1eed23f1825830713f4573624cdc | cfe87bb00d3a1326909266fc5232bf4bf2046ad7 | /planning_app/urls.py | 5e36aa192e1bdb379a3da82774942869f284104d | [
"MIT"
] | permissive | https://github.com/MuhammadNsearaty/profile-rest-api | 0b2b32749de52fd0aa00d6aef9904cc02b455c36 | d44e73551f095c7a149915d3418fb6c3fd14c9e2 | refs/heads/B1 | "2023-07-29T20:18:14.511460" | "2021-09-17T12:42:35" | "2021-09-17T12:42:35" | 343,479,472 | 1 | 2 | MIT | false | "2021-08-13T18:20:25" | "2021-03-01T16:19:00" | "2021-08-13T18:11:06" | "2021-08-13T18:20:24" | 13,022 | 0 | 0 | 0 | Jupyter Notebook | false | false | from rest_framework.routers import DefaultRouter
from planning_app import views
# DRF router that auto-generates the CRUD URL patterns for each registered
# viewset; the project urlconf is expected to include `router.urls`.
router = DefaultRouter()
# Older registrations kept commented out for reference — presumably replaced
# by the *-db viewsets below; confirm before deleting.
# router.register('hotel', views.HotelViewSet, basename='hotels')
# router.register('place', views.PlaceViewSet, basename='places')
router.register('hotels-db', views.HotelDbViewSet, basename='hotels')
router.register('places-db', views.PlaceDbViewSet, basename='places')
router.register('places-review', views.PlacesReviewsViewSet, basename='places_review')
router.register('trips', views.TripViewSet, basename='trips')
| UTF-8 | Python | false | false | 529 | py | 71 | urls.py | 44 | 0.78828 | 0.78828 | 0 | 12 | 43.083333 | 86 |
xiaotuzixuedaima/PythonProgramDucat | 19,267,223,293,212 | 7ee331d5cc031c8985dbb23eab1b575f7e082332 | 4acc08d2c165b5d88119df6bb4081bcfaca684f7 | /PythonPrograms/python_program/multiple_dict_item.py | 93f2c60f524b6e1468459571ce4c2215c7b1b064 | [] | no_license | https://github.com/xiaotuzixuedaima/PythonProgramDucat | 9059648f070db7304f9aaa45657c8d3df75f3cc2 | 90c6947e6dfa8ebb6c8758735960379a81d88ae3 | refs/heads/master | "2022-01-16T04:13:17.849130" | "2019-02-22T15:43:18" | "2019-02-22T15:43:18" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #118. multiply all the items in dictionary.??
# Multiply together every value stored in the dictionary and print the result.
d = {'A': 10, 'B': 10, 'C': 239}
total = 1
for value in d.values():
    total *= value
print(total)
| UTF-8 | Python | false | false | 131 | py | 543 | multiple_dict_item.py | 537 | 0.610687 | 0.526718 | 0 | 7 | 17.714286 | 46 |
clivejan/python_basic | 2,929,167,708,204 | 56f9ecabe1a4e837adf8c1f9a04b89521d45dbd2 | e017eca53dbe0d35977546df1bb36a59915f6899 | /regular_expressions/dot_star_nongreedy_mode.py | 63dc4b5cb005d6041933f6f2e32e9c6f2816283f | [] | no_license | https://github.com/clivejan/python_basic | 7d14b7335f253658f8814acbdb753a735481e377 | 773de644a87792b872e38017dcac34c1691ccc87 | refs/heads/master | "2020-12-04T17:44:24.737370" | "2020-01-09T14:43:36" | "2020-01-18T03:11:20" | 231,856,419 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
# Demonstrate non-greedy vs. greedy matching of '.*' between angle brackets.
sample = '<To servve man> for dinner.>'
non_greedy_regex = re.compile(r'<.*?>')  # stops at the first closing '>'
greedy_regex = re.compile(r'<.*>')       # runs to the last closing '>'
print(non_greedy_regex.search(sample).group())
print(greedy_regex.search(sample).group())
| UTF-8 | Python | false | false | 225 | py | 70 | dot_star_nongreedy_mode.py | 69 | 0.666667 | 0.666667 | 0 | 7 | 31.142857 | 70 |
google/EarlGrey | 19,688,130,095,389 | 2f094bffc1c6b7bd3314655827ef11c1797a0923 | e025360a687164f9a6b2a69caefe8d905ad8c30a | /Scripts/rename-ochamcrestIOS.py | 997a2abed976c281fe3194be185cfa4a1637e1db | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | https://github.com/google/EarlGrey | 7a0602cacd1f7d7da9614cc31618b730b071daf8 | c3732d1bcfa6ed7d25f0e091a7571d4bf604379c | refs/heads/master | "2023-08-28T16:28:01.519826" | "2021-08-31T18:16:06" | "2021-08-31T18:16:06" | 51,094,210 | 6,034 | 939 | Apache-2.0 | false | "2023-09-09T20:16:52" | "2016-02-04T17:55:29" | "2023-09-07T07:38:10" | "2023-09-09T20:16:51" | 48,264 | 5,540 | 772 | 228 | Objective-C | false | false | #!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rename the prebuilt OCHamcrest framework to not use the IOS suffix.
Script to rename 'OCHamcrestIOS' to 'OCHamcrest' in the
OCHamcrestIOS framework. We use 'OCHamcrest' as our imports in EarlGrey
and using the OCHamcrestIOS.framework breaks these imports. This changes
the name of the framework and the public files and their imports to
'OCHamcrest'.
Ensure that this script is located in the same folder as where you
have the OCHamcrestIOS.framework file.
"""
import glob
import os
import sys
def _ChangeFrameworkName():
  """Change OCHamcrestIOS.framework to OCHamcrest.framework."""
  # Refuse to clobber a previously renamed framework.
  file_path = _FilePathRelativeToScriptDirectory('../OCHamcrest.framework')
  if os.path.isdir(file_path):
    print '''
    OCHamcrest.framework is already present in the script directory: %s/../.
    Please remove the file since we do not over-write it.
    ''' % _CurrentScriptDirectory()
    exit(1)
  # The source framework must sit in the same directory as this script.
  file_path = _FilePathRelativeToScriptDirectory('OCHamcrestIOS.framework')
  if not os.path.isdir(file_path):
    print '''
    OCHamcrestIOS.framework not present in the script directory: %s/../.
    Please make sure that the rename-ochamcrest.py script is present in
    the same folder as the OCHamcrestIOS.framework file.
    ''' % _CurrentScriptDirectory()
    exit(1)
  real_path = _FilePathRelativeToScriptDirectory('OCHamcrestIOS.framework')
  # Rename the bundle directory itself; inner files are handled separately.
  os.rename(real_path,
            real_path.replace('OCHamcrestIOS.framework',
                              'OCHamcrest.framework'))
def _ChangeFrameworkHeaderFileNames():
  """Change OCHamcrestIOS.h files to OCHamcrest.h."""
  # Rename the header files first, then the top-level framework entries
  # (same order as the original combined glob list).
  patterns = ('OCHamcrest.framework/Headers/*', 'OCHamcrest.framework/*')
  for pattern in patterns:
    for path in glob.glob(_FilePathRelativeToScriptDirectory(pattern)):
      os.rename(path, path.replace('OCHamcrestIOS', 'OCHamcrest'))
def _ChangeFrameworkTextInFiles():
  """Change instances of OCHamcrestIOS to OCHamcrest inside headers/plists."""
  framework_root = _FilePathRelativeToScriptDirectory('OCHamcrest.framework')
  for current_dir, _, filenames in os.walk(framework_root):
    for filename in filenames:
      full_path = os.path.join(current_dir, filename)
      if full_path.endswith('.h'):
        # Headers: only rewrite the import lines.
        _ReplaceInFile(full_path, '#import <OCHamcrestIOS',
                       '#import <OCHamcrest')
      elif full_path.endswith('.plist'):
        # Plists: rewrite every occurrence (bundle identifiers, names).
        _ReplaceInFile(full_path, 'OCHamcrestIOS', 'OCHamcrest')
def _ReplaceInFile(filepath, original, replacement):
  """Replace every occurrence of *original* with *replacement* in a file.

  Fix: the previous version opened the same file twice simultaneously and
  guarded the read behind an always-true `if input_file:`; this rewrite
  reads once, substitutes, then rewrites the file.
  """
  with open(filepath, 'rt') as input_file:
    data = input_file.read().replace(original, replacement)
  with open(filepath, 'wt') as out_file:
    out_file.write(data)
def _FilePathRelativeToScriptDirectory(file_name):
  """Returns *file_name* joined onto the directory holding this script."""
  script_dir = _CurrentScriptDirectory()
  return os.path.join(script_dir, file_name)
def _CurrentScriptDirectory():
  """Returns the directory where the script is located."""
  script_path = os.path.realpath(sys.argv[0])
  return os.path.dirname(script_path)
if __name__ == '__main__':
  # Order matters: rename the bundle directory first, then the files inside
  # it, then the textual references within headers and plists.
  print 'Changing OCHamcrestIOS.framework to OCHamcrest.framework...'
  _ChangeFrameworkName()
  _ChangeFrameworkHeaderFileNames()
  _ChangeFrameworkTextInFiles()
  print 'Done. OCHamcrest.framework is now present in the EarlGrey directory.'
  exit(0)
| UTF-8 | Python | false | false | 4,166 | py | 387 | rename-ochamcrestIOS.py | 10 | 0.721315 | 0.718435 | 0 | 109 | 37.220183 | 78 |
w341000/PythonTheWord | 15,487,652,074,368 | 7e2593013ac25d0cd80e42c5e4413348514f3c89 | 9c2a906b8bc2fbeb4fa5b98b68753a8beb0b7c4f | /capt_crack_densenet/realdata.py | 61ea344ab85611b044265795c908fd6aeb14af33 | [] | no_license | https://github.com/w341000/PythonTheWord | 702c7670195b4b1c886198bdc0b388a489b26fe1 | 3150c10b2d8f54a2b42c9baa7fd2aa1caed6235f | refs/heads/master | "2020-03-11T18:08:11.650318" | "2019-11-26T01:44:47" | "2019-11-26T01:44:47" | 130,168,239 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from io import BytesIO
import numpy as np
from PIL import Image,ImageFilter
from capt_crack_densenet.cfg import IMAGE_HEIGHT, IMAGE_WIDTH
woker_number=5
save_path='./pics'
payload = {'callback': 'jQuery110205660534614759527_1508391681367',
'fpdm': '1100172320',
'r':'0.1375104796068471',
'_':'None'}
clolr_map={'00':'void','01':'red','02':'yellow','03':'blue'}
headers = {'Connection':'keep-alive',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.91 Safari/537.36',
'Accept':'*/*',
'Referer':'https://inv-veri.chinatax.gov.cn/',
'Accept-Encoding':'gzip, deflate, br',
'Accept-Language':'zh-CN,zh;q=0.8,en;q=0.6',
'Host': 'zjfpcyweb.bjsat.gov.cn'
}
def enhance_color(im,mode):
width, _ = im.size
threshold=30
maxthreshold=180
minthreshold=105
COLOR=213
for i, px in enumerate(im.getdata()):
y = int(i / width)
x = i % width
r,g,b=px
if mode=='blue':
if b>maxthreshold and g<minthreshold and r<minthreshold:
im.putpixel((x, y), (COLOR, COLOR, COLOR))
else:
im.putpixel((x, y), (0, 0, 0))
if mode=='yellow':
if r>maxthreshold and g>maxthreshold and b<minthreshold:
im.putpixel((x, y), (COLOR, COLOR, COLOR))
else:
im.putpixel((x, y), (0, 0, 0))
if mode=='red':
if r>maxthreshold and g<minthreshold and b<minthreshold:
im.putpixel((x, y), (COLOR, COLOR, COLOR))
else:
im.putpixel((x, y), (0, 0, 0))
if mode=='void':
if np.array([r,g,b]).mean()<120 and np.array([r,g,b]).std()<30:
im.putpixel((x, y), (COLOR, COLOR, COLOR))
else:
im.putpixel((x, y), (0, 0, 0))
if mode==None:
if np.array([r,g,b]).mean()<170:
im.putpixel((x, y), (COLOR, COLOR, COLOR))
else:
im.putpixel((x, y), (0, 0, 0))
def pic_convert(png_data,mode,isfromfile=False):
if isfromfile==True:
fileobj=png_data
else:
fileobj = BytesIO()
fileobj.write(png_data)
image=Image.open(fileobj)
image=image.resize((IMAGE_WIDTH,IMAGE_HEIGHT))
enhance_color(image,mode)
image= image.filter(ImageFilter.SMOOTH)
#image,_,_ = image.split()
image=image.convert('L')
return image
| UTF-8 | Python | false | false | 2,429 | py | 79 | realdata.py | 78 | 0.562783 | 0.506793 | 0 | 75 | 31.333333 | 130 |
mvargas33/CC5401-Tablero-digital | 15,831,249,492,663 | cd28460f7546f1770d4aa607bffb9715110539a5 | db9ba481dc81dd568ef52bd89d7b3f395bd8d692 | /app/migrations/0005_auto_20191204_1006.py | 58dd98cd1c9a9fb83acee56f3f5652fda88b3705 | [] | no_license | https://github.com/mvargas33/CC5401-Tablero-digital | 2659e5d3d55b183ca7ad81196ed868abbebd0c21 | f459add856640123e3604f4dba899255156bbf33 | refs/heads/master | "2022-11-30T04:39:25.265669" | "2020-04-22T18:12:19" | "2020-04-22T18:12:19" | 287,434,465 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.2.6 on 2019-12-04 13:06
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated (Django 2.2.6): enforce uniqueness of the
    # ('user', 'postit') pair on the `votein` model, i.e. at most one
    # VoteIn row per user per post-it. Avoid hand-editing generated
    # operations; add a follow-up migration instead.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('app', '0004_auto_20191117_1853'),
    ]
    operations = [
        migrations.AlterUniqueTogether(
            name='votein',
            unique_together={('user', 'postit')},
        ),
    ]
| UTF-8 | Python | false | false | 449 | py | 41 | 0005_auto_20191204_1006.py | 20 | 0.625835 | 0.556793 | 0 | 19 | 22.631579 | 66 |
fogleman/Carolina | 1,382,979,514,643 | d58e4876c6cb3a5a1b00f693fab663636ab853dd | cc7cbea13eebc004f22d771be0b0ab7d105ba982 | /generate.py | 5964524b0faf2fe72f01d64bcff328c6651451e1 | [] | no_license | https://github.com/fogleman/Carolina | 1b1b42921b11227822fc89c2ec8a903adcdeb8ad | 0c77536353f28140beaa66ab0fc82da1310e76c0 | refs/heads/master | "2016-09-06T14:38:03.212812" | "2015-12-22T18:42:19" | "2015-12-22T18:42:19" | 36,829,244 | 3 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from counties import COUNTIES
from gcode import GCode, pack_gcodes
from math import radians, asinh, tan, sin, cos
from operator import attrgetter
from shapely.geometry import Polygon
from shapely.affinity import translate
import shapefile
# Input shapefile: US counties, 1:5m cartographic boundaries (Census cb_2013).
SHAPEFILE = 'shapefiles/cb_2013_us_county_5m/cb_2013_us_county_5m.shp'
# Scale factor applied when projecting county outlines to toolpath units.
SCALE = 525
# Feed rate emitted via the G-code F word.
F = 45
# Z heights/depths for the various passes; G20 in HEADER selects inches.
G0Z = 0.2
G1Z_BEVEL = -0.125
G1Z_TEXT = -0.05
G1Z_THRU1 = -0.4
G1Z_THRU2 = -0.65
# Shared program prologue (absolute coords, inches, raise Z, spindle on,
# dwell, feed rate) and epilogue (raise Z, M8, park at X3 Y6).
HEADER = GCode(['G90', 'G20', 'G0 Z%s' % G0Z, 'M4', 'G4 P2.0', 'F%s' % F])
FOOTER = GCode(['G0 Z%s' % G0Z, 'M8', 'G0 X3 Y6'])
# Default engraved-label size plus hand-tuned per-county overrides.
TEXT_SIZE = 0.375
TEXT_SIZES = {
    'Transylvania': 0.3,
}
# Hand-tuned (dx, dy) nudges so labels sit inside oddly shaped counties.
TEXT_OFFSETS = {
    'Alleghany': (0, 0.125),
    'Beaufort': (0, 1),
    'Cabarrus': (0, 0.25),
    'Camden': (-0.5, 0.625),
    'Carteret': (-1.5, -0.75),
    'Cherokee': (-0.25, 0),
    'Chowan': (-0.25, 0),
    'Cleveland': (0, -0.25),
    'Currituck': (-0.75, 0.5),
    'Cumberland': (-0.375, 0),
    'Davidson': (-0.125, 0),
    'Durham': (0.25, -0.5),
    'Edgecombe': (0.125, 0),
    'Henderson': (0, 0.375),
    'Hertford': (0, -0.25),
    'Jackson': (0, 0.25),
    'Lenoir': (-0.25, 0),
    'Mitchell': (0, 0.25),
    'Mecklenburg': (0.25, -0.5),
    'Martin': (0, -0.25),
    'Montgomery': (0, -0.75),
    'New Hanover': (0.25, 0),
    'Northampton': (0.375, 0.375),
    'Perquimans': (0.15, 0.45),
    'Richmond': (0.25, 0.25),
    'Rutherford': (0, 0.25),
    'Scotland': (-0.075, 0),
    'Transylvania': (0.25, 0),
}
# Label rotations in degrees for narrow / diagonal counties.
TEXT_ANGLES = {
    'Camden': -45,
    'Chowan': -90,
    'Currituck': -45,
    'Mitchell': -45,
    'New Hanover': 60,
    'Pasquotank': -45,
    'Perquimans': -45,
}
def mercator(lat, lng, scale):
    """Project (lat, lng) in degrees to scaled Mercator (x, y) coordinates."""
    lat_rad = radians(lat)
    lng_rad = radians(lng)
    return (lng_rad * scale, asinh(tan(lat_rad)) * scale)
def load_county_shapes(statefp):
    """Return (county name, shape) pairs for the given state FIPS code."""
    reader = shapefile.Reader(SHAPEFILE)
    matches = []
    for record in reader.shapeRecords():
        # record[0] is the state FIPS, record[5] the county name.
        if record.record[0] == statefp:
            matches.append((record.record[5], record.shape))
    return matches
def get_polygons(shape, scale):
    """Split a shapefile shape into origin-anchored Mercator polygons."""
    # `parts` holds the start index of each ring; append the end sentinel.
    boundaries = list(shape.parts) + [len(shape.points)]
    polygons = []
    for start, end in zip(boundaries, boundaries[1:]):
        # Shapefile points are (lng, lat); mercator expects (lat, lng).
        ring = [mercator(lat, lng, scale) for lng, lat in shape.points[start:end]]
        poly = Polygon(ring)
        min_x, min_y = poly.bounds[0], poly.bounds[1]
        # Shift so each polygon's bounding box starts at the origin.
        polygons.append(translate(poly, -min_x, -min_y))
    return polygons
def best_scale(width, height):
    """Print and return the worst-case (scale, angle) fit across counties."""
    result = None
    shapes = load_county_shapes('37')
    # NOTE(review): load_county_shapes returns a list of (name, shape)
    # tuples, yet `shapes[county.name]` indexes it like a mapping — confirm
    # the COUNTIES/loader contract before relying on this helper.
    for county in COUNTIES:
        shape = shapes[county.name]
        polygons = get_polygons(shape, 1)
        for polygon in polygons:
            sizes = []
            g = GCode.from_geometry(polygon, 0, 0)
            # Try rotations in 5-degree steps; keep the best fit per polygon.
            for angle in range(0, 180, 5):
                w, h = g.rotate(angle).size
                size = min(width / w, height / h)
                sizes.append((size, angle))
            size = max(sizes)
            print county.name, size
            # Track the minimum over all polygons (tuples compare by scale first).
            if result is None or size < result:
                result = size
    print result
    return result
def fit_text(polygon, x, y, w, h):
    """Largest 0.1-step scale whose (w, h) box centered at (x, y) fits in polygon."""
    best = scale = 0.1
    while True:
        half_w = w * scale / 2
        half_h = h * scale / 2
        left, bottom = x - half_w, y - half_h
        right, top = x + half_w, y + half_h
        candidate = Polygon([(left, bottom), (right, bottom), (right, top),
                             (left, top), (left, bottom)])
        if not polygon.contains(candidate):
            # The last scale that fit is the answer (0.1 even if none fit).
            return best
        best = scale
        scale += 0.1
def position_text(polygon, w, h, n):
    """Best (scale, x, y) over an n-by-n grid of candidate anchor points."""
    minx, miny, maxx, maxy = polygon.bounds
    span_x = maxx - minx
    span_y = maxy - miny
    candidates = []
    for ix in range(n):
        for iy in range(n):
            # float() keeps the fraction exact under Python 2 division.
            px = minx + span_x * (float(ix) / (n - 1))
            py = miny + span_y * (float(iy) / (n - 1))
            candidates.append((fit_text(polygon, px, py, w, h), px, py))
    return max(candidates)
def generate_text(name, x, y, scale, angle):
    """Load the engraving toolpath for *name* and place it at (x, y)."""
    toolpath = GCode.from_file('text/%s.nc' % name)
    return (toolpath.depth(G0Z, G1Z_TEXT)
                    .scale(scale, scale)
                    .rotate(angle)
                    .move(x, y, 0.5, 0.5))
def generate_county(shape, name, text):
    """Build one GCode toolpath per polygon of a county outline.

    When *text* is true, the county's engraved label is merged into the
    largest polygon's toolpath using the per-county tweak tables.
    """
    polygons = get_polygons(shape, SCALE)
    largest = max(polygons, key=attrgetter('area'))
    toolpaths = []
    for index, polygon in enumerate(polygons):
        path = GCode.from_geometry(polygon, G0Z, G1Z_BEVEL)
        if text and polygon == largest:
            cx, cy = polygon.centroid.coords[0]
            offset_x, offset_y = TEXT_OFFSETS.get(name, (0, 0))
            path += generate_text(
                name,
                cx + offset_x,
                cy + offset_y,
                TEXT_SIZES.get(name, TEXT_SIZE),
                TEXT_ANGLES.get(name, 0))
        path = path.origin()
        # Secondary polygons get a numeric suffix; the first keeps the name.
        path.name = ('%s %d' % (name, index)) if index else name
        toolpaths.append(path)
    return toolpaths
def generate_counties(shapes, text):
    """Flatten the per-county toolpaths for every (name, shape) pair."""
    return [path
            for name, shape in shapes
            for path in generate_county(shape, name, text)]
def render_counties(counties):
    """Rasterize each toolpath, centered on a 10x10 canvas, into pngs/."""
    for toolpath in counties:
        centered = toolpath.move(5, 5, 0.5, 0.5)
        image = centered.render(0, 0, 10, 10, 96)
        # Use the pre-move object's name; move() may return a fresh object.
        image.write_to_png('pngs/%s.png' % toolpath.name)
if __name__ == '__main__':
    # Fixed seed so the bin-packing layout is identical between passes.
    seed = 44654645
    shapes = load_county_shapes('37')
    # Only this subset of counties is cut in this run.
    big = [
        'Northampton', 'Halifax', 'Pitt', 'Beaufort', 'Craven', 'Carteret',
        'Wake', 'Johnston', 'Sampson', 'Robeson', 'Columbus', 'Brunswick',
    ]
    shapes = [(k, v) for k, v in shapes if k in big]
    # Pass 1: bevel outlines with engraved labels.
    counties = generate_counties(shapes, True)
    for i, g in enumerate(counties):
        # Carteret is long and diagonal; rotate it so it fits the stock.
        if g.name == 'Carteret':
            g = g.rotate(35).origin()
        g = g.clamp(0, 0, 6, 8)
        counties[i] = g
    bins = pack_gcodes(counties, 6, 8, 0.0, seed)
    for i, g in enumerate(bins):
        g = HEADER + g + FOOTER
        g.save('pass1/bin%02d.nc' % i)
        surface = g.render(0, 0, 6, 8, 96)
        surface.write_to_png('bins/%02d.png' % i)
    # Pass 2: two progressively deeper through-cuts, no text, same layout.
    counties = generate_counties(shapes, False)
    for i, g in enumerate(counties):
        if g.name == 'Carteret':
            g = g.rotate(35).origin()
        g = g.clamp(0, 0, 6, 8)
        counties[i] = g
    bins = pack_gcodes(counties, 6, 8, 0.0, seed)
    for i, g in enumerate(bins):
        g1 = g.depth(G0Z, G1Z_THRU1)
        g2 = g.depth(G0Z, G1Z_THRU2)
        g = HEADER + g1 + g2 + FOOTER
        g.save('pass2/bin%02d.nc' % i)
| UTF-8 | Python | false | false | 6,284 | py | 152 | generate.py | 24 | 0.53676 | 0.485041 | 0 | 214 | 28.364486 | 75 |
hdjsjyl/machine_learning_interview | 2,035,814,521,238 | a508e5122fdb6dda26cb7c3ec769d33a5ce79f6e | 36a7fb6642349e4b52776605739e81c0d4fcdca6 | /regularization.py | 9a44ecd8a6b57ad65ad7f9865f6f46180db9d037 | [] | no_license | https://github.com/hdjsjyl/machine_learning_interview | b99ceda7a548cd46af8b0759d00d349a57fde847 | ea6ea02f23f27afceb0caea17a43e994c68e8901 | refs/heads/master | "2022-08-05T10:11:47.704856" | "2020-05-27T05:21:43" | "2020-05-27T05:21:43" | 267,227,419 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
import scipy.io
def get_case_data(number=200):
    """Deterministic noisy two-moons dataset of *number* samples."""
    np.random.seed(6)  # fixed seed so every run trains on identical data
    features, labels = sklearn.datasets.make_moons(number, noise=0.3)
    return features, labels
def sigmoid(z):
    """Logistic sigmoid: maps any real input (scalar or array) into (0, 1)."""
    exp_neg = np.exp(-z)
    return 1 / (1 + exp_neg)
def model(x, w1, w2, b):
    """Linear score over the two feature columns, squashed by the sigmoid.

    x is expected to be (n, 2); the result has shape (n, 1).
    """
    score = (x[:, 0])[:, None] * w1 + (x[:, 1])[:, None] * w2 + b
    return sigmoid(score)
def sigmoidCrossEntropy(y_hat, y):
    """Mean binary cross-entropy along axis 0 for sigmoid outputs y_hat."""
    per_sample = y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)
    return -np.mean(per_sample, axis=0)
def optimizer(x, y_hat, y):
    """Mean cross-entropy gradients for (w1, w2, b).

    x is (n, 2); y_hat and y are (n, 1). Returns dw1 and dw2 with shape
    (1, 1) and db as a scalar.

    Fix: dw2 was summed over axis=1, which left a per-sample (n, 1) array
    instead of the batch-mean gradient; it now mirrors dw1 (axis=0).
    """
    error = y_hat - y  # (n, 1)
    dw1 = (np.sum(error * (x[:, 0])[:, None], axis=0) / len(x))[:, None]
    dw2 = (np.sum(error * (x[:, 1])[:, None], axis=0) / len(x))[:, None]
    db = np.sum(error) / len(x)
    return dw1, dw2, db
def accuracy(x, y, w1, w2, b):
    """Fraction of samples whose thresholded prediction equals the label."""
    predicted_positive = model(x, w1, w2, b) > 0.5
    return np.sum(predicted_positive == y) / len(x)
def dataLoader(x, y, batchsize):
    """Yield shuffled (x, y) minibatches covering every sample exactly once.

    Fix: the original loop advanced the window start by 1 instead of
    `batchsize`, yielding len(x) overlapping batches per epoch instead of
    ceil(len(x) / batchsize) disjoint ones.
    """
    indices = np.arange(len(x))
    np.random.shuffle(indices)
    for start in range(0, len(x), batchsize):
        batch_idx = indices[start:start + batchsize]
        yield x[batch_idx], y[batch_idx]
if __name__ == '__main__':
    # Train a 2-feature logistic-regression classifier with L2 weight decay
    # on the two-moons toy dataset using minibatch gradient descent.
    x, y = get_case_data()
    y = (y)[:, None]  # labels as an (n, 1) column so they broadcast with y_hat
    iters = 3000
    lr = 0.05
    batchsize = 32
    w1 = 1
    w2 = 1
    b = 0
    lambd = 0.01  # L2 regularization strength
    for iter in range(iters):  # NOTE(review): `iter` shadows the builtin
        tmp = 0   # accumulated regularized loss for this epoch
        acc2 = 0  # accumulated per-batch accuracy for this epoch
        for bx, by in dataLoader(x, y, batchsize):
            output = model(bx, w1, w2, b)
            loss = sigmoidCrossEntropy(output, by)
            l2_regularization_cost = loss + lambd * (np.square(w1) + np.square(w2))/batchsize
            dw1, dw2, db = optimizer(bx, output, by)
            ## l2 regularization gradient
            # Weight decay step applied before the data-gradient step.
            w1 -= lr*lambd * w1
            w1 -= lr*dw1[0, 0]
            w2 -= lr*lambd * w2
            w2 -= lr*dw2[0, 0]
            b -= lr*db
            acc = accuracy(bx, by, w1, w2, b)
            tmp += l2_regularization_cost
            acc2 += acc
        print('iter: {}'.format(iter), 'loss: {}'.format(tmp/(len(x)/batchsize)), 'acc: {}'.format(acc2/(len(x)/batchsize)))
boxter007/boxter007.github.io | 2,164,663,551,882 | 8ab0ba319528ac91777fe5e793d313bc248db685 | 6f3246b73f76805e3161b690ad8498d9050ca311 | /Bingle/compiler.py | 42801e410e3de38e9c889f71a7792fdd424338b0 | [] | no_license | https://github.com/boxter007/boxter007.github.io | 8878e79a9716d873fe3b55fd8602917b4dacf257 | 3b3546e3b1c43a2806670032147835918345cd4e | refs/heads/master | "2022-12-11T14:21:18.831200" | "2019-07-02T04:48:42" | "2019-07-02T04:48:42" | 194,791,868 | 1 | 0 | null | false | "2022-12-08T00:02:41" | "2019-07-02T05:08:39" | "2021-06-10T07:44:01" | "2022-12-08T00:02:41" | 45,989 | 1 | 0 | 3 | JavaScript | false | false | import os, sys, subprocess, tempfile, time
from BackGround import models
import logging
import time
# Module-level logger; the "collect" channel is presumably configured
# elsewhere in the project (confirm handler setup).
log = logging.getLogger("collect")
class compiler:
    """Compile/execute a code submission in a throw-away temp dir and grade it.

    ``codetype`` is a CodeMirror-style MIME type selecting the toolchain.
    ``run()`` executes the code with ``stdin`` passed as a single
    command-line argument and returns ``{'runtime': ..., 'outdata': str}``.
    ``submit()`` replays the code against every check of the referenced
    Issue and persists the score through the Django models.

    Fixes over the previous version:
      * the 14 near-identical if/elif branches are now table-driven;
      * ``subprocess.TimeoutExpired`` was uncaught in every non-python
        branch (only CalledProcessError was handled) — a real timeout
        crashed the grader;
      * an unknown ``codetype`` crashed on ``"".decode('utf-8')``;
      * ``TimeoutExpired.output`` may be ``None``, which broke decoding.
    """

    code = ""        # submitted source text
    codetype = "c"   # toolchain selector (CodeMirror MIME type)
    stdin = ""       # single argv-style input handed to the program
    stdout = ""      # unused; retained for interface compatibility
    user = 0         # primary key of the submitting user
    issue = 0        # primary key of the Issue being answered
    runtime = 0      # time of the last run, or "TimeOut" on failure

    _TIMEOUT = 50  # seconds allowed for every subprocess call

    # codetype -> (tempdir prefix, source suffix, interpreter argv prefix)
    _INTERPRETED = {
        "text/x-go": ("go_", "go", ["go", "run"]),
        "text/x-ruby": ("rb_", "rb", ["ruby"]),
        "text/x-java": ("java_", "java", ["java"]),
        "text/typescript": ("js_", "js", ["node"]),
        "text/x-perl": ("pl_", "pl", ["perl"]),
        "text/x-swift": ("swift_", "swift", ["swift"]),
    }

    # codetype -> (prefix, suffix, compile-argv builder, run-argv builder,
    #              whether the produced binary needs chmod +x)
    _COMPILED = {
        "text/x-c++src": ("cpp_", "cpp",
                          lambda src, exe: ["g++", src, "-o", exe],
                          lambda exe: [exe], True),
        "text/x-csrc": ("c_", "c",
                        lambda src, exe: ["gcc", src, "-o", exe],
                        lambda exe: [exe], True),
        "text/x-fortran": ("f_", "f90",
                           lambda src, exe: ["gfortran", src, "-o", exe],
                           lambda exe: [exe], True),
        "text/x-objectivec": ("m_", "m",
                              lambda src, exe: ["clang", src, "-o", exe],
                              lambda exe: [exe], True),
        # fpc takes the output path fused onto -o; mono runs the .exe itself.
        "text/x-pascal": ("pas_", "pas",
                          lambda src, exe: ["fpc", src, "-o" + exe],
                          lambda exe: [exe], True),
        "text/x-csharp": ("cs_", "cs",
                          lambda src, exe: ["mcs", src, "-out:" + exe],
                          lambda exe: ["mono", exe], False),
    }

    def __init__(self, ctype, c, s, i, u):
        self.code = c
        self.codetype = ctype
        self.stdin = s
        self.issue = i
        self.user = u

    def _write_source(self, prefix, suffix):
        """Dump self.code into a fresh temp dir; return (exe base, source path)."""
        tmpdir = tempfile.mkdtemp(suffix='_test', prefix=prefix)
        base = os.path.join(tmpdir, "%d" % int(time.time() * 1000))
        source = base + "." + suffix
        with open(source, 'w', encoding='utf-8') as f:
            f.write(self.code)
        return base, source

    def _check_output(self, argv):
        """check_output with the shared stderr-merge and timeout settings."""
        return subprocess.check_output(argv, shell=False,
                                       stderr=subprocess.STDOUT,
                                       timeout=self._TIMEOUT)

    def run(self):
        """Build/execute the submission; return {'runtime': ..., 'outdata': str}."""
        outdata = b""
        try:
            if self.codetype == "python":
                _, source = self._write_source('python_', 'py')
                # Only the python path reports a measured runtime.
                # NOTE(review): process_time() of the parent barely reflects
                # the child's cost; kept for caller compatibility.
                start = time.process_time()
                outdata = self._check_output([sys.executable, source, self.stdin])
                self.runtime = time.process_time() - start
            elif self.codetype in self._INTERPRETED:
                prefix, suffix, argv = self._INTERPRETED[self.codetype]
                _, source = self._write_source(prefix, suffix)
                outdata = self._check_output(argv + [source, self.stdin])
            elif self.codetype in self._COMPILED:
                prefix, suffix, compile_argv, run_argv, needs_chmod = \
                    self._COMPILED[self.codetype]
                exe, source = self._write_source(prefix, suffix)
                self._check_output(compile_argv(source, exe))
                if needs_chmod:
                    self._check_output(["chmod", "+x", exe])
                outdata = self._check_output(run_argv(exe) + [self.stdin])
            # Unknown codetypes fall through and return empty output.
        except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e:
            # The historical "TimeOut" label is kept for every failure so
            # the caller-visible contract does not change.
            outdata = e.output or b""
            self.runtime = "TimeOut"
        except Exception as e:
            # Mirror the old python-branch behaviour of swallowing any error.
            outdata = getattr(e, 'output', None) or b""
            self.runtime = "TimeOut"
        return {'runtime': self.runtime, 'outdata': outdata.decode('utf-8')}

    def submit(self):
        """Grade this submission against every check of its Issue and persist results."""
        # fetch the issue being answered
        issueobj = models.Issue.objects.filter(id=self.issue)
        totalcost = 0
        curcost = 0
        result = ""
        i = 0
        if issueobj.exists():
            curIssue = issueobj[0]
            # record the submission itself before running anything
            submit = curIssue.Submit.objects.create(
                codetype=self.codetype,
                code=self.code,
                user=models.User.objects.filter(id=self.user)[0],
                cost=totalcost,
                issue=curIssue)
            for check in curIssue.check_set.all():
                # run the submitted code against this check's input
                self.stdin = check.input
                outdata = self.run()
                checksubmit = self.equaloutput(check.output, outdata['outdata'])
                log.info(checksubmit)
                curcost = curIssue.cost * check.percent / 100 if checksubmit else 0
                totalcost = totalcost + curcost
                i += 1
                result += '验证{0}:\nScore:{4}\nRuntime:\n{1}秒\nInput:\n--------\n{2}\n--------\nOutput:\n--------\n{3}\n--------\n'.format(i, outdata['runtime'], self.stdin, outdata['outdata'], curcost)
                # persist the per-check verdict
                curIssue.SubmitCheck.objects.create(
                    submitid=submit,
                    checkid=check,
                    ispass=checksubmit,
                    cost=curcost)
            # update the stored submission with the final total score
            curIssue.Submit.objects.filter(id=submit.id).update(
                cost=totalcost)
        return result

    def equaloutput(self, checkout, programout):
        """True if expected and actual output match ignoring surrounding whitespace."""
        return checkout.strip() == programout.strip()
cocobear/LeetCode_in_Python | 14,267,881,391,326 | 36743880a90b6e46ee7dd937b815396f9d5ab71a | dd84b4d63ac68fd17b88ee9f221a6ec4921af05d | /solutions/0136.single-number/single-number.py | 14f76ed07ca6b23093d3352f8f11c631dab43dce | [
"MIT"
] | permissive | https://github.com/cocobear/LeetCode_in_Python | bf56b5f003c469d64f76643564a2eeca421833da | b4ecd5cb7122467ee479f38497faaabb17e6025e | refs/heads/master | "2021-07-17T05:44:26.129992" | "2021-01-28T12:21:49" | "2021-01-28T12:21:49" | 232,519,839 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #
# @lc app=leetcode id=136 lang=python3
#
# [136] Single Number
#
from __future__ import annotations
# @lc code=start
class Solution:
    """LeetCode 136 — Single Number, single-pass XOR solution."""

    def singleNumber(self, nums: List[int]) -> int:
        """Return the value occurring once when every other value occurs twice.

        XOR is associative/commutative and a ^ a == 0, so paired values
        cancel and only the unique element survives the fold.
        """
        result = nums[0]
        for value in nums[1:]:
            result ^= value
        return result
# (nums, expected) fixture pairs for quick manual verification.
tests = [
    ([2, 2, 1], 1),
    ([4, 1, 2, 1, 2], 4),
]
# @lc code=end
| UTF-8 | Python | false | false | 370 | py | 94 | single-number.py | 87 | 0.497297 | 0.445946 | 0 | 21 | 16.571429 | 51 |
OneDayMac/OneDayMac.github.io | 7,447,473,328,779 | 25945ab24b7e6222129e4f7ba7cf23c0e620e302 | 3f2cced782afe3018899f9868260a4b0028cace6 | /update_readme.py | daf90482d130e79190f18c2bac995e6b8d817d39 | [] | no_license | https://github.com/OneDayMac/OneDayMac.github.io | 32babcf0e71c6f83bd19016edbdaeb09cf14b126 | bade9207703721b9cde7432a95477bb8b753a974 | refs/heads/master | "2020-12-27T13:25:11.717375" | "2020-02-28T10:31:25" | "2020-02-28T10:31:25" | 237,917,781 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import fileinput
import re
import sys
fn = "index.md"
# Matches the previously published release-tarball URL inside the page.
regex = r"https:\/\/github\.com\/OneDayMac\/OneDayMac\.github\.io\S+\.tar\.gz"

# The new URL is passed as the last command-line argument.
new_content = sys.argv[-1]

# Context managers close the file even if reading/writing raises; the
# original open()/close() pairs leaked the handle on error.
with open(fn, 'r') as f:
    old_content = f.read()

# Rebuild the markdown "[text](url" around the new URL, hence the doubled
# substitution text.
output = re.sub(regex, "%s](%s" % (new_content, new_content), old_content)

with open(fn, 'w') as f:
    f.write(output)
| UTF-8 | Python | false | false | 333 | py | 3 | update_readme.py | 1 | 0.642643 | 0.63964 | 0 | 16 | 19.8125 | 78 |
Yullissa/oj | 18,554,258,743,907 | 9ae0e70214545e0482d5c64ccde69a29d8fd3a9e | 0f0b91b27ea57cfbc58dee21e8aaabd7f10f60d7 | /6.15-6.28/6.28星际穿越.py | 16dcc2e687921e06923f8d0b6ff377c64f1fa1b8 | [] | no_license | https://github.com/Yullissa/oj | 64573d41d6ced8f2dc58012e239257573c520e60 | 09b1daab87b3c4c4375e839d2ccb53b89c3b2ae2 | refs/heads/master | "2020-03-21T05:48:06.579600" | "2018-10-21T06:53:44" | "2018-10-21T06:53:44" | 138,147,083 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import traceback
import math
# Prints the largest k with k*k + k <= n (answer is 0 when n == 1).
try:
    n = int(input())
    if n ==1:
        print(0)
    else:
        # Scan downward from floor(sqrt(n)); the first i satisfying the
        # inequality is the maximum, so stop at the first hit.
        for i in range(int(math.sqrt(n)), 0,-1):
            if i * i + i <= n:
                print(i)
                break
except:
    # Competitive-programming style: dump any parse error and exit quietly.
    traceback.print_exc()
    pass
| UTF-8 | Python | false | false | 268 | py | 84 | 6.28星际穿越.py | 83 | 0.447761 | 0.432836 | 0 | 16 | 15.75 | 48 |
iABC2XYZ/abc | 17,901,423,724,023 | 9385af7f1f10337208f60f51f7e1473beddae407 | 67be8d282abd8d7abb92f8227a09cb636f4e4cad | /DM_RFGAP/RFCal.py | 050e6e9390b75842a1a76a359fbb71d304b42320 | [] | no_license | https://github.com/iABC2XYZ/abc | 3b6b1e9fc7c9a472aa6fc8c0bb71188061fb89e0 | 13386cf2079113127a6f44d53beb9153b252fe9a | refs/heads/master | "2020-12-30T11:03:07.611408" | "2017-12-04T04:42:01" | "2017-12-04T04:42:01" | 98,806,854 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 28 17:14:30 2017
@author: A
"""
from ConstPhysics import *
import tensorflow as tf
def CalWavelength(freqMHz):
    """Return the wavelength (metres) for a frequency given in MHz: c / f.

    NOTE(review): tf.div is deprecated (removed in TF 2.x) and uses legacy
    division semantics; tf.divide / tf.math.divide is the supported
    replacement -- confirm cLight (from ConstPhysics) is a float before
    migrating.
    """
    return tf.div(cLight,freqMHz*1.e6)
| UTF-8 | Python | false | false | 225 | py | 19,148 | RFCal.py | 206 | 0.675556 | 0.604444 | 0 | 13 | 16.153846 | 38 |
gohackfelipe/luiza | 13,348,758,378,612 | bb1e973a8ef7e22ba3dfb545fd0c5bdbb3e6fc9d | 4dbc74a6cd2681e5ddd81beb4ba260108653e680 | /receiver.py | f75e4687d683286f161c0ca219b24f5c40ffd6c3 | [
"MIT"
] | permissive | https://github.com/gohackfelipe/luiza | 6a1d5db7ecb82666f2d3d163012f2a45bd1dfb4c | 503451134b99bb6f331ce3eea9f67a11353aaa26 | refs/heads/master | "2018-02-06T15:35:15.876787" | "2017-07-03T21:18:44" | "2017-07-03T21:18:44" | 95,606,855 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import serial
import config
import util
import sys
import sender
# One module-level serial link, configured entirely from config._serial;
# the port is opened lazily in start() rather than at import time.
port = serial.Serial()
port.port = config._serial['port']
port.baudrate = config._serial['baudrate']
port.bytesize = config._serial['bytesize']
port.parity = config._serial['parity']
port.stopbits = config._serial['stopbits']
port.xonxoff = config._serial['xonxoff']
port.rtscts = config._serial['rtscts']
port.timeout = config._serial['timeout']
async def start():
    """Open the serial port, read one raw frame and hand it to verify()."""
    try:
        port.open()
        # BUG FIX: the original called serial.read(...) -- the pyserial
        # *module* has no read(); the opened port object does.  The raw
        # bytes are decoded so verify() can split the text frame on ';'.
        value = port.read(size=10).decode()
        # value = util.formatMessage(1,2,'Testing message')
        await verify(value)
        # print(value)
        port.close()
        # return value
    except serial.SerialException:
        print('Data could not be read')
async def verify(text):
    """Validate a ';'-separated frame and dispatch it.

    Expected layout (see sample below): field0;field1;payload;checksum with
    checksum == int(field0) + int(field1).
    """
    _obj = text.split(";")
    messageTo = _obj[0]
    # 0 1 2 3
    # ['1', '2', 'Testing message', '3']
    if(int(_obj[3]) == (int(_obj[0]) + int(_obj[1]))):
        print('The message not contains errors.')
        # sys.argv[2] presumably holds this node's id -- TODO confirm.
        if(messageTo == sys.argv[2]):
            print(text)
            # NOTE(review): _obj[0] is passed twice; the second argument looks
            # like it should be _obj[1] -- check sender.message()'s signature.
            sender.message(_obj[0],_obj[0],_obj[2], True)
        else:
            print("Re-send message to with data {}".format(_obj))
    else:
        print('The message contains errors.')
#
# print("Message received to {}".format(_obj)) | UTF-8 | Python | false | false | 1,321 | py | 5 | receiver.py | 5 | 0.588191 | 0.573808 | 0 | 46 | 27.73913 | 65 |
Saif807380/Form-Data-Extractor | 13,924,283,988,987 | 1310f5b6a8cd5a52cd4691a6f310be840b539042 | c1e70416e51d6b592147887a151bf71f3720f815 | /ResumeParser/field_extraction.py | da252de89a6607d2d2bb524139b81b528e6ad8b9 | [] | no_license | https://github.com/Saif807380/Form-Data-Extractor | 2ff366a4369a492d8c542841609baccc0d3e7b7e | 0fa44ce0b489b2c4d768b538614e7ccdc0b9a7b7 | refs/heads/master | "2022-09-14T09:53:21.277096" | "2020-05-31T07:34:13" | "2020-05-31T07:34:13" | 246,602,707 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
from gensim.utils import simple_preprocess
from ResumeParser.lib import *
def candidate_name_extractor(input_string, nlp):
doc = nlp(input_string)
doc_entities = doc.ents
doc_persons = filter(lambda x: x.label_ == 'PERSON', doc_entities)
doc_persons = filter(lambda x: len(x.text.strip().split()) >= 2, doc_persons)
doc_persons = map(lambda x: x.text.strip(), doc_persons)
doc_persons = list(doc_persons)
if len(doc_persons) > 0:
return doc_persons[0]
return "NOT FOUND"
def extract_fields(df,resume_string,nlp):
for extractor, items_of_interest in get_conf('extractors').items():
df[extractor] = extract_skills(resume_string, extractor, items_of_interest,nlp)
return df
def extract_skills(resume_text, extractor, items_of_interest, nlp):
potential_skills_dict = dict()
matched_skills = set()
for skill_input in items_of_interest:
if type(skill_input) is list and len(skill_input) >= 1:
potential_skills_dict[skill_input[0]] = skill_input
elif type(skill_input) is str:
potential_skills_dict[skill_input] = [skill_input]
else:
logging.warn('Unknown skill listing type: {}. Please format as either a single string or a list of strings'
''.format(skill_input))
for (skill_name, skill_alias_list) in potential_skills_dict.items():
skill_matches = 0
for skill_alias in skill_alias_list:
skill_matches += term_count(resume_text, skill_alias.lower(), nlp)
# If at least one alias is found, add skill name to set of skills
if skill_matches > 0:
matched_skills.add(skill_name)
if len(matched_skills)>0:
return matched_skills
else:
return {"None"}
| UTF-8 | Python | false | false | 1,797 | py | 17 | field_extraction.py | 10 | 0.64719 | 0.642738 | 0 | 44 | 39.840909 | 119 |
allenalvin333/Hackerrank_Python | 15,504,831,978,737 | 7b21e7af1d30630dc0464b1f2f9c4fc2380b2f8d | 8bec345535bb48386669abaeb3ff2ce87450b257 | /Itertools_06.py | 3f0edc0ec1b72b327ddaa2f7a76b937a734d3058 | [
"MIT"
] | permissive | https://github.com/allenalvin333/Hackerrank_Python | c3c362258160d56a2eb828246a36470b9ace9b33 | c5328d265470d4d72f5f5b45961ebceccacc5d5f | refs/heads/master | "2023-07-11T22:57:35.364254" | "2022-11-21T03:53:09" | "2022-11-21T03:53:09" | 288,521,910 | 0 | 0 | MIT | false | "2021-08-06T02:18:42" | "2020-08-18T17:34:52" | "2021-08-04T01:29:20" | "2021-08-06T02:14:38" | 12 | 0 | 0 | 0 | Python | false | false | # https://www.hackerrank.com/challenges/iterables-and-iterators/problem
from itertools import combinations
# Input lines: N (unused), the N letters, then K.  s holds every
# K-combination; the line below prints the probability that a randomly
# chosen combination contains the letter 'a'.
_, s = int(input()), list(combinations(input().split(), int(input())))
print(len([z for z in s if("a" in z)])/len(s)) | UTF-8 | Python | false | false | 227 | py | 77 | Itertools_06.py | 76 | 0.700441 | 0.700441 | 0 | 6 | 37 | 71 |
Saborage/LowVoltageTool | 3,444,563,823,120 | 7944969167f1bd680c9655396e7b14ac904a5d54 | 63d3b61d3cc502e4d990b25d0ffc0f9d50e5ce35 | /app/wxviews/panels/__init__.py | e17684bf96378dad501613cc2f4278fd42ccbdd3 | [] | no_license | https://github.com/Saborage/LowVoltageTool | aac40cdcfbea2105f10b64964e285c2dce175150 | efd1c1d958d7c8a29904f1b737272cbc3c072b6a | refs/heads/master | "2022-06-26T00:28:06.404954" | "2018-02-06T13:00:47" | "2018-02-06T13:00:47" | 120,452,963 | 1 | 0 | null | false | "2022-06-21T21:16:50" | "2018-02-06T12:28:50" | "2019-12-16T01:57:00" | "2022-06-21T21:16:49" | 3,046 | 1 | 0 | 4 | Tcl | false | false | import wx
from app.wxviews.core.engine import Engine
import wx.lib.floatcanvas.FloatCanvas as FC | UTF-8 | Python | false | false | 96 | py | 36 | __init__.py | 29 | 0.84375 | 0.84375 | 0 | 3 | 31.333333 | 43 |
jacobmee/Eastworld | 6,382,321,420,352 | bf9f239e36dd02a6bf022caf0e232a5f4bcff6b0 | 510b9f9794a0c1b5d821c1e5fd26e6c944e2d198 | /Actions/RaspCarUltrasonicScan.py | ebe1065373afab1b424363f47a551d384a930322 | [] | no_license | https://github.com/jacobmee/Eastworld | d4243f23b9d32c6b5e3843a4aaa5bc1329c99198 | bdd14d3b1512fc5bccf1dfbe64fbf7993e8c0528 | refs/heads/master | "2022-02-10T06:19:09.687879" | "2022-02-02T02:11:48" | "2022-02-02T02:11:48" | 77,199,637 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import time
import threading
import Queue
import math
import RPi.GPIO as GPIO
import logging.config
from Actions.Action import Action
from Actions.RaspCarMovement import *
class RaspUltrasonicScan(Action, threading.Thread):
# HC-SR04, the farest it can get is 4.5m. Pin03 (GPIO02) for "Trig" and Pin05 (GPIO03) for "Echo"
PIN_TRIGGER = 3
PIN_ECHO = 5
PIN_ROTATION = 36
    def __init__(self, adjustment_events, data):
        """Store the shared event object and the dict scan results are written to.

        adjustment_events is kept but not used by this class's visible
        methods -- presumably consumed by callers; TODO confirm.
        """
        super(RaspUltrasonicScan, self).__init__()
        threading.Thread.__init__(self)
        self.adjustment_events = adjustment_events
        self.ultrasonic_data = data
    def checkdist(self):
        """Fire one ultrasonic ping and return the measured distance in cm.

        Busy-waits on the echo pin.  NOTE(review): there is no timeout, so
        a missed echo would hang this thread -- confirm acceptable on the
        target hardware.
        """
        # Set trigger
        #logging.debug("Ultrasonic checkdist")
        GPIO.output(self.PIN_TRIGGER, GPIO.HIGH)
        # Wait for 15us
        time.sleep(0.000015)
        GPIO.output(self.PIN_TRIGGER, GPIO.LOW)
        while not GPIO.input(self.PIN_ECHO):
            pass
        # Start counting
        t1 = time.time()
        while GPIO.input(self.PIN_ECHO):
            pass
        # Stop counting
        t2 = time.time()
        # Return cm
        # ~34000 cm/s speed of sound, halved for the round trip
        distance = ((t2 - t1) * 34000 / 2)
        # Anything more than 450, means too far.
        if distance > 450:
            distance = 450  # clamp to the HC-SR04's ~4.5 m maximum range
        #logging.debug("Ultrasonic checkdist: %d", distance)
        return distance
def execute(self):
super(RaspUltrasonicScan, self).execute()
logging.debug('Ultrasonic scan initialize...')
GPIO.setmode(GPIO.BOARD)
# PIN3 for trigger
GPIO.setup(self.PIN_TRIGGER, GPIO.OUT, initial=GPIO.LOW)
# PIN5 for Echo
GPIO.setup(self.PIN_ECHO, GPIO.IN)
GPIO.setup(self.PIN_ROTATION, GPIO.OUT)
GPIO.output(self.PIN_ROTATION, GPIO.HIGH)
p = GPIO.PWM(36, 50) # 50HZ
p.start(0)
logging.info("GPIO SET: TRIGGER PIN[%s]" % self.PIN_TRIGGER)
logging.info("GPIO SET: ECHO PIN[%s]" % self.PIN_ECHO)
count = 0
while self.running:
scanning_data = {}
#logging.debug("Ultrasonic Scan started")
start_point = 2.5
length = 10.0
if count % 2 == 0:
for i in range(30, 151, 15):
cycle = start_point + (length * (i)) / 180
p.ChangeDutyCycle(cycle)
time.sleep(0.02)
distance = 0
for j in range(0, 3):
distance = distance + self.checkdist()
scanning_data[i] = round(distance / 3)
p.ChangeDutyCycle(0)
time.sleep(0.02)
else:
for i in range(150, 29, -15):
cycle = start_point + (length * (i)) / 180
p.ChangeDutyCycle(cycle)
time.sleep(0.02)
distance = 0
for j in range(0, 3):
distance = distance + self.checkdist()
scanning_data[i] = round(distance / 3)
p.ChangeDutyCycle(0)
time.sleep(0.02)
count += 1
logging.debug("One ultrasonic scan finished")
self.ultrasonic_data[time.time()] = scanning_data.copy()
time.sleep(0.3)
def finish(self):
super(RaspUltrasonicScan, self).finish()
# Clean up the GPIO3, set to input.
GPIO.setup(self.PIN_TRIGGER, GPIO.IN)
GPIO.setup(self.PIN_ROTATION, GPIO.IN)
logging.debug("Ultrasonic Scan cleaned: PIN[%d] & PIN[%d]" % (self.PIN_TRIGGER, self.PIN_ROTATION)) | UTF-8 | Python | false | false | 3,622 | py | 31 | RaspCarUltrasonicScan.py | 28 | 0.537548 | 0.508007 | 0 | 120 | 29.191667 | 107 |
seungchan-mok/solvedPS | 16,174,846,883,829 | eaafd3fb395aae723aaa5832e5b2ec7de3c565bd | 770f2a2121c44586b393f0e3de37d069c7a412f0 | /boj_11004.py | 08a29be469259c20a06d9250998f90eb6f6f5f18 | [] | no_license | https://github.com/seungchan-mok/solvedPS | dae18a712d7e59abcacd17dcac880f928dc3d500 | f34a5e12504868f52bf54904b4c57860c65541f5 | refs/heads/main | "2023-06-30T11:29:15.685219" | "2021-07-25T10:40:19" | "2021-07-25T10:40:19" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | n,k = map(int,input().split())
# Sort ascending and print the K-th smallest number (1-indexed); n and k
# were read on the previous line.
s = list(map(int,input().split()))
s.sort()
print(s[k-1])
| UTF-8 | Python | false | false | 89 | py | 51 | boj_11004.py | 50 | 0.58427 | 0.573034 | 0 | 4 | 21.25 | 34 |
andrewzrant/tql-Python | 8,899,172,246,377 | 639fd714c76f43b4e0e347b9fc363fb14907324c | 59a1bf4b4fd294595be5d5793e608cc6631285a9 | /tql/algo_ml/eda/SimpleEDA.py | 928c3e11042eb1bdb5bd62d7122ec02ae50ec662 | [
"Apache-2.0"
] | permissive | https://github.com/andrewzrant/tql-Python | 1249cfd600c4a04f839334c55fcb5c840907f910 | 0bcf95f0de0506cf8c2f429ead569d16c5a85846 | refs/heads/master | "2020-06-10T18:57:37.888882" | "2019-06-24T10:10:59" | "2019-06-24T10:10:59" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = 'SimpleEDA'
__author__ = 'JieYuan'
__mtime__ = '2019-04-22'
"""
import pandas as pd
from utils import cprint
class SimpleEDA(object):
    """Minimal exploratory-data-analysis helper for a pandas DataFrame.

    Reports per-column missing-value percentages and distinct-value counts;
    *exclude* names columns to drop before analysis.
    """
    def __init__(self, df: pd.DataFrame, exclude=None):
        if exclude:
            self.df = df.drop(exclude, 1, errors='ignore')
        else:
            self.df = df
    def summary(self, desc_rows=10):
        """Print both reports, showing at most *desc_rows* rows each."""
        self._na(desc_rows)
        self._unique(desc_rows)
    def _na(self, desc_rows=10):
        """Missing-value percentage per column (only columns with any NA)."""
        cprint("\n1. 统计缺失率...")
        na_counts = self.df.isnull().sum()[lambda x: x > 0]
        self.s_na = na_counts.sort_values(0, False) / len(self.df) * 100
        print(self.s_na.head(desc_rows))
    def _unique(self, desc_rows=10):
        """Distinct-value count per column, restricted to cardinality < 1024."""
        cprint("\n2. 统计类别数...")
        uniques = self.df.nunique(dropna=False)[lambda x: x < 1024]
        self.s_unique = uniques.sort_values()
        print(self.s_unique.head(desc_rows))
| UTF-8 | Python | false | false | 1,017 | py | 51 | SimpleEDA.py | 45 | 0.556477 | 0.526425 | 0 | 37 | 25.081081 | 87 |
Jionee/Programmers | 5,652,177,001,905 | ad59f8521a1afe1b153e3a2998fb4ce58b98a891 | 714f03fa7f36cee750d08984b365567c51cf7e22 | /Boj/1920.py | 71c7e6711db0746224c489fc156bb60423c564b7 | [] | no_license | https://github.com/Jionee/Programmers | f301b964e22c8f6f4d84a182d8345a63c0ade6e0 | 291e9f77b462c9482d6781927e495ec865283faa | refs/heads/master | "2023-03-13T18:00:20.526220" | "2022-06-16T10:22:00" | "2022-06-16T10:22:00" | 329,168,518 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
def input():
    # Deliberately shadow the builtin: sys.stdin.readline is much faster
    # for bulk competitive-programming input.
    return sys.stdin.readline().rstrip()
N = int(input())
A = set(list(map(int,input().split(" "))))  # set gives O(1) membership tests
M = int(input())
num = list(map(int,input().split(" ")))
#print(A,num)
# For each of the M queries, print 1 if it occurs among the N numbers, else 0.
for n in num:
    if n in A:
        print(1)
    else:
        print(0)
| UTF-8 | Python | false | false | 272 | py | 273 | 1920.py | 239 | 0.551471 | 0.544118 | 0 | 16 | 16 | 42 |
DaHuO/Supergraph | 18,811,956,789,335 | 03925d1072342595c24697fb57864a507b61c268 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2_neat/16_0_2_davidhao3300_pancake.py | edb5b3e57664e7841ebd992a81aac04e0b322714 | [] | no_license | https://github.com/DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | "2021-06-14T16:07:52.405091" | "2016-08-21T13:39:13" | "2016-08-21T13:39:13" | 49,829,508 | 2 | 0 | null | false | "2021-03-19T21:55:46" | "2016-01-17T18:23:00" | "2019-02-21T15:42:08" | "2021-03-19T21:55:45" | 38,414 | 0 | 0 | 2 | Python | false | false | with open('pancake.in') as f:
data = f.read().split('\n')
with open('pancake.out', 'w') as f:
for i in range(1, len(data)):
        # data[0] holds the case count; each later line is one pancake stack.
        row = data[i]
        # Minimum flips: scan from the bottom pancake up, counting every
        # transition between '+' and '-'; the finished stack must read
        # all '+', so the scan starts expecting '+'.
        count = 0
        is_plus = True
        for c in reversed(row):
            if c == '-' and is_plus:
                is_plus = False
                count += 1
            elif c == '+' and not is_plus:
                is_plus = True
                count += 1
f.write('Case #{}: {}\n'.format(i, count)) | UTF-8 | Python | false | false | 373 | py | 30,073 | 16_0_2_davidhao3300_pancake.py | 16,513 | 0.541555 | 0.530831 | 0 | 16 | 22.375 | 44 |
Gabrielsuncin/webcrawler-noticias | 9,972,914,083,386 | 548ad3891aca7766a526960983dfc2874aa0f364 | 286610a1b5205d0fbc3630c195fd24cb698ed14d | /noticias/models.py | ccad7859efa23981f4ee1b8249b94cbd8f7a0e14 | [] | no_license | https://github.com/Gabrielsuncin/webcrawler-noticias | 9da52bb3451c5da5bc07c4ed957f418255a92899 | 5e92dbcd047ac8161aaaf3d54f2d681902f655a1 | refs/heads/main | "2023-04-11T20:16:52.721718" | "2021-05-18T21:14:46" | "2021-05-18T21:14:46" | 304,403,221 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.db import models
class Jornal(models.Model):
    """A news source (newspaper) whose articles are crawled."""
    nome = models.CharField(max_length=255, null=False)  # display name
    slug = models.SlugField()  # URL-friendly identifier
class Noticia(models.Model):
    """One crawled news article."""
    titulo = models.CharField(max_length=500, null=False)
    link = models.URLField()
    descricao = models.TextField()
    imagem = models.URLField()  # image URL, not an uploaded file
    data_import = models.DateTimeField(auto_now_add=True)  # set once, at crawl time
    # NOTE(review): free-text source name rather than a ForeignKey to
    # Jornal -- confirm this is intended.
    jornal = models.CharField(max_length=200)
    def __str__(self):
        return self.titulo
| UTF-8 | Python | false | false | 484 | py | 14 | models.py | 10 | 0.690083 | 0.671488 | 0 | 18 | 25.888889 | 57 |
neoinmatrix/neocode | 5,763,846,158,073 | a0289b7240f5b9d13912e0e646d939aa7797a95e | 2090d2aeb5cc5db1299c346cdde4ee08f323324c | /ml/20170531mr/over/0605trying.py | d862012e0c7a2b4d1dbdbe38057325741e0ebbe8 | [] | no_license | https://github.com/neoinmatrix/neocode | a66eda52e4918350b7cf6f672f19d0a299927386 | 0da5c2ed6cc2be585d84726b9a3da43c2b16740d | refs/heads/master | "2021-01-19T13:18:16.095195" | "2018-03-12T02:31:38" | "2018-03-12T02:31:38" | 82,374,716 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # coding=utf-8
import dataset
import datadeal
import datadraw
import numpy as np
from sklearn.model_selection import KFold
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn import metrics
import matplotlib.pyplot as plt
# this is demo about 1.how to get data 2.how test model
def demotest():
    """Demo: cross-validate an SVM on the first mouse-position feature."""
    # test data
    ds=dataset.DataSet()
    ds.getTrainData()
    dt=datadeal.DataTrain()
    clf = SVC(C=1)
    X=ds.getPosOfMouse(0)
    y=ds.train["labels"]
    dt.trainTest(clf,X,y)
# use the start mouse position to predict with ann
def getReulst1():
    """Train an MLP on each trajectory's first (x, y) point and dump predictions.

    (Name kept as-is -- 'Reulst' is a typo but part of the public API.)
    """
    ds=dataset.DataSet()
    ds.getTrainData()
    dt=datadeal.DataTrain()
    clf = MLPClassifier(alpha=1e-5, hidden_layer_sizes=(10), random_state=1)
    y=ds.train["labels"]
    mouses=ds.train["mouses"]
    X=[]
    for i in range(ds.train["size"]):
        xs=mouses[i][0]
        ys=mouses[i][1]
        X.append([xs[0],ys[0]])  # first sample of the x- and y-channels
    X=np.mat(X)
    # dt.trainTest(clf,X,y)
    dt.train(clf,X,y)
    def f(idx,mouse,goal,label):
        # Prediction-time feature extractor; must mirror the training
        # features built above.
        if idx==False:
            return False
        xarr=mouse[0]
        yarr=mouse[1]
        return np.array([xarr[0],yarr[0]]).reshape([1,2])
    dt.testResultAll(ds,f,savepath='./data/ann_mouse_start.txt')
# use the start mouse position to predict with svm
def getResult2():
    """Same start-position features as getReulst1, classified with an SVM (C=1.5)."""
    ds=dataset.DataSet()
    ds.getTrainData()
    dt=datadeal.DataTrain()
    clf = SVC(C=1.5)
    y=ds.train["labels"]
    mouses=ds.train["mouses"]
    X=[]
    for i in range(ds.train["size"]):
        xs=mouses[i][0]
        ys=mouses[i][1]
        X.append([xs[0],ys[0]])  # first sample of the x- and y-channels
    X=np.mat(X)
    # dt.trainTest(clf,X,y)
    dt.train(clf,X,y)
    def f(idx,mouse,goal,label):
        # Prediction-time features; must mirror the training features above.
        if idx==False:
            return False
        xarr=mouse[0]
        yarr=mouse[1]
        return np.array([xarr[0],yarr[0]]).reshape([1,2])
    dt.testResultAll(ds,f,savepath='./data/svm_mouse_start.txt')
# use the goal mouse position to predit with ann
def getReuslt3():
    """Train an MLP on the goal coordinates alone and dump predictions."""
    ds=dataset.DataSet()
    ds.getTrainData()
    dt=datadeal.DataTrain()
    clf = MLPClassifier(alpha=1e-5, hidden_layer_sizes=(20), random_state=1)
    y=ds.train["labels"]
    X=ds.train["goals"]
    X=np.mat(X)
    # dt.trainTest(clf,X,y)
    dt.train(clf,X,y)
    def f(idx,mouse,goal,label):
        # Prediction-time features: the goal position, mirroring training.
        if idx==False:
            return False
        return np.array(goal).reshape([1,2])
    dt.testResultAll(ds,f,savepath='./data/ann_goal.txt')
# use the goal mouse y mse and x vector msc to predit with ann
def getReuslt_mse():
    """Cross-validate an MLP on [x0, y0, std(y), std(channel 2)] features.

    NOTE: the exit() below aborts after the CV run, so the train/predict
    code underneath it is currently dead.
    """
    ds=dataset.DataSet()
    ds.getTrainData()
    dt=datadeal.DataTrain()
    mouses=ds.train["mouses"]
    labels=ds.train["labels"]
    def getReuslt_mse_getdata(mouses):
        # Per-trajectory features: first x, first y, std of the y-channel
        # and of the third channel (presumably time -- TODO confirm).
        mse=[]
        for v in mouses:
            mse.append([v[0][0],v[1][0],np.std(v[1]),np.std(v[2])])
        return np.array(mse)
    mse=getReuslt_mse_getdata(mouses)
    # drawing picture to analyst ==================================
    # dw=datadraw.DataDraw(typex='3d')
    # print goals.shape
    # dw.drawgoal([mse[:2600,0],mse[:2600,1]],'b')
    # dw.drawgoal([mse[2600:,0],mse[2600:,1]],'r')
    # for i in range(2600):
    #     dw.draw3dgoal(mse[i],c='b')
    # for i in range(2600,3000):
    #     dw.draw3dgoal(mse[i],c='r')
    # plt.show()
    # dw.drawbatchgoal(,'r')
    # drawing picture to analyst ==================================
    # clf=SVC(C=1)
    clf = MLPClassifier(alpha=1e-3, hidden_layer_sizes=(40), random_state=1)
    y=ds.train["labels"]
    X=mse
    X=np.mat(X)
    dt.trainTest(clf,X,y)
    exit()
    dt.train(clf,X,y)
    def f(idx,mouse,goal,label):
        # Prediction-time features; must mirror getReuslt_mse_getdata above.
        if idx==False:
            return False
        tmp=[mouse[0][0],mouse[1][0],np.std(mouse[1]),np.std(mouse[2])]
        return np.array(tmp).reshape([1,4])
    dt.testResultAll(ds,f,savepath='./data/xytmse0605.txt')
# calc the right item number
def calcRightNumbers():
    """Console helper: back out item counts from known precision/score figures."""
    print(datadeal.calcScore(15268.0, 15268.0))
    print(datadeal.calcScoreRerve(0.7245, 16776.0))
    print(datadeal.calcScoreRerve(0.7508, 20561.0))
# draw the PR rate map
def drawPRmap():
    """Visualise the score surface over pairs of (right, judged) count buckets."""
    scores = []
    labels = []
    for right in range(10000, 20000, 1000):
        right_max = right + 1000
        labels.append(right_max)
        for judged in range(10000, 20000, 1000):
            judged_max = judged + 1000
            if right_max > judged_max:
                score = 0.0
            else:
                score = datadeal.calcScore(float(right_max), float(judged_max))
            scores.append(score)
    matrix = np.array(scores).reshape(10, 10)
    label_arr = np.array(labels)
    datadraw.plot_confusion_matrix(matrix, label_arr, 'a', 'b', 1)
if __name__=="__main__":
    # Only the PR-map plot is currently enabled; the other experiments are
    # kept commented out for reference.
    # demotest()
    # getReuslt_mse()
    # calcRightNumbers()
    drawPRmap()
pass | UTF-8 | Python | false | false | 4,690 | py | 210 | 0605trying.py | 176 | 0.59339 | 0.552665 | 0 | 159 | 28.503145 | 76 |
karthikpappu/pyc_source | 9,225,589,764,588 | 80d0cd31b58b361a11a203296d550ef6027088cd | 91fa095f423a3bf47eba7178a355aab3ca22cf7f | /pycfiles/smarty-0.3.3-py3.3/cmd_line_parser.cpython-33.py | bf6892e0e09c30d53068bb8f85d1898dbe6dce4c | [] | no_license | https://github.com/karthikpappu/pyc_source | 0ff4d03e6d7f88c1aca7263cc294d3fa17145c9f | 739e7e73180f2c3da5fd25bd1304a3fecfff8d6e | refs/heads/master | "2023-02-04T11:27:19.098827" | "2020-12-27T04:51:17" | "2020-12-27T04:51:17" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.3 (3230)
# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04)
# [GCC 8.4.0]
# Embedded file name: build/bdist.linux-x86_64/egg/smarty/cmd_line_parser.py
# Compiled at: 2014-01-17 04:54:24
# Size of source mod 2**32: 1094 bytes
import argparse, os
# Shared CLI parser; get_args() below returns its parsed namespace.
parser = argparse.ArgumentParser(description='Smart playlist generator written in python.')
# Connection defaults: MPD_HOST environment variable, standard mpd port 6600.
parser.add_argument('-i', '--ip', help='IP address of mpd server', type=str, metavar='<ip>', default=os.getenv('MPD_HOST', 'localhost'))
parser.add_argument('-p', '--port', help='Port mpd server is listening on', type=int, metavar='<port>', default=6600)
# Playlist-shaping knobs.
parser.add_argument('--maxnum', help='Maximal number of songs in playlist', type=int, metavar='<num>', dest='max_num', default=50)
parser.add_argument('--dist', help='Add new song if only <x> songs are left to play in current playlist', type=int, metavar='<num>', dest='songs_to_end', default=5)
parser.add_argument('--norepeat', help="Don't add songs which are already in playlist.", action='store_true')
parser.add_argument('-v', '--verbose', help='Print information about running process', action='store_true')
parser.add_argument('--exclude', help='Never add these genres to playlist', nargs='+', metavar='genre', default=[])
def get_args():
return parser.parse_args() | UTF-8 | Python | false | false | 1,331 | py | 114,545 | cmd_line_parser.cpython-33.py | 111,506 | 0.711495 | 0.666416 | 0 | 19 | 69.105263 | 164 |
AlexanderGroeger/KappaCraft | 11,931,419,171,975 | f3e9516e3991e0c67e2b083b0079bcfd725d4311 | 672c538f8dcafb58891b06401f000bd9de86f8d9 | /Loot/loot_api.py | e8a8d1fc5148fe9b030bff07bd4c6f005c21f8f4 | [] | no_license | https://github.com/AlexanderGroeger/KappaCraft | 271c2ac883eac3515ac40e69c2e693703a36fcd0 | 1c563b31fd31d1c72f940bdd1600fcca0e34be79 | refs/heads/master | "2023-03-14T16:44:15.099455" | "2021-03-10T19:59:02" | "2021-03-10T19:59:02" | 264,510,986 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
def Save(data, name):
    """Serialise *data* to the file *name* as pretty-printed (indent=4) JSON."""
    serialized = json.dumps(data, indent=4)
    with open(name, 'w') as fp:
        fp.write(serialized)
def NewTable(pools=None):
    """Return a loot-table dict wrapping *pools* (a fresh empty list by default).

    The original ``pools=[]`` default was a shared mutable: every table
    built without an argument aliased the very same list, so mutating one
    table's pools silently changed all later default-built tables.
    """
    return {"pools": [] if pools is None else pools}
def NewPool(rolls=1, bonusRolls=1, conditions=None, entries=None):
    """Build a loot-pool dict.

    The mutable ``[]`` defaults are replaced with None sentinels so callers
    that mutate the returned pool cannot corrupt later calls.  As in the
    original, "conditions" is only attached when non-empty.
    """
    pool = {
        "rolls": rolls,
        "bonus_rolls": bonusRolls,
        "entries": [] if entries is None else entries,
    }
    if conditions:
        pool["conditions"] = conditions
    return pool
def NewItem(weight, name, quality=1, conditions=None, functions=None):
    """Build an "item" loot entry; *name* is prefixed with "minecraft:".

    conditions/functions are only attached when non-empty (original
    behaviour); None sentinels replace the shared mutable ``[]`` defaults.
    """
    item = {
        "type": "item",
        "weight": weight,
        "name": "minecraft:" + name,
        "quality": quality,
    }
    if conditions:
        item["conditions"] = conditions
    if functions:
        item["functions"] = functions
    return item
def NewCondition(condition, **kwargs):
    """Return a condition dict: {"condition": <name>} plus any extra fields."""
    result = {"condition": condition}
    result.update(kwargs)
    return result
def NewFunction(function, **kwargs):
    """Return a function dict; a "count" argument is normalised via NewRolls."""
    result = {"function": function}
    for key, value in kwargs.items():
        result[key] = NewRolls(value) if key == "count" else value
    return result
def NewRolls(rolls):
    """Normalise a roll spec.

    [lo, hi] -> {"min": lo, "max": hi}; [x] -> x; any other list -> 0;
    a plain int passes through; everything else -> 0.
    """
    if type(rolls) is list:
        if len(rolls) == 2:
            return {"min": rolls[0], "max": rolls[1]}
        if len(rolls) == 1:
            return rolls[0]
        return 0
    if type(rolls) is int:
        return rolls
    return 0
def GenerateTable(data, fname):
    """Convert a simplified pool/item spec into a loot-table JSON file.

    *data* is a list of pool dicts with keys "rolls", "bonus", optional
    "chance" and "items"; each item dict may set name, weight, quality,
    chance, count, nbt or enchant_randomly.  The result is written to
    *fname* via Save().
    """
    pools = []
    for pool in data:
        rolls = NewRolls(pool["rolls"])
        bonusRolls = NewRolls(pool["bonus"])
        poolConds = []
        if "chance" in pool:
            poolConds.append(NewCondition(condition = "random_chance", chance = pool["chance"]))
        entries = []
        for entry in pool["items"]:
            # Per-item defaults, overwritten by whatever keys the spec sets.
            name = ""
            weight = 0
            quality = 1
            conds = []
            funcs = []
            for key, value in entry.items():
                if key == "name":
                    name = value
                elif key == "weight":
                    weight = value
                elif key == "quality":
                    quality = value
                elif key == "chance":
                    conds.append(NewCondition(condition = "random_chance", chance = value))
                elif key == "count":
                    funcs.append(NewFunction(function = "set_count", count = value))
                elif key == "nbt":
                    funcs.append(NewFunction(function = "set_nbt", tag = value))
                elif key == "enchant_randomly":
                    funcs.append(NewFunction(function = "enchant_randomly"))
            entries.append(NewItem(weight,name,quality,conds,funcs))
        pools.append(NewPool(rolls,bonusRolls,poolConds,entries))
    Save(NewTable(pools),fname)
# Save(NewTable([NewPool(entries = [NewItem(weight = 1, name = "diamond", quality = 1, conditions = [NewCondition("random_chance",chance = .5)])])]), "test.json")
| UTF-8 | Python | false | false | 2,978 | py | 45 | loot_api.py | 30 | 0.523506 | 0.518133 | 0 | 93 | 31.021505 | 162 |
chenlei65368/algorithm004-05 | 1,142,461,330,170 | 7b84f12cc3bc09dccf96722948ba189c756846c1 | 9c3bb98eb9d0a587a302bdfa811f7b5c6a5a0a37 | /Week 7/id_510/bit/Leetcode_51_510.py | f4bd0a03a7450f360cc0b67a748a5b55ae089807 | [] | permissive | https://github.com/chenlei65368/algorithm004-05 | 842db9d9017556656aef0eeb6611eec3991f6c90 | 60e9ef1051a1d0441ab1c5484a51ab77a306bf5b | refs/heads/master | "2020-08-07T23:09:30.548805" | "2019-12-17T10:48:22" | "2019-12-17T10:48:22" | 213,617,423 | 1 | 0 | Apache-2.0 | true | "2019-12-17T10:48:24" | "2019-10-08T10:50:41" | "2019-12-08T14:22:35" | "2019-12-17T10:48:23" | 72,897 | 1 | 0 | 0 | Java | false | false | """
n 皇后问题研究的是如何将 n 个皇后放置在 n×n 的棋盘上,并且使皇后彼此之间不能相互攻击。
上图为 8 皇后问题的一种解法。
给定一个整数 n,返回所有不同的 n 皇后问题的解决方案。
每一种解法包含一个明确的 n 皇后问题的棋子放置方案,该方案中 'Q' 和 '.' 分别代表了皇后和空位。
示例:
输入: 4
输出: [
[".Q..", // 解法 1
"...Q",
"Q...",
"..Q."],
["..Q.", // 解法 2
"Q...",
"...Q",
".Q.."]
]
解释: 4 皇后问题存在两个不同的解法。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/n-queens
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
from typing import List
class Solution:
def solveNQueens(self, n: int) -> List[List[str]]:
if n < 1: return []
self.ppp = []
self.count = 0
self.DFS(n,0,0,0,0,[0 for _ in range(n)])
for i in range(len(self.ppp)):
for j in range(len(self.ppp[i])):
self.ppp[i][j] = self.ppp[i][j].replace("0", ".").replace("1", "Q")
self.ppp.reverse()
return self.ppp
def DFS(self, n, row, cols, pie, na, pp):
if row >= n:
self.count +=1
self.ppp.append(pp.copy())
return
bits = (~(cols | pie | na)) & ((1 << n) - 1) #得到当前所有的空位 cols, pie, na 中的1代表攻击的位置 bits 1代表空位 排除已占的位置
while bits:
p = bits & (-bits) # 获取到最低的空位 取到最低位的1
bits = bits & (bits - 1) #占位最低位 设为0 表示在p位置上放入皇后
pp[row] = format(p, 'b').rjust(n,'.')
# cols | p 列占位 (pie | p) << 1 撇占位 到下一层以后要左移动一位 (na | p) >> 1 捺占位 到下一层要右移动一位
self.DFS(n, row + 1, cols | p, (pie | p) << 1, (na | p) >> 1, pp)
print(Solution().solveNQueens(6)) | UTF-8 | Python | false | false | 2,056 | py | 2,256 | Leetcode_51_510.py | 2,024 | 0.510013 | 0.491322 | 0 | 62 | 23.177419 | 118 |
AlbertTLChen/PythonMinecraft | 16,114,717,306,336 | 7915a7e16078293dfe100f0c5e996ca8f07195ba | e885cebb12ff3faee9b2c8d4dd81351bf7cbaf55 | /2-1.py | 7dafa2d3ed7c0e2c64870dea1cdbe8e239834dac | [] | no_license | https://github.com/AlbertTLChen/PythonMinecraft | 885d34fa13893629382004be81820e9a88b70dd3 | 26f57a47e1e9bef9128a1a6ab010cf4f35e68759 | refs/heads/main | "2023-02-22T06:43:57.886109" | "2021-01-26T08:14:49" | "2021-01-26T08:14:49" | 333,015,617 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from mcpi.minecraft import Minecraft
mc= Minecraft.create()
x,y,z= mc.player.getTilePos()
mc.setBlock(x,y,z+1,103)
mc.setBlock(x,y,z-1,103)
mc.setBlock(x-1,y,z,103)
mc.setBlock(x+1,y,z,103)
mc.setBlock(x+1,y,z+1,103)
mc.setBlock(x-1,y,z+1,103)
mc.setBlock(x-1,y,z+1,103)
mc.setBlock(x+1,y,z-1,103)
mc.setBlock(x-1,y,z-1,103) | UTF-8 | Python | false | false | 337 | py | 8 | 2-1.py | 8 | 0.676558 | 0.554896 | 0 | 13 | 24.076923 | 36 |
BirajKhanal/EasyAuction | 14,267,881,390,570 | c525b8a9985bc55e855e49265eefb2c129d97803 | f38722a8618fc908aca5f78fe2c6eb92da7574e4 | /backend/app/modules/user/schemas/notification.py | 11c8e68513f3a112975c64a0e6d2f2618c0243e1 | [] | no_license | https://github.com/BirajKhanal/EasyAuction | b81a5e566ae00605cf1c6fcb655a59f536aa14b1 | 38f24c04c189f676ee9dbcc88cd8dd30b05c2466 | refs/heads/master | "2023-08-14T07:06:24.692944" | "2021-10-06T04:51:11" | "2021-10-06T04:51:11" | 363,978,790 | 0 | 0 | null | true | "2021-05-03T15:39:48" | "2021-05-03T15:39:48" | "2021-05-03T15:02:30" | "2021-05-03T15:02:28" | 0 | 0 | 0 | 0 | null | false | false | from typing import Optional
from pydantic import BaseModel
class NotificationBase(BaseModel):
title: Optional[str]
detail: Optional[str] = ''
sender_id: Optional[int]
reciever_id: Optional[int]
class NotificationCreate(NotificationBase):
title: str
sender_id: int
reciever_id: int
class NotificationUpdate(NotificationBase):
pass
class NotificationInDB(NotificationBase):
id: Optional[int] = None
active: Optional[bool]
class Config:
orm_mode = True
class Notification(NotificationInDB):
    """Public notification representation returned by the API."""
    pass
| UTF-8 | Python | false | false | 561 | py | 92 | notification.py | 77 | 0.71123 | 0.71123 | 0 | 32 | 16.53125 | 43 |
BC-COMS-2710/summer21-material | 4,063,039,091,590 | 6e1b27cf9f4c23581cc105b70cfec66ea5999a0f | 6151a3c25988eb4eac0c00289b90b2e42d4960eb | /tutorial/week2/2.2/tests/q2_26.py | 6acbef969f75e4cedf637bd516f8d81ea6ef0b82 | [] | no_license | https://github.com/BC-COMS-2710/summer21-material | 8e3bbbb375433fa0b575cf39eed4bdf2d3b486d3 | f07ffc1595d7a924a5fca0636161ded26b28e655 | refs/heads/master | "2023-05-28T22:52:25.941752" | "2021-06-10T14:44:42" | "2021-06-10T14:44:42" | 353,820,186 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | test = { 'name': 'q2_26',
'points': [0.5, 0.5],
'suites': [ { 'cases': [ {'code': ">>> assert 'days_in_office' in trump_100_days_tweet_df\n", 'hidden': False, 'locked': False},
{ 'code': ">>> # Trump did not Tweet on his 77th day in office;\n>>> assert 77 not in trump_100_days_tweet_df['days_in_office'].unique()\n",
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| UTF-8 | Python | false | false | 639 | py | 94 | q2_26.py | 52 | 0.386541 | 0.359937 | 0 | 10 | 62.9 | 177 |
dthermadom/apparate | 979,252,558,742 | fe9728bdaca5fa211b2aaf878dc557beed59e72f | 15514e327d561fc13acf1461a69fd20d3085ac32 | /tests/unittest_helpers.py | 5b335022bc9f7d8203e89f01aa31c22df679455a | [
"BSD-3-Clause"
] | permissive | https://github.com/dthermadom/apparate | 27cbdd86ee6bf97cbb6f4c29d23b518748627d87 | d10859240bf9e7f453dbaa33f029ab0dda4e161e | refs/heads/master | "2022-05-26T12:11:20.897578" | "2020-04-24T06:01:21" | "2020-04-29T09:52:25" | 257,890,244 | 0 | 0 | BSD-3-Clause | true | "2020-04-22T12:08:42" | "2020-04-22T12:08:42" | "2020-04-06T09:48:59" | "2019-10-21T17:25:30" | 67 | 0 | 0 | 0 | null | false | false | def strip_whitespace(string_value):
"""
Return the input string without space, tab,
or newline characters (for comparing strings)
"""
return ''.join(
[c for c in string_value if c != ' ' and c != '\n' and c != '\t']
)
| UTF-8 | Python | false | false | 250 | py | 8 | unittest_helpers.py | 3 | 0.572 | 0.572 | 0 | 8 | 30.25 | 73 |
kingtelepuz5/python3_code | 2,551,210,579,850 | d0c000773df1ce6dbc1a12a54d56a3405536a10f | f3648d48650e56a0647163fa664ae5f7cf4b3ae0 | /drun_kit/respones.py | 8b025c8b8ccad62d159794b7f27023ed646d4915 | [] | no_license | https://github.com/kingtelepuz5/python3_code | 3f6bc5af1620e08f7baecdaf064e7d23464e66ee | 0b4273835aea4dbaaef77a03729bfa6528dbf2ff | refs/heads/master | "2023-05-02T22:35:22.211611" | "2021-05-25T15:55:40" | "2021-05-25T15:55:40" | 348,477,750 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import urllib.request
# Phrase to search for in the downloaded page ("pharse" name kept as-is).
search_pharse = 'crypto'
with urllib.request.urlopen('https://www.wired.com/') as response:
    html = response.read().decode('utf-8')  # decode raw bytes into text
    first_pos = html.find(search_pharse)
    # str.find() returns -1 when the phrase is absent; slicing with -1 would
    # silently print an unrelated piece of the page's tail.
    if first_pos == -1:
        print('phrase not found')
    else:
        # max() guards against a match within the first 10 characters.
        print(html[max(0, first_pos - 10):first_pos + 10])
| UTF-8 | Python | false | false | 261 | py | 48 | respones.py | 42 | 0.708812 | 0.689655 | 0 | 6 | 42.5 | 66 |
VitaliyRogach/Social_something | 9,809,705,345,729 | 8d1e7e774070f6cb3ab6428f6bfd404d69fda7a8 | 215e6ff2978396f24039862bf7e527073eef3b81 | /instagram/main/models.py | f0e152004f5d57c552aa6c55ba16e11c271a095c | [] | no_license | https://github.com/VitaliyRogach/Social_something | c5ed7d031823843af171f7f5ae7b3dacdbef3201 | 2e83d8edf9c0215e41544b2820de5bf791cf3877 | refs/heads/master | "2023-02-07T16:01:16.044550" | "2021-01-03T08:21:06" | "2021-01-03T08:21:06" | 322,406,425 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.contrib.auth.forms import User
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.urls import reverse
class Profile(models.Model):
    """Per-user profile data, attached 1:1 to the auth ``User``."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, verbose_name='Пользователь')
    avatar = models.ImageField('Аватар', upload_to='static/images/profiles', blank=True)
    phone = models.CharField(max_length=30)
    name = models.CharField(max_length=30)
    surname = models.CharField(max_length=40)
    slug = models.SlugField(max_length=50, blank=True)
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # Bug fix: the original passed ``User.username`` - the *class*
        # attribute, not a value. The "profile" route takes a slug kwarg,
        # so use this instance's slug field.
        return reverse("profile", kwargs={"slug": self.slug})
    class Meta:
        verbose_name = "Профиль"
@receiver(post_save, sender=User)
def save_or_create_profile(sender, instance, created, **kwargs):
    """Keep the Profile row in sync whenever a User is saved."""
    if created:
        Profile.objects.create(user=instance)
        return
    try:
        instance.profile.save()
    except ObjectDoesNotExist:
        # The user predates the Profile model (or the row was removed): backfill it.
        Profile.objects.create(user=instance)
| UTF-8 | Python | false | false | 1,173 | py | 11 | models.py | 7 | 0.705575 | 0.698606 | 0 | 35 | 31.8 | 92 |
goxberry/tau2 | 14,740,327,804,343 | 08502b5def84c09f21981bafcfec0150fff75f0f | 471c54686dc6a641fb77e0e9c75c4783ca326e15 | /tools/src/perfexplorer/examples/HardwareCounterBasicAnalysis/getTopX.py | be0bce1366e60d41f93ea792b4f2d18d13707136 | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-permissive"
] | permissive | https://github.com/goxberry/tau2 | ec63911d3970b5330a798c08941e59bf7ca045d1 | d1f34119dc69ad3eda5c939cddda00a86bb303a3 | refs/heads/master | "2020-06-26T12:39:31.496300" | "2019-07-21T23:17:02" | "2019-07-21T23:22:10" | 199,632,738 | 0 | 0 | NOASSERTION | true | "2019-07-30T10:47:59" | "2019-07-30T10:47:59" | "2019-07-22T07:03:27" | "2019-07-22T07:03:25" | 231,478 | 0 | 0 | 0 | null | false | false | from edu.uoregon.tau.perfexplorer.glue import *
from edu.uoregon.tau.perfexplorer.client import PerfExplorerModel
from edu.uoregon.tau.perfdmf import *
from java.util import *
from java.lang import *
# Jython / Python 2 compatibility shim: bind the boolean names to ints so the
# script also runs on old interpreters where True/False are not keywords.
# (This file is Python 2 / Jython - note the print statements below.)
True = 1
False = 0
# Script-level defaults; presumably overridden by the calling PerfExplorer
# script - TODO confirm against the scripts that source this module.
parameterMap = None
tauData = ""
threshold = 10
functions = "function-list.txt"
gprof = False
def getTopX(inTrial, threshold, timerType, metric=None, filterMPI=True):
	"""Return (as a java.util.ArrayList) the names of the `threshold` most
	expensive events of `inTrial`.

	Jython / Python 2 code run inside PerfExplorer, hence the Java
	collections and print statements. When `metric` is None the trial's
	time metric is used. When `filterMPI` is true, MPI_* events are removed
	before ranking. Only events whose exclusive share of the main event's
	inclusive value exceeds 1% are printed and included in the result.
	"""
	inTrial.setIgnoreWarnings(True)
	extracted = inTrial
	# extract computation code (remove MPI)
	if filterMPI:
		myEvents = ArrayList()
		print "Filtering out MPI calls..."
		for event in inTrial.getEvents():
			if not event.startswith("MPI_"):
				myEvents.add(event)
		extractor = ExtractEventOperation(inTrial, myEvents)
		extracted = extractor.processData().get(0)
	# put the top X events names in a list
	myEvents = ArrayList()
	# get the top X events
	print "Extracting top events..."
	extracted.setIgnoreWarnings(True)
	if metric is None:
		metric = extracted.getTimeMetric()
	topper = TopXEvents(extracted, metric, timerType, threshold)
	topped = topper.processData().get(0)
	for event in topped.getEvents():
		shortEvent = Utilities.shortenEventName(event)
		# total inclusive value of the main (root) event; guards the division below
		tmp = extracted.getInclusive(0,extracted.getMainEvent(),metric)
		exclusivePercent = 0.0
		if tmp > 0:
			exclusivePercent = topped.getDataPoint(0,event,metric, timerType) / tmp * 100.0
		# keep only events that account for more than 1% of the total
		if (exclusivePercent > 1.0):
			print "%00.2f%%\t %d\t %s" % (exclusivePercent, extracted.getCalls(0,event), shortEvent)
			myEvents.add(event)
	return myEvents
| UTF-8 | Python | false | false | 1,509 | py | 1,665 | getTopX.py | 1,103 | 0.741551 | 0.727634 | 0 | 49 | 29.795918 | 91 |
guilatrova/ccb-notificacoes-cpfl | 13,675,175,900,364 | c40d581f163bc61ad31c9aebcc5f4427ca0086b9 | 8a141edccd20920faa5df1e26d95c3347dc4c953 | /src/formatters/HtmlFormatter.py | 9a80330ba60b7aaebe2fa25208cd9ebdc431ba62 | [] | no_license | https://github.com/guilatrova/ccb-notificacoes-cpfl | f2d08638138462a852b1d80ce5fd3bcbf4d42068 | d9607993903e04ef77cd76387158d0cba8c58a2b | refs/heads/master | "2020-04-20T16:49:18.904182" | "2019-02-03T17:41:13" | "2019-02-03T17:41:13" | 168,969,581 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Declara as classes HtmlFormatter"""
import os
try:
from formatters.BaseFormatter import VencimentoFormatter
except ModuleNotFoundError: # For local execution
from src.formatters.BaseFormatter import VencimentoFormatter
TEMPLATES_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "templates")
class HtmlFormatter(VencimentoFormatter):
    """Render bill ("conta") information as an HTML e-mail body using the
    templates under ``TEMPLATES_PATH``.

    Templates contain ``#PLACEHOLDER#`` markers that are substituted by
    :meth:`_replace_template`.
    """
    # One table row per bill, the e-mail body, and the error list section.
    ROW_TEMPLATE = os.path.join(TEMPLATES_PATH, "row.html")
    CORPO_TEMPLATE = os.path.join(TEMPLATES_PATH, "email.html")
    ERRORS_TEMPLATE = os.path.join(TEMPLATES_PATH, "errors.html")
    def __init__(self, titulo):
        # E-mail title, injected into the body template as #TITULO#.
        self.titulo = titulo
    def formatar(self, contas, erros):
        """Build the complete HTML body for `contas` (bills) and `erros` (messages)."""
        # NOTE(review): the base class pre-formats the bills; overdue rows are
        # rendered from dict copies so the later pass over `contas` still sees
        # raw datetimes - presumably `pre_formatado` aliases `contas`. TODO
        # confirm against VencimentoFormatter.formatar.
        pre_formatado = super(HtmlFormatter, self).formatar(contas)
        vencidas = self._formatar_contas(
            [conta.copy() for conta in pre_formatado if conta["Vencida"]]
        )
        em_aberto = self._formatar_contas(contas)
        erros = self._formatar_erros(erros)
        return self._formatar_corpo(em_aberto, vencidas, erros)
    def _formatar_data(self, conta):
        """Format the due date as dd/mm/yyyy, wrapping overdue bills in red markup.

        Mutates `conta` in place and returns it.
        """
        conta["Vencimento"] = conta["Vencimento"].strftime("%d/%m/%Y")
        if conta["Vencida"]:
            conta["Vencimento"] = '<span class="red">{}</span>'.format(
                conta["Vencimento"]
            )
        return conta
    def _formatar_contas(self, contas):
        """Render one HTML row (ROW_TEMPLATE) per bill dict."""
        contas = [self._formatar_data(conta) for conta in contas]
        with open(HtmlFormatter.ROW_TEMPLATE, "r", encoding="utf-8") as template:
            raw = template.read()
        return [
            self._replace_template(
                raw,
                DESCRICAO=conta["Descricao"],
                MES=conta["MesReferencia"],
                VENCIMENTO=conta["Vencimento"],
                VALOR=conta["Valor"],
                SVG=conta["SvgCodigoBarras"],
                COD=conta["CodigoBarras"],
            )
            for conta in contas
        ]
    def _formatar_corpo(self, contas, contas_vencidas, erros):
        """Assemble the final e-mail body from pre-rendered row/error fragments."""
        with open(HtmlFormatter.CORPO_TEMPLATE, "r", encoding="utf-8") as template:
            raw = template.read()
        contas = " ".join(contas)
        contas_vencidas = " ".join(contas_vencidas)
        return self._replace_template(
            raw,
            TITULO=self.titulo,
            LINHAS_VENCIDAS=contas_vencidas,
            LINHAS_ABERTO=contas,
            ERROS=erros,
        )
    def _formatar_erros(self, erros):
        """Render the error section, or return '' when there were no errors."""
        if erros:
            with open(HtmlFormatter.ERRORS_TEMPLATE, "r", encoding="utf-8") as template:
                raw = template.read()
            wrapped = []
            for erro in erros:
                wrapped.append("<li>{}</li>".format(erro))
            return self._replace_template(raw, LINHAS=" ".join(wrapped))
        return ""
    def _replace_template(self, template, **kwargs):
        """Substitute every #KEY# marker in `template` with its given value."""
        formatted = template
        for key, value in kwargs.items():
            replace_this = "#{}#".format(key)
            formatted = formatted.replace(replace_this, value)
        return formatted
| UTF-8 | Python | false | false | 3,191 | py | 18 | HtmlFormatter.py | 11 | 0.569771 | 0.56883 | 0 | 94 | 32.925532 | 88 |
sesquideus/asmodeus | 4,724,464,030,083 | db8cc786385e590f6a67c1568e8086ee660a59af | ba1585406af7a75e425b104a3a304ee99c9b11f0 | /core/asmodeus/asmodeus.py | a7ac4de0a22644fe180c9e89117199a5e4c03903 | [
"MIT"
] | permissive | https://github.com/sesquideus/asmodeus | 1ac9dce56b124f2c8318914b0d666a6ea4e079db | cc56357b7fc336e28d19a7297a67890669550be8 | refs/heads/master | "2023-08-03T08:55:51.305957" | "2023-07-25T18:01:14" | "2023-07-25T18:01:14" | 152,603,129 | 4 | 2 | MIT | false | "2023-02-10T23:14:38" | "2018-10-11T14:15:16" | "2022-05-28T06:39:38" | "2023-02-10T23:14:36" | 618 | 4 | 2 | 9 | Python | false | false | import argparse
import sys
import multiprocessing as mp
import logging
import time
from core import dataset, exceptions, configuration
from utilities import colour as c
from models.observer import Observer
log = logging.getLogger('root')
class Asmodeus():
    """Common scaffolding for all ``asmodeus-*`` command line tools.

    Subclasses are expected to provide ``self.name`` plus the
    ``prepare_dataset``, ``configure`` and ``run_specific`` hooks; this base
    class handles argument parsing, configuration loading and top-level
    error reporting.
    """
    def __init__(self):
        # `ok` flips to True only when finalize() is reached without a fatal error.
        self.ok = False
        self.start_time = time.time()
        log.info(f"Initializing {c.script(f'asmodeus-{self.name}')}")
        self.create_argparser()
        self.args = self.argparser.parse_args()
        try:
            try:
                self.load_config()
                self.override_config()
                configuration.make_static(self.config)
                log.debug(f"Full configuration is")
                if log.getEffectiveLevel() == logging.DEBUG:
                    self.config.pprint()
                self.dataset = dataset.DataManager(self.args.dataset, overwrite=self.config.overwrite)
                self.prepare_dataset()
                self.configure()
            # Each known failure mode is logged specifically, then escalated
            # to FatalError so the outer handler exits uniformly.
            except exceptions.CommandLineError as e:
                log.critical(f"Incorrect command line arguments: {e}")
                raise exceptions.FatalError
            except exceptions.ConfigurationError as e:
                log.critical(f"Terminating due to a configuration error: {c.err(e)}")
                raise exceptions.FatalError
            except exceptions.OverwriteError as e:
                log.critical(e)
                log.critical(f"Target directory already exists (use {c.param('-O')} or {c.param('--overwrite')} to overwrite the existing dataset)")
                raise exceptions.FatalError
            except exceptions.PrerequisiteError as e:
                log.critical(f"Missing prerequisites: {e}")
                log.critical("Aborting")
                raise exceptions.FatalError
        except exceptions.FatalError:
            log.critical(f"{c.script(f'asmodeus-{self.name}')} aborting during configuration")
            sys.exit(-1)
        log.info("Initialization complete")
    def create_argparser(self):
        """Build the argument parser shared by every ASMODEUS tool."""
        self.argparser = argparse.ArgumentParser(description="All-Sky Meteor Observation and Detection Efficiency Simulator")
        self.argparser.add_argument('dataset',          type = str,                 help = "name of the dataset")
        self.argparser.add_argument('config',           type = argparse.FileType('r'), help = "main configuration file")
        self.argparser.add_argument('-O', '--overwrite',    action = 'store_true',      help = "overwrite the dataset if it exists")
        self.argparser.add_argument('-d', '--debug',        action = 'store_true',      help = "much more verbose logging")
        self.argparser.add_argument('-l', '--logfile',      type = argparse.FileType('w'), help = "output log to file")
    def prepare_dataset(self):
        """Abstract hook: subclasses must set up their dataset layout here."""
        raise NotImplementedError(f"You need to define the {c.name('prepareDataset')} method for every ASMODEUS subclass.")
    def load_config(self):
        """Parse the YAML configuration file passed on the command line."""
        self.config = configuration.load_YAML(self.args.config)
    def override_config(self):
        """Apply command line switches on top of the loaded configuration."""
        log.setLevel(logging.DEBUG if self.args.debug else logging.INFO)
        if self.args.debug:
            log.warning(f"Debug output is {c.over('active')}")
        if self.args.overwrite:
            log.warning(f"Dataset overwrite {c.over('enabled')}")
            self.config.overwrite = True
        else:
            self.config.overwrite = False
        if self.args.logfile:
            log.addHandler(logging.FileHandler(self.args.logfile.name))
            log.warning(f"Added log output {c.over(self.args.logfile.name)}")
    def mark_time(self):
        # Start a stopwatch; read it back with stop_time().
        self.mark = time.time()
    def stop_time(self):
        """Seconds elapsed since the last mark_time() call."""
        return time.time() - self.mark
    def run_time(self):
        """Seconds elapsed since this object was constructed."""
        return time.time() - self.start_time
    def run(self):
        """Template method: execute the subclass body and report the outcome."""
        try:
            self.run_specific()
            self.finalize()
        except exceptions.PrerequisiteError as e:
            log.critical(f"Terminating due to missing prerequisites")
        except exceptions.ConfigurationError as e:
            log.critical(f"Terminating due to a configuration error: {e}")
        finally:
            # `ok` is only set by finalize(), so every exception path reports failure.
            if self.ok:
                log.info(f"{c.script(f'asmodeus-{self.name}')} finished successfully in {self.run_time():.6f} s")
                log.info("-" * 50)
            else:
                log.critical(f"{c.script(f'asmodeus-{self.name}')} aborted during runtime")
    def override_warning(self, parameter, old, new):
        """Log a standard warning when a configuration value is overridden."""
        log.warning(f"Overriding {c.param(parameter)} ({c.over(old)} -> {c.over(new)})")
    def finalize(self):
        """Mark the run as successful; called only after run_specific() completed."""
        log.debug("Wrapping everything up...")
        self.ok = True
| UTF-8 | Python | false | false | 4,744 | py | 92 | asmodeus.py | 61 | 0.598019 | 0.597175 | 0 | 115 | 40.252174 | 148 |
CG4002-Capstone-Project/internal_comms | 7,017,976,563,692 | 2e670595fe696f9ed76b2ff728b13b79ac8d6df3 | bd7fae7b96f2f7eaef113046f09c066b7283b1b9 | /dnn_utils.py | 30dd3b8de4b1299500cf615b8a79d52a3d998045 | [] | no_license | https://github.com/CG4002-Capstone-Project/internal_comms | 0a613ac8c49b6e8b278fc7dee2424852cc30e7bd | e14ab11b62a07eee8be71eae5e398f45839e5ccc | refs/heads/main | "2023-03-22T01:21:52.122061" | "2021-03-17T09:45:38" | "2021-03-17T09:45:38" | 344,720,952 | 1 | 1 | null | false | "2021-03-15T10:23:57" | "2021-03-05T06:49:26" | "2021-03-14T10:31:14" | "2021-03-15T10:23:56" | 70 | 0 | 0 | 1 | Python | false | false | import warnings
import numpy as np
import torch
import torch.nn as nn
from joblib import load
from scipy import signal, stats
# Silence library warnings (scipy/numpy chatter) for clean console output.
warnings.filterwarnings("ignore")
# Gesture classes the model distinguishes; the network output size follows from it.
activities = ["dab", "gun", "elbow"]
n_labels = len(activities)
def scale_data(data, scaler, is_train=False):
    """Apply `scaler` to the feature matrix `data`.

    data: feature rows, one per instance
    scaler: sklearn-style object exposing fit_transform/transform
    is_train: when True, fit the scaler on this data before transforming
    """
    transform = scaler.fit_transform if is_train else scaler.transform
    return transform(data)
def compute_mean(data):
    """Arithmetic mean of all samples."""
    return np.asarray(data).mean()
def compute_variance(data):
    """Population variance (ddof=0) of the samples."""
    return np.asarray(data).var()
def compute_median_absolute_deviation(data):
    """Median absolute deviation scaled by ~1.4826 (consistent with the
    standard deviation of normally-distributed data).

    Fix: the original called scipy.stats.median_absolute_deviation, which was
    deprecated in SciPy 1.5 and removed in 1.9. median_abs_deviation with
    scale="normal" reproduces the old function's default behaviour exactly.
    """
    return stats.median_abs_deviation(data, scale="normal")
def compute_root_mean_square(data):
    """Root mean square of the samples: sqrt(mean(x^2))."""
    return np.sqrt(np.mean(np.square(data)))
def compute_interquartile_range(data):
    """Interquartile range Q3 - Q1 (linear interpolation, as scipy.stats.iqr)."""
    q75, q25 = np.percentile(data, [75, 25])
    return q75 - q25
def compute_percentile_75(data):
    """75th percentile of the samples."""
    return np.quantile(data, 0.75)
def compute_kurtosis(data):
    """Fisher kurtosis of the samples (a normal distribution gives 0.0)."""
    return stats.kurtosis(np.asarray(data))
def compute_min_max(data):
    """Range of the samples (max - min)."""
    return np.ptp(np.asarray(data))
def compute_signal_magnitude_area(data):
    """Sum of the samples divided by their count.

    NOTE(review): no absolute value is taken, unlike the textbook SMA
    definition - behaviour kept as written.
    """
    total = np.sum(data)
    return total / len(data)
def compute_zero_crossing_rate(data):
    """Number of strict sign changes between consecutive samples.

    Expects an array-like with elementwise multiplication (i.e. a numpy
    array, not a plain list).
    """
    consecutive_products = data[:-1] * data[1:]
    return (consecutive_products < 0).sum()
def compute_spectral_centroid(data):
    """Centroid of the magnitude spectrum over a normalized [0, 1] frequency axis."""
    magnitude = np.abs(np.fft.rfft(data))
    freqs = np.linspace(0, 1, len(magnitude))
    return np.sum(freqs * magnitude) / np.sum(magnitude)
def compute_spectral_entropy(data):
    """Shannon entropy of the Welch power spectral density estimate."""
    _, psd = signal.welch(data)
    return stats.entropy(psd)
def compute_spectral_energy(data):
    """Sum of squared Welch PSD values."""
    _, psd = signal.welch(data)
    return np.sum(psd * psd)
def compute_principle_frequency(data):
    """Frequency bin with the largest (squared) Welch PSD value."""
    freqs, psd = signal.welch(data)
    return freqs[np.argmax(psd * psd)]
def extract_raw_data_features_per_row(f_n):
    """Compute the 14 time/frequency-domain features for one channel.

    Returns them as a tuple; the order is fixed and downstream code relies
    on it (14 features x 6 channels = the 84-wide feature vector).
    """
    feature_funcs = (
        compute_mean,
        compute_variance,
        compute_median_absolute_deviation,
        compute_root_mean_square,
        compute_interquartile_range,
        compute_percentile_75,
        compute_kurtosis,
        compute_min_max,
        compute_signal_magnitude_area,
        compute_zero_crossing_rate,
        compute_spectral_centroid,
        compute_spectral_entropy,
        compute_spectral_energy,
        compute_principle_frequency,
    )
    return tuple(func(f_n) for func in feature_funcs)
def extract_raw_data_features(X, n_features=84):
    """Flatten per-channel features of raw windows into an
    (num_instances, n_features) matrix, one row per instance."""
    features = np.ones((X.shape[0], n_features))
    for i in range(X.shape[0]):
        row_features = []
        for j in range(X.shape[1]):
            row_features.extend(extract_raw_data_features_per_row(X[i][j]))
        features[i] = np.array(row_features)
    return features
class DNN(nn.Module):
    """Three-layer MLP classifier: 84 features -> 64 -> 16 -> n_labels,
    with dropout 0.1 after each hidden layer.

    Attribute names (fc1/dp1/fc2/dp2/fc3) are part of the state_dict layout
    and must stay stable so saved checkpoints keep loading.
    """
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(84, 64)
        self.dp1 = nn.Dropout(0.1)
        self.fc2 = nn.Linear(64, 16)
        self.dp2 = nn.Dropout(0.1)
        self.fc3 = nn.Linear(16, n_labels)
    def forward(self, x):
        for layer in (self.fc1, self.dp1, self.fc2, self.dp2, self.fc3):
            x = layer(x)
        return x
class Dataset(object):
    """Minimal map-style dataset pairing input rows X with labels y.

    y rows are indexable; the label is taken from each row's first element.
    """
    def __init__(self, X, y):
        self.X = X
        self.y = y
    def __getitem__(self, idx):
        return self.X[idx], self.y[idx][0]
    def __len__(self):
        return len(self.X)
def load_dataloader(X, y):
    """Wrap (X, y) in a shuffling DataLoader (batch size 100, 4 workers)."""
    return torch.utils.data.DataLoader(
        Dataset(X, y), batch_size=100, shuffle=True, num_workers=4,
    )
if __name__ == "__main__":
model_path = "dnn_model.pth"
scaler_path = "dnn_std_scaler.bin"
inputs_path = "inputs.npy"
labels_path = "labels.npy"
# Load data
X, y = np.load(inputs_path), np.load(labels_path)
# Load model
model = DNN()
model.load_state_dict(torch.load(model_path))
model.eval()
# Load scaler
scaler = load(scaler_path)
# Prepare data
dataloader = load_dataloader(X, y)
# Run inference
correct = 0
total = 0
for inputs, labels in dataloader:
inputs = inputs.numpy() # convert to numpy
inputs = extract_raw_data_features(inputs) # extract features
inputs = scale_data(inputs, scaler) # scale features
inputs = torch.tensor(inputs) # convert to tensor
outputs = model(inputs.float())
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print("Accuracy: ", correct / total)
| UTF-8 | Python | false | false | 5,347 | py | 10 | dnn_utils.py | 5 | 0.610623 | 0.595287 | 0 | 226 | 22.659292 | 76 |
CatFelts/Pyfun | 1,443,109,043,572 | e90a2cdaa6018d1a55d4363976bb0f800313e173 | 1cffa4e5afc6776418254d2aa5f13a3ea0af556b | /game.py | 632b7674b7c3290a4e3d95520f9ddce09ce3a037 | [] | no_license | https://github.com/CatFelts/Pyfun | 8b982c7ceccd8d83e178251c2702b8a62d3b86b1 | 5ed479075ecd2d4e39cf81549e8f1708d23deb96 | refs/heads/master | "2021-09-06T11:50:31.807555" | "2018-02-06T07:35:18" | "2018-02-06T07:35:18" | 119,914,336 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pygame
x = pygame.init()  # returns (modules_ok, modules_failed); result is unused
pygame.font.init()
my_font = pygame.font.SysFont('Comic Sans MS', 30)
#color constants (RGB tuples)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
PURPLE = (72, 9, 150)
PINK = (239, 19, 221)
YELLOW = (250, 255, 0)
ORANGE = (255, 119, 0)
#FPS cap used by clock.tick() in both loops
FRAMERATE = 30
#screen dimensions
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
#start position (centre of the screen)
START_X = SCREEN_WIDTH/2
START_Y = SCREEN_HEIGHT/2
HEAD_SIZE = 10
clock = pygame.time.Clock()
#pre-rendered text surfaces for the game-over screen
gameover_text = my_font.render("GAME OVER", False, RED)
restart_text = my_font.render("Would you like to start a new game?", False, RED)
yes_or_no_text = my_font.render("(Press Y for yes or N for n)", False, RED)
#game window
game_display = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption("ssslitherrr")
def restart_game():
    """Reset the head to the screen centre and clear motion/game-over state."""
    global game_over, head_x, head_y, x_change, y_change
    game_over = False
    head_x, head_y = START_X, START_Y
    x_change = y_change = 0
#game loop
restart_game()
game_over = False
game_exit = False
paused = False
# Bug fix: the play loop and the game-over loop originally ran once in
# sequence, so choosing "restart" (Y) reset the state and then fell straight
# through to pygame.quit(). An outer loop makes the restart actually work.
# The per-frame debug prints (arrow keys, game-over state) are removed: they
# spammed the console at FRAMERATE times per second.
while not game_exit:
    # --- active play loop ---
    while not game_over and not game_exit:
        #event handling
        for event in pygame.event.get():
            #window close button
            if event.type == pygame.QUIT:
                game_exit = True
            #keyboard input from user
            if event.type == pygame.KEYDOWN:
                #arrow keys steer the head (ignored while paused)
                if not paused:
                    if event.key == pygame.K_LEFT:
                        x_change = -HEAD_SIZE
                        y_change = 0
                    elif event.key == pygame.K_RIGHT:
                        x_change = HEAD_SIZE
                        y_change = 0
                    elif event.key == pygame.K_UP:
                        y_change = -HEAD_SIZE
                        x_change = 0
                    elif event.key == pygame.K_DOWN:
                        y_change = HEAD_SIZE
                        x_change = 0
                #pause button is 'P'
                if event.key == pygame.K_p:
                    paused = not paused
                    if paused:  #just paused: save the motion and freeze
                        saved_x_change = x_change
                        saved_y_change = y_change
                        x_change = 0
                        y_change = 0
                    else:  #just unpaused: restore the saved motion
                        x_change = saved_x_change
                        y_change = saved_y_change
                if event.key == pygame.K_ESCAPE:
                    game_exit = True
        head_x = head_x + x_change
        head_y = head_y + y_change
        #if the head hits the edges, game over
        if head_x <= 0 or head_x >= SCREEN_WIDTH or head_y <= 0 or head_y >= SCREEN_HEIGHT:
            game_over = True
        #draws
        game_display.fill(WHITE)
        pygame.draw.rect(game_display, GREEN, [head_x, head_y, HEAD_SIZE, HEAD_SIZE])
        pygame.display.update()
        clock.tick(FRAMERATE)
    # --- game-over screen loop ---
    while game_over and not game_exit:
        #game over screen event handler
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                game_exit = True
            #'Y' restarts a new round, 'N' quits
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_y:
                    game_over = False
                    restart_game()
                if event.key == pygame.K_n:
                    game_exit = True
        #draws
        game_display.fill(BLACK)
        game_display.blit(gameover_text, (300, 150))
        game_display.blit(restart_text, (150, 300))
        game_display.blit(yes_or_no_text, (200, 500))
        pygame.display.flip()
        clock.tick(FRAMERATE)
pygame.quit()
quit()
| UTF-8 | Python | false | false | 5,052 | py | 2 | game.py | 1 | 0.454869 | 0.435669 | 0 | 146 | 33.60274 | 103 |
jksdf/IV122 | 15,848,429,334,495 | 284b72f269401f3e426c2a3f23009beb42166cc0 | 7fced4cf0f53a6439d3a2110454fbc7210b8ebac | /src/week3/PartB.py | 7b2048093633193d3614835660b9f3642d2f6745 | [] | no_license | https://github.com/jksdf/IV122 | 9d56192ebbebfa1a858309252a10d7d802501691 | f4413bf5dec8d3fe8e762c2ae2a3b2642dcbad4a | refs/heads/master | "2022-12-02T02:59:09.577072" | "2019-05-28T02:31:40" | "2019-05-28T02:31:40" | 171,647,115 | 0 | 0 | null | false | "2022-11-22T03:29:54" | "2019-02-20T09:56:41" | "2019-05-28T02:31:48" | "2022-11-22T03:29:51" | 13,672 | 0 | 0 | 2 | Python | false | false | import math
from Base import Base, AbstractFilenameProvider
from common.math.geometry import deg2rad, rad2deg
from common.turtle.Turtle import Turtle
from week3.common import star, polygon
class PartB(Base):
name = 'B'
def run(self, fnprovider: AbstractFilenameProvider):
fn = fnprovider.get_filename(suffix='.svg', name="polygon")
turtle = Turtle(fn)
self.pentagon(turtle)
turtle.resetpos(position=(250, 0))
self.squares(turtle)
turtle.resetpos(position=(400, 0))
self.grid(turtle)
turtle.resetpos(position=(750, 0))
self.triangles(turtle)
turtle.resetpos(position=(900, 50))
self.flower(turtle)
turtle.save(frame=(1000, 200))
return fn
def pentagon(self, turtle):
turtle.pendown()
angle = 180 - 360 / 5
star(turtle, 5, math.sqrt(2 * 100 * 100 - 2 * 100 * 100 * math.cos(deg2rad(angle)))) # cosine rule
turtle.left((180 - angle) / 2)
polygon(turtle, 5, 100)
turtle.right((180 - angle) / 2)
turtle.penup()
def squares(self, turtle, iters=50, size=100, offset=0.15):
turtle.pendown()
angle = rad2deg(math.atan(offset / (1 - offset)))
for _ in range(iters):
for _ in range(4):
turtle.forward(size)
turtle.right(90)
turtle.forward(offset * size)
turtle.right(angle)
size = size * (1 - offset)
def grid(self, turtle, density=10, r=100):
pos = turtle.position[0] + r, turtle.position[1] + r
for x in range(-density, density):
xpos = x * r / density
ypos = math.sqrt(r ** 2 - xpos ** 2)
turtle.line(start=(pos[0] + xpos, pos[1] + ypos), end=(pos[0] + xpos, pos[1] - ypos))
turtle.line(start=(pos[0] + ypos, pos[1] + xpos), end=(pos[0] - ypos, pos[1] + xpos))
def triangles(self, turtle, side=200, step=17):
angle = 120
turtle.pendown()
turtle.right(60)
while side > 0:
for _ in range(3):
turtle.forward(side)
turtle.right(angle)
turtle.right(30)
turtle.penup()
turtle.forward(step / 2 / math.cos(deg2rad(30)))
turtle.pendown()
turtle.left(30)
side -= step
def flower(self, turtle: Turtle):
for _ in range(12):
for _ in range(12):
turtle.forward(20)
turtle.right(360 / 12)
turtle.right(360 / 12) | UTF-8 | Python | false | false | 2,568 | py | 113 | PartB.py | 58 | 0.547118 | 0.496885 | 0 | 77 | 32.363636 | 107 |
JoaoRicardoSimplicio/market | 618,475,338,747 | ab2868e6e463f8d2c2ee98dc47e8378921a3a343 | 6099ea6ca4a1aa38e6ad9403fb765c90f335ef3f | /stocks/tasks/stock_purchase.py | 349951cadffb28b434e6ad2c13c4aade0f025827 | [] | no_license | https://github.com/JoaoRicardoSimplicio/market | 5d49ed9a2b90fdb911e1e83d545fac859afc208d | fb0e7a0d8efdf286e24c807ff4dd0a89a8f8be0d | refs/heads/master | "2023-03-05T21:28:15.043875" | "2021-02-14T06:16:35" | "2021-02-14T06:16:35" | 323,718,408 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from stocks.queries.stock_queries import StockQueries
from stocks.tasks.stock_information import get_user_information
def buy_stock(stock_code):
    """Fetch purchase details for `stock_code` and execute the buy."""
    info = get_user_information(stock_code=stock_code)
    StockQueries(**info).buy()
def get_stock_purchases(stock_code):
    """Return all recorded purchases for the stock identified by `stock_code`."""
    stock = StockQueries(code=stock_code, price=None, description=None)
    return stock.purchases()
| UTF-8 | Python | false | false | 417 | py | 33 | stock_purchase.py | 24 | 0.748201 | 0.748201 | 0 | 14 | 28.785714 | 71 |