hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e91ff99a3728e01c9518fdfe79d256b14ae28af1 | 353 | py | Python | DataBase Sqlite3/NoteMeilheur.py | otmanabdoun/IHM-Python | 624e961c2f6966b98bf2c1bc4dd276b812954ba1 | [
"Apache-2.0"
] | 3 | 2021-12-08T10:34:55.000Z | 2022-01-17T21:02:40.000Z | NoteMeilheur.py | otmanabdoun/IHM-Python | 624e961c2f6966b98bf2c1bc4dd276b812954ba1 | [
"Apache-2.0"
] | null | null | null | NoteMeilheur.py | otmanabdoun/IHM-Python | 624e961c2f6966b98bf2c1bc4dd276b812954ba1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 3 04:38:07 2021
@author: User
"""
import sqlite3

# Query the student with the best grade and print (name, grade).
# The original script never closed the connection; close it in a finally
# block so the database handle is released even if the query fails.
connexion = sqlite3.connect("dbM2IQL.db")
try:
    curseur = connexion.cursor()
    # Join students with their marks and keep only the highest mark.
    curseur.execute("""SELECT e.Nom, c.note FROM Etudiant as e INNER JOIN
                    CF as c ON e.id = c.fk_etudiant
                    ORDER BY c.note DESC LIMIT 1""")
    print(curseur.fetchone())
finally:
    connexion.close()
e921b1bc0cceec8b113f393fdb06b057357e8848 | 24,410 | py | Python | packages/robotframework-test-assistant/robotframework-test-assistant.py | jg8481/leon | b94a6c753cee79f4568ab7a83932351f7c949791 | [
"MIT"
] | 3 | 2020-01-15T20:49:42.000Z | 2020-11-22T01:41:33.000Z | packages/robotframework-test-assistant/robotframework-test-assistant.py | jg8481/leon | b94a6c753cee79f4568ab7a83932351f7c949791 | [
"MIT"
] | null | null | null | packages/robotframework-test-assistant/robotframework-test-assistant.py | jg8481/leon | b94a6c753cee79f4568ab7a83932351f7c949791 | [
"MIT"
#!/usr/bin/env python3
# -*- coding:utf-8 -*-

import utils
import os
import os.path
import sys
import subprocess
import re
import time

filepath = os.path.dirname(os.path.realpath(__file__))

# Time delays (in seconds) used by the Generic_Customizable runners below.
small_time_delay = 5     ##--> Use this to set up your small time delay.
medium_time_delay = 20   ##--> Use this to set up your medium time delay.
large_time_delay = 600   ##--> Use this to set up your large time delay.


def _run_script(command):
    """Invoke the bundled robotframework-runner.sh with one command keyword.

    `command` is always a fixed constant chosen by the functions below
    (never user input), which is why shell=True is acceptable here.
    """
    subprocess.call(filepath + '/robotframework-runner.sh ' + command, shell=True)


def _reply(code):
    """Build Leon's final answer for the given message/translation code."""
    return utils.output('end', code, utils.translate(code))


def Clean_Up_Results(string, entities):
    """Leon will clean up the results folder"""
    _run_script('Clean-Up-Results')
    return _reply('clean_up_results_ran')


def Check_One(string, entities):
    """Leon will run Robot Framework through the script runner"""
    _run_script('Check-One')
    return _reply('single_check_ran')


def Check_Two(string, entities):
    """Leon will run Robot Framework through the script runner"""
    _run_script('Check-Two')
    return _reply('single_check_ran')


def Check_Three(string, entities):
    """Leon will run Robot Framework through the script runner"""
    _run_script('Check-Three')
    return _reply('single_check_ran')


def Check_Four(string, entities):
    """Leon will run Robot Framework through the script runner"""
    _run_script('Check-Four')
    return _reply('single_check_ran')


def Set_Up_Runner_One(string, entities):
    """Leon will set up a custom automated check run"""
    _run_script('Set-Up-Runner-One')
    return _reply('finished_setting_up')


def Set_Check_One(string, entities):
    """Leon will set up a custom automated check run"""
    _run_script('Set-Check-One')
    return _reply('finished_setting_up')


def Set_Check_Two(string, entities):
    """Leon will set up a custom automated check run"""
    _run_script('Set-Check-Two')
    return _reply('finished_setting_up')


def Set_Check_Three(string, entities):
    """Leon will set up a custom automated check run"""
    _run_script('Set-Check-Three')
    return _reply('finished_setting_up')


def Set_Check_Four(string, entities):
    """Leon will set up a custom automated check run"""
    _run_script('Set-Check-Four')
    return _reply('finished_setting_up')


def Custom_Runner_One(string, entities):
    """Leon will start a custom Robot Framework automated check run"""
    _run_script('Custom-Runner-One')
    return _reply('multiple_checks_ran')


def Display_Runner_One(string, entities):
    """Leon will display the results of the Robot Framework automated check run"""
    _run_script('Display-Runner-One')
    return _reply('display_results')


def Group_One(string, entities):
    """Leon will run Robot Framework through the script runner"""
    _run_script('Group-One')
    return _reply('parallel_checks_ran')


def Group_Two(string, entities):
    """Leon will run Robot Framework through the script runner"""
    _run_script('Group-Two')
    return _reply('parallel_checks_ran')


def Group_Three(string, entities):
    """Leon will run Robot Framework through the script runner"""
    _run_script('Group-Three')
    return _reply('parallel_checks_ran')


def Group_Four(string, entities):
    """Leon will run Robot Framework through the script runner"""
    _run_script('Group-Four')
    return _reply('parallel_checks_ran')


def Set_Up_Runner_Two(string, entities):
    """Leon will set up a custom automated check run"""
    _run_script('Set-Up-Runner-Two')
    return _reply('finished_setting_up')


def Set_Group_One(string, entities):
    """Leon will set up a custom automated check run"""
    _run_script('Set-Group-One')
    return _reply('finished_setting_up')


def Set_Group_Two(string, entities):
    """Leon will set up a custom automated check run"""
    _run_script('Set-Group-Two')
    return _reply('finished_setting_up')


def Set_Group_Three(string, entities):
    """Leon will set up a custom automated check run"""
    _run_script('Set-Group-Three')
    return _reply('finished_setting_up')


def Set_Group_Four(string, entities):
    """Leon will set up a custom automated check run"""
    _run_script('Set-Group-Four')
    return _reply('finished_setting_up')


def Custom_Runner_Two(string, entities):
    """Leon will start a custom Robot Framework automated check run"""
    _run_script('Custom-Runner-Two')
    return _reply('multiple_checks_ran')


def Display_Runner_Two(string, entities):
    """Leon will display the results of the Robot Framework automated check run"""
    _run_script('Display-Runner-Two')
    return _reply('display_results')


def Slack_Notification_Send_All(string, entities):
    """Leon will send the console log results of the Robot Framework automated check runs to Slack"""
    _run_script('Slack-Notification-Send-All')
    return _reply('notify_the_team')


def Build_Docker_Containers(string, entities):
    """Leon will build Docker Containers for running Robot Framework scripts"""
    _run_script('Build-Docker-Containers')
    return _reply('finished_setting_up')


def Clean_Up_Docker_Containers(string, entities):
    """Leon will stop and remove Docker Containers"""
    _run_script('Clean-Up-Docker-Containers')
    return _reply('finished_setting_up')


def Robot_Framework_Docker_API_Checks(string, entities):
    """Leon will run Robot Framework scripts from within Docker Containers that run locally"""
    _run_script('Robot-Framework-Docker-API-Checks')
    return _reply('multiple_checks_ran')


def Robot_Framework_Docker_Random_Order_API_Checks(string, entities):
    """Leon will run Robot Framework scripts from within Docker Containers that run locally"""
    _run_script('Robot-Framework-Docker-Random-Order-API-Checks')
    return _reply('multiple_checks_ran')


def Robot_Framework_Docker_MBT_Graphwalker_Checks(string, entities):
    """Leon will run Robot Framework scripts from within Docker Containers that run locally"""
    _run_script('Robot-Framework-Docker-MBT-Graphwalker-Checks')
    return _reply('multiple_checks_ran')


def Display_Current_MBT_Graphwalker_Path(string, entities):
    """Leon will display the results of the current Graphwalker Path generated by the Robot Framework Docker Container"""
    _run_script('Display-Current-MBT-Graphwalker-Results')
    return _reply('display_results')


def Run_Same_Robot_Framework_Docker_MBT_Graphwalker_Checks_Again(string, entities):
    """Leon will run Robot Framework scripts from within Docker Containers that run locally"""
    _run_script('Run-Same-Robot-Framework-Docker-MBT-Graphwalker-Checks-Again')
    return _reply('multiple_checks_ran')


def Robot_Framework_Selenium_Desktop_Web_Checks(string, entities):
    """Leon will run Robot Framework through the script runner"""
    _run_script('Robot-Framework-Selenium-Desktop-Web-Checks')
    return _reply('multiple_checks_ran')


def Start_Remote_API_Check_Process_Webhook_Docker_Container(string, entities):
    """Leon will start a Docker Container for running remote Robot Framework scripts triggered by a webhook"""
    _run_script('Start-Remote-API-Check-Process-Webhook-Docker-Container')
    return _reply('multiple_checks_ran')


def Start_Remote_Selenium_Process_Webhook_Container(string, entities):
    """Leon will start a Docker Container for running remote Robot Framework scripts triggered by a webhook"""
    _run_script('Start-Remote-Selenium-Process-Webhook-Docker-Container')
    return _reply('multiple_checks_ran')


def Trigger_Remote_API_Check_Process_Webhook_Docker_Container(string, entities):
    """Leon will trigger a Docker Container for running remote Robot Framework scripts using a webhook"""
    _run_script('Trigger-Remote-API-Check-Process-Webhook-Docker-Container')
    return _reply('multiple_checks_ran')


def Trigger_Remote_Selenium_Process_Webhook_Container(string, entities):
    """Leon will trigger a Docker Container for running remote Robot Framework scripts using a webhook"""
    _run_script('Trigger-Remote-Selenium-Process-Webhook-Docker-Container')
    return _reply('multiple_checks_ran')


def Trigger_Both_Webhook_Docker_Containers_For_Parallel_Run(string, entities):
    """Leon will trigger a Docker Container for running remote Robot Framework scripts using a webhook"""
    _run_script('Trigger-Both-Webhook-Docker-Containers-For-Parallel-Run')
    return _reply('multiple_checks_ran')


def Custom_Tasks_And_Suites_Runner(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    _run_script('Custom-Tasks-And-Suites-Runner')
    # Bug fix: the original returned output('end', 'finished_setting_up',
    # translate('multiple_checks_ran')) -- a mismatched code/speech pair.
    # Use 'multiple_checks_ran' consistently, matching the other runners.
    return _reply('multiple_checks_ran')


def Set_Up_Custom_Tasks_And_Suites_Runner(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    _run_script('Set-Up-Custom-Tasks-And-Suites-Runner')
    return _reply('finished_setting_up')


def Set_Slack_Notification_Send_All(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    _run_script('Set-Slack-Notification-Send-All')
    return _reply('finished_setting_up')


def Set_Build_Docker_Containers(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    _run_script('Set-Build-Docker-Containers')
    return _reply('finished_setting_up')


def Set_Clean_Up_Docker_Containers(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    _run_script('Set-Clean-Up-Docker-Containers')
    return _reply('finished_setting_up')


def Set_Start_Remote_API_Check_Process_Webhook_Docker_Container(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    _run_script('Set-Start-Remote-API-Check-Process-Webhook-Docker-Container')
    return _reply('finished_setting_up')


def Set_Start_Remote_Selenium_Process_Webhook_Docker_Container(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    _run_script('Set-Start-Remote-Selenium-Process-Webhook-Docker-Container')
    return _reply('finished_setting_up')


def Set_Trigger_Remote_API_Check_Process_Webhook_Docker_Container(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    _run_script('Set-Trigger-Remote-API-Check-Process-Webhook-Docker-Container')
    return _reply('finished_setting_up')


def Set_Trigger_Remote_Selenium_Process_Webhook_Docker_Container(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    _run_script('Set-Trigger-Remote-Selenium-Process-Webhook-Docker-Container')
    return _reply('finished_setting_up')


def Set_Trigger_Both_Webhook_Docker_Containers_For_Parallel_Run(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    _run_script('Set-Trigger-Both-Webhook-Docker-Containers-For-Parallel-Run')
    return _reply('finished_setting_up')


def Set_Generate_Bug_Risk_Prediction_Scores_For_A_GitHub_Repo(string, entities):
    """Leon will set up a custom automated tasks and suites run"""
    _run_script('Set-Generate-Bug-Risk-Prediction-Scores-For-A-GitHub-Repo')
    return _reply('finished_setting_up')


def Display_Custom_Tasks_And_Suites_Runner(string, entities):
    """Leon will display the results of the Robot Framework automated RPA tasks run"""
    _run_script('Display-Custom-Tasks-And-Suites-Runner')
    return _reply('display_results')


def Generic_Customizable_Time_Delayed_Runner_One(string, entities):
    """Leon will set up a time delayed generic task runner"""
    ##--> Suggestion: Feel free to change the time.sleep to small_time_delay, medium_time_delay or large_time_delay.
    time.sleep(small_time_delay)
    ##--> Suggestion: set the command below to any keyword handled by the runner script; this example triggers a single time-delayed check.
    _run_script('Check-One')
    return _reply('generic_time_delayed_task_ran')


def Generic_Customizable_Time_Delayed_Runner_Two(string, entities):
    """Leon will set up a time delayed generic task runner"""
    ##--> Suggestion: Feel free to change the time.sleep to small_time_delay, medium_time_delay or large_time_delay.
    time.sleep(small_time_delay)
    ##--> Suggestion: edit this sequence freely; it builds off of a previously created Custom_Runner_One .csv file.
    for command in ('Set-Up-Runner-One',
                    'Set-Check-Three',
                    'Set-Check-Two',
                    'Set-Check-Three',
                    'Set-Check-One',
                    'Custom-Runner-One',
                    'Display-Runner-One'):
        _run_script(command)
    return _reply('generic_time_delayed_task_ran')


def Generic_Customizable_Time_Delayed_Runner_Three(string, entities):
    """Leon will set up a time delayed generic task runner"""
    ##--> Suggestion: Feel free to change the time.sleep to small_time_delay, medium_time_delay or large_time_delay.
    time.sleep(small_time_delay)
    ##--> Suggestion: edit this sequence freely; it chains the commands for a custom tasks-and-suites run and displays the results.
    for command in ('Set-Up-Custom-Tasks-And-Suites-Runner',
                    'Set-Clean-Up-Docker-Containers',
                    'Set-Build-Docker-Containers',
                    'Set-Generate-Bug-Risk-Prediction-Scores-For-A-GitHub-Repo',
                    'Custom-Tasks-And-Suites-Runner',
                    'Display-Custom-Tasks-And-Suites-Runner'):
        _run_script(command)
    return _reply('generic_time_delayed_task_ran')


def Generic_Customizable_On_Demand_Runner(string, entities):
    """Leon will set up a generic on-demand task runner"""
    ##--> Suggestion: edit this sequence freely; it chains all of the custom runners together and sends notifications to the team.
    for command in ('Set-Up-Custom-Tasks-And-Suites-Runner',
                    'Set-Clean-Up-Docker-Containers',
                    'Set-Build-Docker-Containers',
                    'Set-Generate-Bug-Risk-Prediction-Scores-For-A-GitHub-Repo',
                    'Custom-Tasks-And-Suites-Runner',
                    'Set-Up-Runner-One',
                    'Set-Check-Three',
                    'Set-Check-Two',
                    'Set-Check-Four',
                    'Set-Check-One',
                    'Custom-Runner-One',
                    'Robot-Framework-Selenium-Desktop-Web-Checks',
                    'Set-Up-Runner-Two',
                    'Set-Group-Two',
                    'Set-Group-One',
                    'Set-Group-Four',
                    'Set-Group-Three',
                    'Custom-Runner-Two',
                    'Robot-Framework-Docker-MBT-Graphwalker-Checks',
                    'Set-Up-Custom-Tasks-And-Suites-Runner',
                    'Set-Clean-Up-Docker-Containers',
                    'Set-Build-Docker-Containers',
                    'Set-Start-Remote-API-Check-Process-Webhook-Docker-Container',
                    'Set-Start-Remote-Selenium-Process-Webhook-Docker-Container',
                    'Set-Trigger-Both-Webhook-Docker-Containers-For-Parallel-Run',
                    'Set-Trigger-Remote-Selenium-Process-Webhook-Docker-Container',
                    'Set-Trigger-Remote-API-Check-Process-Webhook-Docker-Container',
                    'Custom-Tasks-And-Suites-Runner',
                    'Gather-All-Robot-Framework-Test-Results-And-Deploy-Dashboard-To-Heroku',
                    'Slack-Notification-Send-All'):
        _run_script(command)
    return _reply('generic_on_demand_task_ran')


def Gather_All_Robot_Framework_Test_Results_And_Deploy_Dashboard_To_Heroku(string, entities):
    """Leon will run Robot Framework ReBot and Git commands to deploy a results file to Heroku"""
    _run_script('Gather-All-Robot-Framework-Test-Results-And-Deploy-Dashboard-To-Heroku')
    return _reply('gathered_test_results_and_deployed_dashboard_to_heroku')


def Help_Confused_Users(string, entities):
    """Leon will try to help confused users who don't know how to use this leon-ai package"""
    return _reply('help_confused_users')


def Jira_Task_Runner(string, entities):
    """Leon will run Robot Framework through the script runner"""
    _run_script('Jira-Task-Runner')
    return _reply('single_check_ran')
| 70.549133 | 292 | 0.7712 |
e9225ac8234cba226c9c33772de98e2d065d77b6 | 349 | py | Python | chapter2/bandit.py | mtrazzi/understanding-rl | 83a9b7608c805189a39b4ef81893f6ebe982f9e1 | [
"MIT"
] | 95 | 2020-04-26T12:36:07.000Z | 2020-05-02T13:23:47.000Z | chapter2/bandit.py | 3outeille/rl-book-challenge | b02595b0aec3e9632ef5d9814e925384931089bd | [
"MIT"
] | 2 | 2020-09-24T20:29:29.000Z | 2021-11-27T11:17:45.000Z | chapter2/bandit.py | 3outeille/rl-book-challenge | b02595b0aec3e9632ef5d9814e925384931089bd | [
"MIT"
] | 15 | 2020-04-27T04:10:02.000Z | 2020-04-30T21:42:04.000Z | import numpy as np
| 19.388889 | 52 | 0.638968 |
e92450a33cbfd0332cf6d8991a025ba1f22e0f12 | 1,765 | py | Python | Sublime_Plugin/EditFormat.py | dtysky/Gal2Renpy | 59a70c5d336394155dedaf82d17bd99297f92d1a | [
"MIT"
] | 36 | 2015-04-19T05:03:10.000Z | 2022-03-29T08:12:38.000Z | Sublime_Plugin/EditFormat.py | dtysky/Gal2Renpy | 59a70c5d336394155dedaf82d17bd99297f92d1a | [
"MIT"
] | 2 | 2016-05-05T07:24:09.000Z | 2017-11-01T05:32:11.000Z | Sublime_Plugin/EditFormat.py | dtysky/Gal2Renpy | 59a70c5d336394155dedaf82d17bd99297f92d1a | [
"MIT"
] | 2 | 2016-12-01T02:12:33.000Z | 2020-03-09T02:27:19.000Z | #coding:utf-8
#########################
#Copyright(c) 2014 dtysky
######################### | 19.611111 | 82 | 0.273088 |
e924c56b0295f7f4a6d78dab18bd9428b1fe0209 | 272 | py | Python | wesgame.py | WestenPy/Curso_em_video | 9f6a9775d27e1b86d54b381aba5da69b2ae21b27 | [
"MIT"
] | null | null | null | wesgame.py | WestenPy/Curso_em_video | 9f6a9775d27e1b86d54b381aba5da69b2ae21b27 | [
"MIT"
] | null | null | null | wesgame.py | WestenPy/Curso_em_video | 9f6a9775d27e1b86d54b381aba5da69b2ae21b27 | [
"MIT"
] | null | null | null | from random import randint
ataque()
| 16 | 31 | 0.5 |
e924f0db03f4f2a8c126f7c109a518852a2aa24a | 6,850 | py | Python | ProcessingData/get_gp-bias.py | gomes-lab/SARA_ScienceAdvances | 61848d1c92a66bd58c8c195e5b2bb250ef8efb51 | [
"MIT"
] | 1 | 2022-01-13T12:17:29.000Z | 2022-01-13T12:17:29.000Z | ProcessingData/get_gp-bias.py | gomes-lab/SARA_ScienceAdvances | 61848d1c92a66bd58c8c195e5b2bb250ef8efb51 | [
"MIT"
] | null | null | null | ProcessingData/get_gp-bias.py | gomes-lab/SARA_ScienceAdvances | 61848d1c92a66bd58c8c195e5b2bb250ef8efb51 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Script to extract the gp bias features from microscopy images
"""
import sys
import json
import os
import copy as cp
import numpy as np
import glob
import matplotlib.pyplot as plt
import matplotlib
from numpy.polynomial import polynomial
import offsets as GS
from probability_dist import *
import data_storage as ds
import zone as LSA_Zone
from os import listdir
from matplotlib import cm
from collections import OrderedDict
import seaborn as sns
import itertools
#Set color schemes
# Color-scheme setup: register the qualitative colormap names, use "Set1"
# for images and the axes color cycle, and a muted seaborn palette for
# the line plots below.
cmaps = OrderedDict()
cmaps['Qualitative'] = ['Pastel1', 'Pastel2', 'Paired', 'Accent',
                        'Dark2', 'Set1', 'Set2', 'Set3',
                        'tab10', 'tab20', 'tab20b', 'tab20c']
plt.rcParams["image.cmap"] = "Set1"
plt.rcParams['axes.prop_cycle'] = plt.cycler(color=plt.cm.Set1.colors)
# The original assigned itertools.cycle(sns.color_palette("muted")) to
# `palette` and immediately overwrote it; the dead assignment is removed.
palette = sns.color_palette("muted")
def convert_bias_parameters(bias_parameters, center):
    """
    Convert sum-of-Gaussian parameters into the format the GP expects.

    Each input entry is indexed as (amplitude, position, FWHM, offset,
    scale, ...); each output tuple is (amplitude*scale, position-center,
    standard deviation derived from the FWHM, offset).
    """
    # FWHM -> standard deviation conversion factor: sqrt(2*ln 2).
    root_two_ln_two = np.sqrt(2. * np.log(2.))
    converted = []
    for params in bias_parameters:
        std = params[2] * 0.5 * root_two_ln_two
        converted.append((params[0] * params[4], params[1] - center, std, params[3]))
    return converted
def get_img_filename(pos, image_error, bx = 1., by = 1.):
    """
    Convert a stage position to the truncated image file-name stem.

    pos         : (x, y) position.
    image_error : when True, snap the position to the stripe grid by
                  rounding pos[0]/bx and pos[1]/by before building the name
                  (compensates for positioning error in saved file names).
    bx, by      : grid spacing along x and y used for the rounding.

    Returns the first 9 characters of the name produced by
    ds.LSA().image_name(); dwell and Tpeak are zeroed because only the
    positional prefix of the name is needed.
    """
    lsa = ds.LSA()
    stripe = {}
    stripe["x"] = pos[0]
    stripe["y"] = pos[1]
    if image_error:
        # Snap to the stripe grid so the stem matches the file on disk.
        stripe["x"] = round(pos[0]/bx)
        stripe["y"] = round(pos[1]/by)
    stripe["dwell"] = 0.
    stripe["Tpeak"] = 0.
    fn = lsa.image_name(stripe)
    # Keep only the positional prefix of the generated name.
    fn = fn[:9]
    return fn
def get_filename(pos, img_dir, bx = 1., by = 1.):
    """
    Locate and open the stripe image for a given position.

    Builds a glob pattern from the grid-snapped position (see
    get_img_filename), searches img_dir for matching .bmp files and opens
    the first match in sorted order, swapping the R and B channels of RGB
    images (presumably because the frames were captured as BGR -- confirm).

    Returns (image, matched file path) on success.  NOTE(review): when no
    file matches, the function falls through and implicitly returns None,
    so callers unpacking two values will raise TypeError.  NOTE(review):
    `Image` (presumably PIL.Image) is not imported anywhere visible in this
    file -- confirm the import exists, otherwise this raises NameError.
    """
    fn = get_img_filename(pos, image_error = True, bx = bx, by = by)
    fn += "*.bmp"
    # NOTE(review): img_dir is a parameter, so this check is always True.
    if 'img_dir' in locals():
        fn = os.path.join(img_dir, fn)
    img_fn = glob.glob(fn)
    if len(img_fn) > 0:
        # Deterministically pick the first match in sorted order.
        img_fn = sorted(img_fn)[0]
        img = Image.open(img_fn)
        mode = img.mode
        if mode == "RGB":
            # Swap the red and blue channels.
            r, g, b = img.split()
            img = Image.merge("RGB", (b, g, r))
        return img, img_fn
rescaling_datas = []
img_dir = "Bi2O3/Images/"
files = list_files(img_dir, "bmp")
exclude = []
for f in files[:]:
if f in exclude:
continue
rescaling_data = {}
#Parse information from the filename
meta_img = {}
fn_meta = f.split("_")
#The last part is the temperature in C
meta_img["Tpeak"] = float(fn_meta[-1].split(".")[0])
#The second last part is the temperature in dwell time in microsec
meta_img["dwell"] = float(fn_meta[-2])
meta_img["logtau"] = np.log10(float(fn_meta[-2]))
meta_img["pos"] = [float(fn_meta[0][1:])*2, float(fn_meta[1])*5]
meta_img["filename"] = f
pos = meta_img["pos"]
img, img_fn = get_filename(pos, img_dir, bx = 2., by = 5.)
plt_out = img_fn.replace("bmp", "png").replace("b", "aa")
zone = LSA_Zone.zone()
img_spec_offset = GS.img_spec_offset()
img_spec_offset.scale = 0.00092 #Scaling of pixels in mm
img_spec_offset.scale_imgcam = 0.0006680932 #Scaling of pixels in mm for imaging camera
img_spec_offset.offset = 0 #Offset of the spectrometer with respect to the image center in pixels.
img_spec_offset.offsety = 0 #Offset of the spectrometer with respect to the image center in pixels.
img_spec_offset.img_shift = img_spec_offset.offset * img_spec_offset.scale #The amount of shift along the x-axis in mm of the spectrum with respect to image
img_spec_offset.offset_global = [0., 0.]
zone.pos = pos
pd = probability_dist()
img, img_center_px, img_info, img_data, img_peaks = zone.image_from_file(img_fn, img_spec_offset)
if abs(img_center_px - zone.img_width * 0.5) > zone.img_width*0.1:
img_center_px = 0.5 * zone.img_width
img_center = zone.img_xdomain[0] + img_center_px/zone.img_width * (zone.img_xdomain[1] - zone.img_xdomain[0])
spec_center = img_center
peaks = np.array(img_peaks)
n_dense = 800
zone.spec_xdomain = [img_center-1.75, img_center+1.75]
x_plot = np.linspace(zone.spec_xdomain[0], zone.spec_xdomain[1], n_dense).reshape(-1,1)
dist_peaks, dist_lsa, dist_peaks_lsa, bias_parameters, LSA_width = pd.get_img_bias(peaks, img_center, spec_center, x_plot, lsa_frac = 1.)
bias_parameter_centered = convert_bias_parameters(bias_parameters, img_center)
#Convolve the uncertainty and the prior distribution
dist_sum_peaks = pd.sum(dist_peaks,"SumPeaks",1.)
dist_sum_peaks_lsa = pd.sum(dist_peaks_lsa,"SumPeaks",1.)
    # Plot on three separate axes
fig, axes = plt.subplots(nrows=2, sharex=True)
axes = axes.tolist()
axes[0].set_ylabel("Rescaling (a.u.)")
axes[1].set_ylabel("y pos (mm)")
axes[1].set_xlabel("x pos (mm)")
w1 = zone.img_xdomain[0] - img_center
w2 = zone.img_xdomain[1] - img_center
h1 = zone.img_ydomain[0] - 0.5 * (zone.img_ydomain[0] + zone.img_ydomain[1])
h2 = zone.img_ydomain[1] - 0.5 * (zone.img_ydomain[0] + zone.img_ydomain[1])
l1, = axes[0].plot(x_plot - img_center, dist_lsa, color=palette[3], label = "LSA bias")
axes[0].yaxis.set_ticks([])
axes.append(axes[0].twinx())
l2, = axes[2].plot(x_plot - img_center, dist_sum_peaks['dist'], color=palette[4], label = "RGB bias")
axes[2].yaxis.set_ticks([])
plt.legend([l1, l2],["LSA bias", "RGB bias"], loc = 'upper right', frameon=False)
    # Size of the image in pixels (size of original image)
width, height = img.size
# Setting the points for cropped image
left = 0
top = height/2
right = width
bottom = height
# Cropped image of above dimension
img = img.crop((left, top, right, bottom))
width, height = img.size
im = axes[1].imshow(img, extent=[w1,w2,h1,h2], aspect = 'auto')
axes[1].set_xlim([-0.55, 0.55])
for bias_i in bias_parameter_centered[:-1]:
axes[1].axvline(x=bias_i[1], ymin = (h2), ymax = 2.2*h2,
color=palette[8], linewidth = 1.0)
title_str = "Dwell "+str(meta_img["dwell"])+"\u03bcs, Tpeak "+str(meta_img["Tpeak"])+""
plt.title(title_str)
plt.savefig(plt_out, format='png')
plt.close(fig)
rescaling_data["meta_data"] = meta_img
rescaling_data["rescaling_parameters"] = bias_parameter_centered
rescaling_datas.append(rescaling_data)
# Serializing json
json_object = json.dumps(rescaling_datas, indent = 4)
# Writing to json
with open("bias.json", "w") as outfile:
outfile.write(json_object)
| 35.492228 | 163 | 0.646277 |
e925522b3d3915457215980e5bca266c8fd2ff38 | 2,448 | py | Python | monitoring/automation/monitor.py | shane0/flask-website-monitor | 39031b9207c97baef4b10a792e038f241bcdc857 | [
"MIT"
] | 1 | 2017-04-13T05:29:15.000Z | 2017-04-13T05:29:15.000Z | monitoring/automation/monitor.py | shane0/flask-website-monitor | 39031b9207c97baef4b10a792e038f241bcdc857 | [
"MIT"
] | 1 | 2017-04-12T23:44:58.000Z | 2017-04-12T23:44:58.000Z | monitoring/automation/monitor.py | shane0/flask-website-monitor | 39031b9207c97baef4b10a792e038f241bcdc857 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A website monitor.
"""
import sys
import traceback
import requests
import re
import json
import datetime
DEFAULT_CONFIG_FILE = 'config.json'
| 28.137931 | 126 | 0.541258 |
e9268aab7efb78626ab35cbb0daf3f9adf12bcb0 | 253 | py | Python | tests/test_MicropythonBoards.py | dwighthubbard/micropython-cloudmanager | 1b41eeaf7f5a34a622826bf0030a9f5c45d1aefc | [
"MIT"
] | 1 | 2017-02-22T03:18:48.000Z | 2017-02-22T03:18:48.000Z | tests/test_MicropythonBoards.py | dwighthubbard/micropython-cloudmanager | 1b41eeaf7f5a34a622826bf0030a9f5c45d1aefc | [
"MIT"
] | null | null | null | tests/test_MicropythonBoards.py | dwighthubbard/micropython-cloudmanager | 1b41eeaf7f5a34a622826bf0030a9f5c45d1aefc | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
import sys
sys.path.insert(0, '.')
from cloudmanager.board import MicropythonBoards
for result in MicropythonBoards().execute("import os;print(os.uname())"):
print(result.read().strip())
| 25.3 | 73 | 0.754941 |
e92912ace35fc868f85b6a3bdb13260570590334 | 412 | py | Python | Chapter03/c3_27_datadotworld_1.py | andrewjcoxon/Hands-On-Data-Science-with-Anaconda | 82504a059ecd284b3599fa9af2b3eb6bbd6e28f3 | [
"MIT"
] | 25 | 2018-06-25T16:21:09.000Z | 2022-02-08T09:28:29.000Z | Hands-On-Data-Science-with-Anaconda-master/Hands-On-Data-Science-with-Anaconda-master/Chapter03/c3_27_datadotworld_1.py | manual123/Nacho-Jupyter-Notebooks | e75523434b1a90313a6b44e32b056f63de8a7135 | [
"MIT"
] | null | null | null | Hands-On-Data-Science-with-Anaconda-master/Hands-On-Data-Science-with-Anaconda-master/Chapter03/c3_27_datadotworld_1.py | manual123/Nacho-Jupyter-Notebooks | e75523434b1a90313a6b44e32b056f63de8a7135 | [
"MIT"
] | 17 | 2018-06-15T02:55:30.000Z | 2022-03-09T15:24:42.000Z | """
Name : c3_27_datadotworld_1.py
Book : Hands-on Data Science with Anaconda)
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan and James Yan
Date : 1/15/2018
email : yany@canisius.edu
paulyxy@hotmail.com
"""
import datadotworld as dw
dataset = 'jonloyens/an-intro-to-dataworld-dataset'
data = dw.load_dataset(dataset, force_update=True)
list(dataset.dataframes) | 27.466667 | 51 | 0.694175 |
e92a3ce5abab1bfe02516472d0fc6c56a482d48d | 15,964 | py | Python | strutil.py | IloveKanade/k3fmt | 13a81562b9fc706dbf7fc05fcae130260bc2551d | [
"MIT"
] | null | null | null | strutil.py | IloveKanade/k3fmt | 13a81562b9fc706dbf7fc05fcae130260bc2551d | [
"MIT"
] | 3 | 2021-08-06T07:24:40.000Z | 2022-03-23T06:58:36.000Z | strutil.py | IloveKanade/k3fmt | 13a81562b9fc706dbf7fc05fcae130260bc2551d | [
"MIT"
] | 1 | 2021-08-04T08:41:33.000Z | 2021-08-04T08:41:33.000Z | import re
import os
import errno
import string
import subprocess
import k3color
listtype = (tuple, list)
invisible_chars = ''.join(map(chr, list(range(0, 32))))
invisible_chars_re = re.compile('[%s]' % re.escape(invisible_chars))
def line_pad(linestr, padding=''):
    """
    Prefix every line of a multi-line string with a left padding.

    :param linestr: multiple line string with newline as line separator.
    :param padding: left padding string to add before each line.
           It can also be a callable taking the line and returning the
           padding string, which is useful for dynamic padding.
           Any other type leaves the lines unchanged.
    :return: multiple line string with newline as line separator, with
           left padding added.
    """
    lines = linestr.split("\n")
    # isinstance is the idiomatic (and subclass-aware) type check.
    if isinstance(padding, (str, bytes)):
        lines = [padding + x for x in lines]
    elif callable(padding):
        lines = [padding(x) + x for x in lines]
    return "\n".join(lines)
def struct_repr(data, key=None):
    """
    Render primitive or composite data to a structural (yaml-like),
    multi-line representation.

    :param data: a number, string, list or dict to render.
    :param key: a callable used to sort dict entries via
        ``kvs.sort(key=key)``.  Note it receives ``(key_str, sublines)``
        tuples, not bare dict keys.
    :return: a list of strings, one per rendered output line.

    Example::

        a = {
            1: 3,
            'x': {1: 4, 2: 5},
            'l': [1, 2, 3],
        }
        for l in struct_repr(a):
            print(l)

        # Output:
        # 1 : 3
        # l : - 1
        #     - 2
        #     - 3
        # x : 1 : 4
        #     2 : 5
    """
    if type(data) in listtype:
        if len(data) == 0:
            return ['[]']
        # Recursively render each element, tracking the widest sub-line so
        # all elements can be left-justified to a common width.
        max_width = 0
        elt_lines = []
        for elt in data:
            sublines = struct_repr(elt)
            sublines_max_width = max([len(x) for x in sublines])
            if max_width < sublines_max_width:
                max_width = sublines_max_width
            elt_lines.append(sublines)
        lines = []
        for sublines in elt_lines:
            # - subline[0]
            #   subline[1]
            #   ...
            lines.append('- ' + sublines[0].ljust(max_width))
            for l in sublines[1:]:
                lines.append('  ' + l.ljust(max_width))
        return lines
    elif type(data) == dict:
        if len(data) == 0:
            return ['{}']
        # Track the widest key and widest rendered value separately so the
        # "key : value" columns line up across all entries.
        max_k_width = 0
        max_v_width = 0
        kvs = []
        for k, v in data.items():
            k = utf8str(k)
            sublines = struct_repr(v)
            sublines_max_width = max([len(x) for x in sublines])
            if max_k_width < len(k):
                max_k_width = len(k)
            if max_v_width < sublines_max_width:
                max_v_width = sublines_max_width
            kvs.append((k, sublines))
        # NOTE: the sort key is applied to (key_str, sublines) tuples.
        kvs.sort(key=key)
        lines = []
        for k, sublines in kvs:
            # foo : sub-0
            #       sub-1
            # b   : sub-0
            #       sub-0
            lines.append(k.rjust(max_k_width) + ' : ' +
                         sublines[0].ljust(max_v_width))
            for l in sublines[1:]:
                # Continuation lines are padded past the key column plus
                # the 3-character ' : ' separator.
                lines.append(' '.rjust(max_k_width) +
                             '   ' + l.ljust(max_v_width))
        return lines
    else:
        # Scalar: strip control characters and render on a single line.
        data = filter_invisible_chars(data)
        return [utf8str(data)]
def filter_invisible_chars(data):
    """
    Remove ASCII control characters (0x00-0x1f) from a string.

    :param data: a ``str`` or ``bytes`` object to filter.  Any other type
        is returned unchanged.
    :return: the filtered object, same type as the input.

    Example::

        filter_invisible_chars("127\\000\\001\\031")  # -> '127'
        filter_invisible_chars(b"\\x00\\x01ab")        # -> b'ab'
    """
    if not isinstance(data, (bytes, str)):
        return data
    if isinstance(data, bytes):
        # Bug fix: `invisible_chars_re` is compiled from a `str` pattern
        # and raises TypeError when applied to `bytes`, so use an
        # equivalent bytes pattern here.
        return re.sub(b'[%s]' % re.escape(bytes(range(0, 32))), b'', data)
    return invisible_chars_re.sub('', data)
def format_line(items, sep=' ', aligns=''):
    """
    Format one logical line whose elements may span several rows.

    Compatible with colored strings (e.g. ``k3color.Str``) because cell
    width is measured via each cell's own ``__len__``.

    :param items: elements of the line; each is a string or a list of
        strings (a multi-row element).
    :param sep: separator inserted between elements; a single space by
        default.
    :param aligns: per-element alignment flags, 'l' for left-align;
        anything else (including missing entries) right-aligns.
    :return: the formatted multi-row string, rows joined with newline.
    """
    n_cols = len(items)
    # Normalise the alignment spec to exactly one flag per element.
    align_spec = (list(aligns) + [''] * n_cols)[:n_cols]

    # Normalise every element to a list of stringified rows.
    columns = [col if type(col) in listtype else [col] for col in items]
    columns = [[_to_str(cell) for cell in col] for col in columns]

    height = max([len(col) for col in columns] + [0])
    col_widths = [max_width(col) for col in columns]
    # Pad every column with empty rows up to the tallest column.
    columns = [(col + [''] * height)[:height] for col in columns]

    rendered = []
    for row_idx in range(height):
        cells = []
        for col_idx, col in enumerate(columns):
            width = col_widths[col_idx]
            cell = col[row_idx]
            # Measure before utf8str(): colored strings report their
            # visible length through __len__.
            visible_len = cell.__len__()
            cell = utf8str(cell)
            if visible_len < width:
                fill = ' ' * (width - visible_len)
                if align_spec[col_idx] == 'l':
                    cell = cell + fill
                else:
                    cell = fill + cell
            cells.append(cell)
        rendered.append(sep.join(cells))
    return "\n".join(rendered)
def format_table(rows,
                 keys=None,
                 colors=None,
                 sep=' | ',
                 row_sep=None):
    """
    Render a list of data into a table.

    Number of rows is `len(rows)`.
    Number of columns is `len(rows[0])`.

    :param rows: list of items to render.
        Element of list can be number, string, list or dict.
    :param keys: specifies indexes(for list) or keys(for dict) to render.
        It is a list.
        Indexes or keys those are not in this list will not be rendered.
        It can also be used to specify customized column headers, if element in
        list is a 2-element tuple or list.
    :param colors: specifies the color for each column.
        It is a list of color values in number or color name strings.
        If length of `colors` is smaller than the number of columns(the number of
        indexes of a list, or keys of a dict), the colors are repeated for columns
        after.
    :param sep: column separator string.
        By default it is `" | "`.
    :param row_sep: character used to draw a separator line between rows.
        By default it is None, which means no separator line is added.
    :return: a list of strings, one per rendered table row.
    """
    keys, column_headers = _get_key_and_headers(keys, rows)
    colors = _get_colors(colors, len(keys))
    # element of lns is a multi-column line
    # lns = [
    #        # line 1
    #        [
    #                # column 1 of line 1
    #                ['name:',    # row 1 of column 1 of line 1
    #                 'foo',      # row 2 of column 1 of line 1
    #                ],
    #
    #                # column 2 of line 1
    #                ['school:',
    #                 'foo',
    #                 'bar',
    #                ],
    #        ],
    # ]
    # headers
    lns = [
        [[a + ': ']
         for a in column_headers]
    ]
    for row in rows:
        # A [None] cell is a placeholder later replaced by a run of
        # `row_sep` characters spanning the column width.
        if row_sep is not None:
            lns.append([[None] for k in keys])
        if type(row) == dict:
            ln = [struct_repr(row.get(k, ''))
                  for k in keys]
        elif type(row) in listtype:
            # Out-of-range indexes render as an empty cell.
            ln = [struct_repr(row[int(k)])
                  if len(row) > int(k) else ''
                  for k in keys]
        else:
            ln = [struct_repr(row)]
        lns.append(ln)
    # Widest cell per column across all lines, headers included.
    max_widths = [get_max_width(cols) for cols in zip(*lns)]
    rows = []
    for row in lns:
        ln = []
        for i in range(len(max_widths)):
            color = colors[i]
            w = max_widths[i]
            ln.append([k3color.Str(x.ljust(w), color)
                       if x is not None else row_sep * w
                       for x in row[i]])
        rows.append(format_line(ln, sep=sep))
    return rows
def tokenize(line, sep=None, quote='"\'', preserve=False):
    """
    Split `line` into tokens, keeping quoted segments intact.

    :param line: the line to tokenize.
    :param sep: None or a non-empty string separator to tokenize with.
        If sep is None, runs of consecutive whitespace are regarded as a
        single separator, and the result contains no empty strings at the
        start or end, just like `str.split(None)`.
    :param quote: every character in `quote` is regarded as a quote; a
        backslash prefix makes an exception.  A segment between a pair of
        identical quotes is preserved as one token.
    :param preserve: if True, the quote characters themselves are kept in
        the output tokens.
    :raises ValueError: if `sep` equals `quote`.
    :return: a list of token strings.
    """
    if sep == quote:
        # Bug fix: corrected the typo'd error message text.
        raise ValueError('sep and quote must be different')

    if sep is None:
        if len(line) == 0:
            return []
        line = line.strip()

    rst = ['']
    n = len(line)
    i = 0
    while i < n:
        # quote_s/quote_e: start/end offsets of the next quoted segment
        # relative to position i; escape: offsets of backslash escapes.
        quote_s, quote_e, escape = _findquote(line[i:], quote)

        if len(escape) > 0:
            # Strip the escape backslashes from the remaining line and
            # recompute its length.
            lines = []
            x = 0
            for e in escape:
                lines.append(line[x:i + e])
                x = i + e + 1
            lines.append(line[x:])
            line = ''.join(lines)
            n = len(line)

        if quote_s < 0:
            sub = n
        else:
            sub = i + quote_s

        if i < sub:
            # Split the unquoted text before the next quote.
            sub_rst = line[i:sub].split(sep)
            if sep is None:
                # Whitespace adjacent to a quote must not merge the quoted
                # token with its neighbour.
                if line[sub - 1] in string.whitespace:
                    sub_rst.append('')
                if line[i] in string.whitespace:
                    sub_rst.insert(0, '')
            # Glue onto any token started by a preceding quoted segment.
            head = rst.pop()
            sub_rst[0] = head + sub_rst[0]
            rst += sub_rst

        if quote_s < 0:
            break

        # discard incomplete
        # 'a b"c' -> ['a']
        if quote_e < 0:
            rst.pop()
            break

        head = rst.pop()
        if preserve:
            head += line[i + quote_s:i + quote_e + 1]
        else:
            head += line[i + quote_s + 1:i + quote_e]
        rst.append(head)
        i += quote_e + 1

    return rst
def page(lines, max_lines=10, control_char=True, pager=('less',)):
    """
    Display `lines` of string in console, with a pager program (`less`) if
    there are too many lines.  Output goes directly to stdout.

    :param lines: list of lines to display.
    :param max_lines: the max number of lines to print without a pager.
        By default it is 10 lines.
    :param control_char: whether controlling chars, such as terminal color
        codes, should be interpreted (passes `-r` to `less`).
    :param pager: the pager program as a list of command and arguments.
        By default it is `('less',)`.
    :return: Nothing
    """
    if len(lines) > max_lines:
        pp = {'stdin': subprocess.PIPE,
              'stdout': None,
              'stderr': None}
        cmd_pager = list(pager)
        if control_char:
            if pager == ('less',):
                # `-r` makes less pass raw control characters (e.g. ANSI
                # colors) through to the terminal.
                cmd_pager += ['-r']
        subproc = subprocess.Popen(cmd_pager,
                                   close_fds=True,
                                   cwd='./',
                                   **pp)
        try:
            out, err = subproc.communicate('\n'.join(lines).encode("utf-8"))
        except IOError as e:
            # Bug fix: Python 3 exceptions are not indexable; `e[0]` raised
            # TypeError.  A pager quitting before reading all input yields
            # EPIPE, which is expected and safe to ignore.
            if e.errno == errno.EPIPE:
                pass
            else:
                raise
        subproc.wait()
    else:
        # Few enough lines: write straight to stdout (fd 1).
        os.write(1, ('\n'.join(lines) + "\n").encode("utf-8"))
e92ba6f82fbd7b5de0f238a51cd87521f2ccd146 | 16,920 | py | Python | camera.py | Euclideon/udSDKPython | a82157ab6382fda6291bdcca9ec2a51203b95b2a | [
"MIT"
] | 4 | 2020-09-03T05:35:15.000Z | 2021-11-08T04:31:55.000Z | camera.py | Euclideon/udSDKPython | a82157ab6382fda6291bdcca9ec2a51203b95b2a | [
"MIT"
] | 1 | 2020-08-18T06:49:21.000Z | 2020-08-18T06:49:21.000Z | camera.py | Euclideon/udSDKPython | a82157ab6382fda6291bdcca9ec2a51203b95b2a | [
"MIT"
] | 1 | 2020-09-11T07:52:32.000Z | 2020-09-11T07:52:32.000Z | import logging
import math
import numpy as np
import pyglet
import udSDK
logger = logging.getLogger(__name__)
def get_view_vertices(self):
    """
    Project the camera's viewing volume onto 2D space.

    Returns
    -------
    list of [x, y] corner pairs in the order far-left, near-left,
    near-right, far-right, with coordinates normalised by the far
    plane distance.
    """
    # TODO: make this correctly display the location of the near and far planes
    half_fov_rad = self.FOV / 2 / 180 * np.pi
    slope = np.tan(half_fov_rad) / self.farPlane
    near_y = self.nearPlane / self.farPlane
    far_y = self.farPlane / self.farPlane
    near_x = self.nearPlane * slope
    far_x = self.farPlane * slope
    return [[-far_x, far_y],
            [-near_x, near_y],
            [near_x, near_y],
            [far_x, far_y]]
def on_key_press(self, symbol, modifiers):
    """
    Hook for key presses not mapped by the viewport's key bindings.

    The base implementation intentionally does nothing; subclasses
    override this to react to `symbol` and `modifiers`.
    """
    return None
def rotate_polar(self, vec, dtheta, dphi):
    """
    Rotate a 3D vector by increments of its spherical coordinates.

    Converts `vec` to spherical form, applies `dtheta` (azimuth, in the
    xy plane) and `dphi` (inclination), and converts back.  The
    pre-rotation angles are stored on `self` as `phi` and `theta`.

    Returns
    -------
    a new [x, y, z] list: `vec` rotated by dtheta and dphi.
    """
    x, y, z = vec[0], vec[1], vec[2]
    radius = math.sqrt(x ** 2 + y ** 2 + z ** 2)
    azimuth = math.atan2(y, x)
    inclination = math.acos(z / radius)

    # Disallow rotations that would point the vector straight up or down,
    # where the azimuth becomes degenerate.
    pole_threshold = 0.1
    near_pole = (abs(inclination + dphi) < pole_threshold
                 or abs(inclination + dphi - math.pi) < pole_threshold)
    if near_pole:
        dphi = 0

    new_inclination = inclination + dphi
    new_azimuth = azimuth + dtheta
    sin_inc = math.sin(new_inclination)
    rotated = [radius * sin_inc * math.cos(new_azimuth),
               radius * sin_inc * math.sin(new_azimuth),
               radius * math.cos(new_inclination)]

    # Record the pre-rotation angles, matching the original behaviour.
    self.phi = inclination
    self.theta = azimuth
    return rotated
def set_rotation(self, x=0, y=-5, z=0, roll=0, pitch=0, yaw=0):
    """
    Build and apply a camera matrix from a position and Euler angles.

    Parameters
    ----------
    x, y, z : translation components of the camera.
    roll, pitch, yaw : Euler angles (radians).

    Side effects: stores `self.matrix` and `self.rotationMatrix`, and
    pushes the matrix to the underlying udSDK render target.
    """
    sin_yaw, cos_yaw = math.sin(yaw), math.cos(yaw)
    sin_pitch, cos_pitch = math.sin(pitch), math.cos(pitch)
    sin_roll, cos_roll = math.sin(roll), math.cos(roll)

    # Standard yaw-pitch-roll rotation matrix in the upper-left 3x3,
    # with the camera position in the last row (row-major homogeneous).
    self.matrix = np.array([
        [cos_yaw * cos_pitch,
         cos_yaw * sin_pitch * sin_roll - sin_yaw * cos_roll,
         cos_yaw * sin_pitch * cos_roll + sin_yaw * sin_roll,
         0],
        [sin_yaw * cos_pitch,
         sin_yaw * sin_pitch * sin_roll + cos_yaw * cos_roll,
         sin_yaw * sin_pitch * cos_roll - cos_yaw * sin_roll,
         0],
        [-sin_pitch, cos_pitch * sin_roll, cos_pitch * cos_roll, 0],
        [x, y, z, 1]
    ])
    self.rotationMatrix = self.matrix[:3, :3]
    self._view.SetMatrix(udSDK.udRenderTargetMatrix.Camera, self.matrix.flatten())
def look_at(self, lookAtPoint=None, cameraPosition=None):
    """
    Aim the camera from `cameraPosition` towards `lookAtPoint`.

    Parameters
    ----------
    cameraPosition: position of the camera; defaults to the camera's
        current position, and is stored on `self` when given explicitly.
    lookAtPoint: x, y, z point to face; defaults to `self.lookAtTarget`.

    When the two points coincide there is no defined direction, so the
    fallback direction [1, 1, 0] is used.
    """
    if cameraPosition is None:
        cameraPosition = self.position
    else:
        # Remember the explicit position for subsequent calls.
        self.position = cameraPosition
    if lookAtPoint is None:
        lookAtPoint = self.lookAtTarget

    if np.array_equal(lookAtPoint, cameraPosition):
        direction = np.array([1, 1, 0])
    else:
        direction = np.array(lookAtPoint) - np.array(cameraPosition)
    self.look_direction(direction)
def look_direction(self, dPoint: np.array):
    """
    Point the camera in the direction of vector `dPoint`.

    Builds an orthonormal basis (tangent, forward, axis) from the
    direction, assuming the tangent vector has a z value of zero
    (i.e. no roll), writes it into `self.matrix`, and pushes the
    matrix to the udSDK render target.

    Parameters
    ----------
    dPoint : direction vector the camera should face.
    """
    tangent = [0, 0, 0]
    # Solve for a horizontal (z = 0) tangent perpendicular to dPoint.
    if dPoint[1] != 0:
        tangent[0] = (dPoint[0]-np.sqrt(dPoint[0]**2+4*dPoint[1]**2))/(2*dPoint[1])
    elif dPoint[2]>0:
        tangent[0] = 1
    else:
        tangent[0] = -1
    tangent[1] = 1-tangent[0]**2
    tangent = -np.array(tangent)
    # Normalise tangent and forward; their cross product gives the
    # rotation axis (the camera's "up" row).
    tangent = tangent / np.sqrt(tangent.dot(tangent))
    forward = dPoint/np.sqrt(dPoint.dot(dPoint))
    axis = np.cross(tangent, forward)
    axis = axis / np.sqrt(axis.dot(axis))
    # Row-major camera matrix: tangent / forward / axis rows, with the
    # camera position in the homogeneous last row.
    self.matrix = np.array(
        [
            [tangent[0], tangent[1], tangent[2], 0],
            [forward[0], forward[1], forward[2], 0],
            [axis[0], axis[1], axis[2], 0],
            [self.position[0], self.position[1], self.position[2], 1]
        ]
    )
    self.rotationAxis = axis
    self.tangentVector = tangent
    self.rotationMatrix = self.matrix[:3, :3]
    # World-space facing direction: local +y rotated by the camera basis.
    self.facingDirection = np.array([0,1,0]).dot(self.rotationMatrix).tolist()
    self._view.SetMatrix(udSDK.udRenderTargetMatrix.Camera, self.matrix.flatten())
def update_move_direction(self):
    """
    Recompute the camera's velocity, zoom and projection from the key
    flags pressed since the last call.

    Side effects: updates `moveSpeed`, `moveVelocity` (rotated into
    world coordinates), `zoom`, `mouseSensitivity`, and refreshes the
    perspective projection on `self`.
    """
    self.moveVelocity = [0, 0, 0]  # in local (camera-space) coordinates

    # Holding shift switches to the fast movement speed.
    self.moveSpeed = self.fastSpeed if self.shiftPressed else self.normalSpeed

    if self.forwardPressed:
        self.moveVelocity[1] += self.moveSpeed
    if self.backPressed:
        self.moveVelocity[1] -= self.moveSpeed
    if self.rightPressed:
        self.moveVelocity[0] += self.moveSpeed
    if self.leftPressed:
        self.moveVelocity[0] -= self.moveSpeed
    if self.upPressed:
        self.moveVelocity[2] += self.moveSpeed
    if self.downPressed:
        self.moveVelocity[2] -= self.moveSpeed

    # Zoom in freely, but never zoom out past 1.
    if self.zoomInPressed:
        self.zoom += 1
    if self.zoomOutPressed and self.zoom > 1:
        self.zoom -= 1

    # Finer mouse control at higher zoom levels.
    self.mouseSensitivity = 0.1 / self.zoom
    self.set_projection_perspective(self.nearPlane, self.farPlane, self.zoom)

    # Rotate the local velocity into world coordinates.
    self.moveVelocity = np.array(self.moveVelocity).dot(self.rotationMatrix).tolist()
class OrthoCamera(Camera):
class MapCamera(OrthoCamera):
"""
Orthographic camera that follows a target and remains a set height above it
"""
#here we override the default control behaviour of the camera
class OrbitCamera(Camera):
"""
Movement of this camera is relative to a fixed point in space
"""
class PerspectiveCamera(OrbitCamera):
class TrackCamera(Camera):
class RecordCamera(Camera):
"""
A camera class for manual generation and replay of flythroughs of models
the user defines a set of waypoints by pressing space when the camera is positioned at
the desired locations
Pressing enter will replay the path
Backspace will delete the most recently added waypoint
"""
| 29.32409 | 176 | 0.642317 |
e930d65f391b7723982c2721df59191c1d9d3a9f | 316 | py | Python | src/menu.py | simonenicf/Escape-from-Narlilia | cd512860e74d5df81504cccbcb667fdf794c8dda | [
"MIT"
] | null | null | null | src/menu.py | simonenicf/Escape-from-Narlilia | cd512860e74d5df81504cccbcb667fdf794c8dda | [
"MIT"
] | 1 | 2020-11-03T13:41:49.000Z | 2020-11-03T14:05:22.000Z | src/menu.py | simonenicf/Escape-from-Narlilia | cd512860e74d5df81504cccbcb667fdf794c8dda | [
"MIT"
] | null | null | null | import sys
| 24.307692 | 60 | 0.487342 |
e9321bfef23bb209db9bf3ff382e024a2aab02ee | 337 | py | Python | envipyarclib/gptool/parameter/templates/ulong64array.py | envi-idl/envipyarclib | 90135652510c3d53c5f51177252c1fea2639bf22 | [
"MIT"
] | 1 | 2019-08-03T05:10:18.000Z | 2019-08-03T05:10:18.000Z | envipyarclib/gptool/parameter/templates/ulong64array.py | envi-idl/envipyarclib | 90135652510c3d53c5f51177252c1fea2639bf22 | [
"MIT"
] | null | null | null | envipyarclib/gptool/parameter/templates/ulong64array.py | envi-idl/envipyarclib | 90135652510c3d53c5f51177252c1fea2639bf22 | [
"MIT"
] | 1 | 2020-02-25T14:12:50.000Z | 2020-02-25T14:12:50.000Z | """
Defines the parameter template for the specified data type.
"""
from .basicarray import BASICARRAY
def template():
    """Factory method for this parameter template class.

    Returns a ULONG64ARRAY template bound to the 'GPLong' GP data type.
    NOTE(review): ULONG64ARRAY is presumably defined elsewhere in this
    module/package (only BASICARRAY is imported above) -- verify.
    """
    return ULONG64ARRAY('GPLong')
| 19.823529 | 63 | 0.706231 |
e9324103dd727dbfbcd73f1ba4bae58a0ea2e051 | 41,336 | py | Python | Dataflow/full_executer_wordshop.py | Smurf-maker/WordShop | ac0095ee28207f23744337d4cb35c5ca764d7e26 | [
"MIT"
] | null | null | null | Dataflow/full_executer_wordshop.py | Smurf-maker/WordShop | ac0095ee28207f23744337d4cb35c5ca764d7e26 | [
"MIT"
] | null | null | null | Dataflow/full_executer_wordshop.py | Smurf-maker/WordShop | ac0095ee28207f23744337d4cb35c5ca764d7e26 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Full Executer WordShop.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1kGSQWNtImJknauUN9L8ZRRwIzAdwbmo_
First, we load the pegasus paraphraser.
"""
# Commented out IPython magic to ensure Python compatibility.
!git clone https://github.com/google-research/pegasus
# %cd pegasus
!export PYTHONPATH=.
!pip3 install -r requirements.txt
!pip install transformers==3.5.0
import torch
from transformers import PegasusForConditionalGeneration, PegasusTokenizer
model_name = 'tuner007/pegasus_paraphrase'
torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = PegasusTokenizer.from_pretrained(model_name)
model = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device)
import pandas as pd
import nltk
nltk.download('cmudict')
nltk.download('wordnet')
"""Next, we import the procrustean alliteration paraphraser"""
from enum import Enum
from nltk import RegexpTokenizer
from nltk.corpus import cmudict
from nltk.corpus import wordnet
from nltk.wsd import lesk
import heapq
"""We define three methods for use in the final integrated function"""
"""Next, we define the sentence curator"""
!pip install fuzzywuzzy[speedup]
# Load clustering and Levenshtein distance
import scipy.cluster.hierarchy as h
from fuzzywuzzy import fuzz
import scipy.spatial.distance as d
# Load word2vec prerequisites for correlation distance between words
!wget -P /root/input/ -c "https://s3.amazonaws.com/dl4j-distribution/GoogleNews-vectors-negative300.bin.gz"
!pip3 install gensim
from gensim.models import KeyedVectors
vecmod = KeyedVectors.load_word2vec_format('/root/input/GoogleNews-vectors-negative300.bin.gz', binary=True)
# Load Google's pre-trained Word2Vec model.
import numpy as np
from scipy import spatial
from nltk.tokenize import word_tokenize
from nltk import download
download('stopwords')
download('punkt')
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
index2word_set = set(vecmod.wv.index2word)
"""Next, we integrate the Pegasus sample generator, the Procrustean paraphraser and the paraphrase curator."""
x = "the cat jumped over the moon and ate the planet jupyter"
paraphrase(x,0.4,0.15,2,25,rhyming)
x = "the cat jumped over the moon and ate the planet jupyter"
paraphrase(X,0.3,0.5,5,20,assonance)
x = "the cat jumped over the moon and landed on planet jupyter"
proportion = 0.4
sensitivity = 0.3
max_output = 3
sample_size = 20
paraphrase(x,proportion,sensitivity,max_output,sample_size,alliteration)
"""Lastly, we test the algorithm on a sample of eight excerpts from classic works.
We have 3 methods and we set the max output to 3. Thus we have a maximum of 72 possible sentences to evaluate.
"""
Dickens = "My meaning simply is, that whatever I have tried to do in life, I have tried with all my heart to do well"
Twain = "Persons attempting to find a motive in this narrative will be prosecuted; persons attempting to find a moral in it will be banished; persons attempting to find a plot in it will be shot."
Forster = "Most of life is so dull that there is nothing to be said about it, and the books and talk that would describe it as interesting are obliged to exaggerate, in the hope of justifying their own existence."
Grahame = "The Mole was a good listener, and Toad, with no one to check his statements or to criticize in an unfriendly spirit, rather let himself go."
Joyce = "A certain pride, a certain awe, withheld him from offering to God even one prayer at night, though he knew it was in Gods power to take away his life while he slept and hurl his soul hellward ere he could beg for mercy."
London = "When, on the still cold nights, he pointed his nose at a star and howled long and wolf-like, it was his ancestors, dead and dust, pointing nose at star and howling down through the centuries and through him."
Fitzgerald = "And so with the sunshine and the great bursts of leaves growing on the trees, just as things grow in fast movies, I had that familiar conviction that life was beginning over again with the summer."
Eliot = "For years after Lydgate remembered the impression produced in him by this involuntary appealthis cry from soul to soul."
test_set = [Dickens,Twain,Forster,Grahame,Joyce,London,Fitzgerald,Eliot]
Dickens_alliteration = paraphrase(Dickens,0.3,0.3,3,30,alliteration)
print(Dickens)
for sentence in Dickens_alliteration:
print(sentence)
Twain_alliteration = paraphrase(Twain,0.2,0.2,3,30,alliteration)
print(Twain)
for sentence in Twain_alliteration:
print(sentence)
Forster_alliteration = paraphrase(Forster,0.2,0.6,3,30,alliteration)
print(Forster)
for sentence in Forster_alliteration:
print(sentence)
Grahame_alliteration = paraphrase(Grahame,0.3,0.3,3,30,alliteration)
print(Grahame)
for sentence in Grahame_alliteration:
print(sentence)
Joyce_alliteration = paraphrase(Joyce,0.2,0.3,3,30,alliteration)
print(Joyce)
for sentence in Joyce_alliteration:
print(sentence)
London_alliteration = paraphrase(London,0.3,0.3,3,30,alliteration)
print(London)
for sentence in London_alliteration:
print(sentence)
Fitzgerald_alliteration = paraphrase(Fitzgerald,0.2,0.3,3,20,alliteration)
print(Fitzgerald)
for sentence in Fitzgerald_alliteration:
print(sentence)
Eliot_alliteration = paraphrase(Eliot,0.2,0.5,3,30,alliteration)
print(Eliot)
for sentence in Eliot_alliteration:
print(sentence)
Dickens_rhyming = paraphrase(Dickens,0.4,0.5,3,30,rhyming)
print(Dickens)
for sentence in Dickens_rhyming:
print(sentence)
Twain_rhyming = paraphrase(Twain,0.3,0.5,3,30,rhyming)
print(Twain)
for sentence in Twain_rhyming:
print(sentence)
Forster_rhyming = paraphrase(Forster,0.3,0.3,3,30,rhyming)
print(Forster)
for sentence in Forster_rhyming:
print(sentence)
Grahame_rhyming = paraphrase(Grahame,0.3,0.25,3,30,rhyming)
for sentence in Grahame_rhyming:
print(sentence)
Joyce_rhyming = paraphrase(Joyce,0.33,0.5,3,30,rhyming)
print(Joyce)
for sentence in Joyce_rhyming:
print(sentence)
London_rhyming = paraphrase(London,0.3,0.3,3,30,rhyming)
print(London)
for sentence in London_rhyming:
print(sentence)
Fitzgerald_rhyming = paraphrase(Fitzgerald,0.3,0.3,3,30,rhyming)
for sentence in Fitzgerald_rhyming:
print(sentence)
Eliot_rhyming = paraphrase(Eliot,0.3,0.3,3,30,rhyming)
print(Eliot)
for sentence in Eliot_rhyming:
print(sentence)
Dickens_assonance = paraphrase(Dickens,0.1,0.15,3,30,assonance)
for sentence in Dickens_assonance:
print(sentence)
Twain_assonance = paraphrase(Twain,0.2,0.1,3,30,assonance)
for sentence in Twain_assonance:
print(sentence)
Forster_assonance = paraphrase(Forster,0.1,0.1,3,30,assonance)
for sentence in Forster_assonance:
print(sentence)
Grahame_assonance = paraphrase(Grahame,0.2,0.1,3,30,assonance)
for sentence in Grahame_assonance:
print(sentence)
Joyce_assonance = paraphrase(Joyce,0.2,0.1,3,30,assonance)
for sentence in Joyce_assonance:
print(sentence)
London_assonance = paraphrase(London,0.2,0.1,3,30,assonance)
for sentence in London_assonance:
print(sentence)
Fitzgerald_assonance = paraphrase(Fitzgerald,0.2,0.1,3,30,assonance)
for sentence in Fitzgerald_assonance:
print(sentence)
Eliot_assonance = paraphrase(Eliot,0.2,0.1,3,30,assonance)
for sentence in Eliot_assonance:
print(sentence) | 42.483042 | 230 | 0.682287 |
e932fb4ec343373146508adfa905b3c8915cb66b | 4,831 | py | Python | train.py | ppujol76/-Pere_Transformers | e267bcc6559c998accaed647cacbff253031f8b0 | [
"MIT"
] | null | null | null | train.py | ppujol76/-Pere_Transformers | e267bcc6559c998accaed647cacbff253031f8b0 | [
"MIT"
] | null | null | null | train.py | ppujol76/-Pere_Transformers | e267bcc6559c998accaed647cacbff253031f8b0 | [
"MIT"
] | 1 | 2021-06-21T08:40:18.000Z | 2021-06-21T08:40:18.000Z | import torch
import os
from model.visualization import Visualization
from panel.main import tensorboard_panel
from torch.utils.data.dataset import Subset
import random
import numpy as np
def split_subsets(dataset,train_percentage=0.8,all_captions=True):
    """
    Split a captioning dataset into train and test `Subset`s.

    The dataset is assumed to hold 5 consecutive captions per image.
    Whole images (not individual captions) are shuffled and split, so all
    captions of one image end up on the same side of the split.

    :param dataset: indexable dataset with 5 entries per image.
    :param train_percentage: fraction of images assigned to training.
    :param all_captions: if True every caption index is kept; if False
        only the first caption of each image is used.
    :return: (train_subset, test_subset) tuple of `Subset` objects.
    """
    if all_captions == True:
        # Shuffle indexes in groups of 5 so captions stay with their image.
        index_groups = np.array([*range(0, len(dataset))]).reshape(-1, 5)
        np.random.shuffle(index_groups)
        shuffled = index_groups.flatten()
        n_train_images = int(len(shuffled) / 5 * train_percentage)
        train_subset = Subset(dataset, shuffled[0:n_train_images * 5].tolist())
        test_subset = Subset(dataset, shuffled[n_train_images * 5:].tolist())
    else:
        # Keep only the first caption (every 5th index) of each image.
        first_indexes = [*range(0, len(dataset), 5)]
        random.shuffle(first_indexes)
        n_train_images = int(len(first_indexes) * train_percentage)
        train_subset = Subset(dataset, first_indexes[0:n_train_images])
        test_subset = Subset(dataset, first_indexes[n_train_images:])
    return train_subset, test_subset
def train_single_epoch(epoch, model, train_loader, optimizer, criterion, device, scheduler):
    """Run one training epoch over ``train_loader``.

    Per batch: forward pass, loss between predictions and the
    one-step-shifted target sequence, backward pass with gradient clipping,
    and an optimizer step.  Loss and one sample caption/reference pair are
    printed for every batch.

    Args:
        epoch: 1-based epoch number (used for logging only).
        model: captioning model; must expose ``model.vocab.generate_caption``.
        train_loader: DataLoader yielding ``(img, target)`` batches.
        optimizer: optimizer over ``model.parameters()``.
        criterion: loss taking (logits of shape (batch, vocab, seq), target ids).
        device: torch device batches are moved to.
        scheduler: LR scheduler; intra-epoch stepping is commented out below,
            the caller steps it once per epoch instead.
    """
    model.train()
    for i, batch in enumerate(iter(train_loader)):
        # To train on a single batch only (debug / overfit check), uncomment:
        # if i==0:
        #     batch1 = batch
        # img, target = batch1
        img, target = batch
        img, target = img.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(img, target)
        # Reorder to (batch, vocab, seq_len) as expected by the criterion.
        output = output.permute(1, 2, 0)
        # Drop the last prediction step; targets are shifted by one token.
        loss = criterion(output[:, :, :-1], target[:, 1:])  # target[:,1:])
        print(i, loss.item())
        loss.backward()
        # Clip gradients to stabilise training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=0.25)
        optimizer.step()
        # For the Exponential scheduler, which must be stepped each time the
        # learning rate should be decayed by gamma:
        # if (i+1)%10 == 0:
        #     scheduler.step()
        #     print(optimizer.param_groups[0]['lr'])
        # Decode the first sample of the batch for a qualitative progress check.
        candidate_corpus = [model.vocab.generate_caption(torch.argmax(output[0].transpose(1, 0), dim=-1))]
        reference_corpus = [model.vocab.generate_caption(target[0, 1:])]
        bleu = 0
        # bleu = bleu_score(candidate_corpus, reference_corpus)
        print('--------------------------------------------------------------------------------------------------')
        print('--------------------------------------------------------------------------------------------------')
        print(f'Epoch {epoch} batch: {i} loss: {loss.item()}')
        print('--------------------------------------------------------------------------------------------------')
        print(candidate_corpus[0])
        print(reference_corpus[0])
        print('--------------------------------------------------------------------------------------------------')
        # Commented out because it raises a CUDA error:
        # write_on_tensorboard(i+(epoch*len(train_loader)),loss.item(),bleu,img[0],reference_corpus,candidate_corpus)
def save_model(model, epoch):
    """Checkpoint the model state dict under ``model/checkpoints/``.

    The file is named ``Epoch_<epoch>_model_state.pth`` and stores a dict
    with the epoch number and the model ``state_dict``.

    Args:
        model: torch module to checkpoint.
        epoch: epoch number embedded in the file name and payload.
    """
    checkpoint_dir = os.path.join('model', 'checkpoints')
    # Create the checkpoint directory on first use instead of failing
    # with FileNotFoundError when it does not exist yet.
    os.makedirs(checkpoint_dir, exist_ok=True)
    filename = os.path.join(checkpoint_dir, 'Epoch_' + str(epoch) + '_model_state.pth')
    model_state = {
        'epoch': epoch,
        'model': model.state_dict()
    }
    torch.save(model_state, filename)
def train(num_epochs, model, train_loader, test_loader, optimizer, criterion, device, log_interval, vocab, scheduler):
    """Run the full training loop.

    Trains for ``num_epochs`` epochs, stepping the LR scheduler once per
    epoch, and writes a checkpoint via ``save_model`` every 5th epoch.
    (``test_loader``, ``log_interval`` and ``vocab`` are accepted for
    interface compatibility but are not used here.)
    """
    epoch = 1
    while epoch <= num_epochs:
        train_single_epoch(epoch, model, train_loader, optimizer, criterion, device, scheduler)
        scheduler.step()
        checkpoint_due = (epoch % 5 == 0)
        if checkpoint_due:
            save_model(model, epoch)
        epoch += 1
| 35.262774 | 114 | 0.673981 |
e933799d41eabf2ce3d0578ad558fcf9ab8d220d | 2,251 | py | Python | views/probabilidade.py | pxcx/ambar-backend | 350baabb492e4fbc1002ea851d1cef4fc999b81a | [
"MIT"
] | null | null | null | views/probabilidade.py | pxcx/ambar-backend | 350baabb492e4fbc1002ea851d1cef4fc999b81a | [
"MIT"
] | null | null | null | views/probabilidade.py | pxcx/ambar-backend | 350baabb492e4fbc1002ea851d1cef4fc999b81a | [
"MIT"
] | null | null | null | from flask import jsonify
from sqlalchemy import func
from datetime import datetime, date
from models.previsao import Previsao, db | 39.491228 | 97 | 0.52821 |
e937f0e5ec885071b7daceb7fa5456d999a1e95f | 293 | py | Python | scripts/makeNegativesList.py | jccaicedo/localization-agent | d280acf355307b74e68dca9ec80ab293f0d18642 | [
"MIT"
] | 8 | 2016-11-20T19:43:45.000Z | 2020-12-09T04:58:05.000Z | scripts/makeNegativesList.py | jccaicedo/localization-agent | d280acf355307b74e68dca9ec80ab293f0d18642 | [
"MIT"
] | 45 | 2015-05-04T20:41:05.000Z | 2017-07-17T12:04:13.000Z | scripts/makeNegativesList.py | jccaicedo/localization-agent | d280acf355307b74e68dca9ec80ab293f0d18642 | [
"MIT"
] | 9 | 2016-11-20T19:43:46.000Z | 2020-09-01T21:01:54.000Z | import sys,os
import utils as cu
params = cu.loadParams('fullList positivesList output')
full = [x for x in open(params['fullList'])]
positives = [x for x in open(params['positivesList'])]
out = open(params['output'],'w')
for r in full:
if r not in positives:
out.write(r)
out.close()
| 22.538462 | 55 | 0.692833 |
e93a77efc359563f0911c10f45a8c7e3f5ed8fd4 | 1,354 | py | Python | tests/test_model.py | alexdawn/rollinghub | 6043c12520d7e0b0596f28c166616c1014e1f870 | [
"MIT"
] | null | null | null | tests/test_model.py | alexdawn/rollinghub | 6043c12520d7e0b0596f28c166616c1014e1f870 | [
"MIT"
] | 11 | 2019-08-18T21:37:28.000Z | 2022-03-21T22:17:37.000Z | tests/test_model.py | alexdawn/rollinghub | 6043c12520d7e0b0596f28c166616c1014e1f870 | [
"MIT"
] | null | null | null | import pytest
from rollinghub.db import get_db
| 27.08 | 72 | 0.656573 |
e93be486b0635edc83619c16da55bfa370ed7c0e | 19,672 | py | Python | openpype/hosts/unreal/plugins/load/load_camera.py | Tilix4/OpenPype | 8909bd890170880aa7ec8b673abaa25a9bdf40f2 | [
"MIT"
] | 1 | 2022-02-08T15:40:41.000Z | 2022-02-08T15:40:41.000Z | openpype/hosts/unreal/plugins/load/load_camera.py | zafrs/OpenPype | 4b8e7e1ed002fc55b31307efdea70b0feaed474f | [
"MIT"
] | null | null | null | openpype/hosts/unreal/plugins/load/load_camera.py | zafrs/OpenPype | 4b8e7e1ed002fc55b31307efdea70b0feaed474f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Load camera from FBX."""
from pathlib import Path
import unreal
from unreal import EditorAssetLibrary
from unreal import EditorLevelLibrary
from unreal import EditorLevelUtils
from openpype.pipeline import (
AVALON_CONTAINER_ID,
legacy_io,
)
from openpype.hosts.unreal.api import plugin
from openpype.hosts.unreal.api import pipeline as unreal_pipeline
| 35.509025 | 79 | 0.573861 |
e93d157cf7aab5c1bcb7bfeee8e1f4209c714ad6 | 2,862 | py | Python | recommander-lib/src/main.py | armendu/recommander-system | e2d13838237584cc5cc4de2f4ea2d63f9f3b8889 | [
"MIT"
] | 1 | 2021-04-29T04:15:13.000Z | 2021-04-29T04:15:13.000Z | recommander-lib/src/main.py | armendu/recommander-system | e2d13838237584cc5cc4de2f4ea2d63f9f3b8889 | [
"MIT"
] | null | null | null | recommander-lib/src/main.py | armendu/recommander-system | e2d13838237584cc5cc4de2f4ea2d63f9f3b8889 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Main application file
"""
__author__ = "Armend Ukehaxhaj"
__version__ = "1.0.0"
__license__ = "MIT"
from logzero import logger
import numpy as np
import pandas as pd
import csv
import pickle
from word2vec import word2vec
from preprocessor import preprocessor
import json
primary_data_filename = "input/GoTrainedData.txt"
sample_data_filename = "input/Sample.txt"
amazon_sample = "input/amazon_co-ecommerce_sample.csv"
if __name__ == "__main__":
main()
| 27.519231 | 74 | 0.615653 |
e93d7534c6c036af381481b03aad9004f87feec7 | 27 | py | Python | tests/models/__init__.py | Stevenjin8/song2vec | 908de06881d0598dcd2869d89709a2d50654a7fe | [
"MIT"
] | null | null | null | tests/models/__init__.py | Stevenjin8/song2vec | 908de06881d0598dcd2869d89709a2d50654a7fe | [
"MIT"
] | null | null | null | tests/models/__init__.py | Stevenjin8/song2vec | 908de06881d0598dcd2869d89709a2d50654a7fe | [
"MIT"
] | null | null | null | """Tests for ML models."""
| 13.5 | 26 | 0.592593 |
e93dd26357433b7e319a7cf157df9046ce5be7e6 | 2,378 | py | Python | spark_auto_mapper/data_types/datetime.py | gagan-chawla/SparkAutoMapper | 7b0aca2e4bece42b3229550f3f2fcc9607f79437 | [
"Apache-2.0"
] | null | null | null | spark_auto_mapper/data_types/datetime.py | gagan-chawla/SparkAutoMapper | 7b0aca2e4bece42b3229550f3f2fcc9607f79437 | [
"Apache-2.0"
] | null | null | null | spark_auto_mapper/data_types/datetime.py | gagan-chawla/SparkAutoMapper | 7b0aca2e4bece42b3229550f3f2fcc9607f79437 | [
"Apache-2.0"
] | null | null | null | from typing import Optional, List
from pyspark.sql import Column, DataFrame
from pyspark.sql.functions import coalesce, to_timestamp
from spark_auto_mapper.data_types.column import AutoMapperDataTypeColumn
from spark_auto_mapper.data_types.data_type_base import AutoMapperDataTypeBase
from spark_auto_mapper.data_types.literal import AutoMapperDataTypeLiteral
from spark_auto_mapper.helpers.value_parser import AutoMapperValueParser
from spark_auto_mapper.type_definitions.defined_types import AutoMapperDateInputType
| 38.983607 | 109 | 0.670311 |
e93e7a9f148352765158065775751a4ec95c81cf | 1,425 | py | Python | backend/registry/migrations/0002_auto_20220105_1336.py | mrmap-community/MrMap | 5dc05b7a5339b967047cd207755718f670a1d7cd | [
"MIT"
] | 10 | 2021-03-12T17:46:38.000Z | 2022-03-11T10:59:01.000Z | backend/registry/migrations/0002_auto_20220105_1336.py | mrmap-community/mrmap | 5dc05b7a5339b967047cd207755718f670a1d7cd | [
"MIT"
] | 214 | 2021-03-10T19:24:17.000Z | 2022-03-15T07:34:24.000Z | backend/registry/migrations/0002_auto_20220105_1336.py | mrmap-community/MrMap | 5dc05b7a5339b967047cd207755718f670a1d7cd | [
"MIT"
] | 9 | 2021-03-16T19:47:54.000Z | 2022-03-11T11:01:22.000Z | # Generated by Django 3.2.9 on 2022-01-05 12:36
from django.db import migrations, models
import django.db.models.manager
| 36.538462 | 162 | 0.65193 |
e93e898e14d862c8186e0e63f6ce2ac5ff75423c | 15,524 | py | Python | relah.py | ttwj/ReLah | 8231636d4698001dc615848096a97ebd78ae2713 | [
"WTFPL"
] | 3 | 2020-01-31T08:22:49.000Z | 2021-01-10T20:02:37.000Z | relah.py | ttwj/ReLah | 8231636d4698001dc615848096a97ebd78ae2713 | [
"WTFPL"
] | null | null | null | relah.py | ttwj/ReLah | 8231636d4698001dc615848096a97ebd78ae2713 | [
"WTFPL"
] | null | null | null | # Python implementation of DBS PayLah!
# By ttwj - 2017
import base64
import random
import string
#remember to install pycryptodome!
import datetime
from Crypto.Cipher import AES, PKCS1_v1_5
from Crypto.PublicKey import RSA
import lxml.etree, json
from lxml import html
from pprint import pprint
from io import StringIO
import requests
import re
import time
import warnings
import requests
import contextlib
from api.models import PayLahAPISource
http_proxy = "http://localhost:8888"
https_proxy = "https://localhost:8888"
app_ver = '4.0.0'
proxyDict = {
"http": http_proxy,
'https': https_proxy
}
try:
from functools import partialmethod
except ImportError:
# Python 2 fallback: https://gist.github.com/carymrobbins/8940382
from functools import partial
from Crypto.Cipher import AES
from Crypto import Random
#paylah_api_source = PayLahAPISource.objects.get(pk=1)
#txn = DBSPayLahTransaction(paylah_api_source)
#txn.get_transaction_history()
| 33.67462 | 987 | 0.626578 |
e93fa44d8c8e89fa596f6f1e1b5862803b660a31 | 13,090 | py | Python | st_dashboard.py | amirtaghavy/TDI-capstone-RedditTalks-vs-MarketAction | 62d6b754348ed7ae5d5ef4bd31eb2553a76c8892 | [
"MIT"
] | null | null | null | st_dashboard.py | amirtaghavy/TDI-capstone-RedditTalks-vs-MarketAction | 62d6b754348ed7ae5d5ef4bd31eb2553a76c8892 | [
"MIT"
] | null | null | null | st_dashboard.py | amirtaghavy/TDI-capstone-RedditTalks-vs-MarketAction | 62d6b754348ed7ae5d5ef4bd31eb2553a76c8892 | [
"MIT"
] | null | null | null | import streamlit as st
import dill
import pandas as pd
import plotly.express as px
from datetime import date
import statsmodels
with open('compiled-sentiment-history.pkd', 'rb') as f:
df_compiled = dill.load(f)
df_compiled.drop_duplicates(inplace=True)
dates = list({idx[1] for idx in df_compiled.index})
dates = sorted(dates, key=lambda dt: (str(dt).split('-')))
# date_ = '2021-06-01'
st.title('The Data Incubator Capstone Project')
st.subheader('*Title*: **Wallstreetbets Gossip vs. Market Price Action**')
st.subheader('*Created by*: Amir A. Taghavey - Summer, 2021')
st.markdown('*Email*: a [dot] taghavey @ gmail [dot] com')
''' '''
st.markdown(
'This App was developed as main deliverable of thecapstone project requirement of [**the Data Incubator**](https://www.thedataincubator.com/) fellowship program.')
st.sidebar.title('Options Dashboard:')
page = st.sidebar.selectbox('Select field:',
(
'Synopsis',
'App structure',
'VIZ: Reddit hot_10 vs. time',
'VIZ: Gossip vs. Action',
'ML analysis summary',
'Acknowledgments')
, 0)
if page == 'Synopsis':
    # Static text page: project background, objective, methods, conclusion.
    st.markdown(
        '''
**Background**: The short-squeeze of GameStop and AMC stocks in early 2021 was impacted in great part by the massive-scale coordinated action of the subreddit ***wallstreetbets*** ants army of retail investors.
Many of the early ants realized remarkable gains on their investment enabling them to payoff their student loans or home mortgages at the demise of a few hedge funds such as the London-based White Square Capital.
These events motivated new swarms of retail investors to join in the movement with their hard-earned savings, and for many this game has offered its ugly face!
**Objective**: Motivated by the story above, this project aimed at finding an objective answer to one question: ***Is safety in being a part of the herd when it comes to navigating the US Stock Market?***
**Methods**: To achieve this, I (i) scanned popular social media platforms to identify and characterize how the retail investors percieved the market performance for the most frequently talked about stocks on New York Stock Exchange before each trading session and (ii) compiled the actual market action data at the end of each trading session on a daily basis over the time period of 6/1/2021-9/1/2021, and performed an extensive amount of analysis to extract possible underlying correlartions.
**Summary**: NO correlation (and hence NO basis for meaningful predictions) was found betweem the market price action and any of the prior (i) PRE-market gossip / sentiment, (ii) stock price action, or (iii) stock options activity from the previous trading session.
**Conclusion**: Moral of the story, objectively and in a nutshell, is that ***No evidence was found to support ANY consistent forward temporal correlation bwteen market gossip and price action!***
'''
    )
elif page == 'App structure':
    # Static text page plus a schematic image describing the data pipeline.
    st.markdown(
        '''
App Structure:
\n
A. *reddit's PRE-market hot_20* (9:00 AM ET), the 20 most talked about NYSE stocks are identified
B. recent posts from *stocktwits* and *twitter* APIs for the hot_20 list of the day are compiled
C. vader sentiment intensity analyzer is implemented to extract investor sentiment from compiled text
D. price action data are collected from *yahoo_fin* API at the close of market (4:00 PM ET)
E. investor sentiment - market performance data are analyzed, modeled, and visualized
''')
    img = 'CodeStructure.png'
    st.image(img, clamp=True,
             caption='Schematic of the logical code structure and inter-connections between modules \
(i) compiling market talk data from social media platforms, \
(ii) performing sentiment intensity analysis, \
(iii) gathering financial data, and \
(iv) conducting data analytics on compiled market gossip - price action data.')
elif page == 'ML analysis summary':
    # Static text page summarising the modelling methodology and results.
    st.subheader('**Machine Learning Correlation Analysis**')
    st.markdown('''
\n
***Summary:*** An extensive correlation analysis study of the compiled data was conducted
with the *objective* to find underlying forward temporal correlations (if any) between
(a) post-market price action and (b.1) pre-market sentiment nalysis data, (b.2) pre-market
stock options activity data (e.g., contract volume, change in open interest, change in percent ITM / OTM, etc.),
and/or (b.3) previous trading session post-market price action data for reddit's hot stock list.
\n
***Approach***: Target (i.e. lable) was to predict the change in stock price, $$\Delta$$P.
Price change was defined as price quote at market close less price quote at market open normalized to
price quote at market open for a given ticker on reddit hot list. Two types of approaches were implemented
to model $$\Delta$$P: **A. Regressive Approach**, and **B. Binary Classification Approach**.
In the latter approach, price action signal was reduced to upward / downward trends.
\n
***Transformations***: All quantitative features were scaled using standard scaler, and dimensionality
reduction was carried out using TrauncatedSVD method.
\n
***Modeling***: Cross validation score was used to compare modeling performance of the tested models.
Model comparisons among regressors and classifiers were done separately using $$r^{2}$$ and accuracy
metrics, respectively.
\n
Models implemented include:
\n
| Model | Regression | Classification |
| :--- | :--------: | :------------: |
| Linear Regression | | |
| Logistic Regression | | |
| Ridge with cross-validation | | |
| Decision Tree | | |
| Random Forest | | |
| K-Nearest-Neighbors | | |
| Support Vector Machine | | |
| Multi-layer Perceptron Network | | |
\n
.
\n
***Results***: All regressors returned an $$r^{2}$$-value equal to zero (0) consistent with no detectable correlation
between any of (i) sentiment, (ii) stock options, or (iii) previous-day stock data and the response
variable (i.e. $$\Delta$$P). This was further corroborated with the slighly higher than the null-model
classification accuracy score yielded by the KNN classifier of 0.54 (versus 0.53 classification
accuracy corresponding to the null hypothesis).
The modeling results could extract no correlation between (signal) price action data for the
reddit hotlist and the sentiment extracted from the market talks, option activities or prior
trading-session data.
''')
elif page == 'Acknowledgments':
    # Static text page crediting third-party code and the project advisor.
    st.markdown('''
- Reddit hotlist sentiment intensity analysis in this project was done by implementing an exising
[reddit-sentiment_analyis](https://github.com/asad70/reddit-sentiment-analysis) github repository
developed by [**asad70**](https://github.com/asad70). It was modified to expend search scope
to additional financial sub-reddits, provide human-guided training to Vader Sentiment Intensity
Analyzer, and to fit the required i/o structure of this project.
- I would like to thank and acknowledge Dr. [Robert Schroll](robert@thedataincubator.com),
my instructor and TDI capstone project advisor, for the instrumental feedback I received from him
during the design, development and execution of this project.
''')
elif page == 'VIZ: Gossip vs. Action':
trendline_on = st.sidebar.checkbox('add linear trendline:', False)
date_idx = st.sidebar.slider('Select date index:',
min_value=0,
max_value=len(dates)-1,
value=0)
date_ = dates[date_idx]
df = df_compiled.loc[(slice(None), date_),:]
df.sort_values('counts', ascending=False, inplace=True)
df.reset_index(inplace=True)
# plt = sentiment_visualizer_date(c_df,'2021-06-01')
plt=px.scatter(df,
x='bull_bear_ratio',
y='change_sn',
color='neutral',
size='counts', #text='ticker',
size_max=20,
color_continuous_scale=px.colors.sequential.BuPu_r,
hover_data=['ticker', 'volume'],
labels={'bull_bear_ratio': 'Investor Bullishness [-]',
'change_sn': 'Price Change [-]'},
trendline='ols' if trendline_on else None,
title=f"As of {date.strftime(date_, r'%B, %d %Y')}:"
)
plt.update_layout(plot_bgcolor='white', # #ceced0
title_font={'size':16, 'family':'Arial Black'},
yaxis={'showgrid':False, 'zeroline':False, 'linecolor': 'black',
'zerolinecolor': 'grey', 'tickfont':{'size':12},
'titlefont':{'size':14, 'family':'Arial Black'},
'range':[-0.2,0.2]},
xaxis={'showgrid':False, 'zeroline':False, 'linecolor': 'black',
'tickfont':{'size':12}, 'titlefont':{'size':14, 'family':'Arial Black'},
'range':[.75,1.75]},
height=600, width=700, #'ylorrd'
coloraxis_colorbar={'title':"Neutrality",
'tickvals': [0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] ,
'tick0': 0.4,
# 'cmin':0.5,
# 'cmax': 1.0,
#'tickvals':[5,6,7,8,9], 'ticktext': ['0.1M', '1M', '10M', '100M', '1B']
},
hovermode="x unified"
)
plt.update_traces(textposition='top center',
textfont={'size':10, 'color':'grey'},
marker={'line':{'color':'#ceced0'}},
#hovertemplate=None,
)
st.plotly_chart(plt, use_container_width=True)
st.subheader('Sentiment')
st.dataframe(df[['ticker', 'bearish', 'bullish',
'neutral', 'bull_bear_ratio',
'change_sn', 'volume']])
elif page == 'VIZ: Reddit hot_10 vs. time':
    # Pie chart of total mention counts for the ten most frequently listed
    # tickers, plus a per-ticker bar chart of daily intra-session change.
    st.subheader('All-time (since the Memorial Day weekend!) HOT-10 stocks on Reddit:')
    # Top-10 tickers ranked by the number of days they appeared on the list.
    hot_10_inds = df_compiled.reset_index().groupby(by='ticker') \
        .count()[['date']].sort_values('date', ascending=False)[:10].index
    df_ = df_compiled.reset_index()
    # Total mention counts per hot-10 ticker, ordered like hot_10_inds.
    hot10_counts = df_[df_.ticker.isin(hot_10_inds)] \
        .groupby('ticker') \
        .sum()[['counts']] \
        .reindex(hot_10_inds) \
        .reset_index()
    fig = px.pie(hot10_counts, values='counts', names='ticker', hole=0.3,
                 color_discrete_sequence=px.colors.sequential.RdBu)
    fig.update_traces(textposition='inside', textinfo='percent+label')
    st.plotly_chart(fig)
    # Sidebar picker shows entries like "1. GME"; strip the rank prefix.
    hot10 = [f'{i+1}. {ticker}' for i, ticker in enumerate(hot_10_inds)]
    picked_hot = st.sidebar.selectbox('choose ticker to plot:', options=hot10, index=0)
    picked_hot = picked_hot.split(' ')[1]
    st.markdown(f'Bar chart of daily intra-session change in stock price for **${picked_hot}**:')
    df = df_compiled.loc[picked_hot].drop(columns=['counts'])
    # Bars: session change per date, coloured by bullishness, text = volume.
    plt = px.bar(df, y='change_sn', text='volume', color='bull_bear_ratio',
                 color_continuous_scale=px.colors.sequential.RdBu_r)
    plt.update_traces(texttemplate='%{text:.2s}', textposition='outside')
    plt.update_layout(uniformtext_minsize=8)
    plt.update_layout(xaxis_tickangle=-45,
                      yaxis={'showgrid': False,
                             'title': 'session change [-]',
                             'range': [-0.1, 0.1]},
                      coloraxis_colorbar={'title': "Investor\nBullishness",
                                          'tickmode': 'array',
                                          'tickvals': [0.8, 0.9, 1, 1.1, 1.2],
                                          'tick0': 0.8, })
    st.plotly_chart(plt, use_container_width=True)
    st.dataframe(df)
| 55.940171 | 504 | 0.58793 |
e940349493488e9c1525d630923a4b14c70fd2d8 | 745 | py | Python | hoofball/migrations/0002_comment.py | leo-holanda/Hoofball | ccf4399d33a6381acd2ff41efce3dbf0dca6a092 | [
"MIT"
] | 1 | 2021-07-30T10:05:43.000Z | 2021-07-30T10:05:43.000Z | hoofball/migrations/0002_comment.py | leo-holanda/Hoofball | ccf4399d33a6381acd2ff41efce3dbf0dca6a092 | [
"MIT"
] | null | null | null | hoofball/migrations/0002_comment.py | leo-holanda/Hoofball | ccf4399d33a6381acd2ff41efce3dbf0dca6a092 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-03-01 20:26
from django.db import migrations, models
import django.utils.timezone
| 29.8 | 114 | 0.587919 |
e9417a482c1a501cccd1b38f496ec064d6eb9c78 | 627 | py | Python | clock/clock/lib.py | litheblas/blasrummet-rpi | 5300e61c0d0d93fd77cd489eb02165793453b99f | [
"MIT"
] | null | null | null | clock/clock/lib.py | litheblas/blasrummet-rpi | 5300e61c0d0d93fd77cd489eb02165793453b99f | [
"MIT"
] | null | null | null | clock/clock/lib.py | litheblas/blasrummet-rpi | 5300e61c0d0d93fd77cd489eb02165793453b99f | [
"MIT"
] | null | null | null | from __future__ import absolute_import, unicode_literals
import logging
from time import sleep
from datetime import datetime
from .settings import PULSE_DURATION
from .io import pull, release
logger = logging.getLogger(__name__)
def sleep_until_next_minute():
    """Block until the wall clock reaches the next xx:xx:00.

    Accuracy is typically within 10-100 milliseconds of the minute boundary.
    """
    current = datetime.now().time()
    elapsed_in_minute = current.second + current.microsecond / 1000000.0
    sleep(60.0 - elapsed_in_minute)
e9425f8008305f97cda9a9d9e3075c0d79dde033 | 38 | py | Python | CH_03_pythonic_syntax/T_17_mypy.py | mastering-python/code_2 | 441af8b67402c8216c482cca7c002e1d7f0f1baa | [
"MIT"
] | null | null | null | CH_03_pythonic_syntax/T_17_mypy.py | mastering-python/code_2 | 441af8b67402c8216c482cca7c002e1d7f0f1baa | [
"MIT"
] | null | null | null | CH_03_pythonic_syntax/T_17_mypy.py | mastering-python/code_2 | 441af8b67402c8216c482cca7c002e1d7f0f1baa | [
"MIT"
] | null | null | null | some_number: int
some_number = 'test'
| 12.666667 | 20 | 0.763158 |
e9432f57bd7e613914b0ff79424dc9823a1f7a75 | 2,513 | py | Python | crowdgezwitscher/users/migrations/0004_auto_20170402_1358.py | Strassengezwitscher/Crowdgezwitscher | afdd433acb35c1a554ba79464b744975de065151 | [
"MIT"
] | 4 | 2016-07-22T07:20:31.000Z | 2016-11-13T18:13:34.000Z | crowdgezwitscher/users/migrations/0004_auto_20170402_1358.py | Strassengezwitscher/Strassengezwitscher | afdd433acb35c1a554ba79464b744975de065151 | [
"MIT"
] | 402 | 2016-04-26T08:38:17.000Z | 2022-03-11T23:26:49.000Z | crowdgezwitscher/users/migrations/0004_auto_20170402_1358.py | Strassengezwitscher/Crowdgezwitscher | afdd433acb35c1a554ba79464b744975de065151 | [
"MIT"
] | 1 | 2018-01-14T16:58:57.000Z | 2018-01-14T16:58:57.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-04-02 13:58
from django.db import migrations
def forwards_func(apps, schema_editor):
    """Grant permissions for TwitterAccount to admins and mods.

    Includes creating a new view permission.
    """
    # We can't import the models directly as they may be newer versions than this migration expects.
    # We use the historical versions instead.
    Group = apps.get_model('auth', 'Group')
    Permission = apps.get_model('auth', 'Permission')
    ContentType = apps.get_model('contenttypes', 'ContentType')
    TwitterAccount = apps.get_model('twitter', 'TwitterAccount')
    db_alias = schema_editor.connection.alias
    # Look the target groups up on the database this migration runs against.
    admins = Group.objects.using(db_alias).get(name="Administratoren")
    mods = Group.objects.using(db_alias).get(name="Moderatoren")
    content_type = ContentType.objects.get_for_model(TwitterAccount)
    for perm_type in ['add', 'change', 'delete', 'view']:
        # get_or_create: per the docstring, only 'view' is new here —
        # add/change/delete are expected to exist already; get_or_create
        # handles both cases uniformly.
        permission, _ = Permission.objects.get_or_create(
            codename='%s_twitteraccount' % perm_type,
            name='Can %s twitter account' % perm_type,
            content_type=content_type,
        )
        admins.permissions.add(permission)
        mods.permissions.add(permission)
def reverse_func(apps, schema_editor):
    """Removes permissions for TwitterAccount from admins and mods.

    Includes deleting (in addition to unassigning) the view permission.
    """
    # Historical model versions, same rationale as in forwards_func.
    Group = apps.get_model('auth', 'Group')
    Permission = apps.get_model('auth', 'Permission')
    ContentType = apps.get_model('contenttypes', 'ContentType')
    TwitterAccount = apps.get_model('twitter', 'TwitterAccount')
    db_alias = schema_editor.connection.alias
    admins = Group.objects.using(db_alias).get(name="Administratoren")
    mods = Group.objects.using(db_alias).get(name="Moderatoren")
    content_type = ContentType.objects.get_for_model(TwitterAccount)
    for perm_type in ['add', 'change', 'delete', 'view']:
        # Unassign every TwitterAccount permission from both groups...
        permission = Permission.objects.get(codename='%s_twitteraccount' % perm_type)
        admins.permissions.remove(permission)
        mods.permissions.remove(permission)
    # ...but delete only the custom 'view' permission object itself; the
    # default add/change/delete permissions are left in place.
    Permission.objects.using(db_alias).filter(content_type=content_type, codename__contains='view').delete()
e945ff9db15d3f14ca3c606adc1612355944457e | 909 | py | Python | gallery/03_sp/plot_wavelets.py | RandallBalestriero/TheanoXLA | d8778c2eb3254b478cef4f45d934bf921e695619 | [
"Apache-2.0"
] | 67 | 2020-02-21T21:26:46.000Z | 2020-06-14T14:25:42.000Z | gallery/03_sp/plot_wavelets.py | RandallBalestriero/TheanoXLA | d8778c2eb3254b478cef4f45d934bf921e695619 | [
"Apache-2.0"
] | 8 | 2020-02-22T14:45:56.000Z | 2020-06-07T16:56:47.000Z | gallery/03_sp/plot_wavelets.py | RandallBalestriero/TheanoXLA | d8778c2eb3254b478cef4f45d934bf921e695619 | [
"Apache-2.0"
] | 4 | 2020-02-21T17:34:46.000Z | 2020-05-30T08:30:14.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# flake8: noqa
"""
Morlet Wavelet in time and Fourier domain
=========================================
This example shows how to generate a wavelet filter-bank.
"""
import symjax
import symjax.tensor as T
import matplotlib.pyplot as plt
import numpy as np
J = 5
Q = 4
scales = T.power(2, T.linspace(0.1, J - 1, J * Q))
scales = scales[:, None]
wavelet = symjax.tensor.signal.complex_morlet(5 * scales, np.pi / scales)
waveletw = symjax.tensor.signal.fourier_complex_morlet(
5 * scales, np.pi / scales, wavelet.shape[-1]
)
f = symjax.function(outputs=[wavelet, waveletw])
wavelet, waveletw = f()
plt.subplot(121)
for i in range(J * Q):
plt.plot(2 * i + wavelet[i].real, c="b")
plt.plot(2 * i + wavelet[i].imag, c="r")
plt.subplot(122)
for i in range(J * Q):
plt.plot(i + waveletw[i].real, c="b")
plt.plot(i + waveletw[i].imag, c="r")
| 22.170732 | 73 | 0.628163 |
3a5d52f7066df721bcc6a4454c0e49f976cabd83 | 39 | py | Python | kfdata/__main__.py | kylef-archive/KFData.py | 685d58255c9f8518834e395d94d3b75d3dd3eceb | [
"BSD-3-Clause"
] | 1 | 2015-11-08T13:23:39.000Z | 2015-11-08T13:23:39.000Z | kfdata/__main__.py | kylef/KFData.py | 685d58255c9f8518834e395d94d3b75d3dd3eceb | [
"BSD-3-Clause"
] | null | null | null | kfdata/__main__.py | kylef/KFData.py | 685d58255c9f8518834e395d94d3b75d3dd3eceb | [
"BSD-3-Clause"
] | null | null | null | from kfdata.manage import main
main()
| 9.75 | 30 | 0.769231 |
3a5f1a224d28494c27bf5124ac4e6a08b36bb55e | 240 | py | Python | com/sujoym/basic/sys.py | sujoym/python-examples | ee49a6aeb50749611341b2850587b30c38b34509 | [
"Apache-2.0"
] | null | null | null | com/sujoym/basic/sys.py | sujoym/python-examples | ee49a6aeb50749611341b2850587b30c38b34509 | [
"Apache-2.0"
] | null | null | null | com/sujoym/basic/sys.py | sujoym/python-examples | ee49a6aeb50749611341b2850587b30c38b34509 | [
"Apache-2.0"
] | null | null | null | import sys
#sys.stderr.write('Stderr text\n');
#sys.stderr.flush()
#sys.stdout.write('Stdout text\n');
#print(sys.argv[1])
#if len(sys.argv)>1:
# print(float(sys.argv[1])*5);
main(sys.argv[1]);
| 17.142857 | 35 | 0.641667 |
3a5f95c4dd3189822a688ab6608502a352c54b4b | 167 | py | Python | slackforms/handlers/__init__.py | Albatrous/django-slack-forms | baee37942085bf2f9e35beb9a4a4aa767b319b35 | [
"MIT"
] | 1 | 2019-06-20T00:11:58.000Z | 2019-06-20T00:11:58.000Z | slackforms/handlers/__init__.py | Albatrous/django-slack-forms | baee37942085bf2f9e35beb9a4a4aa767b319b35 | [
"MIT"
] | 3 | 2020-02-11T23:46:14.000Z | 2021-06-10T21:10:37.000Z | slackforms/handlers/__init__.py | Albatrous/django-slack-forms | baee37942085bf2f9e35beb9a4a4aa767b319b35 | [
"MIT"
] | 3 | 2019-12-13T06:53:18.000Z | 2021-06-04T07:12:56.000Z | # flake8: noqa
from .form import FormHandler
from .slash import SlashHandler
from .manual import ManualHandler
from .interactions import ActionHandler, MessageHandler
| 27.833333 | 55 | 0.838323 |
3a61c5a4f7f2b0b08f169681bdd4f9538e9142c6 | 13,902 | py | Python | RocMethod.py | meiyuanqing/MetaThreshold | fbccc7e5356606b929211eedaf5371506232c1b5 | [
"MIT"
] | null | null | null | RocMethod.py | meiyuanqing/MetaThreshold | fbccc7e5356606b929211eedaf5371506232c1b5 | [
"MIT"
] | null | null | null | RocMethod.py | meiyuanqing/MetaThreshold | fbccc7e5356606b929211eedaf5371506232c1b5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding:utf-8
"""
Author : Yuanqing Mei
Date : 2021/4/8
Time: 15:42
File: RocMethod.py
HomePage : http://github.com/yuanqingmei
Email : dg1533019@smail.nju.edu.cn
This script find out the cutoff of a metric value by maximizing the AUC value and ROCBPPMFMGM methods.
References
[1] Bender, R. Quantitative risk assessment in epidemiological studies investigating threshold effects.
Biometrical Journal, 41 (1999), 305-319.VARLSEP310
[2] Zhou, Y., et al. "An in-depth study of the potentially confounding effect of class size in fault prediction."
ACM Trans. Softw. Eng. Methodol. (2014) 23(1): 1-51. (BPPMFM(F1))
[3] Shatnawi, R. (2018). Identifying Threshold Values of Change-Prone Modules.
(sum(Sensitivity+Specificity)=sum(TPR+TNR))
"""
import time
if __name__ == '__main__':
    # Time the full threshold computation for the summary message below.
    s_time = time.time()
    roc_threshold()
    e_time = time.time()
    execution_time = e_time - s_time
    # Fixed: the message previously said "Bender.py" (copy-paste from the
    # companion Bender.py script); this file is RocMethod.py.
    print("The __name__ is ", __name__, ". This is end of RocMethod.py!\n",
          "The execution time of RocMethod.py script is ", execution_time)
| 47.447099 | 120 | 0.535319 |
3a6257117bb5b39b2295a1e04bfebd82a73a4f27 | 1,372 | py | Python | app/models.py | jonathankamau/note-taking-app | f7315dbde34d1a06aa10bc0548fc85770fa4c142 | [
"MIT"
] | 1 | 2020-07-18T16:35:42.000Z | 2020-07-18T16:35:42.000Z | app/models.py | jonathankamau/note-taking-app | f7315dbde34d1a06aa10bc0548fc85770fa4c142 | [
"MIT"
] | 32 | 2020-07-19T18:19:24.000Z | 2021-06-04T23:45:34.000Z | app/models.py | jonathankamau/note-taking-app | f7315dbde34d1a06aa10bc0548fc85770fa4c142 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
| 29.826087 | 78 | 0.715015 |
3a63a86305fa3e3ced908249d69f673dd8d16d58 | 717 | py | Python | migrations/versions/2018-09-27_12:25:31__3cbc86a0a9d7.py | gems-uff/sms | 01cfa84bd467617c58f58da04711c5097dd93fe6 | [
"MIT"
] | null | null | null | migrations/versions/2018-09-27_12:25:31__3cbc86a0a9d7.py | gems-uff/sms | 01cfa84bd467617c58f58da04711c5097dd93fe6 | [
"MIT"
] | null | null | null | migrations/versions/2018-09-27_12:25:31__3cbc86a0a9d7.py | gems-uff/sms | 01cfa84bd467617c58f58da04711c5097dd93fe6 | [
"MIT"
] | null | null | null | """empty message
Revision ID: 3cbc86a0a9d7
Revises: 77894fcde804
Create Date: 2018-09-27 12:25:31.893545
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3cbc86a0a9d7'
down_revision = '77894fcde804'
branch_labels = None
depends_on = None
| 24.724138 | 106 | 0.707113 |
3a640b59523119016904d7053ed1bc557df19331 | 2,685 | py | Python | mp_roguelike/ai.py | nonk123/mp_roguelike | 48785b44dd3f2518a5a639a6609670408e7ea1f5 | [
"MIT"
] | null | null | null | mp_roguelike/ai.py | nonk123/mp_roguelike | 48785b44dd3f2518a5a639a6609670408e7ea1f5 | [
"MIT"
] | null | null | null | mp_roguelike/ai.py | nonk123/mp_roguelike | 48785b44dd3f2518a5a639a6609670408e7ea1f5 | [
"MIT"
] | null | null | null | import random
from .util import sign
| 26.584158 | 79 | 0.582495 |
3a640e6170ae4b45fbad29d7cf0c3f5b49ab9f01 | 83 | py | Python | mach_cad/model_obj/materials/__init__.py | Severson-Group/MachEval | dbb7999188133f8744636da53cab475ae538ce80 | [
"BSD-3-Clause"
] | 6 | 2021-11-02T20:12:32.000Z | 2021-11-13T10:50:35.000Z | mach_cad/model_obj/materials/__init__.py | Severson-Group/MachEval | dbb7999188133f8744636da53cab475ae538ce80 | [
"BSD-3-Clause"
] | 18 | 2021-11-29T20:14:55.000Z | 2022-03-02T07:17:37.000Z | mach_cad/model_obj/materials/__init__.py | Severson-Group/MachEval | dbb7999188133f8744636da53cab475ae538ce80 | [
"BSD-3-Clause"
] | 1 | 2022-01-29T00:52:38.000Z | 2022-01-29T00:52:38.000Z |
from .material_generic import *
__all__ = []
__all__ += material_generic.__all__ | 13.833333 | 35 | 0.759036 |
3a6432af138a6ef234f6f37cbbda6934b7bb3c37 | 7,657 | py | Python | hpc-historias-clinicas/settings.py | btenaglia/hpc-historias-clinicas | 649d8660381381b1c591667760c122d73071d5ec | [
"BSD-3-Clause"
] | null | null | null | hpc-historias-clinicas/settings.py | btenaglia/hpc-historias-clinicas | 649d8660381381b1c591667760c122d73071d5ec | [
"BSD-3-Clause"
] | null | null | null | hpc-historias-clinicas/settings.py | btenaglia/hpc-historias-clinicas | 649d8660381381b1c591667760c122d73071d5ec | [
"BSD-3-Clause"
] | null | null | null | import os
PROJECT_ROOT = os.path.dirname(__file__)
# DEBUG
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# END DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.4/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Buenos_Aires'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'es-es'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    os.path.join(PROJECT_ROOT, 'staticfiles'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'e89^m%8%qx)qfj^m8@*=pp9wyg=sujhy*z9xty4f^x)tzq7_&m'
# MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'djangosecure.middleware.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'pagination.middleware.PaginationMiddleware',
)
# END MIDDLEWARE CONFIGURATION
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
#'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
# Your stuff: custom template context processers go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# END TEMPLATE CONFIGURATION
ROOT_URLCONF = 'hpc-historias-clinicas.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'hpc-historias-clinicas.wsgi.application'
# APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.flatpages',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'pagination', # paginacion
'easy_pdf',
'easy_thumbnails',
'djrill',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'hpc-historias-clinicas.users', # Usuarios
'hpc-historias-clinicas.medicos', # Medicos
'hpc-historias-clinicas.ayudantes', # Ayudantes
'hpc-historias-clinicas.pacientes', # Pacientes
'hpc-historias-clinicas.anamnesis', # Anamnesis
'hpc-historias-clinicas.antecedentes_familiares', # Antecedentes Familiares
'hpc-historias-clinicas.antecedentes_personales', # Antecedentes Personales
'hpc-historias-clinicas.diagnosticos', # Diagnosticos
'hpc-historias-clinicas.habitos', # Habitos
'hpc-historias-clinicas.aparatos', # Aparatos
'hpc-historias-clinicas.examen_fisico', # Examen Fisico
'hpc-historias-clinicas.planteos', # PLanteos diagnosticos
'hpc-historias-clinicas.metodologias', # Metodologias de estudio y tratamiento
'hpc-historias-clinicas.historias', # Historias clinicas
'hpc-historias-clinicas.inter_consultas', # Inter Consultas
'hpc-historias-clinicas.evoluciones', # Evoluciones
'hpc-historias-clinicas.fojas_quirurgicas', # Fojas quirurgicas
'hpc-historias-clinicas.procedimientos_quirurgicos', # Procedimientos quirurgicos
'hpc-historias-clinicas.epicrisis', # Epicrisis
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# END APP CONFIGURATION
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# END Custom user app defaults
APPEND_SLASH = False
THUMBNAIL_BASEDIR = 'thumbs'
# --------------------------
# IMPORT LOCAL SETTINGS
# --------------------------
try:
from settings_local import *
except ImportError:
pass
| 38.094527 | 87 | 0.750947 |
3a66f861ec173370f50a0b31924da0bccb5e1872 | 2,661 | py | Python | romanyh/transposition.py | napulen/romanyh | 34bc75d40bf532eb20607db763fcbc2693cac35f | [
"BSD-3-Clause"
] | null | null | null | romanyh/transposition.py | napulen/romanyh | 34bc75d40bf532eb20607db763fcbc2693cac35f | [
"BSD-3-Clause"
] | 5 | 2020-12-08T04:37:21.000Z | 2021-01-06T03:36:30.000Z | romanyh/transposition.py | napulen/romanyh | 34bc75d40bf532eb20607db763fcbc2693cac35f | [
"BSD-3-Clause"
] | null | null | null | import re
import sys
from music21.interval import Interval
from music21.key import Key
def findKeysInRomanTextString(rntxt):
    """Extract every key annotation from a RomanText string.

    A key annotation is a token of the form `` X: `` where ``X`` is a
    tonic letter (upper case for major, lower case for minor) with an
    optional sharp ``#`` or flat ``b``. The key names are returned in
    order of appearance, e.g. ``["C", "g#", "Bb"]``.
    """
    keyPattern = re.compile(r" ([a-gA-G][#b]?): ")
    return keyPattern.findall(rntxt)
def transposeKeys(keys, newTonic):
    """Transpose a list of keys relative to a new tonic.

    The first entry of *keys* is the reference; every key is shifted by
    the interval between that reference tonic and *newTonic*, keeping
    each key's own mode. A key that would land on a signature of seven
    or more sharps/flats is respelled enharmonically, and music21's
    ``-`` flat spelling is rewritten as ``b`` in the returned names.
    """
    reference = Key(keys[0])
    target = Key(newTonic, mode=reference.mode)
    shift = Interval(reference.tonic, target.tonic)

    def transposeOne(name):
        original = Key(name)
        movedTonic = original.tonic.transpose(shift)
        moved = Key(movedTonic, mode=original.mode)
        if abs(moved.sharps) >= 7:
            # Too many accidentals: prefer the enharmonic spelling.
            moved = Key(movedTonic.getEnharmonic(), mode=original.mode)
        return moved.tonicPitchNameWithCase

    names = [target.tonicPitchNameWithCase]
    names.extend(transposeOne(k) for k in keys[1:])
    return [name.replace("-", "b") for name in names]
def transposeRomanText(f, newTonic="C"):
    """Transposes a RomanText file into a different key.
    The transposition is performed in the following way:
    - The first key in the file is taken as the reference key
    - An interval between the reference key and new tonic is computed
    - Every transposed key respects that interval, unless it becomes
    or exceeds a key signature with 7 sharps or 7 flats
    - In that case, the enharmonic spelling is preferred
    The mode of the original key is always respected. That is,
    attempting to transpose an annotation in the key of C Major
    with a newTonic of `a` will result in a transposition to
    A Major. Change of mode is not trivial and it is not addressed
    in this code.
    """
    with open(f) as fd:
        rntxt = fd.read()
    keys = findKeysInRomanTextString(rntxt)
    transposedKeys = transposeKeys(keys, newTonic)
    # Rebuild the exact " X: " tokens so replacement only touches key annotations.
    keysString = [f" {k}: " for k in keys]
    transposedKeysString = [f" {k}: " for k in transposedKeys]
    transposedRntxt = ""
    # Replace occurrences strictly left-to-right: partition() splits at the
    # FIRST occurrence only, so repeated keys are consumed one at a time and
    # text already moved to transposedRntxt is never rescanned.
    for original, transposed in zip(keysString, transposedKeysString):
        solved, replace, remainder = rntxt.partition(original)
        transposedRntxt += solved + transposed
        rntxt = remainder
    transposedRntxt += rntxt
    return transposedRntxt
# CLI: python transposition.py <romantext-file> [new-tonic]
if __name__ == "__main__":
    inputFile = sys.argv[1]
    # New tonic is optional and defaults to C; the mode comes from the file itself.
    newTonic = sys.argv[2] if len(sys.argv) == 3 else "C"
    transposedRntxt = transposeRomanText(inputFile, newTonic)
    print(transposedRntxt)
| 36.452055 | 70 | 0.693724 |
3a67795832eb29853a6ccb60a0d65c013b0a8f82 | 4,847 | py | Python | management_api_app/tests_ma/test_service_bus/test_deployment_status_update.py | LizaShak/AzureTRE | b845eb4b73439ef7819565aaadb36f43b6484ad9 | [
"MIT"
] | 2 | 2021-11-14T16:57:16.000Z | 2022-03-13T15:14:26.000Z | management_api_app/tests_ma/test_service_bus/test_deployment_status_update.py | anatbal/AzureTRE | d1d4891657c737092e761c4aaf80b04ff0f03fc7 | [
"MIT"
] | null | null | null | management_api_app/tests_ma/test_service_bus/test_deployment_status_update.py | anatbal/AzureTRE | d1d4891657c737092e761c4aaf80b04ff0f03fc7 | [
"MIT"
] | null | null | null | import json
import pytest
import uuid
from mock import AsyncMock, patch
from db.errors import EntityDoesNotExist
from models.domain.resource import Status
from models.domain.workspace import Workspace
from models.domain.resource import Deployment
from resources import strings
from service_bus.deployment_status_update import receive_message_and_update_deployment
pytestmark = pytest.mark.asyncio
test_data = [
'bad',
'{"good": "json", "bad": "message"}'
]
test_sb_message = {
"id": "59b5c8e7-5c42-4fcb-a7fd-294cfc27aa76",
"status": Status.Deployed,
"message": "test message"
}
| 41.784483 | 115 | 0.810192 |
3a67c5e3bdd0bcd555184047e8d52728b4026b70 | 1,384 | py | Python | fms-python/face_core.py | seanarwa/firm | 774965004766c2ce59e17bb08692370280b3e95c | [
"Apache-2.0"
] | null | null | null | fms-python/face_core.py | seanarwa/firm | 774965004766c2ce59e17bb08692370280b3e95c | [
"Apache-2.0"
] | null | null | null | fms-python/face_core.py | seanarwa/firm | 774965004766c2ce59e17bb08692370280b3e95c | [
"Apache-2.0"
] | null | null | null | import os
import time
import logging as log
import numpy as np
from scikit-learn.preprocessing import normalize
# local modules
import config | 33.756098 | 126 | 0.760838 |
3a695ae89ca40a6004f7716018ec39b583cbbbfd | 1,587 | py | Python | tests/sms/models/test_reschedule_sms_messages.py | infobip-community/infobip-api-python-sdk | 5ffc5ab877ee1748aa29391f991c8c5324387487 | [
"MIT"
] | null | null | null | tests/sms/models/test_reschedule_sms_messages.py | infobip-community/infobip-api-python-sdk | 5ffc5ab877ee1748aa29391f991c8c5324387487 | [
"MIT"
] | null | null | null | tests/sms/models/test_reschedule_sms_messages.py | infobip-community/infobip-api-python-sdk | 5ffc5ab877ee1748aa29391f991c8c5324387487 | [
"MIT"
] | null | null | null | from datetime import date, datetime, timedelta
import pytest
from pydantic.error_wrappers import ValidationError
from infobip_channels.sms.models.body.reschedule_sms_messages import (
RescheduleSMSMessagesMessageBody,
)
from infobip_channels.sms.models.query_parameters.reschedule_messages import (
RescheduleSMSMessagesQueryParameters,
)
| 27.842105 | 83 | 0.672968 |
3a69ee7bd76a61928b4ca3a0383eeeac9e541646 | 8,349 | py | Python | python/mxnet/device.py | t-triobox/incubator-mxnet | 93aa9e33fcb2f216179c691ed9461bc96e37ae70 | [
"Apache-2.0"
] | 1 | 2021-11-09T01:40:17.000Z | 2021-11-09T01:40:17.000Z | python/mxnet/device.py | t-triobox/incubator-mxnet | 93aa9e33fcb2f216179c691ed9461bc96e37ae70 | [
"Apache-2.0"
] | null | null | null | python/mxnet/device.py | t-triobox/incubator-mxnet | 93aa9e33fcb2f216179c691ed9461bc96e37ae70 | [
"Apache-2.0"
] | 1 | 2018-07-19T00:43:30.000Z | 2018-07-19T00:43:30.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Device management API of mxnet."""
import contextvars
import ctypes
from .base import _LIB
from .base import check_call
def cpu(device_id=0):
    """Returns a CPU device.

    This function is a short cut for ``Device('cpu', device_id)``.
    For most operations, when no device is specified, the default device is `cpu()`.

    Examples
    ----------
    >>> with mx.cpu():
    ...     cpu_array = mx.np.ones((2, 3))
    >>> cpu_array.device
    cpu(0)
    >>> cpu_array = mx.np.ones((2, 3), ctx=mx.cpu())
    >>> cpu_array.device
    cpu(0)

    Parameters
    ----------
    device_id : int, optional
        The device id of the device. `device_id` is not needed for CPU.
        This is included to make interface compatible with GPU.

    Returns
    -------
    device : Device
        The corresponding CPU device.
    """
    return Device('cpu', device_id)
def cpu_pinned(device_id=0):
    """Returns a CPU pinned memory device. Copying from CPU pinned memory to GPU
    is faster than from normal CPU memory.

    This function is a short cut for ``Device('cpu_pinned', device_id)``.

    Examples
    ----------
    >>> with mx.cpu_pinned():
    ...     cpu_array = mx.np.ones((2, 3))
    >>> cpu_array.device
    cpu_pinned(0)
    >>> cpu_array = mx.np.ones((2, 3), ctx=mx.cpu_pinned())
    >>> cpu_array.device
    cpu_pinned(0)

    Parameters
    ----------
    device_id : int, optional
        The device id of the device. `device_id` is not needed for CPU.
        This is included to make interface compatible with GPU.

    Returns
    -------
    device : Device
        The corresponding CPU pinned memory device.
    """
    return Device('cpu_pinned', device_id)
def gpu(device_id=0):
    """Returns a GPU device.

    This function is a short cut for Device('gpu', device_id).
    The K GPUs on a node are typically numbered as 0,...,K-1.

    Examples
    ----------
    >>> cpu_array = mx.np.ones((2, 3))
    >>> cpu_array.device
    cpu(0)
    >>> with mx.gpu(1):
    ...     gpu_array = mx.np.ones((2, 3))
    >>> gpu_array.device
    gpu(1)
    >>> gpu_array = mx.np.ones((2, 3), ctx=mx.gpu(1))
    >>> gpu_array.device
    gpu(1)

    Parameters
    ----------
    device_id : int, optional
        The device id of the device, needed for GPU.

    Returns
    -------
    device : Device
        The corresponding GPU device.
    """
    return Device('gpu', device_id)
def num_gpus():
    """Query CUDA for the number of GPUs present.

    Raises
    ------
    Will raise an exception on any CUDA error.

    Returns
    -------
    count : int
        The number of GPUs.
    """
    # Out-parameter filled in by the C API; check_call raises on a non-zero status.
    count = ctypes.c_int()
    check_call(_LIB.MXGetGPUCount(ctypes.byref(count)))
    return count.value
def gpu_memory_info(device_id=0):
    """Query CUDA for the free and total bytes of GPU global memory.

    Parameters
    ----------
    device_id : int, optional
        The device id of the GPU device.

    Raises
    ------
    Will raise an exception on any CUDA error.

    Returns
    -------
    (free, total) : (int, int)
    """
    # 64-bit out-parameters for the byte counts; check_call raises on failure.
    free = ctypes.c_uint64()
    total = ctypes.c_uint64()
    dev_id = ctypes.c_int(device_id)
    check_call(_LIB.MXGetGPUMemoryInformation64(dev_id, ctypes.byref(free), ctypes.byref(total)))
    return (free.value, total.value)
# Context-local holder of the active default device; ``with Device(...)``
# blocks swap this value, and it falls back to cpu(0) otherwise.
_current = contextvars.ContextVar('namemanager', default=Device('cpu', 0))
def current_device():
    """Returns the current device.

    By default, `mx.cpu()` is used for all the computations
    and it can be overridden by using `with mx.Device(x)` statement where
    x can be cpu(device_id) or gpu(device_id).

    Examples
    -------
    >>> mx.current_device()
    cpu(0)
    >>> with mx.Device('gpu', 1):  # Device changed in `with` block.
    ...     mx.current_device()  # Computation done here will be on gpu(1).
    ...
    gpu(1)
    >>> mx.current_device()  # Back to default device.
    cpu(0)

    Returns
    -------
    default_device : Device
    """
    return _current.get()
| 28.016779 | 108 | 0.618517 |
3a6a4945d24f523a66e8dd1cc3a18e4d3749558b | 5,578 | py | Python | _pkg_KuFunc/mod_SetLabel.py | tianlunjiang/_NukeStudio_v2 | 5ed9b9217aff16d903bdcda5c2f1e1cd3bebe367 | [
"CNRI-Python"
] | 6 | 2019-08-27T01:30:15.000Z | 2020-11-17T00:40:01.000Z | _pkg_KuFunc/mod_SetLabel.py | tianlunjiang/_NukeMods | 47861bfc273262abba55b9f9a61782a5d89479b1 | [
"CNRI-Python"
] | 2 | 2019-01-22T04:09:28.000Z | 2019-01-23T15:11:39.000Z | _pkg_KuFunc/mod_SetLabel.py | tianlunjiang/_NukeMods | 47861bfc273262abba55b9f9a61782a5d89479b1 | [
"CNRI-Python"
] | 1 | 2020-08-03T22:43:23.000Z | 2020-08-03T22:43:23.000Z |
# ------------------------------------------------------------------------------
# Module Import
# ------------------------------------------------------------------------------
import nuke, nukescripts
import platform
from Qt import QtWidgets, QtGui, QtCore
#------------------------------------------------------------------------------
#-Header
#------------------------------------------------------------------------------
__VERSION__ = '2.0'
__OS__ = platform.system()
__AUTHOR__ = "Tianlun Jiang"
__WEBSITE__ = "jiangovfx.com"
__COPYRIGHT__ = "copyright (c) %s - %s" % (__AUTHOR__, __WEBSITE__)
__TITLE__ = "SetLabel v%s" % __VERSION__
# ------------------------------------------------------------------------------
# Global Variables
# ------------------------------------------------------------------------------
# Knob names that never carry label-worthy information (UI/bookkeeping knobs).
KNOB_IGNORE = {
    'layer', 'invert_mask', 'help',
    'dope_sheet', 'hide_input', 'xpos',
    'crop', 'channels', 'note_font_color',
    'onCreate', 'quality', 'updateUI',
    'knobChanged', 'note_font', 'tile_color',
    'bookmark', 'selected', 'autolabel',
    'process_mask', 'label', 'onDestroy',
    'inject', 'indicators', 'icon',
    'channel', 'maskFrom', 'maskChannelMask',
    'enable', 'maskChannelInput', 'Mask',
    'ypos', 'postage_stamp_frame', 'postage_stamp',
    'lifetimeStart', 'maskChannel', 'panel',
    'lifetimeEnd', 'maskFromFlag',
    'name', 'cached', 'fringe',
    'mask', 'note_font_size', 'filter',
    'useLifetime', 'gl_color',
}
# Substrings that mark a knob as ignorable wherever they appear in its name.
KNOB_IGNORE_KEYWORDS = ['_panelDropped', 'enable', 'unpremult', 'clamp']
# ------------------------------------------------------------------------------
# Core Class
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Supporting Functions
# ------------------------------------------------------------------------------
def filterKnobs(knobs):
    """Filter knob names down to those worth showing in a label.

    @knobs: (list) knob names to consider
    return: (list) sorted knob names that are not in KNOB_IGNORE and do
        not contain any of the KNOB_IGNORE_KEYWORDS substrings
    """
    candidates = set(knobs) - KNOB_IGNORE
    # Keep a knob only if none of the ignore keywords occurs in its name
    # (replaces the original manual counter loop with the all() idiom).
    return sorted(
        k for k in candidates
        if all(keyword not in k for keyword in KNOB_IGNORE_KEYWORDS)
    )
def get_dag():
    """Return the node graph (DAG) the mouse cursor is currently over.

    Reads the window title of the widget under the cursor; an empty
    context string is taken to mean the root Node Graph, anything else
    is treated as the name of a group node to enter.
    """
    app = QtWidgets.QApplication
    pos = QtGui.QCursor.pos()
    widget = app.widgetAt(pos)
    # Title appears to look like "<group name> Node Graph"; empty prefix == root -- confirm.
    context = widget.parent().windowTitle().split('Node Graph')[0].strip()
    print(context)  # NOTE(review): debug print left in place -- confirm before removing
    return nuke.root() if context == '' else nuke.toNode(context)
# ------------------------------------------------------------------------------
# Instancing
# ------------------------------------------------------------------------------
# Module-level singleton; Core_SetLabel is defined in the "Core Class"
# section of this module.
SetLabel = Core_SetLabel()
| 25.126126 | 103 | 0.573682 |
3a6bbdcced64c871e2fdd5b7e14da08f29defe31 | 897 | py | Python | account/migrations/0003_auto_20161110_2135.py | fitahol/fitahol | ce84dc909aa98f2dc7594ef26568e015cbfe0e94 | [
"MIT"
] | 2 | 2017-02-20T14:11:30.000Z | 2017-06-11T16:10:33.000Z | account/migrations/0003_auto_20161110_2135.py | fitahol/fitahol | ce84dc909aa98f2dc7594ef26568e015cbfe0e94 | [
"MIT"
] | null | null | null | account/migrations/0003_auto_20161110_2135.py | fitahol/fitahol | ce84dc909aa98f2dc7594ef26568e015cbfe0e94 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-11-10 21:35
from __future__ import unicode_literals
from django.db import migrations, models
| 27.181818 | 102 | 0.585284 |
3a6c6afbecc178b754f00e36139090ce170c777c | 780 | py | Python | imgur_stuff.py | djs2022/DataEntrySite | aac8e71fe0a8b159113b1488cbe7a8a7e641bf1d | [
"MIT"
] | null | null | null | imgur_stuff.py | djs2022/DataEntrySite | aac8e71fe0a8b159113b1488cbe7a8a7e641bf1d | [
"MIT"
] | null | null | null | imgur_stuff.py | djs2022/DataEntrySite | aac8e71fe0a8b159113b1488cbe7a8a7e641bf1d | [
"MIT"
] | null | null | null | import requests
import os
| 27.857143 | 77 | 0.574359 |
3a6d77c44f6c1309b10cae742c418b58169828c7 | 4,489 | py | Python | roles/tox/library/tox_parse_output.py | g-chauvel/zuul-jobs | 7ea241a626f2f2e05d4aeb8cf0328d22736b1f0f | [
"Apache-2.0"
] | null | null | null | roles/tox/library/tox_parse_output.py | g-chauvel/zuul-jobs | 7ea241a626f2f2e05d4aeb8cf0328d22736b1f0f | [
"Apache-2.0"
] | null | null | null | roles/tox/library/tox_parse_output.py | g-chauvel/zuul-jobs | 7ea241a626f2f2e05d4aeb8cf0328d22736b1f0f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2018 Red Hat
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: tox_parse_output
short_description: Parses the output of tox looking for per-line comments
author: Monty Taylor (@mordred)
description:
- Looks for output from the tox command to find content that could be
returned as inline comments.
requirements:
- "python >= 3.5"
options:
tox_output:
description:
- Output from the tox command run
required: true
type: str
'''
import os
import re
from ansible.module_utils.basic import AnsibleModule
ANSI_RE = re.compile(r'(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]')
PEP8_RE = re.compile(r"^(.*):(\d+):(\d+): (.*)$")
SPHINX_RE = re.compile(r"^([^:]*):([\d]+):(\w.+)$")
matchers = [
pep8_matcher,
sphinx_matcher,
]
def extract_line_comment(line):
"""
Extracts line comment data from a line using multiple matchers.
"""
file_path = None
start_line = None
message = None
for matcher in matchers:
file_path, start_line, message = matcher(line)
if file_path:
message = ANSI_RE.sub('', message)
break
return file_path, start_line, message
if __name__ == '__main__':
main()
| 28.775641 | 74 | 0.642237 |
3a6f8d05144479257560ebcab7dfac73539e7dff | 4,199 | py | Python | rpyc/utils/authenticators.py | fruch/rpyc | 8db3fdcef2272d468aca562465279370d075be72 | [
"MIT"
] | null | null | null | rpyc/utils/authenticators.py | fruch/rpyc | 8db3fdcef2272d468aca562465279370d075be72 | [
"MIT"
] | null | null | null | rpyc/utils/authenticators.py | fruch/rpyc | 8db3fdcef2272d468aca562465279370d075be72 | [
"MIT"
] | null | null | null | """
authenticators: the server instance accepts an authenticator object,
which is basically any callable (i.e., a function) that takes the newly
connected socket and "authenticates" it.
the authenticator should return a socket-like object with its associated
credentials (a tuple), or raise AuthenticationError if it fails.
a very trivial authenticator might be
def magic_word_authenticator(sock):
if sock.recv(5) != "Ma6ik":
raise AuthenticationError("wrong magic word")
return sock, None
s = ThreadedServer(...., authenticator = magic_word_authenticator)
your authenticator can return any socket-like object. for instance, it may
authenticate the client and return a TLS/SSL-wrapped socket object that
encrypts the transport.
the credentials returned alongside with the new socket can be any object.
it will be stored in the rpyc connection configruation under the key
"credentials", and may be used later by the service logic. if no credentials
are applicable, just return None as in the example above.
rpyc includes integration with tlslite, a TLS/SSL library:
the VdbAuthenticator class authenticates clients based on username-password
pairs.
"""
import os
import sys
import anydbm
from rpyc.lib import safe_import
tlsapi = safe_import("tlslite.api")
ssl = safe_import("ssl")
def set_user(self, username, password):
self.vdb[username] = self.vdb.makeVerifier(username, password, self.BITS)
def del_user(self, username):
del self.vdb[username]
def list_users(self):
return self.vdb.keys()
def __call__(self, sock):
sock2 = tlsapi.TLSConnection(sock)
sock2.fileno = lambda fd = sock.fileno(): fd # tlslite omitted fileno
try:
sock2.handshakeServer(verifierDB = self.vdb)
except Exception:
ex = sys.exc_info()[1]
raise AuthenticationError(str(ex))
return sock2, sock2.allegedSrpUsername
| 32.550388 | 92 | 0.639676 |
3a71c85b49b297a21ae96cfc5a938c33c9b45b83 | 1,156 | py | Python | kratos/apps/configuration/migrations/0001_initial.py | cipher-ops/backend-kts | 7ea54d7f56bcb0da54b901ac8f3cfbfbb0b12319 | [
"MIT"
] | 1 | 2020-11-30T09:53:40.000Z | 2020-11-30T09:53:40.000Z | kratos/apps/configuration/migrations/0001_initial.py | cipher-ops/backend-kts | 7ea54d7f56bcb0da54b901ac8f3cfbfbb0b12319 | [
"MIT"
] | null | null | null | kratos/apps/configuration/migrations/0001_initial.py | cipher-ops/backend-kts | 7ea54d7f56bcb0da54b901ac8f3cfbfbb0b12319 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-09-21 06:44
from django.db import migrations, models
import django.db.models.deletion
| 35.030303 | 132 | 0.583045 |
3a734bc1e70aa19debbc9af66f403f4c7634a66a | 2,823 | py | Python | events/migrations/0002_auto_20180325_0035.py | eforsell/eurovisiontippning | 1a26dac0e06c5eef9a752ea6f68ad9b9567b6261 | [
"MIT"
] | null | null | null | events/migrations/0002_auto_20180325_0035.py | eforsell/eurovisiontippning | 1a26dac0e06c5eef9a752ea6f68ad9b9567b6261 | [
"MIT"
] | null | null | null | events/migrations/0002_auto_20180325_0035.py | eforsell/eurovisiontippning | 1a26dac0e06c5eef9a752ea6f68ad9b9567b6261 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.3 on 2018-03-24 23:35
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
| 34.426829 | 139 | 0.557563 |
3a737b4d0699668e68dfd11d0393dc995f8e0e88 | 574 | py | Python | python-code/transformer-sample/basic/sentiment_analysis.py | 87-midnight/NewbieInJava | ba84153c6b3a382e620c4df7892d653be2e1a607 | [
"MIT"
] | null | null | null | python-code/transformer-sample/basic/sentiment_analysis.py | 87-midnight/NewbieInJava | ba84153c6b3a382e620c4df7892d653be2e1a607 | [
"MIT"
] | 2 | 2019-10-22T08:21:09.000Z | 2019-10-22T08:21:09.000Z | python-code/transformer-sample/basic/sentiment_analysis.py | 87-midnight/NewbieInJava | ba84153c6b3a382e620c4df7892d653be2e1a607 | [
"MIT"
] | null | null | null | #
import torch
from transformers import BertTokenizer, BertForSequenceClassification
torch.manual_seed(0)
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForSequenceClassification.from_pretrained("bert-base-uncased", problem_type="multi_label_classification", num_labels=2)
inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss
outputs = model(**inputs, labels=labels)
loss = outputs.loss
logits = outputs.logits
list(logits.shape) | 41 | 131 | 0.801394 |
3a750f402f6cc67161071bf3b54785b45c55a45d | 1,293 | py | Python | examples/tutorial/parallel_amuse_script.py | rknop/amuse | 85d5bdcc29cfc87dc69d91c264101fafd6658aec | [
"Apache-2.0"
] | 131 | 2015-06-04T09:06:57.000Z | 2022-02-01T12:11:29.000Z | examples/tutorial/parallel_amuse_script.py | rknop/amuse | 85d5bdcc29cfc87dc69d91c264101fafd6658aec | [
"Apache-2.0"
] | 690 | 2015-10-17T12:18:08.000Z | 2022-03-31T16:15:58.000Z | examples/tutorial/parallel_amuse_script.py | rieder/amuse | 3ac3b6b8f922643657279ddee5c8ab3fc0440d5e | [
"Apache-2.0"
] | 102 | 2015-01-22T10:00:29.000Z | 2022-02-09T13:29:43.000Z | import time
import numpy
from amuse.lab import Huayno
from amuse.lab import Hermite
from amuse.lab import nbody_system
from amuse.lab import new_king_model
from matplotlib import pyplot
# Benchmark: time a small N-body integration for 1..nproc worker processes and
# plot CPU time against the number of processes.
# NOTE(review): gravity_minimal(bodies, t_end, n_proc) is not defined in this
# excerpt -- presumably defined elsewhere in the original file and expected to
# return the CPU time of one integration run; confirm against the full file.
# BUGFIX: the guard used to read `if __name__ in ('__main__'):`.  Because
# ('__main__') is a parenthesised string -- not a one-element tuple -- that was
# a substring test, which would also fire for e.g. a module named "main".
if __name__ == '__main__':
    N = 1024      # number of stars in the King model
    W0 = 7.0      # dimensionless King model depth parameter
    t_end = 0.1 | nbody_system.time   # AMUSE units: `|` attaches the unit
    bodies = new_king_model(N, W0)
    bodies.scale_to_standard()
    nproc = 6
    proc = numpy.arange(1, nproc + 1, 1)
    tcpu = []
    for npi in proc:
        tcpu.append(gravity_minimal(bodies, t_end, npi))
    pyplot.scatter(proc, tcpu)
    pyplot.xlabel("n proc")
    pyplot.ylabel("CPU time [s]")
    pyplot.savefig("fig_parallel_performance_N1k_Hermite.pdf")
| 25.352941 | 71 | 0.664346 |
3a75e62e27fdd3a634c7ec673852b4fb62407232 | 311 | py | Python | modules/random_cat.py | ChaseBosman/chatbot | a39e655e6d586fa596471cd20617dff5f9795a96 | [
"Unlicense"
] | 3 | 2019-10-19T12:07:06.000Z | 2020-10-05T17:24:56.000Z | modules/random_cat.py | ChaseBosman/chatbot | a39e655e6d586fa596471cd20617dff5f9795a96 | [
"Unlicense"
] | 17 | 2019-10-05T12:30:17.000Z | 2021-07-25T20:06:33.000Z | modules/random_cat.py | ChaseBosman/chatbot | a39e655e6d586fa596471cd20617dff5f9795a96 | [
"Unlicense"
] | 26 | 2018-10-19T05:43:12.000Z | 2020-10-02T05:27:48.000Z | import requests
import json
| 25.916667 | 74 | 0.614148 |
3a788b9c9eab36584491247515f283acec64a519 | 407 | py | Python | Pycharm_Project/0414/2.py | duanjiefei/Python-Study | 88e17a3eab9112a2515f09b2bcf4e032059cc28b | [
"Apache-2.0"
] | null | null | null | Pycharm_Project/0414/2.py | duanjiefei/Python-Study | 88e17a3eab9112a2515f09b2bcf4e032059cc28b | [
"Apache-2.0"
] | null | null | null | Pycharm_Project/0414/2.py | duanjiefei/Python-Study | 88e17a3eab9112a2515f09b2bcf4e032059cc28b | [
"Apache-2.0"
] | null | null | null | #
import time
# Decorator practice notes: applying @timer is equivalent to
#   test = timer(test)
#   test()
# NOTE(review): neither `timer` nor `test` is defined in this excerpt -- their
# definitions appear to have been stripped from this dump, so the call below
# would raise NameError as shown.  Confirm against the original file.
test()
#@timer == test=timer(test) | 20.35 | 67 | 0.619165 |
3a794a8245df8781f9b1fc1f4f5373e8c9e7411d | 3,255 | py | Python | docs/source/conf.py | DanielWarfield1/nClusterFramework | 21b68226ab1ffa810281a261feabe1d360a5146d | [
"MIT"
] | null | null | null | docs/source/conf.py | DanielWarfield1/nClusterFramework | 21b68226ab1ffa810281a261feabe1d360a5146d | [
"MIT"
] | 2 | 2021-12-15T05:36:01.000Z | 2021-12-20T01:15:45.000Z | docs/source/conf.py | DanielWarfield1/nClusterFramework | 21b68226ab1ffa810281a261feabe1d360a5146d | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# Only a selection of the most common options is configured here; the full
# list is documented at:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------
# Make the project package importable so sphinx.ext.autodoc can locate the
# modules two directory levels above this file.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))

# -- Project information -----------------------------------------------------
project = 'nClusterFramework'
copyright = '2021, Daniel Warfield'
author = 'Daniel Warfield'

# -- General configuration ---------------------------------------------------
# Sphinx extension modules, given as strings (built-in 'sphinx.ext.*' or
# custom ones).
extensions = [
    'sphinx.ext.autodoc',      # core auto-documentation from docstrings
    'sphinx.ext.autosummary',  # summary tables for modules/classes/methods
    'sphinx.ext.intersphinx',  # cross-links into other projects' docs
    'sphinx.ext.viewcode',     # links from docs to the Python source
]
autosummary_generate = True        # turn on sphinx.ext.autosummary
autoclass_content = "both"         # include __init__ docs in class summaries
html_show_sourcelink = False       # hide 'view source' link in HTML pages
autodoc_inherit_docstrings = True  # fall back to base-class docstrings
set_type_checking_flag = True      # allow 'expensive' typing-only imports
nbsphinx_allow_errors = True       # keep building through notebook errors
add_module_names = False           # drop module prefixes from signatures

# Template directories, relative to this file.
templates_path = ['_templates']

# Patterns (relative to the source directory) of files and directories to
# skip when looking for sources; also applied to html_static_path and
# html_extra_path.
exclude_patterns = []

# -- Options for HTML output -------------------------------------------------
# Read the Docs theme.  The RTD build servers provide the theme themselves,
# so it is imported and configured only for local builds.
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd:
    import sphinx_rtd_theme
    html_theme = "sphinx_rtd_theme"
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_css_files = ["readthedocs-custom.css"]  # Override some CSS settings

# Pydata theme (alternative configuration, kept for reference):
#html_theme = "pydata_sphinx_theme"
#html_logo = "_static/logo-company.png"
#html_theme_options = { "show_prev_next": False}
#html_css_files = ['pydata-custom.css']

# Custom static files (such as style sheets) live here, relative to this
# directory.  They are copied after the builtin static files, so a file
# named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] | 44.589041 | 97 | 0.710906 |
3a79851a367aea689a1293265d02727ae30bb330 | 7,877 | py | Python | cvstudio/view/widgets/common/treeview_model.py | haruiz/PytorchCvStudio | ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef | [
"MIT"
] | 32 | 2019-10-31T03:10:52.000Z | 2020-12-23T11:50:53.000Z | cvstudio/view/widgets/common/treeview_model.py | haruiz/CvStudio | ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef | [
"MIT"
] | 19 | 2019-10-31T15:06:05.000Z | 2020-06-15T02:21:55.000Z | cvstudio/view/widgets/common/treeview_model.py | haruiz/PytorchCvStudio | ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef | [
"MIT"
] | 8 | 2019-10-31T03:32:50.000Z | 2020-07-17T20:47:37.000Z | import itertools
import typing
from typing import Any
from PyQt5 import QtCore
from PyQt5.QtCore import QModelIndex, pyqtSignal, QObject
from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import QAbstractItemDelegate, QWidget, QStyleOptionViewItem, QSpinBox
class CustomModelSignals(QObject):
    # Qt signal emitted when a node's data changes.
    # NOTE(review): the first argument type, CustomNode, is not defined in
    # this excerpt (its class appears to have been stripped from this dump);
    # the remaining arguments are an int and two strings -- presumably a
    # column index plus old/new values.  Confirm against the full file.
    data_changed = pyqtSignal(CustomNode, int, str, str)
class WidgetDelegate(QAbstractItemDelegate):
class CustomModel(QtCore.QAbstractItemModel):
| 30.296154 | 116 | 0.612416 |
3a7af5581758219d8326f2091b7b6047cf305d66 | 1,761 | py | Python | pyrallel/tests/test_map_reduce.py | usc-isi-i2/paralyzer | 2991e9e74f17e35d8c7d9f86d57c5c7f7a311915 | [
"MIT"
] | 13 | 2019-07-02T22:41:46.000Z | 2022-02-20T13:30:00.000Z | pyrallel/tests/test_map_reduce.py | usc-isi-i2/paralyzer | 2991e9e74f17e35d8c7d9f86d57c5c7f7a311915 | [
"MIT"
] | 14 | 2019-07-03T18:04:25.000Z | 2021-05-20T20:45:33.000Z | pyrallel/tests/test_map_reduce.py | usc-isi-i2/paralyzer | 2991e9e74f17e35d8c7d9f86d57c5c7f7a311915 | [
"MIT"
] | 3 | 2020-02-12T21:54:17.000Z | 2020-08-24T20:41:18.000Z | import multiprocessing as mp
from pyrallel.map_reduce import MapReduce
# Worker-count heuristic for the MapReduce tests: half of the machine's CPUs,
# but never fewer than 2 so parallel code paths are actually exercised.
# (`mp` is `multiprocessing`, imported just above this excerpt.)
NUM_OF_PROCESSOR = max(2, int(mp.cpu_count() / 2))
| 21.216867 | 53 | 0.540034 |
3a7bafa3c7ab3354d60a1fcd0376c7ade47cb21d | 707 | py | Python | evtx_to_dataframe.py | esua/evtx_to_dataframe | 390bf470e92092e66827373ed7e8b012a4fe94f6 | [
"Apache-2.0"
] | null | null | null | evtx_to_dataframe.py | esua/evtx_to_dataframe | 390bf470e92092e66827373ed7e8b012a4fe94f6 | [
"Apache-2.0"
] | null | null | null | evtx_to_dataframe.py | esua/evtx_to_dataframe | 390bf470e92092e66827373ed7e8b012a4fe94f6 | [
"Apache-2.0"
] | null | null | null | import argparse
import Evtx.Evtx as evtx
import pandas as pd
import xmltodict
import re
# Command-line interface: one positional argument naming the .evtx file.
parser = argparse.ArgumentParser(description="Convert Windows EVTX event log file to DataFrame.")
parser.add_argument("evtx", type=str, help="Path to the Windows EVTX event log file")
args = parser.parse_args()
# BUGFIX: use non-greedy (.+?) instead of greedy (.+).  With greedy matching,
# a line containing more than one <Data> element (or a value containing '">')
# would be collapsed into a single bogus element; non-greedy matching rewrites
# each element separately.  The pattern is compiled once, outside the loop.
# NOTE(review): element names containing spaces or XML-special characters
# would still yield invalid tags; assumed not to occur in EVTX EventData.
data_elem_re = re.compile(r'<Data Name="(.+?)">(.+?)</Data>')
with evtx.Evtx(args.evtx) as log:
    data_dicts = []
    for record in log.records():
        elem = record.xml()
        # Rewrite <Data Name="X">v</Data> as <X>v</X> so xmltodict produces
        # one key per EventData field instead of a list of <Data> nodes.
        elem = data_elem_re.sub(r'<\1>\2</\1>', elem)
        data_dict = xmltodict.parse(elem)  # convert xml to dict
        data_dicts.append(data_dict)
# Flatten each nested per-record dict into one DataFrame row.
df = pd.json_normalize(data_dicts)  # convert dict to pd.DataFrame
print(df)
| 33.666667 | 110 | 0.693069 |
3a7bc36048999e619539f1b4ea6519f544722e26 | 562 | py | Python | test/sandwich.py | DynamicCai/Dynamic-Explorer | c909206b3db52f76f23499b1cb43520d3475b14e | [
"MIT"
] | null | null | null | test/sandwich.py | DynamicCai/Dynamic-Explorer | c909206b3db52f76f23499b1cb43520d3475b14e | [
"MIT"
] | null | null | null | test/sandwich.py | DynamicCai/Dynamic-Explorer | c909206b3db52f76f23499b1cb43520d3475b14e | [
"MIT"
] | null | null | null |
# Menu data for a simple sandwich-ordering exercise.
# Meat choices: menu number -> {name: price}.
sandwich_meat = {
    1: {'chicken': 3},
    2: {'beef': 5},
    3: {'pork': 4},
    4: {'bacon': 4},
    5: {'sausage': 4},
    6: {'omelette': 2},
    7: {'none': 0},
}
# Free choices (no surcharge).
sandwich_sauce = ['mayonnaise', 'ketchup', 'yellowmustard', 'blackpeppersauce', 'cheese', 'none']
sandwich_vegetable = ['lettuce', 'slicedtomatoes', 'slicedpickles', 'potatosalad', 'redcabbage', 'none']
# Extras: menu number -> {name: price}.  'doublemeat' is priced 'X2' (doubles
# the meat price) rather than adding a fixed amount.
sandwich_extra = {
    1: {'extracheese': 3},
    2: {'extrapickles': 1},
    3: {'doublemeat': 'X2'},
    4: {'none': 0},
}
# BUGFIX: the original read `ordered_dish={'m7eat':}`, which is a syntax
# error (typo'd key and missing value).  Start the order as an empty
# category -> chosen-item mapping instead.
ordered_dish = {}
print('meat', 'sauce', 'vegetable', 'extra')
| 18.733333 | 101 | 0.565836 |
3a7c85d6a1879df3d91cd853104103d5c1ce8afa | 1,553 | py | Python | paprotka/feature/cepstral.py | michalsosn/paprotka | d6079eefbade2cb8be5896777a7d50ac968d42ec | [
"MIT"
] | 1 | 2019-10-29T04:14:40.000Z | 2019-10-29T04:14:40.000Z | paprotka/feature/cepstral.py | michalsosn/paprotka | d6079eefbade2cb8be5896777a7d50ac968d42ec | [
"MIT"
] | null | null | null | paprotka/feature/cepstral.py | michalsosn/paprotka | d6079eefbade2cb8be5896777a7d50ac968d42ec | [
"MIT"
] | null | null | null | import math
import numpy as np
from scipy import signal, fftpack
| 33.042553 | 90 | 0.675467 |
3a7d8a539d82fbecac85da845cd748fe400b1a12 | 2,688 | py | Python | arelle/plugin/unpackSecEisFile.py | DataFinnovation/Arelle | d4bf45f56fc9249f75ab22e6217dbe55f0510841 | [
"Apache-2.0"
] | 292 | 2015-01-27T03:31:51.000Z | 2022-03-26T07:00:05.000Z | arelle/plugin/unpackSecEisFile.py | DataFinnovation/Arelle | d4bf45f56fc9249f75ab22e6217dbe55f0510841 | [
"Apache-2.0"
] | 94 | 2015-04-18T23:03:00.000Z | 2022-03-28T17:24:55.000Z | arelle/plugin/unpackSecEisFile.py | DataFinnovation/Arelle | d4bf45f56fc9249f75ab22e6217dbe55f0510841 | [
"Apache-2.0"
] | 200 | 2015-01-13T03:55:47.000Z | 2022-03-29T12:38:56.000Z | '''
Unpack SEC EIS File is an example of a plug-in to the GUI menu
that will save the unpacked contents of an SEC EIS File in a directory.
(c) Copyright 2012 Mark V Systems Limited, All rights reserved.
'''
# Arelle plug-in registration metadata: Arelle reads this dict to discover the
# plug-in's identity and its GUI mount points.
__pluginInfo__ = {
    'name': 'Unpack SEC EIS File',
    'version': '0.9',
    'description': "This plug-in unpacks the contents of an SEC EIS file.",
    'license': 'Apache-2',
    'author': 'Mark V Systems Limited',
    'copyright': '(c) Copyright 2012 Mark V Systems Limited, All rights reserved.',
    # classes of mount points (required)
    # NOTE(review): unpackSecEisMenuEntender is not defined in this excerpt --
    # its definition appears to have been stripped from this dump; presumably
    # the Tools-menu hook function.  Confirm against the original file.
    'CntlrWinMain.Menu.Tools': unpackSecEisMenuEntender,
}
| 43.354839 | 134 | 0.604911 |
3a7e02c43c6ebf2859a5eb96f826707b1b0a7b33 | 2,251 | py | Python | fpcalc.py | johnlawsharrison/pyacoustid | 55321b316f09e782a1c0914826419be799908e01 | [
"MIT"
] | 203 | 2016-01-18T14:05:49.000Z | 2022-03-25T04:04:42.000Z | fpcalc.py | johnlawsharrison/pyacoustid | 55321b316f09e782a1c0914826419be799908e01 | [
"MIT"
] | 41 | 2016-03-08T10:28:14.000Z | 2021-11-26T20:53:15.000Z | fpcalc.py | johnlawsharrison/pyacoustid | 55321b316f09e782a1c0914826419be799908e01 | [
"MIT"
] | 56 | 2016-01-09T04:22:40.000Z | 2022-01-29T16:01:39.000Z | #!/usr/bin/env python
# This file is part of pyacoustid.
# Copyright 2012, Lukas Lalinsky.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Simple script for calculating audio fingerprints, using the same
arguments/output as the fpcalc utility from Chromaprint."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import argparse
import sys
import acoustid
import chromaprint
if __name__ == '__main__':
    # Script entry point.
    # NOTE(review): main() is not defined in this excerpt -- the body of the
    # fpcalc script appears to have been stripped from this dump, so as shown
    # this call would raise NameError.  Confirm against the original file.
    main()
| 34.106061 | 77 | 0.660595 |
3a7e4975152b719956030d04fd87b6aff71f9b39 | 203 | py | Python | app/views/dashboard/leadership/__init__.py | Wern-rm/raton.by | 68f862f2bc0551bf2327e9d6352c0cde93f45301 | [
"MIT"
] | null | null | null | app/views/dashboard/leadership/__init__.py | Wern-rm/raton.by | 68f862f2bc0551bf2327e9d6352c0cde93f45301 | [
"MIT"
] | null | null | null | app/views/dashboard/leadership/__init__.py | Wern-rm/raton.by | 68f862f2bc0551bf2327e9d6352c0cde93f45301 | [
"MIT"
] | null | null | null | from app.views.dashboard.leadership.index import leaderships
from app.views.dashboard.leadership.delete import leadership_delete
from app.views.dashboard.leadership.activation import leadership_activated | 67.666667 | 74 | 0.8867 |
3a7ec9858eb7869bba6e4129ded3a123b302b0e2 | 3,071 | py | Python | Learning/button groups.py | atharva0300/PyQt5-Practice | 0feacca6518190646a345ce2ea75e071e7861ac5 | [
"MIT"
] | null | null | null | Learning/button groups.py | atharva0300/PyQt5-Practice | 0feacca6518190646a345ce2ea75e071e7861ac5 | [
"MIT"
] | null | null | null | Learning/button groups.py | atharva0300/PyQt5-Practice | 0feacca6518190646a345ce2ea75e071e7861ac5 | [
"MIT"
] | 1 | 2021-11-16T10:18:07.000Z | 2021-11-16T10:18:07.000Z | # Button Groups in Python
import PyQt5
from PyQt5.QtWidgets import QApplication, QHBoxLayout, QLabel, QButtonGroup, QMainWindow, QDialog, QPushButton, QVBoxLayout
import sys
from PyQt5 import QtGui
from PyQt5.QtGui import QFont, QPixmap
from PyQt5.QtCore import QSize
if __name__ == "__main__":
    # Create the Qt application, construct the top-level window, and start
    # the event loop; exit with the loop's return code.
    App = QApplication(sys.argv)
    # NOTE(review): `window` is not defined in this excerpt -- presumably a
    # QMainWindow subclass whose definition was stripped from this dump; as
    # shown, this call would raise NameError.  Confirm against the full file.
    window= window()
    sys.exit(App.exec())
| 28.700935 | 124 | 0.581895 |
3a7f65074a8ce42ce2f4be7f8b8b5034567b834f | 20,126 | py | Python | ct-tests/lib/crus_integration_test.py | Cray-HPE/cray-crus | 6643aa3eb3debe5cbe0088f6a30b7e56ca1b1f17 | [
"MIT"
] | null | null | null | ct-tests/lib/crus_integration_test.py | Cray-HPE/cray-crus | 6643aa3eb3debe5cbe0088f6a30b7e56ca1b1f17 | [
"MIT"
] | 1 | 2022-03-02T21:06:21.000Z | 2022-03-04T17:32:14.000Z | ct-tests/lib/crus_integration_test.py | Cray-HPE/cray-crus | 6643aa3eb3debe5cbe0088f6a30b7e56ca1b1f17 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# MIT License
#
# (C) Copyright 2020-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""
CRUS integration test
See crus_integration_test/argparse.py for command line usage.
### SETUP ###
1 Generate map of xnames, nids, and hostnames for target nodes (by default,
all computes)
2 Validate they work with the specified min/max node and step values.
3 Lookup BOS session template
4 Create empty starting, upgrading, and failed HSM groups
5 Create new session template for all target nodes
6 Create new session templates for the upgrading group
7 Use BOS to reboot all target nodes to new BOS session template
### TEST 1 ###
8 Put 1 node into starting group
9 Create CRUS session
10 Verify all goes well & delete CRUS session
### TEST 2 ###
11 Move all nodes into starting group.
Repeat steps 9-10, with step size that results in at least 2 steps
### TEST 3 ###
12 Select 2 nodes
13 Start slurm workload on 1 of them
14 Create CRUS session
15 Verify that CRUS waits while the slurm workloads run
16 Stop the slurm workloads
17 Verify that all goes well & delete CRUS session
### RESTORE NODES ###
18 Create CRUS session to reboot all nodes to base slurm template
19 Verify that all goes well & delete CRUS session
### CLEANUP ###
20 Delete new templates
21 Delete custom vcs branches
22 Delete new hsm groups
"""
from crus_integration_test.argparse import parse_args
from crus_integration_test.crus import verify_crus_waiting_for_quiesce
from crus_integration_test.hsm import create_hsm_groups
from crus_integration_test.slurm import complete_slurm_job, start_slurm_job, \
verify_initial_slurm_state
from crus_integration_test.utils import bos_reboot_nodes, create_bos_session_templates, \
monitor_crus_session, \
verify_results_of_crus_session
from common.bos import bos_session_template_validate_cfs, \
list_bos_session_templates, list_bos_sessions
from common.bosutils import delete_bos_session_templates, \
delete_cfs_configs, \
delete_hsm_groups, \
delete_vcs_repo_and_org
from common.cfs import describe_cfs_config
from common.crus import create_crus_session, delete_crus_session
from common.helpers import CMSTestError, create_tmpdir, debug, error_exit, exit_test, \
init_logger, info, log_exception_error, raise_test_exception_error, \
remove_tmpdir, section, subtest, warn
from common.hsm import set_hsm_group_members
from common.k8s import get_csm_private_key
from common.utils import get_compute_nids_xnames, validate_node_hostnames
from common.vcs import create_and_clone_vcs_repo
import random
import sys
TEST_NAME = "crus_integration_test"
def do_subtest(subtest_name, subtest_func, **subtest_kwargs):
    """
    Announce and run a single subtest.

    Logs subtest_name via subtest(), then invokes subtest_func with
    subtest_kwargs and returns whatever it returns.  A CMSTestError is
    propagated untouched; any other exception is routed through
    raise_test_exception_error() so it is reported as a subtest failure.
    """
    subtest(subtest_name)
    try:
        result = subtest_func(**subtest_kwargs)
    except CMSTestError:
        raise
    except Exception as exc:
        raise_test_exception_error(exc, "%s subtest" % subtest_name)
    else:
        return result
def do_test(test_variables):
    """
    Main test body. Execute each subtest in turn.

    test_variables is a mutable dict shared with the caller.  Keys read here
    include "use_api", "nids", "groups", "xnames", "max_step_size",
    "template", and "test_hsm_groups".  Keys written or updated here include
    "nids", "xnames", "base_cfs_config_name", "tmpdir", "test_vcs_org",
    "test_vcs_repo", and "vcs_repo_dir"; "test_template_names" (and,
    presumably, "test_cfs_config_names") are filled in by
    create_bos_session_templates.  Any subtest failure surfaces as an
    exception from do_subtest.
    """
    # =============================
    # =============================
    # SETUP
    # =============================
    # =============================
    use_api = test_variables["use_api"]
    if use_api:
        info("Using API")
    else:
        info("Using CLI")
    # We don't need the CSM private key until it comes time to ssh into the compute nodes, but we'd
    # rather know up front if this fails, to save time
    do_subtest("Get CSM private key (for later use to ssh to computes)", get_csm_private_key)
    nid_to_xname, xname_to_nid = do_subtest("Find compute nids & xnames",
                                            get_compute_nids_xnames, use_api=use_api,
                                            nids=test_variables["nids"],
                                            groups=test_variables["groups"],
                                            xnames=test_variables["xnames"],
                                            min_required=3)
    test_variables["nids"] = sorted(list(nid_to_xname.keys()))
    test_variables["xnames"] = sorted(list(nid_to_xname.values()))
    nids = test_variables["nids"]
    xnames = test_variables["xnames"]
    info("nids: %s" % str(nids))
    # One randomly chosen node acts as the slurm controller; the rest are the
    # worker nodes that the CRUS sessions will actually upgrade.
    slurm_nid = random.choice(nids)
    slurm_xname = nid_to_xname[slurm_nid]
    test_nids = [ n for n in nids if n != slurm_nid ]
    test_xnames = [ x for x in xnames if x != slurm_xname ]
    debug("Slurm controller: nid %d (xname %s)" % (slurm_nid, slurm_xname))
    debug("Worker nodes:")
    for test_nid in sorted(test_nids):
        debug("    nid %d (xname %s)" % (test_nid, nid_to_xname[test_nid]))
    max_step_size = len(nids)
    if test_variables["max_step_size"]:
        max_step_size = min(max_step_size, test_variables["max_step_size"])
    do_subtest("Validate node hostnames", validate_node_hostnames, nid_to_xname=nid_to_xname)
    template_objects = do_subtest("List all BOS session templates", list_bos_session_templates,
                                  use_api=use_api)
    info("BOS session template: %s" % test_variables["template"])
    if test_variables["template"] not in template_objects:
        error_exit("No BOS session template found with name %s" % test_variables["template"])
    else:
        slurm_template_name = test_variables["template"]
    cfs_config_name = do_subtest("Get CFS configuration name from %s BOS session template" % slurm_template_name,
                                 bos_session_template_validate_cfs, bst=template_objects[slurm_template_name])
    info("CFS configuration name in %s is %s" % (slurm_template_name, cfs_config_name))
    test_variables["base_cfs_config_name"] = cfs_config_name
    do_subtest("Validate CFS configuration %s" % cfs_config_name,
               describe_cfs_config, use_api=use_api, name=cfs_config_name)
    test_hsm_groups = test_variables["test_hsm_groups"]
    do_subtest("Create hsm groups", create_hsm_groups, use_api=use_api, test_hsm_groups=test_hsm_groups)
    tmpdir = do_subtest("Create temporary directory", create_tmpdir)
    test_variables["tmpdir"] = tmpdir
    # Always want to make sure that we have a template which does not match any of the others
    # for both cfs branch and kernel parameters.
    num_test_templates = 3
    test_vcs_org = "crus-integration-test-org-%d" % random.randint(0,9999999)
    test_vcs_repo = "crus-integration-test-repo-%d" % random.randint(0,9999999)
    test_variables["test_vcs_org"] = test_vcs_org
    test_variables["test_vcs_repo"] = test_vcs_repo
    vcs_repo_dir = do_subtest("Create and clone VCS repo %s in org %s" % (test_vcs_repo, test_vcs_org),
                              create_and_clone_vcs_repo, orgname=test_vcs_org, reponame=test_vcs_repo,
                              testname=TEST_NAME, tmpdir=tmpdir)
    test_variables["vcs_repo_dir"] = vcs_repo_dir
    do_subtest("Create modified BOS session templates",
               create_bos_session_templates,
               num_to_create=num_test_templates,
               use_api=use_api,
               template_objects=template_objects,
               test_variables=test_variables,
               xname_to_nid=xname_to_nid)
    test_template_names = test_variables["test_template_names"]
    base_test_template, test_template1, test_template2 = test_template_names
    debug("Base test template: %s" % base_test_template)
    debug("Test template 1: %s" % test_template1)
    debug("Test template 2: %s" % test_template2)
    # Use BOS to reboot all target nodes to new BOS session template
    # xname_template_map tracks, for every xname, which template it is
    # currently expected to be booted with; it is updated after each session.
    xname_template_map = dict()
    do_subtest("Reboot all target nodes to %s template" % base_test_template, bos_reboot_nodes,
               template_name=base_test_template, use_api=use_api, template_objects=template_objects,
               xname_to_nid=xname_to_nid, xname_template_map=xname_template_map)
    # Verify slurm reports all test nodes as ready
    do_subtest("Verify slurm reports test nodes as ready", verify_initial_slurm_state,
               use_api=use_api, slurm_control_xname=slurm_xname, worker_xnames=test_xnames,
               xname_to_nid=xname_to_nid)
    # Shared HSM group labels passed to every CRUS session created below.
    crus_session_hsm_groups = {
        "failed_label": test_hsm_groups["failed"],
        "starting_label": test_hsm_groups["starting"],
        "upgrading_label": test_hsm_groups["upgrading"] }
    def _set_starting_group(target_xnames):
        """
        Wrapper to call common.hsm.set_hsm_group_members to set our starting
        group's member list to equal the specified xnames
        """
        group_name = crus_session_hsm_groups["starting_label"]
        node_text = ", ".join(sorted(target_xnames))
        if len(target_xnames) > 5:
            info("Setting HSM group %s member list to: %s" % (group_name, node_text))
            subtest_text = "Setting HSM group %s member list to %d test nodes" % (group_name, len(target_xnames))
        else:
            subtest_text = "Setting HSM group %s member list to: %s" % (group_name, node_text)
        do_subtest(subtest_text, set_hsm_group_members, use_api=use_api, group_name=group_name, xname_list=target_xnames)
    def _create_crus_session(target_xnames, step_size, template_name):
        """
        First, makes a list of all current BOS sessions.
        Then creates a CRUS session with the specified values.
        The target_xnames list is just used for test logging purposes, to
        describe the CRUS session.
        Returns the session_id of the CRUS session, a
        dictionary of the CRUS session values, and the collected
        BOS session list.
        """
        bos_sessions = do_subtest("Getting list of BOS sessions before CRUS session is running",
                                  list_bos_sessions, use_api=use_api)
        info("BOS session list: %s" % ", ".join(bos_sessions))
        node_text = ", ".join(sorted(target_xnames))
        if len(target_xnames) > 5:
            info("Creating CRUS session with target nodes: %s" % node_text)
            node_text = "%d test nodes" % len(target_xnames)
        subtest_text = "Create CRUS session (template: %s, step size: %d, nodes: %s)" % (template_name, step_size, node_text)
        crus_session_values = {
            "use_api": use_api,
            "upgrade_step_size": step_size,
            "upgrade_template_id": template_name }
        crus_session_values.update(crus_session_hsm_groups)
        response_object = do_subtest(subtest_text, create_crus_session, **crus_session_values)
        crus_session_id = response_object["upgrade_id"]
        return crus_session_id, crus_session_values, bos_sessions
    def _wait_verify_delete_crus_session(crus_session_id, crus_session_values, target_xnames, bos_sessions):
        """
        Wait for CRUS session to be complete.
        Update the xname_template_map to reflect the new expected template for the nodes in the session.
        Verify that the CRUS session results look okay.
        Delete the CRUS session.
        """
        do_subtest("Wait for CRUS session %s to complete" % crus_session_id, monitor_crus_session,
                   use_api=use_api, upgrade_id=crus_session_id, expected_values=crus_session_values,
                   bos_sessions=bos_sessions)
        # Set new expected template for target xnames
        for xn in target_xnames:
            xname_template_map[xn] = crus_session_values["upgrade_template_id"]
        do_subtest("Verify results of CRUS session %s" % crus_session_id, verify_results_of_crus_session,
                   use_api=use_api, xname_template_map=xname_template_map, template_objects=template_objects,
                   xname_to_nid=xname_to_nid, target_xnames=list(target_xnames), **crus_session_hsm_groups)
        do_subtest("Delete CRUS session %s" % crus_session_id, delete_crus_session,
                   use_api=use_api, upgrade_id=crus_session_id, max_wait_for_completion_seconds=300)
    # =============================
    # =============================
    # TEST 1
    # =============================
    # =============================
    # Randomly pick 1 xname
    xn = random.choice(test_xnames)
    target_xnames = [xn]
    # Put it into starting HSM group
    _set_starting_group(target_xnames)
    # Pick random step size (since we're only dealing with 1 node, it doesn't matter)
    ssize = random.randint(1, 10000)
    # Create CRUS session
    crus_session_id, crus_session_values, bos_sessions = _create_crus_session(target_xnames, ssize, test_template1)
    # Wait for it to finish, make sure everything looks good, and delete it
    _wait_verify_delete_crus_session(crus_session_id, crus_session_values, target_xnames, bos_sessions)
    # =============================
    # =============================
    # TEST 2
    # =============================
    # =============================
    # Set starting group to all test nodes
    target_xnames = test_xnames
    _set_starting_group(target_xnames)
    # Set step size such that we get at least 2 steps
    # (ceil(len/2), clamped to the user-requested maximum step size)
    ssize = len(target_xnames) // 2
    if (len(target_xnames) % 2) != 0:
        ssize += 1
    ssize = min(ssize, max_step_size)
    # Create CRUS session
    crus_session_id, crus_session_values, bos_sessions = _create_crus_session(target_xnames, ssize, test_template2)
    # Wait for it to finish, make sure everything looks good, and delete it
    _wait_verify_delete_crus_session(crus_session_id, crus_session_values, target_xnames, bos_sessions)
    # =============================
    # =============================
    # TEST 3
    # =============================
    # =============================
    # Randomly select a node for the starting group
    xn = random.choice(test_xnames)
    target_xnames = [xn]
    _set_starting_group(target_xnames)
    # Pick random step size (since we're only dealing with 1 node, it doesn't matter)
    ssize = random.randint(1, 10000)
    # Start slurm workload on node
    slurm_job_id, slurm_job_stopfile = do_subtest("Start slurm workload on %s" % xn, start_slurm_job,
        slurm_control_xname=slurm_xname, worker_xname=xn, xname_to_nid=xname_to_nid, tmpdir=tmpdir)
    # Create CRUS session
    crus_session_id, crus_session_values, bos_sessions = _create_crus_session([xn], ssize, test_template1)
    # Verify that CRUS session is waiting for nodes to quiesce
    do_subtest("Verify CRUS session %s is waiting for nodes to quiesce" % crus_session_id,
               verify_crus_waiting_for_quiesce, use_api=use_api, crus_session_id=crus_session_id,
               expected_values=crus_session_values)
    # Stop slurm workload on node
    do_subtest("Stop slurm workload on %s" % xn, complete_slurm_job,
               slurm_control_xname=slurm_xname, worker_xname=xn,
               stopfile_name=slurm_job_stopfile, slurm_job_id=slurm_job_id)
    # Wait for CRUS session to finish, make sure everything looks good, and delete it
    _wait_verify_delete_crus_session(crus_session_id, crus_session_values, target_xnames, bos_sessions)
    # =============================
    # =============================
    # RESTORE NODES
    # =============================
    # =============================
    # Set starting group to all test nodes plus the node we've been using for slurm
    target_xnames = xnames
    _set_starting_group(target_xnames)
    # Create CRUS session
    crus_session_id, crus_session_values, bos_sessions = _create_crus_session(target_xnames, ssize, base_test_template)
    # Wait for it to finish, make sure everything looks good, and delete it
    _wait_verify_delete_crus_session(crus_session_id, crus_session_values, target_xnames, bos_sessions)
    # =============================
    # =============================
    # CLEANUP
    # =============================
    # =============================
    section("Cleaning up")
    do_subtest("Delete modified BOS session templates", delete_bos_session_templates, use_api=use_api,
               template_names=test_template_names)
    do_subtest("Delete VCS repo and org", delete_vcs_repo_and_org, test_variables=test_variables)
    do_subtest("Delete CFS configurations", delete_cfs_configs, use_api=use_api, cfs_config_names=test_variables["test_cfs_config_names"])
    do_subtest("Delete hsm groups", delete_hsm_groups, use_api=use_api, group_map=test_hsm_groups)
    do_subtest("Remove temporary directory", remove_tmpdir, tmpdir=tmpdir)
    test_variables["tmpdir"] = None
    section("Test passed")
if __name__ == '__main__':
    # Script entry point.
    # NOTE(review): test_wrapper is not defined in this excerpt -- presumably
    # defined elsewhere in the full file (expected to parse arguments, set up
    # logging, and invoke do_test).  Confirm against the original file.
    test_wrapper()
exit_test() | 45.226966 | 138 | 0.673507 |
3a80351f1ae9d22c12f2dfa0609670916e8b44d0 | 3,071 | py | Python | backend/transaction/models.py | elielagmay/react-budgeteer | 49a25dbd6dd6ea5d8bc93421eefbc12808f585af | [
"Unlicense"
] | 2 | 2018-10-23T00:40:53.000Z | 2021-05-31T08:19:40.000Z | backend/transaction/models.py | elielagmay/react-budgeteer | 49a25dbd6dd6ea5d8bc93421eefbc12808f585af | [
"Unlicense"
] | null | null | null | backend/transaction/models.py | elielagmay/react-budgeteer | 49a25dbd6dd6ea5d8bc93421eefbc12808f585af | [
"Unlicense"
] | null | null | null | from decimal import Decimal
from django.core.exceptions import ValidationError
from django.db import models
from app.utils import get_balances
| 29.528846 | 75 | 0.626506 |
3a804a776b085e92ef90bbf2391ea52e871ea437 | 2,335 | py | Python | src/games/textquiz.py | aleksandrgordienko/melissa-quiz | 49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f | [
"MIT"
] | null | null | null | src/games/textquiz.py | aleksandrgordienko/melissa-quiz | 49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f | [
"MIT"
] | null | null | null | src/games/textquiz.py | aleksandrgordienko/melissa-quiz | 49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f | [
"MIT"
] | null | null | null | # python-telegram-quiz
# @author: Aleksandr Gordienko
# @site: https://github.com/aleksandrgordienko/melissa-quiz
from random import randint
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
| 33.357143 | 81 | 0.621842 |
3a818c77d8d52a71bd103be2681594c2e4e919a8 | 1,246 | py | Python | Automate the Boring Stuff with Python/readDocx.py | m-barnes/Python | 0940d5f9b832c28703a32691db287b1361ce6ecc | [
"MIT"
] | null | null | null | Automate the Boring Stuff with Python/readDocx.py | m-barnes/Python | 0940d5f9b832c28703a32691db287b1361ce6ecc | [
"MIT"
] | null | null | null | Automate the Boring Stuff with Python/readDocx.py | m-barnes/Python | 0940d5f9b832c28703a32691db287b1361ce6ecc | [
"MIT"
] | null | null | null | import docx
import time
import os
from os import system
from pprint import pprint
finished = False
while finished == False:
parseFile()
| 20.766667 | 139 | 0.652488 |
3a84202d32e5e1c571adc31fc572e8596c4a5a08 | 87 | py | Python | rura/pipeline/__init__.py | fdabek1/rura | 6779733149d7e4181be54ecb72fbd4de6d71c678 | [
"MIT"
] | null | null | null | rura/pipeline/__init__.py | fdabek1/rura | 6779733149d7e4181be54ecb72fbd4de6d71c678 | [
"MIT"
] | null | null | null | rura/pipeline/__init__.py | fdabek1/rura | 6779733149d7e4181be54ecb72fbd4de6d71c678 | [
"MIT"
] | null | null | null | from .dataset import Dataset
from .model import Model
from .transform import Transform
| 21.75 | 32 | 0.827586 |
3a8504e3359b503e6fccca39bd8d8317686a767b | 5,707 | py | Python | originalCode_JMG/data/dataimportallIN.py | dataforgoodfr/batch9_validalab | f333bea8203fd1e4ed098b6e2d51a7f7b05ae530 | [
"MIT"
] | null | null | null | originalCode_JMG/data/dataimportallIN.py | dataforgoodfr/batch9_validalab | f333bea8203fd1e4ed098b6e2d51a7f7b05ae530 | [
"MIT"
] | null | null | null | originalCode_JMG/data/dataimportallIN.py | dataforgoodfr/batch9_validalab | f333bea8203fd1e4ed098b6e2d51a7f7b05ae530 | [
"MIT"
] | 1 | 2021-04-19T21:27:58.000Z | 2021-04-19T21:27:58.000Z | import networkx as nx
from py2neo import Graph, Node, Relationship
import pandas as pd
import random
graph = Graph("bolt://localhost:7687", auth=("neo4j", "Password"))
def importGexf(gexffilepath, depth = 0):
'''
Reads gexf network file from hyphe, update or create all nodes and relationships in neo4j database
Print . for each 100 nodes/links imported, 1000 for each 1000
"depth" is used to prefix new properties on node and rel. Value can be 0, 1 or 2
'''
# imports or update all nodes / relationships in gexf file from hyphe
G= nx.read_gexf(gexffilepath, node_type=None, relabel=False, version='1.1draft')
data = nx.json_graph.node_link_data(G)
totnbnodes=len(data['nodes'])
print(totnbnodes," nodes found in gexf")
i=1
for node in data['nodes']:
i=i+1
nodematch = graph.nodes.match(site_name =node['label']).first()
if nodematch == None:
try:
nodematch = Node('Website', site_name = node['label'])
nodematch.__primarylabel__ = 'Website'
nodematch.__primarykey__ = 'site_name'
graph.merge(nodematch)
except:
print("could not import ", node['label'])
for key in node.keys():
nodematch["D" + str(depth) + "_" + key] = node[key]
graph.push(nodematch)
if i%100 == 0:
print(".", end=" ")
if i%1000 ==0:
print(i,"/",totnbnodes)
print(i," nodes imported")
print(len(graph.nodes.match("Website")), "nodes in db after import")
totnblinks=len(data['links'])
print(totnblinks," links found in gexf")
j=0
for link in data['links']:
if depth ==0:
source_n = graph.nodes.match("Website", D0_id = link['source']).first()
target_n = graph.nodes.match("Website", D0_id = link['target']).first()
if depth == 1:
source_n = graph.nodes.match("Website", D1_id = link['source']).first()
target_n = graph.nodes.match("Website", D1_id = link['target']).first()
if depth == 2:
source_n = graph.nodes.match("Website", D2_id = link['source']).first()
target_n = graph.nodes.match("Website", D2_id = link['target']).first()
if depth == 3:
source_n = graph.nodes.match("Website", D3_id = link['source']).first()
target_n = graph.nodes.match("Website", D3_id = link['target']).first()
relmatch = graph.relationships.match((source_n,target_n),r_type="LINKS_TO").first()
try:
if relmatch == None:
rel = Relationship(source_n, "LINKS_TO", target_n)
rel["count_D" + str(depth)]=link['count']
graph.merge(rel)
else:
relmatch["count_D" + str(depth)]=link['count']
graph.push(relmatch)
if j%100 == 0:
print(".", end=" ")
if j%1000 ==0:
print(j, "/", totnblinks)
j=j+1
except:
pass
print(j," links imported")
print(len(graph.relationships.match()), "links in db after import")
def importGexfLinks(gexffilepath, depth = 0):
'''
Reads gexf network file from hyphe, update or create relationships in neo4j database
Print . for each 100 links imported, 1000 for each 1000
"depth" is used to prefix new properties on rel. Value can be 0, 1 or 2
'''
G= nx.read_gexf(gexffilepath, node_type=None, relabel=False, version='1.1draft')
data = nx.json_graph.node_link_data(G)
totnblinks=len(data['links'])
print(totnblinks," links found in gexf")
j=1
for link in data['links']:
if depth ==0:
source_n = graph.nodes.match("Website", D0_id = link['source']).first()
target_n = graph.nodes.match("Website", D0_id = link['target']).first()
if depth == 1:
source_n = graph.nodes.match("Website", D1_id = link['source']).first()
target_n = graph.nodes.match("Website", D1_id = link['target']).first()
if depth == 2:
source_n = graph.nodes.match("Website", D2_id = link['source']).first()
target_n = graph.nodes.match("Website", D2_id = link['target']).first()
if depth == 3:
source_n = graph.nodes.match("Website", D3_id = link['source']).first()
target_n = graph.nodes.match("Website", D3_id = link['target']).first()
relmatch = graph.relationships.match((source_n,target_n),r_type="LINKS_TO").first()
try:
if relmatch == None:
rel = Relationship(source_n, "LINKS_TO", target_n)
rel["count_D" + str(depth)]=link['count']
graph.merge(rel)
else:
relmatch["count_D" + str(depth)]=link['count']
graph.push(relmatch)
if j%100 == 0:
print(".", end=" ")
if j%1000 ==0:
print(j ,"/",totnblinks)
j=j+1
except:
pass
print(j," links imported")
print(len(graph.relationships.match()), "links in db after import")
# This imports all gexf files (takes time)
pathD0IN = "C:\\Users\\Jo\\Documents\\Tech\\Atom_prj\\MyMedia-FillDB\\data\\HypheExport20200520\\202005Websites01_D0_IN.gexf"
importGexf(pathD0IN, 0)
pathD1IN = "C:\\Users\\Jo\\Documents\\Tech\\Atom_prj\\MyMedia-FillDB\\data\\HypheExport20200520\\202005Websites01_D1_IN.gexf"
importGexf(pathD1IN, 1)
# This has not been done entirely
pathD2IN = "C:\\Users\\Jo\\Documents\\Tech\\Atom_prj\\MyMedia-FillDB\\data\\HypheExport20200520\\202005_Websites01_D2_IN.gexf"
importGexf(pathD2IN, 2)
| 38.823129 | 126 | 0.58805 |
3a851eb1905fe0976754043eca815207cc550202 | 4,023 | py | Python | code_base/excess_mortality/decode_args.py | Mlad-en/COV-BG | dabc6875e49b1fdb113ed691fbf70d5bdcb1846c | [
"MIT"
] | null | null | null | code_base/excess_mortality/decode_args.py | Mlad-en/COV-BG | dabc6875e49b1fdb113ed691fbf70d5bdcb1846c | [
"MIT"
] | null | null | null | code_base/excess_mortality/decode_args.py | Mlad-en/COV-BG | dabc6875e49b1fdb113ed691fbf70d5bdcb1846c | [
"MIT"
] | null | null | null | from code_base.excess_mortality.decode_loc_vars import *
DECODE_DEMO_COL = {
'excess_mortality_by_sex_age_country': 'age,sex,unit,geo\\time',
'excess_mortality_by_sex_age_nuts3': 'unit,sex,age,geo\\time',
'europe_population_by_age_and_sex': 'freq;unit;sex;age;geo\TIME_PERIOD'
}
DECODE_DEMO_REPL = {
'excess_mortality_by_sex_age_country': ['Age', 'Sex', 'Unit', 'Location'],
'excess_mortality_by_sex_age_nuts3': ['Unit', 'Sex', 'Age', 'Location'],
'europe_population_by_age_and_sex': ['Frequency', 'Unit', 'Sex', 'Age', 'Location']
}
DECODE_DEMO_SEPARATOR = {
'excess_mortality_by_sex_age_country': ',',
'excess_mortality_by_sex_age_nuts3': ',',
'europe_population_by_age_and_sex': ';'
}
RETAIN_COLUMNS = {
'excess_mortality_by_sex_age_country': ['Age', 'Sex', 'Location'],
'excess_mortality_by_sex_age_nuts3': ['Age', 'Sex', 'Location'],
'europe_population_by_age_and_sex': ['Age', 'Sex', 'Location', '2020']
}
COUNTRY_REPLACE = {
'excess_mortality_by_sex_age_country': EU_COUNTRIES_ISO_2_DECODES,
'excess_mortality_by_sex_age_nuts3': EU_DECODE_NUTS3_REGIONS,
'europe_population_by_age_and_sex': EU_COUNTRIES_ISO_2_DECODES
}
FILE_EXT_TYPE = {
'csv': '.csv',
'excel': '.xlsx',
}
EUROSTAT_AGES_CONVERSION = {
'TOTAL': 'Total',
'Y_LT5': '(0-4)',
'Y5-9': '(5-9)',
'Y10-14': '(10-14)',
'Y15-19': '(15-19)',
'Y20-24': '(20-24)',
'Y25-29': '(25-29)',
'Y30-34': '(30-34)',
'Y35-39': '(35-39)',
'Y40-44': '(40-44)',
'Y45-49': '(45-49)',
'Y50-54': '(50-54)',
'Y55-59': '(55-59)',
'Y60-64': '(60-64)',
'Y65-69': '(65-69)',
'Y70-74': '(70-74)',
'Y75-79': '(75-79)',
'Y80-84': '(80-84)',
'Y85-89': '(85-89)',
'Y_GE90': '(90+)',
}
EUROSTAT_SEX_CONVERSION = {
'F': 'Female',
'M': 'Male',
'T': 'Total',
}
UN_DECODE_AGE_GROUPS = {
'Total': 'Total',
'0 - 4': '(0-4)',
'5 - 9': '(5-9)',
'10 - 14': '(10-14)',
'15 - 19': '(15-19)',
'20 - 24': '(20-24)',
'25 - 29': '(25-29)',
'30 - 34': '(30-34)',
'35 - 39': '(35-39)',
'40 - 44': '(40-44)',
'45 - 49': '(45-49)',
'50 - 54': '(50-54)',
'55 - 59': '(55-59)',
'60 - 64': '(60-64)',
'65 - 69': '(65-69)',
'70 - 74': '(70-74)',
'75 - 79': '(75-79)',
'80 - 84': '(80-84)',
'85 - 89': '(85-89)',
'90 - 94': '(90+)',
'95 - 99': '(90+)',
'100 +': '(90+)',
'90 +': '(90+)',
}
UN_DECODE_SEX_GROUPS = {
'Both Sexes': 'Total',
'Male': 'Male',
'Female': 'Female',
}
std_eu_pop_2013_decode_age = {
# Combine 0-4 by decoding uder 1 and 1-4 as the same value
'Under 1 year': '(0-4)',
'1 year to under 5 years': '(0-4)',
'5 to under 10 years': '(5-9)',
'10 to under 15 years': '(10-14)',
'15 to under 20 years': '(15-19)',
'20 to under 25 years': '(20-24)',
'25 to under 30 years': '(25-29)',
'30 to under 35 years': '(30-34)',
'35 to under 40 years': '(35-39)',
'40 to under 45 years': '(40-44)',
'45 to under 50 years': '(45-49)',
'50 to under 55 years': '(50-54)',
'55 to under 60 years': '(55-59)',
'60 to under 65 years': '(60-64)',
'65 to under 70 years': '(65-69)',
'70 to under 75 years': '(70-74)',
'75 to under 80 years': '(75-79)',
'80 to under 85 years': '(80-84)',
'85 to under 90 years': '(85-89)',
'90 years and older': '(90+)',
}
INFOSTAT_DECODE_AGE_GROUPS = {
'Total': 'Total',
'0': '(0-4)',
'1 - 4': '(0-4)',
'5 - 9': '(5-9)',
'10 - 14': '(10-14)',
'15 - 19': '(15-19)',
'20 - 24': '(20-24)',
'25 - 29': '(25-29)',
'30 - 34': '(30-34)',
'35 - 39': '(35-39)',
'40 - 44': '(40-44)',
'45 - 49': '(45-49)',
'50 - 54': '(50-54)',
'55 - 59': '(55-59)',
'60 - 64': '(60-64)',
'65 - 69': '(65-69)',
'70 - 74': '(70-74)',
'75 - 79': '(75-79)',
'80 - 84': '(80-84)',
'85 - 89': '(85-89)',
'90 - 94': '(90+)',
'95 - 99': '(90+)',
'100+': '(90+)',
}
| 27 | 87 | 0.505096 |
3a862072dc82d94cea5c675c09cf65fbf2cd377c | 4,510 | py | Python | concord/ext/audio/middleware.py | nariman/concord-ext-audio | c7662507f641bfdba277509838433dbb24fe11a3 | [
"MIT"
] | null | null | null | concord/ext/audio/middleware.py | nariman/concord-ext-audio | c7662507f641bfdba277509838433dbb24fe11a3 | [
"MIT"
] | 14 | 2019-02-19T03:14:07.000Z | 2021-06-25T15:15:55.000Z | concord/ext/audio/middleware.py | narimanized/concord-ext-audio | c7662507f641bfdba277509838433dbb24fe11a3 | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2017-2018 Nariman Safiulin
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import asyncio
from typing import Callable, Optional
import discord
from concord.context import Context
from concord.middleware import Middleware, MiddlewareState
from concord.ext.audio.state import State
| 34.166667 | 80 | 0.645455 |
3a862c1dfb4c8e4aff3392df9183017ab88ec2ab | 775 | py | Python | umtk/image/utils.py | kyle0x54/umtk | 883090d84fce924e65184847e6b3048014616f5d | [
"Apache-2.0"
] | 1 | 2020-08-03T12:27:02.000Z | 2020-08-03T12:27:02.000Z | umtk/image/utils.py | kyle0x54/umtk | 883090d84fce924e65184847e6b3048014616f5d | [
"Apache-2.0"
] | null | null | null | umtk/image/utils.py | kyle0x54/umtk | 883090d84fce924e65184847e6b3048014616f5d | [
"Apache-2.0"
] | 1 | 2020-11-28T03:27:10.000Z | 2020-11-28T03:27:10.000Z | import os
from pathlib import Path
from typing import Any, Dict, Union
import numpy as np
def isdicom(path: Union[str, Path]) -> bool:
""" Judge whether a given file is a valid dicom.
Args:
path: given file path.
Returns:
True if given path is a valid dicom, otherwise False.
"""
if not os.path.isfile(path):
return False
# read preamble and magic code
with open(path, "rb") as f:
header = f.read(132)
if not header:
return False
# magic code of a dicom file should be "DICM"
return False if header[128:132] != b"DICM" else True
| 22.142857 | 61 | 0.618065 |
3a866ce737d90dd7710156bcd56f1d122772201c | 28,704 | py | Python | tf_rl/common/utils.py | Rowing0914/TF_RL | 68e5e9a23e38ed2d8ac5f97d380567b919a3d2e7 | [
"MIT"
] | 23 | 2019-04-04T17:34:56.000Z | 2021-12-14T19:34:10.000Z | tf_rl/common/utils.py | Rowing0914/TF_RL | 68e5e9a23e38ed2d8ac5f97d380567b919a3d2e7 | [
"MIT"
] | null | null | null | tf_rl/common/utils.py | Rowing0914/TF_RL | 68e5e9a23e38ed2d8ac5f97d380567b919a3d2e7 | [
"MIT"
] | 3 | 2019-07-17T23:56:36.000Z | 2022-03-13T03:55:21.000Z | import tensorflow as tf
import numpy as np
import os, datetime, itertools, shutil, gym, sys
from tf_rl.common.visualise import plot_Q_values
from tf_rl.common.wrappers import MyWrapper, CartPole_Pixel, wrap_deepmind, make_atari
"""
TF basic Utility functions
"""
def eager_setup():
"""
it eables an eager execution in tensorflow with config that allows us to flexibly access to a GPU
from multiple python scripts
:return:
"""
config = tf.compat.v1.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
config.gpu_options.allow_growth = True
tf.compat.v1.enable_eager_execution(config=config)
tf.compat.v1.enable_resource_variables()
"""
Common Utility functions
"""
def get_alg_name():
"""Returns the name of the algorithm.
We assume that the directory architecutre for that algo looks like below
- Atari: `examples/algo_name/algo_name_eager.py`
- Cartpole: `examples/algo_name/algo_name_eager_cartpole.py`
* where algo_name must be uppercase/capital letters!!
"""
alg_name = sys.argv[0].rsplit("/")[-1].rsplit(".")[0].replace("_eager", "")
return alg_name
def invoke_agent_env(params, alg):
"""Returns the wrapped env and string name of agent, then Use `eval(agent)` to activate it from main script
"""
if params.mode == "Atari":
env = wrap_deepmind(make_atari("{}NoFrameskip-v4".format(params.env_name, skip_frame_k=params.skip_frame_k)),
skip_frame_k=params.skip_frame_k)
if params.debug_flg:
agent = "{}_debug".format(alg)
else:
agent = "{}".format(alg)
else:
agent = "{}".format(alg)
if params.mode == "CartPole":
env = MyWrapper(gym.make("CartPole-v0"))
elif params.mode == "CartPole-p":
env = CartPole_Pixel(gym.make("CartPole-v0"))
return agent, env
def create_log_model_directory(params, alg):
"""
Create a directory for log/model
this is compatible with Google colab and can connect to MyDrive through the authorisation step
:param params:
:return:
"""
if params.mode in ["Atari", "atari", "MuJoCo", "mujoco"]:
second_name = params.env_name
else:
second_name = params.mode
now = datetime.datetime.now()
if params.google_colab:
# mount the MyDrive on google drive and create the log directory for saving model and logging using tensorboard
params.log_dir, params.model_dir, params.log_dir_colab, params.model_dir_colab = _setup_on_colab(alg,
params.mode)
else:
if params.debug_flg:
params.log_dir = "../../logs/logs/" + now.strftime("%Y%m%d-%H%M%S") + "-{}_{}_debug/".format(alg,
second_name)
params.model_dir = "../../logs/models/" + now.strftime("%Y%m%d-%H%M%S") + "-{}_{}_debug/".format(alg,
second_name)
else:
params.log_dir = "../../logs/logs/" + now.strftime("%Y%m%d-%H%M%S") + "-{}_{}/".format(alg, second_name)
params.model_dir = "../../logs/models/" + now.strftime("%Y%m%d-%H%M%S") + "-{}_{}/".format(alg, second_name)
return params
def get_ready(params):
"""
Print out the content of params
:param params:
:return:
"""
for key, item in vars(params).items():
print(key, " : ", item)
def create_checkpoint(model, optimizer, model_dir):
"""
Create a checkpoint for managing a model
:param model:
:param optimizer:
:param model_dir:
:return:
"""
checkpoint_dir = model_dir
check_point = tf.train.Checkpoint(optimizer=optimizer,
model=model,
optimizer_step=tf.compat.v1.train.get_or_create_global_step())
manager = tf.train.CheckpointManager(check_point, checkpoint_dir, max_to_keep=3)
# try re-loading the previous training progress!
try:
print("Try loading the previous training progress")
check_point.restore(manager.latest_checkpoint)
assert tf.compat.v1.train.get_global_step().numpy() != 0
print("===================================================\n")
print("Restored the model from {}".format(checkpoint_dir))
print("Currently we are on time-step: {}".format(tf.compat.v1.train.get_global_step().numpy()))
print("\n===================================================")
except:
print("===================================================\n")
print("Previous Training files are not found in Directory: {}".format(checkpoint_dir))
print("\n===================================================")
return manager
def _setup_on_colab(alg_name, env_name):
"""
Mount MyDrive to current instance through authentication of Google account
Then use it as a backup of training related files
:param env_name:
:return:
"""
# mount your drive on google colab
from google.colab import drive
drive.mount("/content/gdrive")
log_dir = "/content/TF_RL/logs/logs/{}/{}".format(alg_name, env_name)
model_dir = "/content/TF_RL/logs/models/{}/{}".format(alg_name, env_name)
log_dir_colab = "/content/gdrive/My Drive/logs/logs/{}/{}".format(alg_name, env_name)
model_dir_colab = "/content/gdrive/My Drive/logs/models/{}/{}".format(alg_name, env_name)
# create the logs directory under the root dir
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
if not os.path.isdir(model_dir):
os.makedirs(model_dir)
# if the previous directory existed in My Drive, then we would continue training on top of the previous training
if os.path.isdir(log_dir_colab):
print("=== {} IS FOUND ===".format(log_dir_colab))
copy_dir(log_dir_colab, log_dir, verbose=True)
else:
print("=== {} IS NOT FOUND ===".format(log_dir_colab))
os.makedirs(log_dir_colab)
print("=== FINISHED CREATING THE DIRECTORY ===")
if os.path.isdir(model_dir_colab):
print("=== {} IS FOUND ===".format(model_dir_colab))
copy_dir(model_dir_colab, model_dir, verbose=True)
else:
print("=== {} IS NOT FOUND ===".format(model_dir_colab))
os.makedirs(model_dir_colab)
print("=== FINISHED CREATING THE DIRECTORY ===")
return log_dir, model_dir, log_dir_colab, model_dir_colab
def copy_dir(src, dst, symlinks=False, ignore=None, verbose=False):
"""
copy the all contents in `src` directory to `dst` directory
Usage:
```python
delete_files("./bb/")
```
"""
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if verbose:
print("From:{}, To: {}".format(s, d))
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def delete_files(folder, verbose=False):
"""
delete the all contents in `folder` directory
Usage:
```python
copy_dir("./aa/", "./bb/")
```
"""
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
if verbose:
print("{} has been deleted".format(file_path))
except Exception as e:
print(e)
"""
Algorithm Specific Utility functions
"""
def state_unpacker(state):
"""
Given the dictionary of state, it unpacks and returns processed items as numpy.ndarray
Sample input:
{'observation': array([ 1.34193265e+00, 7.49100375e-01, 5.34722720e-01, 1.30179339e+00, 8.86399624e-01,
4.24702091e-01, -4.01392554e-02, 1.37299250e-01, -1.10020629e-01, 2.91834773e-06,
-4.72661656e-08, -3.85214084e-07, 5.92637053e-07, 1.12208536e-13, -7.74656889e-06,
-7.65027248e-08, 4.92570535e-05, 1.88857148e-07, -2.90549459e-07, -1.18156686e-18,
7.73934983e-06, 7.18103404e-08, -2.42928780e-06, 4.93607091e-07, 1.70999820e-07]),
'achieved_goal': array([1.30179339, 0.88639962, 0.42470209]),
'desired_goal': array([1.4018907 , 0.62021174, 0.4429846 ])}
:param state:
:return:
"""
obs = np.array(state["observation"])
achieved_goal = np.array(state["achieved_goal"])
desired_goal = np.array(state["desired_goal"])
remaining_goal = simple_goal_subtract(desired_goal, achieved_goal)
return obs, achieved_goal, desired_goal, remaining_goal
def simple_goal_subtract(goal, achieved_goal):
"""
We subtract the achieved goal from the desired one to see how much we are still far from the desired position
"""
assert goal.shape == achieved_goal.shape
return goal - achieved_goal
ALIVE_BONUS = 1.0
def get_distance(env_name):
"""
This returns the distance according to the implementation of env
For instance, halfcheetah and humanoid have the different way to return the distance
so that we need to deal with them accordingly.
:return: func to calculate the distance(float)
"""
obj_name = env_name.split("-")[0]
if not obj_name.find("Ant") == -1:
elif not obj_name.find("HalfCheetah") == -1:
elif not obj_name.find("Hopper") == -1:
elif not obj_name.find("Humanoid") == -1:
elif not obj_name.find("Swimmer") == -1:
elif not obj_name.find("Walker2d") == -1:
elif not obj_name.find("Centipede") == -1:
else:
assert False, "This env: {} is not supported yet.".format(env_name)
return func
"""
TODO: I think I will remove this.
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
===== Tracker is A class for storing iteration-specific metrics. ====
"""
"""
Update methods
"""
def sync_main_target(sess, target, source):
"""
Synchronise the models
from Denny Britz's excellent RL repo
https://github.com/dennybritz/reinforcement-learning/blob/master/DQN/Double%20DQN%20Solution.ipynb
:param main:
:param target:
:return:
"""
source_params = [t for t in tf.trainable_variables() if t.name.startswith(source.scope)]
source_params = sorted(source_params, key=lambda v: v.name)
target_params = [t for t in tf.trainable_variables() if t.name.startswith(target.scope)]
target_params = sorted(target_params, key=lambda v: v.name)
update_ops = []
for target_w, source_w in zip(target_params, source_params):
op = target_w.assign(source_w)
update_ops.append(op)
sess.run(update_ops)
def soft_target_model_update(sess, target, source, tau=1e-2):
"""
Soft update model parameters.
target = tau * source + (1 - tau) * target
:param main:
:param target:
:param tau:
:return:
"""
source_params = [t for t in tf.trainable_variables() if t.name.startswith(source.scope)]
source_params = sorted(source_params, key=lambda v: v.name)
target_params = [t for t in tf.trainable_variables() if t.name.startswith(target.scope)]
target_params = sorted(target_params, key=lambda v: v.name)
update_ops = []
for target_w, source_w in zip(target_params, source_params):
# target = tau * source + (1 - tau) * target
op = target_w.assign(tau * source_w + (1 - tau) * target_w)
update_ops.append(op)
sess.run(update_ops)
"""
Gradient Clipping
"""
def gradient_clip_fn(flag=None):
"""
given a flag, create the clipping function and returns it as a function
currently it supports:
- by_value
- norm
- None
:param flag:
:return:
"""
if flag == "":
elif flag == "by_value":
elif flag == "norm":
else:
assert False, "Choose the gradient clipping function from by_value, norm, or nothing!"
return _func
def ClipIfNotNone(grad, _min, _max):
"""
Reference: https://stackoverflow.com/a/39295309
:param grad:
:return:
"""
if grad is None:
return grad
return tf.clip_by_value(grad, _min, _max)
"""
Test Methods
"""
def eval_Agent(agent, env, n_trial=1):
"""
Evaluate the trained agent!
:return:
"""
all_rewards = list()
print("=== Evaluation Mode ===")
for ep in range(n_trial):
state = env.reset()
done = False
episode_reward = 0
while not done:
# epsilon-greedy for evaluation using a fixed epsilon of 0.05(Nature does this!)
if np.random.uniform() < 0.05:
action = np.random.randint(agent.num_action)
else:
action = np.argmax(agent.predict(state))
next_state, reward, done, _ = env.step(action)
state = next_state
episode_reward += reward
all_rewards.append(episode_reward)
tf.contrib.summary.scalar("Evaluation Score", episode_reward, step=agent.index_timestep)
print("| Ep: {}/{} | Score: {} |".format(ep + 1, n_trial, episode_reward))
# if this is running on Google Colab, we would store the log/models to mounted MyDrive
if agent.params.google_colab:
delete_files(agent.params.model_dir_colab)
delete_files(agent.params.log_dir_colab)
copy_dir(agent.params.log_dir, agent.params.log_dir_colab)
copy_dir(agent.params.model_dir, agent.params.model_dir_colab)
if n_trial > 2:
print("=== Evaluation Result ===")
all_rewards = np.array([all_rewards])
print("| Max: {} | Min: {} | STD: {} | MEAN: {} |".format(np.max(all_rewards), np.min(all_rewards),
np.std(all_rewards), np.mean(all_rewards)))
def eval_Agent_DDPG(env, agent, n_trial=1):
"""
Evaluate the trained agent with the recording of its behaviour
:return:
"""
all_distances, all_rewards, all_actions = list(), list(), list()
distance_func = get_distance(agent.params.env_name) # create the distance measure func
print("=== Evaluation Mode ===")
for ep in range(n_trial):
env.record_start()
state = env.reset()
done = False
episode_reward = 0
while not done:
action = agent.eval_predict(state)
# scale for execution in env (in DDPG, every action is clipped between [-1, 1] in agent.predict)
next_state, reward, done, info = env.step(action * env.action_space.high)
distance = distance_func(action, reward, info)
all_actions.append(action.mean() ** 2) # Mean Squared of action values
all_distances.append(distance)
state = next_state
episode_reward += reward
all_rewards.append(episode_reward)
tf.contrib.summary.scalar("Evaluation Score", episode_reward, step=agent.index_timestep)
print("| Ep: {}/{} | Score: {} |".format(ep + 1, n_trial, episode_reward))
env.record_end()
return all_rewards, all_distances, all_actions
def eval_Agent_TRPO(agent, env, n_trial=1):
"""
Evaluate the trained agent!
:return:
"""
all_rewards = list()
print("=== Evaluation Mode ===")
for ep in range(n_trial):
state = env.reset()
done = False
episode_reward = 0
while not done:
action = agent.predict(state)
# scale for execution in env (in DDPG, every action is clipped between [-1, 1] in agent.predict)
next_state, reward, done, _ = env.step(action)
state = next_state
episode_reward += reward
all_rewards.append(episode_reward)
tf.contrib.summary.scalar("Evaluation Score", episode_reward, step=agent.index_timestep)
print("| Ep: {}/{} | Score: {} |".format(ep + 1, n_trial, episode_reward))
if n_trial > 2:
print("=== Evaluation Result ===")
all_rewards = np.array([all_rewards])
print("| Max: {} | Min: {} | STD: {} | MEAN: {} |".format(np.max(all_rewards), np.min(all_rewards),
np.std(all_rewards), np.mean(all_rewards)))
def eval_Agent_HER(agent, env, n_trial=1):
"""
Evaluate the trained agent!
:return:
"""
successes = list()
for ep in range(n_trial):
state = env.reset()
# obs, achieved_goal, desired_goal in `numpy.ndarray`
obs, ag, dg, rg = state_unpacker(state)
success = list()
for ts in range(agent.params.num_steps):
# env.render()
action = agent.predict(obs, dg)
# action = action_postprocessing(action, agent.params)
next_state, reward, done, info = env.step(action)
success.append(info.get('is_success'))
# obs, achieved_goal, desired_goal in `numpy.ndarray`
next_obs, next_ag, next_dg, next_rg = state_unpacker(next_state)
obs = next_obs
dg = next_dg
successes.append(success)
return np.mean(np.array(successes))
| 35.745953 | 162 | 0.610089 |
3a8780a44ac5da348e337c07269fb06faa67e8cd | 2,284 | py | Python | common/serializers.py | kollad/turbo-ninja | 9c3f66b2af64aec01f522d19b309cfdd723e67cf | [
"MIT"
] | null | null | null | common/serializers.py | kollad/turbo-ninja | 9c3f66b2af64aec01f522d19b309cfdd723e67cf | [
"MIT"
] | 1 | 2017-12-14T05:35:38.000Z | 2017-12-14T05:35:38.000Z | common/serializers.py | kollad/turbo-ninja | 9c3f66b2af64aec01f522d19b309cfdd723e67cf | [
"MIT"
] | null | null | null | from collections import namedtuple, OrderedDict
import json
__author__ = 'kollad'
def isnamedtuple(obj):
"""Heuristic check if an object is a namedtuple."""
return isinstance(obj, tuple) \
and hasattr(obj, "_fields") \
and hasattr(obj, "_asdict") \
and callable(obj._asdict)
| 34.606061 | 83 | 0.609019 |
3a8812b8a7ce8889a96abd8e38c4d8b8f1956ab6 | 1,079 | py | Python | setup.py | mjw99/Musketeer | 0299a7974ad90c09d8d9206fcf862e45f9fddf30 | [
"MIT"
] | null | null | null | setup.py | mjw99/Musketeer | 0299a7974ad90c09d8d9206fcf862e45f9fddf30 | [
"MIT"
] | null | null | null | setup.py | mjw99/Musketeer | 0299a7974ad90c09d8d9206fcf862e45f9fddf30 | [
"MIT"
] | null | null | null | import setuptools
with open("README.md") as readmeFile:
long_description = readmeFile.read()
setuptools.setup(
name="musketeer",
version="0.0.1",
author="Daniil Soloviev",
author_email="dos23@cam.ac.uk",
description="A tool for fitting data from titration experiments.",
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Chemistry"
],
url="https://github.com/daniilS/Musketeer",
packages=["musketeer"],
package_data={"": ["*.png"]},
include_package_data=True,
install_requires=[
"numpy",
"scipy",
"matplotlib",
"ttkbootstrap",
"tkscrolledframe",
"ttkwidgets"
],
python_requires=">=3"
)
| 29.162162 | 70 | 0.624652 |
3a886dc473f6df44d24e2498829541fd798461f5 | 1,358 | py | Python | webapp/tests.py | carolFrohlich/string_checker | 27a96ab9a315d47304b0eb6bdfd671be7a34b6f1 | [
"MIT"
] | null | null | null | webapp/tests.py | carolFrohlich/string_checker | 27a96ab9a315d47304b0eb6bdfd671be7a34b6f1 | [
"MIT"
] | null | null | null | webapp/tests.py | carolFrohlich/string_checker | 27a96ab9a315d47304b0eb6bdfd671be7a34b6f1 | [
"MIT"
] | null | null | null | from django.test import TestCase
# Create your tests here.
from webapp.forms import contains_all_letters
| 24.690909 | 55 | 0.734904 |
3a89f586494444a77daa3b34a1bc45b72a73f85e | 16,338 | py | Python | EvolutiveStrategies.py | ignacioct/GeneticAlgorithms | 6a92c3d5ec6f2796333576d93c3b6b421055b7a4 | [
"MIT"
] | 4 | 2020-11-26T16:18:23.000Z | 2021-06-28T08:43:35.000Z | EvolutiveStrategies.py | ignacioct/GeneticAlgorithms | 6a92c3d5ec6f2796333576d93c3b6b421055b7a4 | [
"MIT"
] | null | null | null | EvolutiveStrategies.py | ignacioct/GeneticAlgorithms | 6a92c3d5ec6f2796333576d93c3b6b421055b7a4 | [
"MIT"
] | null | null | null | import copy
import math
import operator
import random
import sys
from concurrent import futures
import numpy as np
import requests
def main():
    """Run the multi-individual evolutionary strategy with the best-known settings."""
    # Alternative single-individual run (kept for reference):
    #   strategy = EvolutiveStrategyOneIndividual(c=ce, is10=True)
    #   strategy.trainingLoop(10000)
    strategy = EvolutiveStrategyMultiple(
        population=300,
        family_number=2,
        tournament_factor=0.05,
        is10=True,
    )
    strategy.training_cycle(1000, scaling=True)


if __name__ == "__main__":
    main()
| 35.135484 | 144 | 0.580487 |
3a8ac6ed77639549d9368218a7f979d0a6bcc7b7 | 1,638 | py | Python | src/arago/hiro/client/exception.py | 166MMX/hiro-python-library | fb29e3247a8fe1b0f7dc4e68141cf7340a8dd0a5 | [
"MIT"
] | null | null | null | src/arago/hiro/client/exception.py | 166MMX/hiro-python-library | fb29e3247a8fe1b0f7dc4e68141cf7340a8dd0a5 | [
"MIT"
] | null | null | null | src/arago/hiro/client/exception.py | 166MMX/hiro-python-library | fb29e3247a8fe1b0f7dc4e68141cf7340a8dd0a5 | [
"MIT"
] | null | null | null | from typing import Mapping, Any, List
| 27.762712 | 96 | 0.525031 |
3a8c95437dc709e3b0251893e5436db0d7890d0f | 8,098 | py | Python | yoongram/users/views.py | happyjy/yoonGram | 20555619721065296d5dab88e80c763b4a3f295e | [
"MIT"
] | null | null | null | yoongram/users/views.py | happyjy/yoonGram | 20555619721065296d5dab88e80c763b4a3f295e | [
"MIT"
] | 9 | 2021-03-09T02:00:36.000Z | 2022-02-26T10:13:36.000Z | yoongram/users/views.py | happyjy/yoonGram | 20555619721065296d5dab88e80c763b4a3f295e | [
"MIT"
] | null | null | null | from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from . import models, serializers
from yoongram.notifications import views as notifications_views
# class Based Viwes
# function Based Views
# views: path("<slug:username>/following/", view=views.UserFollowingUserFBV, name="user_profile")
# def UserFollowingFBV(request, username):
# if request.method == 'GET':
# try:
# found_user = models.User.objects.get(username=username)
# except models.User.DeosNotExist:
# return Response(status=status.HTTP_404_NOT_FOUND)
# user_following = found_user.following.all()
# serializer = serializers.ListUsersSerializer(user_following, many=True)
# return Response(data=serializer.data, status=status.HTTP_200_OK)
| 39.120773 | 261 | 0.645098 |
3a8cac712e69f85d4085b70791e0d285fbcb5630 | 2,507 | py | Python | BabysFirstNeuralNetwork/ToyNN.py | dwpicott/BasicNeuralNetwork | ad4f5878098e5ad167ee2280f5b9b03af02dfa27 | [
"MIT"
] | null | null | null | BabysFirstNeuralNetwork/ToyNN.py | dwpicott/BasicNeuralNetwork | ad4f5878098e5ad167ee2280f5b9b03af02dfa27 | [
"MIT"
] | null | null | null | BabysFirstNeuralNetwork/ToyNN.py | dwpicott/BasicNeuralNetwork | ad4f5878098e5ad167ee2280f5b9b03af02dfa27 | [
"MIT"
] | null | null | null | '''
Basic Python tutorial neural network.
Based on "A Neural Network in 11 Lines of Python" by i am trask
https://iamtrask.github.io/2015/07/12/basic-python-network/
'''
import numpy as np
# Training data: a 1 in the first column directly correlates with a 1 in the output
# training features
features = np.array([ [0, 0, 1],
[0, 1, 1],
[1, 0, 1],
[1, 1, 1] ])
# training targets
targets = np.array([ [0, 0, 1, 1] ]).T # 4x1 matrix
# Seed random number generator
np.random.seed(1)
nn = ToyNN()
print("Training neural network...")
nn.TrainNN(features, targets)
print("Training complete.\n")
print("Input training set:")
print(targets)
print("Expected output:")
print(targets)
print("\nOutput from training set after 10000 iterations:")
print(nn.FeedForward(features))
print("\n==============================\n")
newData = np.array([ [0, 0, 0],
[0, 1, 0],
[1, 0, 0] ])
print("New input data:")
print(newData)
print("Expected output:")
print(np.array([ [0, 0, 1] ]).T)
print("\nOutput for new data not in the training set:")
print(nn.FeedForward(newData)) | 27.25 | 83 | 0.59274 |
3a8dcfa7190ecc79bdaa94535eba0d246aff05b9 | 1,122 | py | Python | gaphor/UML/deployments/tests/test_connector.py | MartinIIOT/gaphor | b08bf6ddb8c92ec87fccabc2ddee697609f73e67 | [
"Apache-2.0"
] | null | null | null | gaphor/UML/deployments/tests/test_connector.py | MartinIIOT/gaphor | b08bf6ddb8c92ec87fccabc2ddee697609f73e67 | [
"Apache-2.0"
] | null | null | null | gaphor/UML/deployments/tests/test_connector.py | MartinIIOT/gaphor | b08bf6ddb8c92ec87fccabc2ddee697609f73e67 | [
"Apache-2.0"
] | null | null | null | import pytest
from gaphor import UML
from gaphor.core.modeling import Diagram
from gaphor.core.modeling.modelinglanguage import (
CoreModelingLanguage,
MockModelingLanguage,
)
from gaphor.SysML.modelinglanguage import SysMLModelingLanguage
from gaphor.UML.deployments.connector import ConnectorItem
from gaphor.UML.modelinglanguage import UMLModelingLanguage
def test_create(create):
    """Creating a connector item must attach a Connector model subject."""
    connector_item = create(ConnectorItem, UML.Connector)
    assert connector_item.subject is not None
def test_persistence(create, element_factory, saver, loader):
    """A connector item and its end must survive a save/load round trip."""
    item = create(ConnectorItem, UML.Connector)
    connector_end = element_factory.create(UML.ConnectorEnd)
    item.end = connector_end

    serialized = saver()
    assert connector_end.id in serialized

    loader(serialized)
    diagram = next(element_factory.select(Diagram))
    assert diagram.select(ConnectorItem)
    assert element_factory.lselect(UML.ConnectorEnd)
| 27.365854 | 78 | 0.762923 |
3a8e21c35da0565b1474e19643e2481a81691a35 | 14,317 | py | Python | utils/lists.py | luciano1337/legion-bot | 022d1ef9eb77a26b57929f800dd55770206f8852 | [
"MIT"
] | null | null | null | utils/lists.py | luciano1337/legion-bot | 022d1ef9eb77a26b57929f800dd55770206f8852 | [
"MIT"
] | null | null | null | utils/lists.py | luciano1337/legion-bot | 022d1ef9eb77a26b57929f800dd55770206f8852 | [
"MIT"
] | null | null | null | pozehug = [
'https://media1.tenor.com/images/4d89d7f963b41a416ec8a55230dab31b/tenor.gif?itemid=5166500',
'https://media1.tenor.com/images/c7efda563983124a76d319813155bd8e/tenor.gif?itemid=15900664',
'https://media1.tenor.com/images/daffa3b7992a08767168614178cce7d6/tenor.gif?itemid=15249774',
'https://media1.tenor.com/images/7e30687977c5db417e8424979c0dfa99/tenor.gif?itemid=10522729',
'https://media1.tenor.com/images/5ccc34d0e6f1dccba5b1c13f8539db77/tenor.gif?itemid=17694740'
]
raspunsuri = [
'Da', 'Nu', 'Ghiceste..', 'Absolut.',
'Desigur.', 'Fara indoiala fratimiu.',
'Cel mai probabil.', 'Daca vreau eu',
'Ajutor acest copil are iq-ul scazut!',
'https://i.imgur.com/9x18D5m.png',
'Sa speram', 'Posibil.',
'Ce vorbesti sampist cordit',
'Se prea poate', 'Atata poti cumetre',
'Daca doresc', 'Teapa cumetre',
'Milsugi grav', 'https://www.youtube.com/watch?v=1MwqNFO_rM4',
'Nu stiu ca nu sunt creativa', 'Nu stiu', 'Asa te-ai nascut bai asta', 'Yamete Kudasaiii.. ^_^', 'E prost dal in morti lui!',
'Nu il poti judeca.'
]
lovitura = [
'https://media1.tenor.com/images/9ea4fb41d066737c0e3f2d626c13f230/tenor.gif?itemid=7355956',
'https://media1.tenor.com/images/612e257ab87f30568a9449998d978a22/tenor.gif?itemid=16057834',
'https://media1.tenor.com/images/528ff731635b64037fab0ba6b76d8830/tenor.gif?itemid=17078255',
'https://media1.tenor.com/images/153b2f1bfd3c595c920ce60f1553c5f7/tenor.gif?itemid=10936993',
'https://media1.tenor.com/images/f9f121a46229ea904209a07cae362b3e/tenor.gif?itemid=7859254',
'https://media1.tenor.com/images/477821d58203a6786abea01d8cf1030e/tenor.gif?itemid=7958720'
]
pisica = [
'https://media1.tenor.com/images/730c85cb58041d4345404a67641fcd46/tenor.gif?itemid=4351869',
'https://media1.tenor.com/images/f78e68053fcaf23a6ba7fbe6b0b6cff2/tenor.gif?itemid=10614631',
'https://media1.tenor.com/images/8ab88b79885ab587f84cbdfbc3b87835/tenor.gif?itemid=15917800',
'https://media1.tenor.com/images/fea93362cd765a15b5b2f45fc6fca068/tenor.gif?itemid=14715148',
'https://media1.tenor.com/images/fb22e08583263754816e910f6a6ae4bd/tenor.gif?itemid=15310654',
'https://media1.tenor.com/images/9596d3118ddd5c600806a44da90c4863/tenor.gif?itemid=16014629',
'https://media1.tenor.com/images/ce038ac1010fa9514bb40d07c2dfed7b/tenor.gif?itemid=14797681',
'https://media1.tenor.com/images/4fbe2ab9d22992d0a42da37804f227e8/tenor.gif?itemid=9606395',
'https://media1.tenor.com/images/f6fe8d1d0463f4e51b6367bbecf56a3e/tenor.gif?itemid=6198981',
'https://media1.tenor.com/images/a862d2cb92bfbe6213e298871b1e8a9a/tenor.gif?itemid=15805236'
]
caini = [
''
]
pozehentai = [
'https://i.alexflipnote.dev/500ce4.gif',
'https://media1.tenor.com/images/832c34c525cc3b7dae850ce5e7ee451c/tenor.gif?itemid=9714277',
'https://media1.tenor.com/images/1169d1ab96669e13062c1b23ce5b9b01/tenor.gif?itemid=9035033',
'https://media1.tenor.com/images/583d46f95740b8dde76b47585d78f3a4/tenor.gif?itemid=19369487',
'https://media1.tenor.com/images/01b39c35fd1ce4bb6ce8be232c26d423/tenor.gif?itemid=12342539',
'https://media1.tenor.com/images/bd39500869eeedd72d94274282fd14f2/tenor.gif?itemid=9252323',
'https://media1.tenor.com/images/65c92e3932d7617146c7faab53e1063b/tenor.gif?itemid=11098571',
'https://media1.tenor.com/images/c344d38d1a2b799db53478b8ec302f9e/tenor.gif?itemid=14057537'
]
pozekiss = [
'https://media1.tenor.com/images/ef4a0bcb6e42189dc12ee55e0d479c54/tenor.gif?itemid=12143127',
'https://media1.tenor.com/images/f102a57842e7325873dd980327d39b39/tenor.gif?itemid=12392648',
'https://media1.tenor.com/images/3d56f6ef81e5c01241ff17c364b72529/tenor.gif?itemid=13843260',
'https://media1.tenor.com/images/503bb007a3c84b569153dcfaaf9df46a/tenor.gif?itemid=17382412',
'https://media1.tenor.com/images/6bd9c3ba3c06556935a452f0a3679ccf/tenor.gif?itemid=13387677',
'https://media1.tenor.com/images/f1dd2c4bade57949f49daeedbe3a4b86/tenor.gif?itemid=17092948'
]
lick = [
'https://media1.tenor.com/images/2ca4ca0d787ca3af4e27cdf71bb9796f/tenor.gif?itemid=15900645'
]
love = [
'https://media1.tenor.com/images/cf20ebeadcadcd54e6778dac16357644/tenor.gif?itemid=10805514'
]
pozegift = [
'https://i.imgur.com/xnHDSIb.jpg',
'https://i.imgur.com/uTrZDlC.jpg',
'https://i.imgur.com/fMgEDlZ.jpg',
'https://i.imgur.com/HZVKaYK.jpg',
'https://i.imgur.com/HvQnLpj.jpg',
'https://i.imgur.com/qRLPalh.jpg',
'https://i.imgur.com/fQaCCNF.jpg',
'https://i.imgur.com/BM8CoqI.jpg',
'https://i.imgur.com/bSTgzZj.jpg',
'https://i.imgur.com/bZOpa6H.jpg',
'https://i.imgur.com/xjHCbLq.jpg',
'https://i.imgur.com/pFn1b1H.jpg',
'https://i.imgur.com/wxA6Yhm.jpg',
'https://i.imgur.com/jw3ohim.jpg',
'https://i.imgur.com/cZOCcvO.jpg',
'https://i.imgur.com/dpDKiNh.jpg',
'https://i.imgur.com/MSmQjc2.jpg',
'https://i.imgur.com/8LXrQmy.jpg',
]
glumemafia = [
'bagameas pulan mata pleci la scoala cu 10lei an buzunar 5lei de drum 5 lei detigari trantimias pulan mata si ai figuri k ai jordani fake din targ si tricou armani luat de la turci k daca iti deschid sifonieru joak turci cu chinezi barbut',
'Cum plm sa iti ia mata telefonu adica dai un capac sa te stie de jupan',
'te lauzi ca stai la oras da tu stai in ultimu sat uitat de lume ciobanoaia cu 3 case si 2 se darama pisamas pe tn',
'Esti mare diva si ai 10k followeri pe insta da cand deschizi picioarele intreaba lumea cine a deschis punga de lotto cu cascaval',
'te dai mare fumator ca fumezi la narghilea si ai vape dar cand ti am zis de davidoff ai zis ca e ala cu ochelari din migos',
'Flexezi un tricou bape luat din obor cu 10 yang da il contactezi pe premieru chinei daca pui urechea la eticheta in rasa mati de saracie',
'cum frt nai auzit de adrian toma cel mai bun giungel wannabe de pa eune frt gen esti nub? :))))',
'gen cum morti mati sa te joci fortnite mai bine iesi afara si ti construiesti o casa ca si asa stai in pubela de gunoi :)))))))))',
'pui story ca mananci la restaurant meniuri scumpe si esti cu gagicatu mancati bn dar tie cand ti-am aratat prima oara pizza ai zis ca au scos astia de la rolex ceasuri cu salam pe el',
'ce corp ai zici ca e blenderu de facea reclama pe taraf la el',
'cand te am dus prima oara la kfc ai comandat parizer mentolat cu sos de lamaie',
'dai share la parazitii spui dalea cu cand soarele rasare am ochii injectati sau muie garda si dai share la poze cu maria si jointuri bai nebunule sa cada mie tot ce am pe casa de nu fumezi in spate la bloc cu batu ca daca afla mata aia descentrata iti fute o palma de singurul lucru pe care o sa il mai bagi in vene e perfuzia fututi morti mati ))))))',
'ho fa terminato cu fitele astea ca atunci cand te-am dus prima data la mc ai intrebat daca se poate manca cu mana',
'fa proasto te dai mare bad bici dar cand ti-am aratat h&m m-ai intrebat pe unde poti taia lemne',
'te crezi mare diva si iti faci poze pe masini si pe garduri da sa moara chilotii lu nelson daca te vede mata ca esti asa rebela iti fute un telefon nokia in cap de nu mai vezi orgoliul vreo 3 ani',
'fa andreio tiam dat felu 2 al meu la grdinia sa mnnci ca tiera foame siacu ai aruncat trandafiri fututen gura de stoarfa',
'Eu, Lorin Fortuna combatant ezoteric complex i corect privind din punct de vedere ezoteric prin rangul ezoteric precum i prin distinciile ezoterice care mi-au fost conferite de ctre conductori supremi abilitai, blestem ezoteric la nivelul maxim posibil la care dau dreptul rangul i distinctiile ezoterice care mi-au conferite menionate anterior. Blestem fr sfrit temporar n mod direct mpotriva fiinei colective superioare de tip civilizaie virtual numit: civilizaia virtual arahnidica tarantulara, androgina, neagr, emoional agresional civilizaional condus la nivel de conductor suprem de ctre fiina superioar androgin alctuit din: fiina individual superioar de gen masculin numit Satanos i fiina individual superioar de gen feminin numit Geea, pentru rul existenial comis mpotriva gruprii de civilizaie virtuale de tip gorilian individual neagresional civilizaional i autentic btinae n cadrul lumilor planetare ale planetei al crei lume planetare medie sunt integrate existenial cu precizarea c, rul existenial pentru care blestem civilizaia virtual pe care am numit-o anterior ultim ca civilizaie agresional civilizaional a fost comis n perioada temporal specific calendarului planetar cuprins ntre data de nceput n care s-a dat n funciune oficial prima baz civilizaional planetar n cadrul zonei existeniale a planetei a crei lume planetar medie sunt integrate existenial aferent i m refer la zona existenial n cauz i la concret la baza existenial civilizaional virtual planetar n cauza deci aferent civilizaiei virtuale pe care o blestem i pn n prezent.',
'fututi morti mati te dai mare smeker faci paneluri de samp da kand tiam zis de error_log ziceai sefu scuzama nam facut eu asa cv fututi morti mati olteanuadv',
'te dai mare futacios si mare fuckboy da singura fata careti zice so futi e reclama depe xnxx cu maria carei in apropierea ta',
'te dai bodybuilder ca tu faci sala sa pui pe tine da sami bag singur pulan cur ca dacati pui mana in sold zici ca esti cupa uefa esti nebun',
'cum sa te desparti de gagicata gen la inima mai ars dar tot nam sa te las',
'te dai mare smecher prin cluburi da cand era pe tv shaolin soudaun iti puneai manusa lu tac tu de sudura pe cap si ziceai ca e pumnu lu tedigong',
'Te dai mare ITst haker pula mea da nai mai trimis ss la nimeni de cnd i ai spart ecranu la tlf c i era ruine s nu se vad damia drumu n pipota matii',
'pai daten mm de pizda proasta, pui ss cu samsung health la instastory si ne arati cati pasi ai facut tu de la shaormerie pana acasa sau din pat pana la frigider, si te lauzi ca faci sport? sport e cand o sugi si nuti oboseste gura.',
'sa o fut pe mata in gura pana ii perforez laringele',
'Cum sati fie frica de fantome gen scubi dubi du unde esti tu',
'cand ti am aratat prima oara narghileaua ai crezut ca e pompa si ai scos mingea so umflam pt diseara la fotbal',
'ce nas ai zici ca e racheta lu Trump cu care bombardeaza Siria',
'daca esti scunda si folosesti expresia "sunt mai aproape de iad", nu daten mortii mati esti mai aproape sa-mi faci masaj la prostata cu gura din picioare',
'BAGAMIAS PULAN MORTI TAI DITULE AI CORPU ALA ZICI CAI AMBALAJ DE LA IKEA',
'cum sa nu sti nimic despre masini gen am creieru tdi cu motor de 6 mii ))))))',
'sa vedeti cioroilor, azi dimineata stateam linistit in pat si il gadilam pe fratimio in talpa, la care mama "macar asteapta pana se naste", gen cplm nu pot sa ma joc cu el',
'pray pt toti cioroi care lea fost inima ranita de puicute stei strong barbati mei',
'Ho nebunule ca groapa marianelor si mata sunt cele mai adanci puncte de pe planeta',
'te dai mare diva figuri de buftea cu iph 6 da daca conectez castile la buricu tau se aude balada foamei bass boosted',
'cum pulamea sa nadormi vere gen noapte buna somn usor sapte purici pun picior',
'comentezi de bataie dar te sponsorizeaza croco cu corpu ala fmm de stick',
'buna ziua muie la bozgori si o seara cat mai linistita sa aveti oameni buni',
'Baganeam pula n profii de matematic o vezi pe Ionela ca are curu mare i i pui 8 fututen gura si mie 5 luates in pula cu chelia ta',
'MAMA TA E ASA DE GRASA INCAT THANOS A BATUT DE 2 ORI DIN DEGETE SA O STEARGA DE PE PLANETA',
'esti frumoasa andreea da fara machiaj te striga lumea andrei pe strada',
'te dai mare smecher ca ai bani tu da dormi cu fratii tai pe rand in soba ca e frig afara pisa m as pe tn de sarantoc',
'vezi sa nu cazi in pumn baiatul meu ca poate te omori',
'Sa te fut in gura mort ca viu dai din picioare',
'Coaie te lauzi ca esti orasean ai vazut tranvaiu ai zis ca a fatat trenu copilu matii',
'ESTI ATAT DE URAT INCAT ATUNCI CAND PLANGI LACRIMILE SE INTALNESC LA CEAFA SA ITI OCOLEASCA FATA',
'Te dai mare culturist gen coaie ce spate am rup da sati scuipe unchiu Fester in ciorba mati de nu esti mai cocosat decat Rammus din lol in morti tai de ghertoi mars inapoi in papusoi',
'ma-ta aia curva cand imi vede pula zice "My precious" ca Gollum futu-ti rasa si neamu ma-tii de mamaligar',
'daca esti profesor si in timpul unei lucrari muti colegu ala mai bun din banca astfel incat ala mai prost sa nu poata copia meriti sa se prabuseasca pe tn si pe mata toate pulele pe care le a supt fieta sasi ia jordani la 3 milioane de pe olx',
'cand te am dus prima oara la pull&bear m ai intrebat unde i ursu',
'puneti poze cu bani pistoale si adidasi de la zanotti si valentino dar voi intrati in foame daca va scoate oferta de 5 lei combo de la mec',
'fmm ca te au dus astia la restaurant ca ai comandat ciorba si mancai cu furculita',
'am o dilema coaie, daca sperma are doar 7 calorii mata dc e obeza',
'Coaie ce prosti sunt straini cum plm sati dai 500-1000 eur pe un tricou guci cand in romania sunt la 10 sute 3 bucati ))))',
'Te lauzi ca tu ai geaca monclr daia scumpa si nu ai ca saraci tai de colegi de la pull and bear dar ai uitat ca anu trecut venei cu hanorac de la decathlon cu pelerina de ploaie fmm de nemancata',
'cand te-am dus prima data la orange m-ai intrebat unde-s portocalele fmm de agricultor',
'cand ti am aratat prima oara o shaorma ai zis ca de ce mananc clatite cu carne si cartofi',
'Te dai mare gigolo dar ti se scoala pula cand se apleaca ma-ta',
'ia st ma sami pun la instastory o poza cu bautura gen sa vada urmaritori mei ca ma respect beau vin la 9,5 lei ca pana atunci singurul alcol care lai gustat a fost tuica de la pomana cand sa imbatat mata de ai duso cu roaba acasa luavas in pula mari smecheri ca puneti 5 inji 10 sute sa beti bohoarca',
'Am facut o lista cu aia cu care nu sa futut mata:',
'dai check-in zi de zi la cinema pui descriere "Another day another movie" da sa moara Toni Montana daca te mint ca acasa inca mai ai antena lu tactu mare de la tara si prinzi 5 canale de tvr 1 in 5 stiluri diferite',
'te dai mare gamerita esti tot cu #pcmasterrace dar cand mai vazut ca ma joc fifa mai intrebat unde a disparut digisport de sus din colt a dracu ascilopata',
'usor cu atitudinea de babygirl pe net ca in realitate ai trezit krakenu cu ragaitu ala posedato',
'coaie cum sa nu sti cum sa ai grija de o tarantula gen lol pela coaie pela pula'
]
| 91.775641 | 1,661 | 0.777607 |
3a8f0982e03b38e05aa03eb45840308eeb8e3dc5 | 3,730 | py | Python | py_ti/helper_loops.py | tlpcap/tlp_ti | 8d72b316b332fd5e20785dbf19401883958c0666 | [
"MIT"
] | 7 | 2021-01-31T19:23:07.000Z | 2022-03-10T21:22:41.000Z | py_ti/helper_loops.py | tlpcap/tlp_ti | 8d72b316b332fd5e20785dbf19401883958c0666 | [
"MIT"
] | null | null | null | py_ti/helper_loops.py | tlpcap/tlp_ti | 8d72b316b332fd5e20785dbf19401883958c0666 | [
"MIT"
] | null | null | null | import numpy as np
from numba import jit
| 24.866667 | 77 | 0.507507 |
3a8feafe3391c0ddd2f78fb39a9371d4374c0a73 | 1,441 | py | Python | netlog_viewer/netlog_viewer_build/netlog_viewer_dev_server_config.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | netlog_viewer/netlog_viewer_build/netlog_viewer_dev_server_config.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | netlog_viewer/netlog_viewer_build/netlog_viewer_dev_server_config.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z | # Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import netlog_viewer_project
import webapp2
from webapp2 import Route
| 26.2 | 72 | 0.727967 |
3a9084ba87c0f5c49b0d1b1f5827e460b297b88e | 3,991 | py | Python | src/app.py | eug/cron-rest | 2d0a2e0d0cf0cb464b71293802b85ac7076f9944 | [
"MIT"
] | 3 | 2021-05-10T13:42:59.000Z | 2022-03-28T02:07:23.000Z | src/app.py | eug/cron-rest | 2d0a2e0d0cf0cb464b71293802b85ac7076f9944 | [
"MIT"
] | null | null | null | src/app.py | eug/cron-rest | 2d0a2e0d0cf0cb464b71293802b85ac7076f9944 | [
"MIT"
] | 4 | 2018-05-12T13:43:00.000Z | 2021-10-30T01:23:00.000Z | # -*- coding: utf-8 -*-
import json
import os
from crontab import CronTab
from flask import Flask, request
from pathlib import Path
from pretty_cron import prettify_cron
# Flask application object for this module.
app = Flask(__name__)
# Start Flask's built-in development server when the file is run directly.
if __name__ == '__main__':
    app.run()
| 26.256579 | 76 | 0.540466 |
3a90d1f158c36003df58478dbdda2afff682b6b2 | 1,196 | py | Python | 2017/examples/05_randomization.py | limunan/stanford-tensorflow-tutorials | 51e53daaa2a32cfe7a1966f060b28dbbd081791c | [
"MIT"
] | 9,180 | 2017-07-27T23:43:41.000Z | 2022-03-29T17:10:14.000Z | 2017/examples/05_randomization.py | Nianze/stanford-tensorflow-tutorials | 51e53daaa2a32cfe7a1966f060b28dbbd081791c | [
"MIT"
] | 86 | 2017-08-04T12:38:38.000Z | 2020-12-09T03:34:02.000Z | 2017/examples/05_randomization.py | joshosu/stanford-tensorflow-tutorials | b16899102bf07964a15494452a2e91c1b9f88e46 | [
"MIT"
] | 4,115 | 2017-07-28T06:53:12.000Z | 2022-03-23T12:36:55.000Z | """ Examples to demonstrate ops level randomization
Author: Chip Huyen
Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research"
cs20si.stanford.edu
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
# Example 1: session is the thing that keeps track of random state
# (the two run() calls below print different values: the op's random
# state advances across run() calls within one session).
c = tf.random_uniform([], -10, 10, seed=2)
with tf.Session() as sess:
    print(sess.run(c)) # >> 3.57493
    print(sess.run(c)) # >> -5.97319
# Example 2: each new session will start the random state all over again.
# Both sessions print the same first value -- the op seed restarts the
# sequence per session.
c = tf.random_uniform([], -10, 10, seed=2)
with tf.Session() as sess:
    print(sess.run(c)) # >> 3.57493
with tf.Session() as sess:
    print(sess.run(c)) # >> 3.57493
# Example 3: with operation level random seed, each op keeps its own seed.
# Two distinct ops with the same op-level seed produce the same value.
c = tf.random_uniform([], -10, 10, seed=2)
d = tf.random_uniform([], -10, 10, seed=2)
with tf.Session() as sess:
    print(sess.run(c)) # >> 3.57493
    print(sess.run(d)) # >> 3.57493
# Example 4: graph level random seed
# With only a graph seed set (no op seeds), the two ops print *different*
# values -- NOTE(review): this suggests the values depend on op creation
# order, so do not restructure these statements without re-checking output.
tf.set_random_seed(2)
c = tf.random_uniform([], -10, 10)
d = tf.random_uniform([], -10, 10)
with tf.Session() as sess:
    print(sess.run(c)) # >> 9.12393
    print(sess.run(d)) # >> -4.53404
3a91c8f71ed1bbfb503d86a5235097fd88dfae4a | 5,651 | py | Python | python-CSDN博客爬虫/CSDN_article/utils/myutils.py | wangchuanli001/Project-experience | b563c5c3afc07c913c2e1fd25dff41c70533f8de | [
"Apache-2.0"
] | 12 | 2019-12-07T01:44:55.000Z | 2022-01-27T14:13:30.000Z | python-CSDN博客爬虫/CSDN_article/utils/myutils.py | hujiese/Project-experience | b563c5c3afc07c913c2e1fd25dff41c70533f8de | [
"Apache-2.0"
] | 23 | 2020-05-23T03:56:33.000Z | 2022-02-28T07:54:45.000Z | python-CSDN博客爬虫/CSDN_article/utils/myutils.py | hujiese/Project-experience | b563c5c3afc07c913c2e1fd25dff41c70533f8de | [
"Apache-2.0"
] | 7 | 2019-12-20T04:48:56.000Z | 2021-11-19T02:23:45.000Z | # -*- coding: utf-8 -*-
'''
'''
import time
import MySQLdb
import jieba
import ast
import random, sys
#
import requests
sys.setrecursionlimit(1000000)
#
def getproxyip(ip_file):
    """Pick one random proxy entry from *ip_file*.

    The file holds one Python-literal proxy mapping per line, e.g.
    ``{'http': 'http://1.2.3.4:80'}``.  Blank lines are skipped, so a
    trailing newline no longer produces an empty candidate that would
    crash ``ast.literal_eval``.

    :param ip_file: path to the UTF-8 proxy list file.
    :return: the parsed proxy mapping of a randomly chosen line.
    """
    # `with` guarantees the handle is closed even if parsing raises,
    # unlike the previous manual open()/close() pair.
    with open(ip_file, 'r', encoding='utf-8') as fo:
        candidates = [line.strip() for line in fo if line.strip()]
    return ast.literal_eval(random.choice(candidates))
#
'''
58.218.205.40:7754
221.229.196.234:6987
58.218.205.51:7038
58.218.205.57:2513
58.218.205.55:7817
58.218.205.52:5109
'''
# Static proxy endpoints ("ip:port") -- not referenced by the code visible
# in this chunk; kept as data.
ip_port = ["180.97.250.157:5147", "58.218.205.39:7893", "180.97.250.158:4107", "221.229.196.212:9311",
           "221.229.196.212:6066", "221.229.196.192:6545",
           "221.229.196.231:9975", "221.229.196.212:4953", "221.229.196.192:2133"]
# Dynamic HTTP proxy endpoint (host name indicates an Abuyun proxy service).
proxyHost = "http-dyn.abuyun.com"
proxyPort = "9020"
# NOTE(review): credentials are hard-coded in source control -- they should
# be moved to environment variables / a secrets store and rotated.
proxyUser = "HP48W550C1X873PD"
proxyPass = "FED1B0BB31CE94A3"
# Full proxy URL with basic-auth credentials interpolated.
proxyMeta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
    "host": proxyHost,
    "port": proxyPort,
    "user": proxyUser,
    "pass": proxyPass,
}
#
#
if __name__ == '__main__':
    # Smoke test: pick one random entry from the local proxy list and show it.
    print("test")
    # `with` replaces the manual open()/close() pair so the handle is
    # released even if read() raises.
    with open("proxy_ip.txt", 'r', encoding='utf-8') as fo:
        port_list = fo.read().split("\n")
    proxies = {
        # random.choice(x) is the idiomatic, equivalent form of
        # random.choices(x)[0] for a single draw.
        "https": random.choice(port_list),
    }
    print(proxies)
| 33.838323 | 121 | 0.599186 |
3a92948a079a2d3f3db1feb98db4697c887b4594 | 140 | py | Python | Contest/DDCC2020-qual/b/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | Contest/DDCC2020-qual/b/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | Contest/DDCC2020-qual/b/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
from numpy import*
# Read all of stdin: the first integer is n, the rest are the n values.
n, *a = map(int, open(0).read().split())
a = array(a)
# cumsum(a) holds every prefix sum p; |p - total/2| * 2 == |2*p - total|,
# so this prints the minimum |2*prefix_sum - total| over all prefixes.
print(int(min(abs(cumsum(a)-(sum(a)/2)))*2))
3a948fad21b8a67c7efb20bb30784138fb309c60 | 11,836 | py | Python | oanda-api-v20-master/tests/test_contrib_orders.py | cdibble2011/OANDA | 68327d6d65dd92952d7a1dc49fe29efca766d900 | [
"MIT"
] | 389 | 2016-07-22T17:19:17.000Z | 2022-03-18T21:14:55.000Z | oanda-api-v20-master/tests/test_contrib_orders.py | cdibble2011/OANDA | 68327d6d65dd92952d7a1dc49fe29efca766d900 | [
"MIT"
] | 162 | 2016-10-04T18:17:48.000Z | 2021-12-22T10:53:54.000Z | oanda-api-v20-master/tests/test_contrib_orders.py | cdibble2011/OANDA | 68327d6d65dd92952d7a1dc49fe29efca766d900 | [
"MIT"
] | 120 | 2016-08-08T18:52:41.000Z | 2022-03-24T06:53:38.000Z | import unittest
try:
from nose_parameterized import parameterized
except:
print("*** Please install 'nose_parameterized' to run these tests ***")
exit(0)
import oandapyV20.contrib.requests as req
import oandapyV20.definitions.orders as OD
import oandapyV20.types as types
if __name__ == "__main__":
unittest.main()
| 30.193878 | 75 | 0.468233 |
3a951812ae9cbf0b5d1410cb7713acbb37c91294 | 371 | py | Python | ci_screen/automators/job_item.py | garyjohnson/ci_screen_2 | ea6a0ebd686148bb8977bd2d842e33e71fc2c3f0 | [
"MIT"
] | null | null | null | ci_screen/automators/job_item.py | garyjohnson/ci_screen_2 | ea6a0ebd686148bb8977bd2d842e33e71fc2c3f0 | [
"MIT"
] | null | null | null | ci_screen/automators/job_item.py | garyjohnson/ci_screen_2 | ea6a0ebd686148bb8977bd2d842e33e71fc2c3f0 | [
"MIT"
] | 1 | 2018-08-10T15:04:24.000Z | 2018-08-10T15:04:24.000Z | from kvaut.automator.custom_automator import CustomAutomator
| 30.916667 | 97 | 0.727763 |
3a95ae559435a30a68aba572eee4bea130369136 | 12,225 | py | Python | models/context.py | Hilbert70403/Infrared-Small-Target | 0b7bddc13ed3b2362735ea858af6e7d18d4374cd | [
"MIT"
] | 21 | 2021-11-08T08:06:36.000Z | 2022-03-26T14:22:35.000Z | models/context.py | Hilbert70403/Infrared-Small-Target | 0b7bddc13ed3b2362735ea858af6e7d18d4374cd | [
"MIT"
] | 4 | 2022-01-19T11:37:13.000Z | 2022-02-28T07:45:19.000Z | models/context.py | Hilbert70403/Infrared-Small-Target | 0b7bddc13ed3b2362735ea858af6e7d18d4374cd | [
"MIT"
] | 9 | 2021-11-15T09:24:41.000Z | 2022-03-24T08:11:00.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['NonLocalBlock', 'GCA_Channel', 'GCA_Element', 'AGCB_Element', 'AGCB_Patch', 'CPM']
| 35.641399 | 109 | 0.558937 |
3a96f177bdadd6a1d79e415e623de1950e19535a | 17,315 | py | Python | build/fbcode_builder/getdeps/cargo.py | dmitryvinn/watchman | 668d3536031acd9b65950c29d6e956bb42b972bb | [
"MIT"
] | null | null | null | build/fbcode_builder/getdeps/cargo.py | dmitryvinn/watchman | 668d3536031acd9b65950c29d6e956bb42b972bb | [
"MIT"
] | null | null | null | build/fbcode_builder/getdeps/cargo.py | dmitryvinn/watchman | 668d3536031acd9b65950c29d6e956bb42b972bb | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import re
import shutil
from .builder import BuilderBase
| 40.081019 | 119 | 0.551372 |
3a97bee3b980525a2f4756251f4575984854cc03 | 500 | py | Python | setup.py | EricCWWong/GSimulator | aee7dc81d2a709beb94c02ffc8a288cd7ba06747 | [
"MIT"
] | null | null | null | setup.py | EricCWWong/GSimulator | aee7dc81d2a709beb94c02ffc8a288cd7ba06747 | [
"MIT"
] | 7 | 2020-02-01T02:19:49.000Z | 2020-07-10T12:49:28.000Z | setup.py | EricCWWong/GSimulator | aee7dc81d2a709beb94c02ffc8a288cd7ba06747 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
    name="GSimulator",
    packages=find_packages(exclude=['*test']),
    version="0.1.1",
    author="Eric Wong",
    # BUG FIX: the previous backslash line continuation embedded the next
    # line's leading indentation into the description string; implicit
    # string concatenation yields a clean, single-spaced sentence.
    description=(
        'This package allows user to '
        'simulate conductance of quantum wires'
    ),
    author_email='c.wing.wong.19@ucl.ac.uk',
    install_requires=['numpy', 'matplotlib', 'prettytable', 'qutip', 'tqdm'],
    entry_points={
        'console_scripts': [
            'gsimulator = GSimulator.command:process'
        ]
    },
)
| 29.411765 | 77 | 0.644 |
3a98425fabf2f4efae0310710f9d76f3fbba768a | 3,995 | py | Python | donn/layers.py | sharan-amutharasu/DONN | c14557e8ef57f3e1c1b73c1fa98cb6ba19a82904 | [
"MIT"
] | 3 | 2018-08-17T05:31:25.000Z | 2020-02-13T19:43:02.000Z | tests/donn/layers.py | sharan-amutharasu/DONN | c14557e8ef57f3e1c1b73c1fa98cb6ba19a82904 | [
"MIT"
] | 1 | 2018-11-19T06:16:50.000Z | 2018-11-19T06:17:53.000Z | tests/donn/layers.py | sharan-amutharasu/DONN | c14557e8ef57f3e1c1b73c1fa98cb6ba19a82904 | [
"MIT"
] | 2 | 2018-12-06T05:01:07.000Z | 2018-12-06T11:59:47.000Z |
# coding: utf-8
# In[4]:
from keras.layers import Activation, Dense, Dropout
from keras.layers.advanced_activations import LeakyReLU, PReLU, ThresholdedReLU, ELU
from keras import regularizers
# In[5]:
def get_activation_layer(activation):
    """Return a freshly constructed Keras activation layer for *activation*.

    Advanced activations map to their dedicated layer classes; any other
    name is wrapped in a generic ``Activation`` layer.
    """
    advanced_layers = {
        'ELU': ELU,
        'LeakyReLU': LeakyReLU,
        'ThresholdedReLU': ThresholdedReLU,
        'PReLU': PReLU,
    }
    layer_cls = advanced_layers.get(activation)
    if layer_cls is not None:
        return layer_cls()
    return Activation(activation)
# In[4]:
# if self.layer_type == "LSTM":
# units = params[str(self.layer_type + "_layer_" + str(count) + "_units")]
# count_LSTM = layers.count("LSTM")
# if count < count_LSTM:
# return_sequences = True
# else:
# return_sequences = False
# if input_dim is not None:
# model.add(LSTM(units, input_dim=input_dim, recurrent_activation=params["LSTM_recurrent_activation_function"], return_sequences=return_sequences))
# else:
# model.add(LSTM(units, recurrent_activation=params["LSTM_recurrent_activation_function"], return_sequences=return_sequences))
# return model
| 37.336449 | 163 | 0.606758 |
3a9e23b66225e7784226027da5b0c2acadfbb17f | 4,905 | py | Python | rurina5/input/map.py | TeaCondemns/rurina | 43725ebea5872953125271a9abb300a4e3a80a64 | [
"MIT"
] | null | null | null | rurina5/input/map.py | TeaCondemns/rurina | 43725ebea5872953125271a9abb300a4e3a80a64 | [
"MIT"
] | null | null | null | rurina5/input/map.py | TeaCondemns/rurina | 43725ebea5872953125271a9abb300a4e3a80a64 | [
"MIT"
] | null | null | null | from typing import Union, Sequence
import event
_actions = {}
"""_actions = {
'action_name': {
'event1event2':
{
'event1': ...,
'event2': ...
}
}
}"""
__all__ = (
'addaction',
'removeaction',
'haveaction',
'actionstatus',
'last_actionstatus',
'setevent',
'remove_event',
'have_event',
'eventstatus',
'last_eventstatus',
'flip',
)
| 29.196429 | 117 | 0.648114 |
3a9f119bf4f058c5f85a03cbf6f4da2b349b8dd5 | 1,604 | py | Python | data/ABC/filter_out_tiny_models.py | YoungXIAO13/6DPoseEstimationDatasets | b9cb1d9842870860a15bf3cf600cdfb68d1e195e | [
"MIT"
] | 383 | 2019-09-03T15:29:22.000Z | 2022-03-28T02:01:15.000Z | data/ABC/filter_out_tiny_models.py | Fang-Haoshu/ObjectPoseEstimationSummary | 2a11797e6b01e1820105740fcaeb7c049094c57f | [
"MIT"
] | 5 | 2019-10-18T13:04:07.000Z | 2021-09-29T05:26:52.000Z | data/ABC/filter_out_tiny_models.py | Fang-Haoshu/ObjectPoseEstimationSummary | 2a11797e6b01e1820105740fcaeb7c049094c57f | [
"MIT"
] | 63 | 2019-09-17T12:13:51.000Z | 2022-03-28T03:06:05.000Z | import os
from os.path import join, getsize
from PIL import Image
from tqdm import tqdm
import numpy as np
import argparse
# Scan every rendered model under --views, measure its .obj size and the
# bounding-box aspect ratio / foreground occupancy of each 'nocs' view,
# and append one CSV row per model to <dataset_dir>/<model>.txt.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_dir', type=str, help='dataset directory')
parser.add_argument('--model', type=str, default='abc_0000', help='subdirectory containing obj files')
parser.add_argument('--views', type=str, default='multiviews', help='subdirectory containing multiviews')
args = parser.parse_args()

obj_dir = join(args.dataset_dir, args.model)
view_dir = join(args.dataset_dir, args.views)
model_names = sorted(os.listdir(view_dir))

csv_file = join(args.dataset_dir, '{}.txt'.format(args.model))
with open(csv_file, 'w') as f:
    f.write('model_name,size,ratio_min,ratio_max,occupy_min,occupy_max\n')

for model_name in tqdm(model_names):
    # Mesh size in whole MiB.
    size = int(getsize(join(obj_dir, '{}.obj'.format(model_name))) / (2 ** 20))
    img_dir = join(view_dir, model_name, 'nocs')
    images = os.listdir(img_dir)
    ratio = []   # per-view height/width of the non-empty bounding box
    occupy = []  # per-view fraction of non-black pixels
    for img in images:
        try:
            # Context manager closes the image file handle (the original
            # leaked one handle per view).
            with Image.open(join(img_dir, img)) as rgb:
                w, h = rgb.size
                # getbbox() returns None for an all-black image; the
                # unpack then raises TypeError, handled below.
                left, upper, right, lower = rgb.getbbox()
                ratio.append((lower - upper) / (right - left))
                occupy.append(np.sum(np.array(rgb.convert('L')) != 0) / (w * h))
        except TypeError:
            ratio.append(0)
            occupy.append(0)
    if not ratio:
        # A model with no rendered views would crash np.min([]); record
        # zeros instead, matching the all-empty-view case.
        ratio = [0]
        occupy = [0]
    # Re-open in append mode per model so partial results survive a crash.
    with open(csv_file, 'a') as f:
        f.write(model_name + ',' + str(size) + ',' + str(np.min(ratio)) + ',' + str(np.max(ratio)) + ',' +
                str(np.min(occupy)) + ',' + str(np.max(occupy)) + '\n')
3aa041de8b903df622c3ee51ddf1f6842ee18d8c | 59 | py | Python | perception/navigator_vision/navigator_vision/__init__.py | czk100/NaviGator | c078c68768c1df4ad48c4c9a60a8c0bf4bdab63a | [
"MIT"
] | null | null | null | perception/navigator_vision/navigator_vision/__init__.py | czk100/NaviGator | c078c68768c1df4ad48c4c9a60a8c0bf4bdab63a | [
"MIT"
] | null | null | null | perception/navigator_vision/navigator_vision/__init__.py | czk100/NaviGator | c078c68768c1df4ad48c4c9a60a8c0bf4bdab63a | [
"MIT"
] | null | null | null | from scan_the_code_classifier import ScanTheCodeClassifier
| 29.5 | 58 | 0.932203 |
3aa10622900b7fd3873b3fb7ab47170cdb7c2440 | 2,959 | py | Python | assignments/06-python-first-lines/first_lines.py | patarajarina/biosys-analytics | a5e8845211797364ec6f7f8679911ed3b5312887 | [
"MIT"
] | null | null | null | assignments/06-python-first-lines/first_lines.py | patarajarina/biosys-analytics | a5e8845211797364ec6f7f8679911ed3b5312887 | [
"MIT"
] | null | null | null | assignments/06-python-first-lines/first_lines.py | patarajarina/biosys-analytics | a5e8845211797364ec6f7f8679911ed3b5312887 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Author : patarajarina
Date : 2019-02-25
Purpose: Rock the Casbah
"""
import argparse
import sys
import os
# --------------------------------------------------
def get_args():
    """Parse and return the command-line arguments.

    Accepts one or more directory names plus an optional output width.
    """
    parser = argparse.ArgumentParser(
        description='Argparse Python script',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('positional',
                        metavar='DIR',
                        nargs='+',
                        help='A positional argument')

    parser.add_argument('-w', '--width',
                        metavar='int',
                        type=int,
                        default=50,
                        help='A named integer argument')

    return parser.parse_args()
# --------------------------------------------------
def warn(msg):
    """Write *msg* to STDERR, followed by a newline."""
    sys.stderr.write('{}\n'.format(msg))
# --------------------------------------------------
def die(msg='Something bad happened'):
    """Report *msg* on STDERR via warn() and abort with exit status 1."""
    warn(msg)
    raise SystemExit(1)
# --------------------------------------------------
def main():
    """For each DIR argument, list every file with its first line.

    Prints the directory name, then one line per contained file of the
    form ``<first line> <dots> <filename>``, dot-padded toward --width
    characters and sorted by first line.  Arguments that are not
    directories are reported on STDERR.
    """
    args = get_args()
    width = args.width

    for dirname in args.positional:
        # Normalize to a trailing slash so the open() path join works.
        if not dirname.endswith('/'):
            dirname = dirname + '/'

        if not os.path.isdir(dirname):
            # Report without the slash we just appended.
            print('"{}" is not a directory'.format(dirname[:-1]),
                  file=sys.stderr)
            continue

        print(dirname[:-1])

        # Map first line -> filename.  NOTE(review): files sharing the
        # same first line overwrite each other here — same as the
        # original dict logic; confirm that is intended.
        out = {}
        for eachfile in os.listdir(dirname):
            # with-statement closes each handle (the original leaked one
            # open file per entry).
            with open(dirname + eachfile, 'r') as fh:
                out[fh.readline().strip()] = eachfile

        for keyline, valfile in sorted(out.items()):
            # At least one dot; more to pad the row out to `width` chars
            # (replaces the original one-char-at-a-time append loop).
            dots = '.' * max(1, width - len(keyline) - len(valfile))
            print('{} {} {}'.format(keyline, dots, valfile))
if __name__ == '__main__':
main()
| 26.9 | 82 | 0.460967 |