hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c2b13350784cb8342c6137b9b2e68b9d9cf9a32f | 46,745 | py | Python | covidatx/plot.py | Mlograda/covidatx | 336bd5874ec5c6915f621ff3960ea10f70f6319c | [
"MIT"
] | null | null | null | covidatx/plot.py | Mlograda/covidatx | 336bd5874ec5c6915f621ff3960ea10f70f6319c | [
"MIT"
] | null | null | null | covidatx/plot.py | Mlograda/covidatx | 336bd5874ec5c6915f621ff3960ea10f70f6319c | [
"MIT"
] | null | null | null | from .data import CovidData
import datetime as dt
from matplotlib.offsetbox import AnchoredText
import pandas as pd
import seaborn as sns
import geopandas as gpd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def pan_duration(date):
    """Return the duration in days of the pandemic.

    As calculated from the gov.uk API: the oldest date entry (last in the
    sequence) is subtracted from the newest one (first in the sequence).

    Args:
        date (datetime): DataFrame column (i.e Series) containing date
            field as downloaded from the gov.uk API by get_national_data()
            method from CovidData Class.

    Returns:
        datetime: Duration of pandemic in days as datetime object.
    """
    newest, oldest = date[0], date[-1]
    return (newest - oldest).days
def validate_input(df):
    """Check that input into the plotting functions is of the correct type.

    Args:
        df (Pandas DataFrame): this is intended to be the plotting parameter

    Raises:
        TypeError: if parameter is not a DataFrame
    """
    # The previously commented-out column/schema check was never enabled and
    # rejected otherwise-valid frames; the type check below is the only
    # contract callers rely on, so the dead code has been removed.
    if not isinstance(df, pd.DataFrame):
        raise TypeError('Parameter must be DataFrame, use get_regional_data'
                        + ' method from CovidData class.')
def my_path():
    """Locate the packaged ``geo_data`` directory at module level.

    Returns:
        pathlib.Path: absolute path of the ``geo_data`` folder that sits
        next to this module (holds the shape files used by the map plots).
    """
    from pathlib import Path
    return Path(__file__).resolve().parent / 'geo_data'
def daily_case_plot(df, pan_duration=pan_duration, save=False):
    """Create a matplotlib plot of case numbers in the UK.

    Calculated over the duration of the pandemic. Displays text information
    giving the most recent daily number, the highest daily number and the
    date recorded, the total cumulative number of cases and the duration of
    the pandemic in days.

    Args:
        df (DataFrame): containing covid data retrieved from CovidData
            class using get_national_data() or get_UK_data() method.
        pan_duration (function, optional): Defaults to pan_duration.
        save (bool, optional): set True to save plot. Defaults to False.

    Returns:
        - Matplotlib plot, styled using matplotlib template 'ggplot'
    """
    # Series to plot; index 0 is the most recent entry from the API
    cases = df['case_newCases'].to_list()
    date = df['date'].to_list()
    cumulative = df['case_cumulativeCases'].to_list()
    # Find date of highest number of daily cases
    high, arg_high = max(cases), cases.index(max(cases))
    high_date = date[arg_high].strftime('%d %b %Y')
    duration = pan_duration(date=date)
    # Create matplotlib figure and specify size; set the style once
    # (previously plt.style.use('ggplot') was called three times)
    fig = plt.figure(figsize=(12, 10))
    plt.style.use('ggplot')
    ax = fig.add_subplot()
    # Plot variables
    ax.plot(date, cases)
    # Style and label plot
    ax.set_xlabel('Date')
    ax.set_ylabel('Cases')
    ax.fill_between(date, cases,
                    alpha=0.3)
    ax.set_title('Number of people who tested positive for Covid-19 (UK)',
                 fontsize=18)
    at = AnchoredText(f"Most recent new cases\n{cases[0]:,.0f}\
    \nMax new cases\n{high:,.0f}: {high_date}\
    \nCumulative cases\n{cumulative[0]:,.0f}\
    \nPandemic duration\n{duration} days",
                      prop=dict(size=16), frameon=True, loc='upper left')
    at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
    ax.add_artist(at)
    ax.annotate('Source: gov.uk https://api.coronavirus.data.gov.uk/v1/data',
                xy=(0.25, 0.0175), xycoords='figure fraction',
                fontsize=12, color='#555555')
    # Save before show so the figure is not blank on some backends
    if save:
        plt.savefig(f"{date[0].strftime('%Y-%m-%d')}-case_numbers_plot")
    plt.show()
def regional_plot_cases(save=False):
    """Plot regional case numbers on a map of the UK.

    Function collects data using CovidData get_regional_data method.

    Args:
        save (bool, optional): If true will save plot. Defaults to False.

    Returns:
        Plot of regional case numbers on map of UK

    Raises:
        FileNotFoundError: if the geo_data shape file cannot be read.
    """
    # Collect data: English regions plus each devolved nation
    regions = CovidData().get_regional_data()
    scotland = CovidData(nation='scotland').get_national_data()
    wales = CovidData(nation='wales').get_national_data()
    ni = CovidData(nation='northern ireland').get_national_data()
    # Regional data names the daily count differently; align the column
    regions = regions.assign(case_newCases=regions['cases_newDaily'])
    # Set date to plot (index 0 is the most recent regional entry)
    date_selector = regions['date'][0]
    regions_date = regions.loc[regions['date'] == date_selector]
    scotland_date = \
        scotland.loc[scotland['date'] == date_selector,
                     ['date', 'name', 'case_newCases']]
    wales_date = wales.loc[wales['date'] == date_selector,
                           ['date', 'name', 'case_newCases']]
    ni_date = ni.loc[ni['date'] == date_selector,
                     ['date', 'name', 'case_newCases']]
    # Combine regional data into single dataframe
    final_df = pd.concat([regions_date, scotland_date, wales_date, ni_date],
                         axis=0)
    file_path = my_path() / 'NUTS_Level_1_(January_2018)_Boundaries.shp'
    try:
        # Read shape file
        geo_df = gpd.read_file(file_path)
    except Exception as err:
        # Previously a bare except that printed and fell through, which then
        # crashed with NameError on geo_df below. Fail loudly instead.
        raise FileNotFoundError(
            'Ensure you have imported geo_data sub-folder') from err
    # Shape file uses long-form region names; align with API names
    geo_df['nuts118nm'] = \
        geo_df['nuts118nm'].replace(['North East (England)',
                                     'North West (England)',
                                     'East Midlands (England)',
                                     'West Midlands (England)',
                                     'South East (England)',
                                     'South West (England)'],
                                    ['North East', 'North West',
                                     'East Midlands', 'West Midlands',
                                     'South East', 'South West'])
    # Attach covid figures to each geometry by region name
    merged = geo_df.merge(final_df, how='left', left_on="nuts118nm",
                          right_on="name")
    # Column to plot
    feature = 'case_newCases'
    # Plot range
    feature_min, feature_max = merged['case_newCases'].min(), \
        merged['case_newCases'].max()
    # Create plot
    fig, ax = plt.subplots(1, figsize=(12, 10))
    # Set style and labels
    ax.axis('off')
    ax.set_title(f'Number of new cases per region {date_selector}',
                 fontdict={'fontsize': '18', 'fontweight': '3'})
    ax.annotate('Source: gov.uk'
                + ' https://api.coronavirus.data.gov.uk/v1/data',
                xy=(0.25, .05), xycoords='figure fraction',
                fontsize=12, color='#555555')
    # Create colorbar
    sm = plt.cm.ScalarMappable(cmap='Reds',
                               norm=plt.Normalize(vmin=feature_min,
                                                  vmax=feature_max))
    fig.colorbar(sm)
    # Create map
    merged.plot(column=feature, cmap='Reds', linewidth=0.8, ax=ax,
                edgecolor='0.8')
    plt.show()
    if save:
        # Save the existing figure; previously the map was redrawn onto the
        # same axes just to obtain a handle for savefig.
        fig.savefig(f'{date_selector}-regional_cases_plot')
def regional_plot_rate(save=False):
    """Plot regional case rate per 100,000 on a map of the UK.

    Function collects data using CovidData get_regional_data method.

    Args:
        save (bool, optional): If true will save plot. Defaults to False.

    Returns:
        Plot of regional case rate on map of UK.
    """
    # Collect data: English regions plus each devolved nation
    regions = CovidData().get_regional_data()
    scotland = CovidData(nation='scotland').get_national_data()
    wales = CovidData(nation='wales').get_national_data()
    ni = CovidData(nation='northern ireland').get_national_data()
    # Set date to plot
    # NOTE(review): index 5 picks the date six entries back -- presumably
    # because the most recent rate figures are incomplete; confirm.
    date_selector = regions['date'][5]
    regions_date = regions.loc[regions['date'] == date_selector]
    scotland_date = scotland.loc[scotland['date'] == date_selector,
                                 ['date', 'name', 'case_rate']]
    wales_date = wales.loc[wales['date'] == date_selector,
                           ['date', 'name', 'case_rate']]
    ni_date = ni.loc[ni['date'] == date_selector,
                     ['date', 'name', 'case_rate']]
    # Combine regional data into single dataframe
    final_df = pd.concat([regions_date, scotland_date, wales_date, ni_date],
                         axis=0)
    file_path = my_path() / 'NUTS_Level_1_(January_2018)_Boundaries.shp'
    # Check required file exists
    try:
        # Read shape file
        geo_df = gpd.read_file(file_path)
    except:  # TODO(review): bare except -- on failure geo_df stays undefined
             # and the replace() below raises NameError; narrow and re-raise
        print('Ensure you have imported geo_data sub-folder')
    # Shape file uses long-form region names; align with API names
    geo_df['nuts118nm'] = \
        geo_df['nuts118nm'].replace(['North East (England)',
                                     'North West (England)',
                                     'East Midlands (England)',
                                     'West Midlands (England)',
                                     'South East (England)',
                                     'South West (England)'],
                                    ['North East', 'North West',
                                     'East Midlands', 'West Midlands',
                                     'South East', 'South West'])
    # Attach covid figures to each geometry by region name
    merged = geo_df.merge(final_df, how='left', left_on="nuts118nm",
                          right_on="name")
    # Column to plot
    feature = 'case_rate'
    # Plot range
    feature_min, feature_max = merged['case_rate'].min(),\
        merged['case_rate'].max()
    # Create plot
    fig, ax = plt.subplots(1, figsize=(12, 10))
    # Set style and labels
    ax.axis('off')
    ax.set_title('Regional rate per 100,000 (new cases)',
                 fontdict={'fontsize': '20', 'fontweight': '3'})
    ax.annotate('Source: gov.uk'
                + ' https://api.coronavirus.data.gov.uk/v1/data',
                xy=(0.25, .05), xycoords='figure fraction',
                fontsize=12, color='#555555')
    # Create colorbar
    sm = plt.cm.ScalarMappable(cmap='Reds',
                               norm=plt.Normalize(vmin=feature_min,
                                                  vmax=feature_max))
    fig.colorbar(sm)
    # Create map
    merged.plot(column=feature, cmap='Reds', linewidth=0.8, ax=ax,
                edgecolor='0.8');
    plt.show()
    if save:
        # NOTE(review): redraws the map onto the same axes purely to obtain
        # a handle for savefig; fig.savefig would avoid the second draw
        image = merged.plot(column=feature, cmap='Reds', linewidth=0.8,
                            ax=ax, edgecolor='0.8');
        image.figure.savefig(f'{date_selector}-regional_rate_plot')
def heatmap_cases(df):
    """Create heatmap of case numbers for duration of pandemic.

    Args:
        df (DataFrame): Covid case data retrieved by calling CovidData
            class method.

    Returns:
        Seaborn heatmap plot of case numbers for each day of the pandemic.
    """
    dates = df['date'].to_list()
    counts = df['case_newCases'].to_list()
    # Date-indexed frame with one row per day of the pandemic
    heat_df = pd.DataFrame({'date': dates, 'cases': counts}, index=dates)
    # Break the index into calendar components for the pivot below
    heat_df['year'] = heat_df.index.year
    heat_df["month"] = heat_df.index.month
    heat_df['day'] = heat_df.index.day
    # Wide layout: one row per (year, month), one column per day-of-month
    df_wide = heat_df.groupby(["year", "month", "day"])["cases"].sum().unstack()
    sns.set(rc={"figure.figsize": (12, 10)})
    # Reversed colormap so that darker cells mean higher counts
    ax = sns.heatmap(df_wide, cmap=sns.cm.rocket_r)
    ax.set_title('Heatmap of daily cases since start of pandemic',
                 fontsize=20)
    ax.annotate('Source: gov.uk https://api.coronavirus.data.gov.uk/v1/data',
                xy=(0.25, 0.01), xycoords='figure fraction',
                fontsize=12, color='#555555')
    plt.show()
def local_rate_plot(save=False):
    """Plot local case rate per 100,000 on a map of the UK.

    Function collects data using CovidData get_regional_data method.

    Args:
        save (bool, optional): If true will save plot. Defaults to False.

    Returns:
        Plot of local case rate on map of UK
    """
    # Find latest usable date from the regional feed
    # NOTE(review): index 5 picks the date six entries back -- presumably
    # because the most recent rate figures are incomplete; confirm.
    recent_date = CovidData().get_regional_data()
    recent_date = recent_date['date'][5]
    # Select latest data from local data
    local = CovidData().get_local_data(date=recent_date)
    date_selector = recent_date
    local_date = local.loc[local['date'] == date_selector,
                           ['date', 'name', 'case_rate']]
    file_path = my_path() / "Local_Authority_Districts.shp"
    # Check required file exists
    try:
        # Read shape file
        geo_df = gpd.read_file(file_path)
    except:  # TODO(review): bare except -- on failure geo_df stays undefined
             # and the merge below raises NameError; narrow and re-raise
        print('Ensure you have imported geo_data sub-folder')
    # API groups Scilly with Cornwall; shape file names them separately
    local_date['name'] = \
        local_date['name'].replace(['Cornwall and Isles of Scilly'],
                                   ['Cornwall'])
    # Outer merge keeps districts with no covid record (drawn unshaded)
    merged = geo_df.merge(local_date, how='outer',
                          left_on="lad19nm", right_on="name")
    # Column to plot
    feature = 'case_rate'
    # Plot range
    vmin, vmax = merged['case_rate'].min(), merged['case_rate'].max()
    # Create plot
    fig, ax = plt.subplots(1, figsize=(12, 10))
    # Set style and labels
    ax.axis('off')
    ax.set_title(f'Local rate per 100,000 {recent_date}',
                 fontdict={'fontsize': '20', 'fontweight': '3'})
    ax.annotate('Source: gov.uk'
                + ' https://api.coronavirus.data.gov.uk/v1/data',
                xy=(0.25, .05), xycoords='figure fraction',
                fontsize=12, color='#555555')
    # Create colorbar
    sm = plt.cm.ScalarMappable(cmap='Reds',
                               norm=plt.Normalize(vmin=vmin, vmax=vmax))
    fig.colorbar(sm)
    # Create map
    merged.plot(column=feature, cmap='Reds', linewidth=0.2, ax=ax,
                edgecolor='0.8')
    plt.show()
    if save:
        # NOTE(review): redraws the map to obtain a savefig handle;
        # fig.savefig would avoid the second draw
        image = merged.plot(column=feature, cmap='Reds', linewidth=0.2,
                            ax=ax, edgecolor='0.8');
        image.figure.savefig(f'{date_selector}-local_rate_plot')
def local_cases_plot(save=False):
    """Plot local case numbers on a map of the UK.

    Function collects data using CovidData get_regional_data method.

    Args:
        save (bool, optional): If true will save plot. Defaults to False.

    Returns:
        Plot of local new-case counts on map of UK
    """
    # Find latest date from the regional feed (index 0 = most recent)
    recent_date = CovidData().get_regional_data()
    recent_date = recent_date['date'][0]
    # Select latest data from local data
    local = CovidData().get_local_data(date=recent_date)
    date_selector = recent_date
    local_date = local.loc[local['date'] == date_selector,
                           ['date', 'name', 'case_newDaily']]
    file_path = my_path() / "Local_Authority_Districts.shp"
    # Check required file exists
    try:
        # Read shape file
        geo_df = gpd.read_file(file_path)
    except:  # TODO(review): bare except -- on failure geo_df stays undefined
             # and the merge below raises NameError; narrow and re-raise
        print('Ensure you have imported geo_data sub-folder')
    # API groups Scilly with Cornwall; shape file names them separately
    local_date['name'] = \
        local_date['name'].replace(['Cornwall and Isles of Scilly'],
                                   ['Cornwall'])
    # Outer merge keeps districts with no covid record (drawn unshaded)
    merged = geo_df.merge(local_date, how='outer',
                          left_on="lad19nm", right_on="name")
    # Column to plot
    feature = 'case_newDaily'
    # Plot range
    vmin, vmax = merged['case_newDaily'].min(), \
        merged['case_newDaily'].max()
    # Create plot
    fig, ax = plt.subplots(1, figsize=(12, 10))
    # Set style and labels
    ax.axis('off')
    ax.set_title(f'Number of new cases by local authority {recent_date}',
                 fontdict={'fontsize': '20', 'fontweight': '3'})
    ax.annotate('Source: gov.uk'
                + ' https://api.coronavirus.data.gov.uk/v1/data',
                xy=(0.25, .05), xycoords='figure fraction',
                fontsize=12, color='#555555')
    # Create colorbar
    sm = plt.cm.ScalarMappable(cmap='Reds',
                               norm=plt.Normalize(vmin=vmin, vmax=vmax))
    fig.colorbar(sm)
    # Create map
    merged.plot(column=feature, cmap='Reds', linewidth=0.2, ax=ax,
                edgecolor='0.8')
    plt.show()
    if save:
        # NOTE(review): redraws the map to obtain a savefig handle;
        # fig.savefig would avoid the second draw
        image = merged.plot(column=feature, cmap='Reds', linewidth=0.2,
                            ax=ax, edgecolor='0.8');
        image.figure.savefig(f'{date_selector}-local_cases_plot')
def case_demographics(df):
    """Produce a plot of the age demographics of cases across England.

    Args:
        df (DataFrame): this must be the dataframe produced by the
            get_regional_data method from the CovidData class

    Returns:
        Plot of case numbers broken down by age
    """
    validate_input(df)
    # Each 'cases_demographics' cell holds a list of per-age-band records
    df_list = df.loc[:, ['cases_demographics', 'date']]
    age_df = []
    for i in range(df_list.shape[0]):
        # Skip rows whose demographics list is empty
        if df_list.iloc[i, 0]:
            temp_df = pd.DataFrame(df_list.iloc[i, 0])
            temp_df['date'] = df_list.iloc[i, 1]
            # Long -> wide: one column per age band, indexed by date
            temp_df = temp_df.pivot(values='rollingRate',
                                    columns='age', index='date')
            age_df.append(temp_df)
    data = pd.concat(age_df)
    data.index = pd.to_datetime(data.index)
    # Collapse the 5-year bands into broader groups by averaging their rates.
    # NOTE(review): a plain mean assumes roughly equal population per band --
    # confirm this approximation is acceptable for the published rates.
    data = \
        data.assign(under_15=(data['00_04']+data['05_09']+data['10_14'])/3,
                    age_15_29=(data['15_19']+data['20_24']+data['25_29'])/3,
                    age_30_39=(data['30_34']+data['35_39'])/2,
                    age_40_49=(data['40_44']+data['45_49'])/2,
                    age_50_59=(data['50_54']+data['55_59'])/2)
    # Drop the narrow bands now folded into the broader groups
    data.drop(columns=['00_04', '00_59', '05_09', '10_14', '15_19', '20_24',
                       '25_29', '30_34', '35_39', '40_44', '45_49', '50_54',
                       '55_59', '60_64', '65_69', '70_74', '75_79', '80_84',
                       '85_89', '90+', 'unassigned'], inplace=True)
    date = data.index[0].strftime('%d-%b-%y')
    # Weekly resample smooths the daily series before plotting
    ready_df = data.resample('W').mean()
    ready_df.plot(figsize=(15, 10), subplots=True, layout=(3, 3),
                  title=f'{date} - England case rate per 100,000 by age'
                  + ' (weekly)')
    plt.style.use('ggplot')
    plt.show()
def vaccine_demographics(df):
    """Plot of the age demographics of third vaccine uptake across England.

    Args:
        df ([DataFrame]): this must be the dataframe produced by the
            get_regional_data method from the CovidData class

    Returns:
        Plot of cumulative third vaccination numbers broken down by age.
    """
    validate_input(df)
    # Each 'vac_demographics' cell holds a list of per-age-band records
    df_list = df.loc[:, ['vac_demographics', 'date']]
    age_df = []
    for i in range(df_list.shape[0]):
        # Skip rows whose demographics list is empty
        if df_list.iloc[i, 0]:
            temp_df = pd.DataFrame(df_list.iloc[i, 0])
            temp_df['date'] = df_list.iloc[i, 1]
            # Long -> wide on the API's (very long) booster-uptake field
            temp_df =\
                temp_df.pivot(values=
                              'cumVaccinationThirdInjectionUptakeByVaccinationDatePercentage',
                              columns='age', index='date')
            age_df.append(temp_df)
    data = pd.concat(age_df)
    data.index = pd.to_datetime(data.index)
    date = data.index[0].strftime('%d-%b-%y')
    # Weekly resample smooths the daily series before plotting
    ready_df = data.resample('W').mean()
    ready_df.plot(figsize=(15, 10), subplots=True, layout=(6, 3),
                  title=f'{date} - England vaccine booster uptake (%) by age'
                  + ' (weekly)')
    plt.style.use('ggplot')
    plt.show()
def death_demographics(df):
    """Plot of the age demographics of rate of deaths across England.

    Args:
        df (DataFrame): this must be the dataframe produced by the
            get_regional_data method from the CovidData class

    Returns:
        Plot of death rate per 100,000 broken down by age.
    """
    validate_input(df)
    # Each 'death_Demographics' cell holds a list of per-age-band records
    df_list = df.loc[:, ['death_Demographics', 'date']]
    age_df = []
    for i in range(df_list.shape[0]):
        # Skip rows whose demographics list is empty
        if df_list.iloc[i, 0]:
            temp_df = pd.DataFrame(df_list.iloc[i, 0])
            temp_df['date'] = df_list.iloc[i, 1]
            # Long -> wide: one column per age band, indexed by date
            temp_df = temp_df.pivot(values='rollingRate',
                                    columns='age', index='date')
            age_df.append(temp_df)
    data = pd.concat(age_df)
    data.index = pd.to_datetime(data.index)
    # Collapse the 5-year bands into broader groups by averaging their rates.
    # NOTE(review): a plain mean assumes roughly equal population per band --
    # confirm this approximation is acceptable for the published rates.
    data = \
        data.assign(under_15=(data['00_04']+data['05_09']+data['10_14'])/3,
                    age_15_29=(data['15_19']+data['20_24']+data['25_29'])/3,
                    age_30_39=(data['30_34']+data['35_39'])/2,
                    age_40_49=(data['40_44']+data['45_49'])/2,
                    age_50_59=(data['50_54']+data['55_59'])/2)
    # Drop the narrow bands now folded into the broader groups
    data.drop(columns=['00_04', '00_59', '05_09', '10_14', '15_19', '20_24',
                       '25_29', '30_34', '35_39', '40_44', '45_49', '50_54',
                       '55_59', '60_64', '65_69', '70_74', '75_79', '80_84',
                       '85_89', '90+'], inplace=True)
    date = data.index[0].strftime('%d-%b-%y')
    # Weekly resample smooths the daily series before plotting
    ready_df = data.resample('W').mean()
    ready_df.plot(figsize=(15, 10), subplots=True, layout=(3, 3),
                  title=f'{date} - England death rate per 100,000 by age'
                  + ' (weekly)')
    plt.style.use('ggplot')
    plt.show()
def daily_deaths(df, pan_duration=pan_duration, save=False):
    """Plot number of people died per day within 28 days of 1st +ve test.

    COVID-19 deaths over time, from the start of the pandemic March 2020.

    Args:
        df (DataFrame): requires data from get_uk_data method
        pan_duration (function, optional): use pre specified pan_duration.
            Defaults to pan_duration.
        save (bool, optional): set True to save the plot. Defaults to False.

    Returns:
        Matplotlib plot, styled using matplotlib template 'ggplot'
    """
    # Series to plot; index 0 is the most recent entry from the API.
    # (Commented-out cumulative-cases leftovers removed.)
    daily_deaths = df['death_dailyDeaths'].to_list()
    date = df['date'].to_list()
    # Find date of highest number of daily deaths
    high, arg_high = max(daily_deaths), daily_deaths.index(max(daily_deaths))
    high_date = date[arg_high].strftime('%d %b %Y')
    duration = pan_duration(date=date)
    # Create matplotlib figure and specify size
    fig = plt.figure(figsize=(12, 10))
    plt.style.use('ggplot')
    ax = fig.add_subplot()
    # Plot variables
    ax.plot(date, daily_deaths)
    # Style and label plot
    ax.set_xlabel('Date')
    ax.set_ylabel('Daily deaths')
    ax.fill_between(date, daily_deaths,
                    alpha=0.3)
    ax.set_title('Deaths within 28 days of positive test (UK)',
                 fontsize=18)
    at = AnchoredText(f"Most recent daily deaths\n{daily_deaths[0]:,.0f}\
    \nMax daily deaths\n{high:,.0f}: {high_date}\
    \nPandemic duration\n{duration} days",
                      prop=dict(size=16), frameon=True, loc='upper left')
    at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
    ax.add_artist(at)
    ax.annotate('Source: gov.uk https://api.coronavirus.data.gov.uk/v1/data',
                xy=(0.25, 0.0175), xycoords='figure fraction',
                fontsize=12, color='#555555')
    if save:
        plt.savefig(f"casenumbers{date[0].strftime('%Y-%m-%d')}")
    plt.show()
def cumulative_deaths(df, pan_duration=pan_duration, save=False):
    """Plot cum number of people who died within 28 days of +ve test.

    Total COVID-19 deaths over time, from the start of the
    pandemic March 2020.

    Args:
        df (DataFrame): containing covid data retrieved from CovidData
        pan_duration ([function], optional): Defaults to pan_duration.
        save (bool, optional): True to save plot. Defaults to False.

    Returns:
        Matplotlib plot, styled using matplotlib template 'ggplot'
    """
    # NOTE(review): fillna(0) zeroes missing cumulative entries --
    # presumably dates before the first recorded death; confirm.
    df = df.fillna(0)
    cum_deaths = df["death_cumulativeDeaths"].to_list()
    date = df['date'].to_list()
    # cumulative = df['death_cumulativeDeaths'].to_list()
    # Max of the cumulative series, i.e. the latest running total
    high, arg_high = max(cum_deaths), cum_deaths.index(max(cum_deaths))
    # daily = df["death_cumulativeDeaths"][0]
    high_date = date[arg_high].strftime('%d %b %Y')
    # added the number of death for the last seven days
    duration = pan_duration(date=date)
    # Create matplotlib figure and specify size
    fig = plt.figure(figsize=(12, 10))
    ax = fig.add_subplot()
    # Plot variables
    ax.plot(date, cum_deaths)
    # Style and label plot
    ax.set_xlabel('Date')
    ax.set_ylabel('Cumulative deaths')
    ax.fill_between(date, cum_deaths,
                    alpha=0.3)
    ax.set_title('Cumulative deaths within 28 days of positive test (UK)',
                 fontsize=18)
    at = AnchoredText(f"Last cumulative deaths\n{high:,.0f}: {high_date}\
    \nPandemic duration\n{duration} days",
                      prop=dict(size=16), frameon=True, loc='upper left')
    # \nCumulative cases\n{cumulative[0]:,.0f}\
    at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
    ax.add_artist(at)
    ax.annotate('Source: gov.uk https://api.coronavirus.data.gov.uk/v1/data',
                xy=(0.25, 0.0175), xycoords='figure fraction',
                fontsize=12, color='#555555')
    plt.style.use('ggplot')
    if save:
        plt.savefig(f"casenumbers{date[0].strftime('%Y-%m-%d')}")
    plt.show()
def regional_plot_death_rate(save=False):
    """Plot regional deaths rate per 100,000 on a map of the UK.

    Function collects data using CovidData get_regional_data method.

    Args:
        save (bool, optional): True will save plot. Defaults to False.

    Returns:
        Plot of regional case rate on map of UK
    """
    # Collect data: English regions plus each devolved nation
    regions = CovidData().get_regional_data()
    scotland = CovidData(nation='scotland').get_national_data()
    wales = CovidData(nation='wales').get_national_data()
    ni = CovidData(nation='northern ireland').get_national_data()
    # Set date to plot
    # NOTE(review): index 7 picks the date eight entries back -- presumably
    # because death-rate figures lag further than case data; confirm.
    date_selector = regions['date'][7]
    regions_date = regions.loc[regions['date'] == date_selector]
    scotland_date = scotland.loc[scotland['date'] == date_selector,
                                 ['date', 'name', 'death_newDeathRate']]
    wales_date = wales.loc[wales['date'] == date_selector,
                           ['date', 'name', 'death_newDeathRate']]
    ni_date = ni.loc[ni['date'] == date_selector,
                     ['date', 'name', 'death_newDeathRate']]
    # Combine regional data into single dataframe
    final_df = pd.concat([regions_date, scotland_date, wales_date, ni_date],
                         axis=0)
    file_path = my_path() / 'NUTS_Level_1_(January_2018)_Boundaries.shp'
    # Check required file exists
    try:
        # Read shape file
        geo_df = gpd.read_file(file_path)
    except:  # TODO(review): bare except -- on failure geo_df stays undefined
             # and the replace() below raises NameError; narrow and re-raise
        print('Ensure you have imported geo_data sub-folder')
    # Shape file uses long-form region names; align with API names
    geo_df['nuts118nm'] = \
        geo_df['nuts118nm'].replace(['North East (England)',
                                     'North West (England)',
                                     'East Midlands (England)',
                                     'West Midlands (England)',
                                     'South East (England)',
                                     'South West (England)'],
                                    ['North East', 'North West',
                                     'East Midlands', 'West Midlands',
                                     'South East', 'South West'])
    # Attach covid figures to each geometry by region name
    merged = geo_df.merge(final_df, how='left', left_on="nuts118nm",
                          right_on="name")
    # Column to plot
    feature = 'death_newDeathRate'
    # Plot range
    feature_min, feature_max = merged['death_newDeathRate'].min(),\
        merged['death_newDeathRate'].max()
    # Create plot
    fig, ax = plt.subplots(1, figsize=(12, 10))
    # Set style and labels
    ax.axis('off')
    ax.set_title('Regional rate per 100,000 (new deaths)',
                 fontdict={'fontsize': '20', 'fontweight': '3'})
    ax.annotate('Source: gov.uk \
                https://api.coronavirus.data.gov.uk/v1/data',
                xy=(0.25, .05), xycoords='figure fraction',
                fontsize=12, color='#555555')
    # Create colorbar
    sm = plt.cm.ScalarMappable(cmap='Reds',
                               norm=plt.Normalize(vmin=feature_min,
                                                  vmax=feature_max))
    fig.colorbar(sm)
    # Create map
    merged.plot(column=feature, cmap='Reds', linewidth=0.8, ax=ax,
                edgecolor='0.8')
    plt.show()
    if save:
        # NOTE(review): redraws the map to obtain a savefig handle;
        # fig.savefig would avoid the second draw
        image = merged.plot(column=feature, cmap='Reds', linewidth=0.8,
                            ax=ax, edgecolor='0.8')
        image.figure.savefig(f'caserates{date_selector}')
def regional_deaths_demo(save=False):
    """Plot number of deaths in the UK.

    Plot by age category (>60 , <60). Function collects data using
    CovidData get_regional_data method.

    Args:
        save (bool, optional): True will save plot. Defaults to False.

    Returns:
        Plot of regional deaths by age category (UK)
    """
    CovidDataE = CovidData("england")
    regional = CovidDataE.get_regional_data()
    # Keep only the date and the per-age death records
    # (positional axis argument replaced by explicit axis=1 keyword)
    regional = \
        regional.drop(regional.columns.difference(["date",
                                                   "death_Demographics"]),
                      axis=1)
    # remove empty lists in 'death_Demographics' column
    # (a stray no-op `regional` expression statement was removed here)
    regional = regional[regional["death_Demographics"].astype(bool)]
    # transform the regional dataframe to have 'age_categories' as columns
    # with 'deaths' values and 'date' as rows
    age_df = []
    for i in range(regional.shape[0]):
        if regional.iloc[i, 1]:
            temp_df = pd.DataFrame(regional.iloc[i, 1])
            temp_df['date'] = regional.iloc[i, 0]
            temp_df = temp_df.pivot(values='deaths', columns=['age'],
                                    index='date')
            age_df.append(temp_df)
    final_death_data = pd.concat(age_df)
    # create a dataframe with columns 'age category' and 'number of deaths'
    age_cat = ['00_04', '00_59', '05_09', '10_14', '15_19', '20_24', '25_29',
               '30_34', '35_39', '40_44', '45_49', '50_54', '55_59', '60+',
               '60_64', '65_69', '70_74', '75_79', '80_84', '85_89', '90+']
    deaths = [final_death_data[ele].sum() for ele in age_cat]
    deaths_df = pd.DataFrame(list(zip(age_cat, deaths)),
                             columns=['age category', 'number of deaths'])
    # group age categories to have only <60 old years and 60+
    cat_1 = deaths_df.loc[deaths_df['age category'] == '00_59']
    cat_2 = deaths_df.loc[deaths_df['age category'] == '60+']
    below_60 = cat_1['number of deaths'].sum()
    above_60 = cat_2['number of deaths'].sum()
    lst1 = ['<60', '60+']
    lst2 = [below_60, above_60]
    final_deaths_age_cat = pd.DataFrame(list(zip(lst1, lst2)),
                                        columns=['age category',
                                                 'number of deaths'])
    # PLOTTING A BAR PLOT OF NUMBER OF DEATHS vs AGE CATEGORY
    fig = plt.figure(figsize=(12, 10))
    ax = fig.add_subplot()
    # Plot variables
    ax.bar(final_deaths_age_cat['age category'],
           final_deaths_age_cat['number of deaths'])
    # Style and label plot
    ax.set_xlabel('Age category')
    ax.set_ylabel('Number of deaths')
    ax.fill_between(final_deaths_age_cat['age category'],
                    final_deaths_age_cat['number of deaths'],
                    alpha=0.3)
    ax.set_title('Number of deaths per age category (England)',
                 fontsize=18)
    at = AnchoredText(f"Number of deaths:\
    \nAge <60: {below_60}\
    \nAge >60: {above_60}",
                      prop=dict(size=16), frameon=True, loc='upper left')
    at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
    ax.add_artist(at)
    ax.annotate('Source: gov.uk https://api.coronavirus.data.gov.uk/v1/data',
                xy=(0.25, 0.0175), xycoords='figure fraction',
                fontsize=12, color='#555555')
    plt.style.use('ggplot')
    plt.show()
    if save:
        # Bug fix: `dt` is the datetime *module* (import datetime as dt),
        # so dt.now() raised AttributeError; use dt.datetime.now().
        date = dt.datetime.now()
        plt.savefig(f"casenumbers{date.strftime('%Y-%m-%d')}")
def collect_hosp_data(country='england'):
    """Collect data for hosp and vac functions.

    Args:
        country (str, optional): Select country data. Defaults to 'england'.

    Returns:
        DataFrame: data in correct format for hosp and vac functions
    """
    source = CovidData("england")
    # National feed for England, whole-UK feed for anything else
    if country == 'england':
        frame = source.get_national_data()
    else:
        frame = source.get_uk_data()
    # Normalise the date column and zero-fill gaps for the heatmaps
    frame["date"] = frame["date"].astype('datetime64[ns]')
    return frame.fillna(0)
def hosp_cases_plot():
    """Heatmap for the the daily number of hospital cases (England).

    Args:
        No args required, collects own data.

    Returns :
        Seaborn heatmap plot for the number of hospital cases
        per day of the pandemic.
    """
    frame = collect_hosp_data().loc[:, ["date", "hosp_hospitalCases"]]
    # One column per day-of-month, one row per year-month
    frame = frame.assign(Day=frame["date"].apply(lambda d: d.day))
    frame["date"] = frame.date.dt.strftime("%Y-%m")
    newpivot = frame.pivot_table("hosp_hospitalCases", index="date",
                                 columns="Day")
    plt.figure(figsize=(16, 9))
    # Reversed colormap so darker cells mean more cases
    axis = sns.heatmap(newpivot, cmap=sns.cm.rocket_r)
    axis.set_title("Heatmap of the daily number of hospital cases (England)",
                   fontsize=14)
    axis.set_xlabel("Day", fontsize=12)
    axis.set_ylabel("Month and Year", fontsize=12)
def hosp_newadmissions_plot():
    """Heatmap for the the daily number of new hospital admissions (England).

    Args:
        No args required, collects own data.

    Returns :
        Seaborn heatmap plot for the number of new hospital admissions per
        day of the pandemic.
    """
    frame = collect_hosp_data().loc[:, ["date", "hosp_newAdmissions"]]
    # One column per day-of-month, one row per year-month
    frame = frame.assign(Day=frame.date.apply(lambda d: d.day))
    frame["date"] = frame.date.dt.strftime("%Y-%m")
    newpivot = frame.pivot_table("hosp_newAdmissions", index="date",
                                 columns="Day")
    plt.figure(figsize=(16, 9))
    # Reversed colormap so darker cells mean more admissions
    axis = sns.heatmap(newpivot, cmap=sns.cm.rocket_r)
    axis.set_title("Heatmap of the daily number of new hospital admissions"
                   + " (England)", fontsize=14)
    axis.set_xlabel("Day", fontsize=12)
    axis.set_ylabel("Month and Year", fontsize=12)
def hosp_newadmissionschange_plot():
    """Change in hospital admissions (England).

    Plot difference between the number of new hospital admissions
    during the latest 7-day period and the previous non-overlapping week.

    Args:
        No args required, collects own data.

    Returns :
        Lineplot of this difference over the months.
    """
    frame = collect_hosp_data().loc[:, ["date", "hosp_newAdmissionsChange"]]
    months = frame["date"].dt.strftime("%Y-%m")
    change = frame["hosp_newAdmissionsChange"]
    fig, ax = plt.subplots(1, 1, figsize=(20, 3))
    sns.lineplot(x=months, y=change, color="g")
    ax.set_title("Daily new admissions change (England)", fontsize=14)
    # API rows are newest-first; flip so time runs left to right
    ax.invert_xaxis()
    ax.set_xlabel("Date", fontsize=12)
    ax.set_ylabel("New Admissions Change", fontsize=12)
def hosp_occupiedbeds_plot():
    """Plot daily number of COVID-19 patients in mechanical ventilator beds.

    Plots information for England.

    Args:
        No args required, collects own data.

    Returns :
        - Lineplot of this difference over the months.
    """
    frame = collect_hosp_data().loc[:, ["date", "hosp_covidOccupiedMVBeds"]]
    fig, ax = plt.subplots(1, 1, figsize=(20, 3))
    sns.lineplot(x=frame["date"].dt.strftime("%Y-%m"),
                 y=frame["hosp_covidOccupiedMVBeds"], ax=ax, color="b")
    ax.set_title("Daily number of COVID occupied Mechanical Ventilator beds"
                 + " (England)", fontsize=14)
    # API rows are newest-first; flip so time runs left to right
    ax.invert_xaxis()
    ax.set_xlabel("Date", fontsize=12)
    ax.set_ylabel("Number of occupied MV beds", fontsize=12)
def hosp_casesuk_plot():
    """Heatmap for the daily number of hospital cases in the UK.

    Args:
        No args required, collects own data.
    Returns:
        Seaborn heatmap plot for the number of hospital cases
        per day of the pandemic.
    """
    cases = collect_hosp_data(country='uk').loc[:, ["date",
                                                    "hosp_hospitalCases"]]
    # Rows become months ("%Y-%m"), columns become day-of-month.
    cases["Day"] = cases["date"].apply(lambda d: d.day)
    cases["date"] = cases.date.dt.strftime("%Y-%m")
    monthly = cases.pivot_table("hosp_hospitalCases", index="date",
                                columns="Day")
    plt.figure(figsize=(16, 9))
    heatmap = sns.heatmap(monthly, cmap=sns.cm.rocket_r)
    heatmap.set_title("Heatmap of the daily number of hospital cases in the UK",
                      fontsize=14)
    heatmap.set_xlabel("Day", fontsize=12)
    heatmap.set_ylabel("Month and Year", fontsize=12)
def hosp_newadmissionsuk_plot():
    """Heatmap for the daily number of new hospital admissions (UK).

    Args:
        No args required, collects own data.
    Returns:
        Seaborn heatmap plot for the number of new hospital admissions per day
        of the pandemic (UK).
    """
    admissions = collect_hosp_data(country='uk').loc[:, ["date",
                                                         "hosp_newAdmissions"]]
    # Rows become months ("%Y-%m"), columns become day-of-month.
    admissions["Day"] = admissions.date.apply(lambda d: d.day)
    admissions["date"] = admissions.date.dt.strftime("%Y-%m")
    monthly = admissions.pivot_table("hosp_newAdmissions", index="date",
                                     columns="Day")
    plt.figure(figsize=(16, 9))
    heatmap = sns.heatmap(monthly, cmap=sns.cm.rocket_r)
    heatmap.set_title(
        "Heatmap of the daily number of new hospital admissions in the UK",
        fontsize=14)
    heatmap.set_xlabel("Day", fontsize=12)
    heatmap.set_ylabel("Month and Year", fontsize=12)
def hosp_occupiedbedsuk_plot():
    """Plot daily number of COVID-19 patients in mechanical ventilator beds.

    Plots information for the UK.

    Args:
        No args required, collects own data.
    Returns:
        Lineplot of occupied mechanical-ventilator beds over the months.
    """
    beds = collect_hosp_data(country='uk').loc[:, ["date",
                                                   "hosp_covidOccupiedMVBeds"]]
    fig, axis = plt.subplots(1, 1, figsize=(20, 3))
    sns.lineplot(x=beds["date"].dt.strftime("%Y-%m"),
                 y=beds["hosp_covidOccupiedMVBeds"], ax=axis, color="b")
    axis.set_title(
        "Daily number of COVID occupied Mechanical Ventilator beds in the UK",
        fontsize=14)
    # Source data arrives newest-first; flip so time runs left to right.
    axis.invert_xaxis()
    axis.set_xlabel("Date", fontsize=12)
    axis.set_ylabel("Number of occupied MV beds", fontsize=12)
def vaccine_percentage(df):
    """Plot the percentage of the vaccinated population over time.

    Args:
        df (DataFrame): Requires data returned by get_uk_data
            or get_national_data methods
    Returns:
        Plot of total percentage of population vaccinated
    """
    # NOTE(review): converts the caller's 'date' column in place —
    # presumably intentional; confirm callers do not rely on string dates.
    df['date'] = df['date'].astype('datetime64[ns]')
    plt.figure(figsize=(14, 7))
    axis = sns.lineplot(x='date', y='vac_total_perc', data=df)
    # Percentages are bounded, so fix the axis at 0-100.
    plt.ylim(0, 100)
    axis.set_xlabel("Covid pandemic, up to date", fontsize=12)
    axis.set_ylabel("Percentage", fontsize=12)
    axis.set_title('Percentage of the vaccinated population over time',
                   fontsize=14)
def vaccine_doses_plot(df):
    """Plot both the first and second doses of vaccines.

    Daily information.

    Args:
        df (DataFrame): Requires data returned by get_national_data
    Returns:
        Plots of first and second vaccine doses since start of pandemic
        records
    """
    df['date'] = df['date'].astype('datetime64[ns]')
    # Melt to long form so seaborn can colour each dose series via `hue`.
    doses_long = df[['date', 'vac_first_dose', 'vac_second_dose']].melt(
        'date', var_name="vaccine_doses", value_name='count')
    plt.figure(figsize=(14, 7))
    axis = sns.lineplot(x='date', y='count', hue='vaccine_doses',
                        data=doses_long)
    plt.grid()
    plt.ylim(0, 50000000)
    axis.set_ylabel("count", fontsize=12)
    axis.set_xlabel("Covid pandemic, up to date", fontsize=12)
    axis.set_title('daily amount of first and second doses' +
                   ' of vaccination administered', fontsize=14)
def first_vaccination_hm(df):
    """Plot a heatmap of the first vaccine dose (daily).

    Args:
        df (DataFrame): Requires data returned by get_national_data
    Returns:
        Heatmap of first vaccine doses over time
    """
    # NOTE(review): converts the caller's 'date' column in place —
    # confirm callers do not rely on the original dtype.
    df['date'] = df['date'].astype('datetime64[ns]')
    df = df.fillna(0)
    vaccines_hm = df.loc[:, ['date', 'vac_first_dose']]
    # Zero-padded day-of-month becomes the heatmap's x axis; the
    # year-month string becomes the y axis.
    vaccines_hm["Day"] = vaccines_hm.date.apply(lambda x: x.strftime("%d"))
    vaccines_hm.date = vaccines_hm.date.dt.strftime('%Y-%m')
    # (A previous revision also called pivot_table here and discarded the
    # result; that dead computation has been removed.)
    pivoted = vaccines_hm.pivot(columns='Day',
                                index='date',
                                values='vac_first_dose')
    # Days absent from a month (e.g. the 31st) become NaN after the pivot.
    pivoted = pivoted.fillna(0)
    plt.figure(figsize=(16, 9))
    cmap = sns.cm.rocket_r
    plot_hm1 = sns.heatmap(pivoted, cmap=cmap)
    plot_hm1.set_title('heatmap of the first vaccination dose' +
                       ' administered daily', fontsize=14)
    plot_hm1.set_ylabel('Year and month', fontsize=12)
def second_vaccination_hm(df):
    """Plot a heatmap of the second vaccine dose (daily).

    Args:
        df (DataFrame): Requires data returned by get_national_data
    Returns:
        Heatmap of second vaccine doses over time
    """
    # NOTE(review): converts the caller's 'date' column in place —
    # confirm callers do not rely on the original dtype.
    df['date'] = df['date'].astype('datetime64[ns]')
    df = df.fillna(0)
    vaccines_hm = df.loc[:, ['date', 'vac_second_dose']]
    # Zero-padded day-of-month becomes the heatmap's x axis; the
    # year-month string becomes the y axis.
    vaccines_hm["Day"] = vaccines_hm.date.apply(lambda x: x.strftime("%d"))
    vaccines_hm.date = vaccines_hm.date.dt.strftime('%Y-%m')
    # (A previous revision also called pivot_table here and discarded the
    # result; that dead computation has been removed.)
    pivoted = vaccines_hm.pivot(columns='Day',
                                index='date',
                                values='vac_second_dose')
    # Days absent from a month (e.g. the 31st) become NaN after the pivot.
    pivoted = pivoted.fillna(0)
    plt.figure(figsize=(16, 9))
    cmap = sns.cm.rocket_r
    plot_hm2 = sns.heatmap(pivoted, cmap=cmap)
    plot_hm2.set_title('heatmap of the second vaccination dose' +
                       ' administered daily', fontsize=14)
    plot_hm2.set_ylabel('Year and month', fontsize=12)
def vaccines_across_regions(vaccines2):
    """Plot graph of the vaccination uptake percentage by English regions.

    Args:
        vaccines2 (DataFrame): data from get_regional_data required
    Returns:
        plot of vaccine uptake by regions in England
    """
    keep_fd = ['date', 'name', 'vac_firstDose']
    # NOTE(review): converts the caller's 'date' column in place —
    # confirm callers do not rely on the original dtype.
    vaccines2['date'] = vaccines2['date'].astype('datetime64[ns]')
    vaccines_fd = vaccines2.loc[:, keep_fd]
    vaccines_fd.fillna(0, inplace=True)
    # (A bare `vaccines_fd` expression statement — dead code — was removed.)
    plt.figure(figsize=(16, 9))
    plot_fd = sns.lineplot(x='date', y='vac_firstDose', hue='name',
                           data=vaccines_fd)
    # Uptake is a percentage, so fix the axis at 0-100.
    plt.ylim(0, 100)
    plt.grid()
    plot_fd.set_ylabel("percentage", fontsize=12)
    plot_fd.set_xlabel("Covid pandemic, up to date", fontsize=12)
    plot_fd.set_title('Vaccination uptake by region', fontsize=14)
| 40.647826 | 79 | 0.58571 |
c2b24d9435ba2ee5e8600b5092a512734c93405a | 12,969 | py | Python | bkt/library/comrelease.py | pyro-team/bkt-toolbox | bbccba142a81ca0a46056f2bcda75899979158a5 | [
"MIT"
] | 12 | 2019-05-31T02:57:26.000Z | 2022-03-26T09:40:50.000Z | bkt/library/comrelease.py | mrflory/bkt-toolbox | bbccba142a81ca0a46056f2bcda75899979158a5 | [
"MIT"
] | 27 | 2021-11-27T16:33:19.000Z | 2022-03-27T17:47:26.000Z | bkt/library/comrelease.py | pyro-team/bkt-toolbox | bbccba142a81ca0a46056f2bcda75899979158a5 | [
"MIT"
] | 3 | 2019-06-12T10:59:20.000Z | 2020-04-21T15:13:50.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
from contextlib import contextmanager
from System.Runtime.InteropServices import Marshal
#separte logger for comrelease to avoid spamming of log file
logger = logging.getLogger().getChild("comrelease")
logger.setLevel(logging.INFO) #comment out this line for comrelease debugging
#FIXME: log comrelease in separate file?
| 36.532394 | 166 | 0.63806 |
c2b28cc65d1bcc4d30f8c76abcb566d6199f508e | 389 | py | Python | db/drop_db.py | muellerzr/capstone-2021 | a7f0c4de902735aece018d7c2ffedccc1995d51a | [
"Apache-2.0"
] | null | null | null | db/drop_db.py | muellerzr/capstone-2021 | a7f0c4de902735aece018d7c2ffedccc1995d51a | [
"Apache-2.0"
] | 1 | 2021-11-30T00:03:22.000Z | 2021-11-30T00:03:22.000Z | db/drop_db.py | muellerzr/capstone-2021 | a7f0c4de902735aece018d7c2ffedccc1995d51a | [
"Apache-2.0"
] | null | null | null | from pymongo import MongoClient
client = MongoClient('mongodb+srv://<username>:<password>@cluster0.27gwi.mongodb.net/Cluster0?retryWrites=true&w=majority')
username = ""
password = ""
url = f'mongodb+srv://{username}:{password}@cluster0.27gwi.mongodb.net/Cluster0?retryWrites=true&w=majority'
client = MongoClient(url)
# db = client.business
db = client.credentials
db.credentials.drop() | 35.363636 | 123 | 0.768638 |
c2b463e3b92836e2fb5a6f0fa7a7587ea2477928 | 750 | py | Python | advanced/image_processing/examples/plot_blur.py | rossbar/scipy-lecture-notes | 7f74e6925721c43bd81bf0bee34b4805ac4a3b57 | [
"CC-BY-4.0"
] | 2,538 | 2015-01-01T04:58:41.000Z | 2022-03-31T21:06:05.000Z | advanced/image_processing/examples/plot_blur.py | rossbar/scipy-lecture-notes | 7f74e6925721c43bd81bf0bee34b4805ac4a3b57 | [
"CC-BY-4.0"
] | 362 | 2015-01-18T14:16:23.000Z | 2021-11-18T16:24:34.000Z | advanced/image_processing/examples/plot_blur.py | rossbar/scipy-lecture-notes | 7f74e6925721c43bd81bf0bee34b4805ac4a3b57 | [
"CC-BY-4.0"
] | 1,127 | 2015-01-05T14:39:29.000Z | 2022-03-25T08:38:39.000Z | """
Blurring of images
===================
An example showing various processes that blur an image.
"""
import scipy.misc
from scipy import ndimage
import matplotlib.pyplot as plt
face = scipy.misc.face(gray=True)
blurred_face = ndimage.gaussian_filter(face, sigma=3)
very_blurred = ndimage.gaussian_filter(face, sigma=5)
local_mean = ndimage.uniform_filter(face, size=11)
plt.figure(figsize=(9, 3))
plt.subplot(131)
plt.imshow(blurred_face, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(132)
plt.imshow(very_blurred, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(133)
plt.imshow(local_mean, cmap=plt.cm.gray)
plt.axis('off')
plt.subplots_adjust(wspace=0, hspace=0., top=0.99, bottom=0.01,
left=0.01, right=0.99)
plt.show()
| 23.4375 | 63 | 0.716 |
c2b55d621ee927360546ea50eef9438d938401b2 | 2,845 | py | Python | wmcore_base/ContainerScripts/AggregatePylint.py | ddaina/Docker | 29e330fcbe774cdd0c05b597792c7c5f0e430e67 | [
"Apache-2.0"
] | null | null | null | wmcore_base/ContainerScripts/AggregatePylint.py | ddaina/Docker | 29e330fcbe774cdd0c05b597792c7c5f0e430e67 | [
"Apache-2.0"
] | 18 | 2016-12-02T19:56:53.000Z | 2022-02-04T13:21:24.000Z | wmcore_base/ContainerScripts/AggregatePylint.py | ddaina/Docker | 29e330fcbe774cdd0c05b597792c7c5f0e430e67 | [
"Apache-2.0"
] | 7 | 2016-06-03T18:32:26.000Z | 2021-11-05T21:04:19.000Z | #! /usr/bin/env python
import json
from optparse import OptionParser
usage = "usage: %prog [options] message"
parser = OptionParser(usage)
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("You must supply a label")
label = args[0]
try:
with open('pylintReport.json', 'r') as reportFile:
report = json.load(reportFile)
except IOError:
report = {}
warnings = 0
errors = 0
comments = 0
refactors = 0
score = 0
with open('pylint.out', 'r') as pylintFile:
for line in pylintFile:
if line.startswith('Your code has been rated at '):
scorePart = line.strip('Your code has been rated at ')
score = scorePart.split('/')[0]
try:
if not filename in report:
report[filename] = {}
if not label in report[filename]:
report[filename][label] = {}
if filename and label:
report[filename][label]['score'] = score
except NameError:
print "Score of %s found, but no filename" % score
parts = line.split(':')
if len(parts) != 3:
continue
try:
newFilename, lineNumber, rawMessage = parts
newFilename = newFilename.strip()
if not newFilename: # Don't update filename if we didn't find one
continue
lineNumber = int(lineNumber)
filename = newFilename
rmParts = rawMessage.split(']', 1)
rawCode = rmParts[0].strip()
message = rmParts[1].strip()
severity = rawCode[1:2]
code = rawCode[2:6]
shortMsg = rawCode[7:]
msgParts = shortMsg.split(',')
objectName = msgParts[1].strip()
if severity == 'R':
refactors += 1
elif severity == 'W':
warnings += 1
elif severity == 'E':
errors += 1
elif severity == 'C':
comments += 1
if not filename in report:
report[filename] = {}
if not label in report[filename]:
report[filename][label] = {}
if not 'events' in report[filename][label]:
report[filename][label]['events'] = []
report[filename][label]['events'].append((lineNumber, severity, code, objectName, message))
report[filename][label]['refactors'] = refactors
report[filename][label]['warnings'] = warnings
report[filename][label]['errors'] = errors
report[filename][label]['comments'] = comments
except ValueError:
continue
with open('pylintReport.json', 'w') as reportFile:
json.dump(report, reportFile, indent=2)
reportFile.write('\n')
| 29.635417 | 103 | 0.53638 |
c2b5fc27d0f81bb0fc04c52397a1a93060a0b15c | 71 | py | Python | tests/__init__.py | lesleslie/jinja-inflection | a20c248a897aa95b38e860ecaee1517c3a5958fc | [
"BSD-3-Clause"
] | 1 | 2019-09-14T06:50:38.000Z | 2019-09-14T06:50:38.000Z | tests/__init__.py | lesleslie/jinja-inflection | a20c248a897aa95b38e860ecaee1517c3a5958fc | [
"BSD-3-Clause"
] | null | null | null | tests/__init__.py | lesleslie/jinja-inflection | a20c248a897aa95b38e860ecaee1517c3a5958fc | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Unit test package for jinja_inflection."""
| 17.75 | 45 | 0.619718 |
c2b8cf5ed62085b93846cc634a5c0abe566a9d50 | 4,376 | py | Python | smartsnippets_inherit/cms_plugins.py | pbs/django-cms-smartsnippets | 61727dbdf44678ebd7df3fbeca8e7e190e364cc8 | [
"BSD-3-Clause"
] | 5 | 2015-08-06T14:47:00.000Z | 2021-02-17T19:18:27.000Z | smartsnippets_inherit/cms_plugins.py | pbs/django-cms-smartsnippets | 61727dbdf44678ebd7df3fbeca8e7e190e364cc8 | [
"BSD-3-Clause"
] | 11 | 2015-03-10T23:16:40.000Z | 2018-07-01T22:44:55.000Z | smartsnippets_inherit/cms_plugins.py | pbs/django-cms-smartsnippets | 61727dbdf44678ebd7df3fbeca8e7e190e364cc8 | [
"BSD-3-Clause"
] | 5 | 2015-06-04T17:35:34.000Z | 2018-02-08T15:43:59.000Z | from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.plugins.utils import downcast_plugins
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from smartsnippets_inherit.models import InheritPageContent
from smartsnippets_inherit.forms import InheritPageForm
from smartsnippets_inherit.settings import USE_BOOTSTRAP_ACE
from smartsnippets.settings import inherit_variable_pattern
from smartsnippets.models import Variable, SmartSnippetPointer
from contextlib import contextmanager
from itertools import chain
plugin_pool.register_plugin(PageInheritPlugin)
| 37.084746 | 87 | 0.662934 |
c2b9340069bcb7200131d508a532aae97e280c02 | 1,389 | py | Python | drf_orjson_renderer/parsers.py | cblakkan/drf_orjson_renderer | 15fcfb65918d16cca087095216847594929f61c4 | [
"MIT"
] | 28 | 2020-01-22T05:57:49.000Z | 2022-03-17T09:07:44.000Z | drf_orjson_renderer/parsers.py | cblakkan/drf_orjson_renderer | 15fcfb65918d16cca087095216847594929f61c4 | [
"MIT"
] | 14 | 2020-02-18T16:17:34.000Z | 2022-03-23T01:07:35.000Z | drf_orjson_renderer/parsers.py | cblakkan/drf_orjson_renderer | 15fcfb65918d16cca087095216847594929f61c4 | [
"MIT"
] | 17 | 2020-02-17T22:31:28.000Z | 2022-03-10T04:48:10.000Z | from django.conf import settings
from rest_framework.exceptions import ParseError
from rest_framework.parsers import BaseParser
import orjson
__all__ = ["ORJSONParser"]
| 33.878049 | 81 | 0.662347 |
c2b97e17283bf6ef0af93d080c8e954cf0f1c1c7 | 1,942 | py | Python | testproject/test/test_templatetag.py | tdivis/django-sane-testing | 99dc7200593a7a59ffa33edb906d52acc7d8f577 | [
"BSD-3-Clause"
] | 4 | 2015-11-08T11:33:19.000Z | 2018-01-29T22:34:24.000Z | testproject/test/test_templatetag.py | tdivis/django-sane-testing | 99dc7200593a7a59ffa33edb906d52acc7d8f577 | [
"BSD-3-Clause"
] | 1 | 2021-03-19T11:04:29.000Z | 2021-03-19T11:38:52.000Z | testproject/test/test_templatetag.py | Almad/django-sane-testing | 99dc7200593a7a59ffa33edb906d52acc7d8f577 | [
"BSD-3-Clause"
] | null | null | null | from djangosanetesting.cases import TemplateTagTestCase
| 36.641509 | 81 | 0.634398 |
c2baf8408ea1139bafc4f15533339ffc776824ab | 680 | wsgi | Python | DistFiles/mercurial/Contrib/hgweb.wsgi | bobeaton/OneStoryEditor | dcb644c79a4d69b9558df72892636bb1cba97796 | [
"MIT"
] | 1 | 2021-06-08T11:53:32.000Z | 2021-06-08T11:53:32.000Z | DistFiles/mercurial/Contrib/hgweb.wsgi | bobeaton/OneStoryEditor | dcb644c79a4d69b9558df72892636bb1cba97796 | [
"MIT"
] | 4 | 2021-06-12T16:50:59.000Z | 2021-11-19T23:52:24.000Z | ChorusDeps/mercurial/Contrib/hgweb.wsgi | bobeaton/OneStoryEditor | dcb644c79a4d69b9558df72892636bb1cba97796 | [
"MIT"
] | 2 | 2020-05-03T07:23:12.000Z | 2021-07-14T15:58:17.000Z | # An example WSGI for use with mod_wsgi, edit as necessary
# See http://mercurial.selenic.com/wiki/modwsgi for more information
# Path to repo or hgweb config to serve (see 'hg help hgweb')
config = "/path/to/repo/or/config"
# Uncomment and adjust if Mercurial is not installed system-wide
# (consult "installed modules" path from 'hg debuginstall'):
#import sys; sys.path.insert(0, "/path/to/python/lib")
# Uncomment to send python tracebacks to the browser if an error occurs:
#import cgitb; cgitb.enable()
# enable demandloading to reduce startup time
from mercurial import demandimport; demandimport.enable()
from mercurial.hgweb import hgweb
application = hgweb(config)
| 35.789474 | 72 | 0.769118 |
c2bbc6212ba14cce222e1171cae69fdb2905ea98 | 727 | py | Python | uploadHelpers.py | BNUZ-China/iGem-Wiki | 18216737bbd1d5316e5302ff7202a9fa139ad033 | [
"MIT"
] | 1 | 2021-08-28T15:06:10.000Z | 2021-08-28T15:06:10.000Z | uploadHelpers.py | BNUZ-China/iGem-Wiki | 18216737bbd1d5316e5302ff7202a9fa139ad033 | [
"MIT"
] | null | null | null | uploadHelpers.py | BNUZ-China/iGem-Wiki | 18216737bbd1d5316e5302ff7202a9fa139ad033 | [
"MIT"
] | null | null | null | import os
from subprocess import run
import pyperclip
import webbrowser
from urllib import parse
location = 'production'
runOnSingleFolder('js')
runOnSingleFolder('css')
| 29.08 | 129 | 0.672627 |
c2bc8e02528d8ad4917cf1b72be4033e672be9ac | 31 | py | Python | model/mscff/__init__.py | LK-Peng/CNN-based-Cloud-Detection-Methods | 1393a6886e62f1ed5a612d57c5a725c763a6b2cc | [
"MIT"
] | 2 | 2022-02-16T03:30:19.000Z | 2022-03-18T08:02:39.000Z | model/mscff/__init__.py | LK-Peng/CNN-based-Cloud-Detection-Methods | 1393a6886e62f1ed5a612d57c5a725c763a6b2cc | [
"MIT"
] | null | null | null | model/mscff/__init__.py | LK-Peng/CNN-based-Cloud-Detection-Methods | 1393a6886e62f1ed5a612d57c5a725c763a6b2cc | [
"MIT"
] | 1 | 2022-02-16T03:30:20.000Z | 2022-02-16T03:30:20.000Z | from .mscff_model import MSCFF
| 15.5 | 30 | 0.83871 |
c2bd7d19cb0b1997605bb2bf0b20e39d01a29860 | 96 | py | Python | netensorflow/ann/macro_layer/layer_structure/__init__.py | psigelo/NeTensorflow | ec8bc09cc98346484d1b682a3dfd25c68c4ded61 | [
"MIT"
] | null | null | null | netensorflow/ann/macro_layer/layer_structure/__init__.py | psigelo/NeTensorflow | ec8bc09cc98346484d1b682a3dfd25c68c4ded61 | [
"MIT"
] | null | null | null | netensorflow/ann/macro_layer/layer_structure/__init__.py | psigelo/NeTensorflow | ec8bc09cc98346484d1b682a3dfd25c68c4ded61 | [
"MIT"
] | null | null | null | from .InputLayerStructure import InputLayerStructure
from .LayerStructure import LayerStructure
| 32 | 52 | 0.895833 |
c2bd92ea5b65d1f42b8e2aa98a412fc4debb102e | 1,180 | py | Python | Snake.py | ZippyCodeYT/Zippy_Codes | 91101085194ba2f30c74a82639b4730d52bb76dc | [
"CC-BY-4.0"
] | 64 | 2021-07-11T17:56:42.000Z | 2022-03-28T14:17:53.000Z | Snake.py | ZippyCodeYT/Zippy_Codes | 91101085194ba2f30c74a82639b4730d52bb76dc | [
"CC-BY-4.0"
] | 9 | 2021-07-10T23:26:39.000Z | 2022-03-04T17:39:57.000Z | Snake.py | ZippyCodeYT/Ursina_Codes | 91101085194ba2f30c74a82639b4730d52bb76dc | [
"CC-BY-4.0"
] | 57 | 2021-07-14T17:09:46.000Z | 2022-03-31T08:55:51.000Z |
from ursina import *
app = Ursina()
snake = Entity(model='cube', texture = 'assets\snake', scale=0.4, z=-1, collider='box')
ground = Entity(model='cube', texture='grass',rotation=(90,0,0),scale=(5,1,5), z=1)
apple = Entity(model='cube', texture='assets\\apple', scale=0.4, position=(1,-1,-1), collider='mesh')
body = [Entity(model='cube', scale =0.2, texture='assets\\body') for i in range(14)]
camera.orthographic = True
camera.fov = 8
from random import randint
dx = dy = 0
app.run()
| 16.857143 | 101 | 0.572881 |
c2bdcfb6eaabd65b263df02b6c6aceda6e9c5099 | 3,153 | py | Python | test/cp_request/test_entity.py | aquariumbio/experiment-request | 026e3eb767c47f980a35004e9ded5e4e33553693 | [
"MIT"
] | null | null | null | test/cp_request/test_entity.py | aquariumbio/experiment-request | 026e3eb767c47f980a35004e9ded5e4e33553693 | [
"MIT"
] | null | null | null | test/cp_request/test_entity.py | aquariumbio/experiment-request | 026e3eb767c47f980a35004e9ded5e4e33553693 | [
"MIT"
] | null | null | null | import json
from cp_request import Attribute, NamedEntity, Unit, Value
from cp_request.named_entity import NamedEntityEncoder, NamedEntityDecoder
| 35.829545 | 214 | 0.567396 |
c2bf3d5dd42932c24559dabc8a1b555f111001f2 | 4,282 | py | Python | model/loss_and_metric/loss_util.py | goodgodgd/vode-2020 | 98e34120d642780576ac51d57c2f0597e7e1e524 | [
"BSD-2-Clause"
] | 4 | 2020-08-15T02:14:03.000Z | 2021-01-30T08:18:18.000Z | model/loss_and_metric/loss_util.py | goodgodgd/vode-2020 | 98e34120d642780576ac51d57c2f0597e7e1e524 | [
"BSD-2-Clause"
] | 23 | 2020-01-24T07:25:40.000Z | 2021-06-02T00:50:32.000Z | model/loss_and_metric/loss_util.py | goodgodgd/vode-2020 | 98e34120d642780576ac51d57c2f0597e7e1e524 | [
"BSD-2-Clause"
] | 1 | 2020-07-02T12:26:45.000Z | 2020-07-02T12:26:45.000Z | import tensorflow as tf
from utils.decorators import shape_check
| 44.14433 | 101 | 0.685427 |
c2bf82786883e88ae221354f9ad562aa51a42fc8 | 23,127 | py | Python | my_version/craw_page_parse_2.py | xuerenlv/PaperWork | f096b57a80e8d771f080a02b925a22edbbee722a | [
"Apache-2.0"
] | 1 | 2015-10-15T12:26:07.000Z | 2015-10-15T12:26:07.000Z | my_version/craw_page_parse_2.py | xuerenlv/PaperWork | f096b57a80e8d771f080a02b925a22edbbee722a | [
"Apache-2.0"
] | null | null | null | my_version/craw_page_parse_2.py | xuerenlv/PaperWork | f096b57a80e8d771f080a02b925a22edbbee722a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Created on 2015-08-21
@author: xhj
'''
import requests
import StringIO
import gzip
import threading
from loginer import Loginer
import time
from my_log import WeiboSearchLog
import os
import traceback
from bs4 import BeautifulSoup
import re
from Queue import Queue
import datetime
from store_model import Single_weibo_store, UserInfo, UserInfo_store, \
UserInfo_loc, UserInfo_loc_store, Bie_Ming_store, \
UserInfo_for_regester_time_store, UserInfo_for_regester_time
from mongoengine.errors import NotUniqueError
import random
from craw_page_parse import Crawler_with_proxy, crawl_set_time_with_keyword
import sys
from urllib import quote, quote_plus
from mongoengine.queryset.visitor import Q
import json
reload(sys)
sys.setdefaultencoding('utf8')
# # nickname uid
# # uid_or_uname
# # uid_or_uname ()
# uid
#
############################################ ###########################################################
# http://weibo.cn/1806760610/info
def page_parser_from_search_for_UserInfoLoc(page, url):
bs_all = BeautifulSoup(page)
div_all = bs_all.findAll('div', attrs={'class':'c'})
nickname = ""
location = ""
sex = ""
birth = ""
intro = ""
check_or_not = u''
check_info = ""
op_uid = url[url.find('.cn'):]
uid = op_uid[op_uid.find('/') + 1:op_uid.rfind('/')]
for div in div_all:
for str_in in str(div.getText(u'\n')).split(u'\n'):
en_str = str_in.encode('utf-8')
if(en_str.startswith(u"")):
nickname = en_str[en_str.find(':') + 1:]
elif(en_str.startswith(u"")):
location = en_str[en_str.find(':') + 1:]
elif(en_str.startswith(u"")):
sex = en_str[en_str.find(':') + 1:]
elif(en_str.startswith(u"")):
birth = en_str[en_str.find(':') + 1:]
elif(en_str.startswith(u"")):
intro = en_str[en_str.find(':') + 1:]
elif(en_str.startswith(u"")):
check_or_not = u''
check_info = en_str
return UserInfo_loc(uid, nickname, location, sex, birth, intro, check_or_not, check_info)
pass
# http://weibo.cn/1730330447?f=search_0
# http://weibo.cn/breakingnews?f=search_0
# UserInfo
# # uid
# # http://weibo.com/1802646764/info
| 37.727569 | 293 | 0.589657 |
c2c2bc98c89407c449beca19dbbedcbb96369738 | 246 | py | Python | sureflap/resources/request_models.py | fabieu/sureflap-api | 711bb32a7add64367fa3e15b25d52468f8aa7904 | [
"Apache-2.0"
] | 1 | 2020-12-03T16:43:55.000Z | 2020-12-03T16:43:55.000Z | sureflap/resources/request_models.py | fabieu/sureflap-api | 711bb32a7add64367fa3e15b25d52468f8aa7904 | [
"Apache-2.0"
] | 3 | 2021-07-14T21:41:53.000Z | 2022-01-29T16:56:21.000Z | sureflap/resources/request_models.py | fabieu/sureflap-api | 711bb32a7add64367fa3e15b25d52468f8aa7904 | [
"Apache-2.0"
] | 2 | 2021-02-13T12:11:22.000Z | 2021-02-14T09:58:40.000Z | from datetime import datetime, time
from enum import Enum
from typing import Optional, Sequence, Union
from pydantic import BaseModel
| 16.4 | 44 | 0.756098 |
c2c2f2be9b86caf1ba37fe85783e830ab1aa9049 | 1,303 | py | Python | trecrts-clients/python/dumb-retweet-client/retweet_service.py | rosequ/RTS18 | 9a9b63c5d454e03dc996d56cb9e4b3e35e413f4d | [
"Apache-2.0"
] | 7 | 2016-03-02T15:39:09.000Z | 2016-04-04T10:31:40.000Z | trecrts-clients/python/dumb-retweet-client/retweet_service.py | rosequ/RTS18 | 9a9b63c5d454e03dc996d56cb9e4b3e35e413f4d | [
"Apache-2.0"
] | 14 | 2015-10-22T18:51:17.000Z | 2015-11-15T06:36:33.000Z | trecrts-clients/python/dumb-retweet-client/retweet_service.py | aroegies/trecrts-tools | 1afe7a4226e59ad963419d5f96401a191bbc0112 | [
"Apache-2.0"
] | null | null | null | ##########################
####
# WARNING: THIS FILE IS DEPRECATED AND IS ONLY RETAINED FOR INFORMATIONAL PURPOSES
# ../dumb_topic_client is the up-to-date sample program
###
#########################
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import requests
cred_file = "oauth_tokens.txt"
seen_tweets = set()
if __name__ == '__main__':
oauth = json.load(open('oauth_tokens.txt'))
listener = RetweetListener()
auth = OAuthHandler(oauth["consumer_key"],oauth["consumer_secret"])
auth.set_access_token(oauth["access_token"],oauth["access_token_secret"])
stream = Stream(auth,listener)
stream.sample(languages=['en'])
| 31.780488 | 82 | 0.711435 |
c2c31ca71ec1d801042e3c41eac4e04e937da0de | 11,186 | py | Python | instance_selection/_DROP3.py | dpr1005/Semisupervised-learning-and-instance-selection-methods | 646d9e729c85322e859928e71a3241f2aec6d93d | [
"MIT"
] | 3 | 2021-12-10T09:04:18.000Z | 2022-01-22T15:03:19.000Z | instance_selection/_DROP3.py | dpr1005/Semisupervised-learning-and-instance-selection-methods | 646d9e729c85322e859928e71a3241f2aec6d93d | [
"MIT"
] | 107 | 2021-12-02T07:43:11.000Z | 2022-03-31T11:02:46.000Z | instance_selection/_DROP3.py | dpr1005/Semisupervised-learning-and-instance-selection-methods | 646d9e729c85322e859928e71a3241f2aec6d93d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Filename: DROP3.py
# @Author: Daniel Puente Ramrez
# @Time: 31/12/21 16:00
# @Version: 5.0
import copy
from sys import maxsize
import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors
from .utils import transform
| 39.111888 | 85 | 0.614339 |
c2c32defe3ff1b1dc5ec25d1e122b83a9bcc81b7 | 485 | py | Python | orchestra/contrib/issues/apps.py | RubenPX/django-orchestra | 5ab4779e1ae12ec99569d682601b7810587ed381 | [
"Unlicense"
] | 68 | 2015-02-09T10:28:44.000Z | 2022-03-12T11:08:36.000Z | orchestra/contrib/issues/apps.py | RubenPX/django-orchestra | 5ab4779e1ae12ec99569d682601b7810587ed381 | [
"Unlicense"
] | 17 | 2015-05-01T18:10:03.000Z | 2021-03-19T21:52:55.000Z | orchestra/contrib/issues/apps.py | RubenPX/django-orchestra | 5ab4779e1ae12ec99569d682601b7810587ed381 | [
"Unlicense"
] | 29 | 2015-03-31T04:51:03.000Z | 2022-02-17T02:58:50.000Z | from django.apps import AppConfig
from orchestra.core import accounts, administration
from orchestra.core.translations import ModelTranslation
| 30.3125 | 59 | 0.734021 |
c2c383eca130058829926eb64535622bb27a0128 | 178 | py | Python | python-data-analysis/matplotlib/ImshowDemo.py | meteor1993/python-learning | 4ee574c9360caf6e63bb6ee2ef31fa6a9918fa40 | [
"MIT"
] | 83 | 2019-10-15T06:54:06.000Z | 2022-03-28T14:08:21.000Z | python-data-analysis/matplotlib/ImshowDemo.py | wenxuefeng3930/python-learning | 4ee574c9360caf6e63bb6ee2ef31fa6a9918fa40 | [
"MIT"
] | 1 | 2020-04-16T08:13:19.000Z | 2020-07-14T01:52:46.000Z | python-data-analysis/matplotlib/ImshowDemo.py | wenxuefeng3930/python-learning | 4ee574c9360caf6e63bb6ee2ef31fa6a9918fa40 | [
"MIT"
] | 74 | 2019-11-02T08:10:36.000Z | 2022-02-19T12:23:36.000Z | import numpy as np
import matplotlib.pyplot as plt
x = np.random.rand(10, 10)
plt.imshow(x, cmap=plt.cm.hot)
#
plt.colorbar()
plt.savefig('imshow_demo.png')
| 16.181818 | 32 | 0.691011 |
c2c712a00d07acb813d9c64e92dbe982f58abfc3 | 942 | py | Python | tests/core/exceptions/test_exceptions_auto.py | ccrvs/attack_surface_pypy | f2bc9998cf42f4764f1c495e6243d970e01bd176 | [
"CC0-1.0"
] | null | null | null | tests/core/exceptions/test_exceptions_auto.py | ccrvs/attack_surface_pypy | f2bc9998cf42f4764f1c495e6243d970e01bd176 | [
"CC0-1.0"
] | null | null | null | tests/core/exceptions/test_exceptions_auto.py | ccrvs/attack_surface_pypy | f2bc9998cf42f4764f1c495e6243d970e01bd176 | [
"CC0-1.0"
] | null | null | null | # This test code was written by the `hypothesis.extra.ghostwriter` module
# and is provided under the Creative Commons Zero public domain dedication.
from pathlib import Path
from hypothesis import given, strategies as st
import attack_surface_pypy.core.exceptions
| 33.642857 | 87 | 0.814225 |
c2c78be72ea72b242adb4ca29ed829fd6b4d5b20 | 1,445 | py | Python | set4/challenge27.py | solfer/cryptopals_python | 6b22981a663b3dd2ef5fb5c30b1a6dc13eb0af1a | [
"MIT"
] | null | null | null | set4/challenge27.py | solfer/cryptopals_python | 6b22981a663b3dd2ef5fb5c30b1a6dc13eb0af1a | [
"MIT"
] | null | null | null | set4/challenge27.py | solfer/cryptopals_python | 6b22981a663b3dd2ef5fb5c30b1a6dc13eb0af1a | [
"MIT"
] | null | null | null | #! /usr/bin/python3
from Crypto.Cipher import AES
from random import randint
# https://www.cryptopals.com/sets/4/challenges/27
# Recover the key from CBC with IV=Key
import sys
sys.path.append('..')
from cryptopals import ctr,xor,random_aes_key,cbc_decrypt,cbc_encrypt
main()
| 21.567164 | 69 | 0.632526 |
c2c80dfda0a5984d9ce2a209c4604c7a22beaa47 | 577 | wsgi | Python | testproject/testproject.wsgi | c4mb0t/django-setman | 6551e3f6367bf8ee7c8f91e893c9e8439428f28a | [
"BSD-3-Clause"
] | 1 | 2015-05-30T15:05:14.000Z | 2015-05-30T15:05:14.000Z | testproject/testproject.wsgi | c4mb0t/django-setman | 6551e3f6367bf8ee7c8f91e893c9e8439428f28a | [
"BSD-3-Clause"
] | null | null | null | testproject/testproject.wsgi | c4mb0t/django-setman | 6551e3f6367bf8ee7c8f91e893c9e8439428f28a | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
DIRNAME = os.path.abspath(os.path.dirname(__file__))
rel = lambda *x: os.path.abspath(os.path.join(DIRNAME, *x))
PROJECT_DIR = rel('..')
activate_this = rel('env', 'bin', 'activate_this.py')
# Activate virtualenv
execfile(activate_this, {'__file__': activate_this})
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
os.environ['PYTHON_EGG_CACHE'] = '/srv/python_eggs/'
# Need to add upper-level dir to syspath to reproduce dev Django environ
sys.path.append(PROJECT_DIR)
from django.core.handlers.wsgi import WSGIHandler
application = WSGIHandler()
| 26.227273 | 72 | 0.753899 |
c2caaf55603ef2c7129fc78578663a36d8c83697 | 8,057 | py | Python | ntp/modules/generate.py | Michiel29/ntp-release | 567bf1ca823eeef5eeb2d63bbe16023ea63af766 | [
"Apache-2.0"
] | 3 | 2019-07-03T11:25:12.000Z | 2019-11-28T20:24:03.000Z | ntp/modules/generate.py | Michiel29/ntp-release | 567bf1ca823eeef5eeb2d63bbe16023ea63af766 | [
"Apache-2.0"
] | null | null | null | ntp/modules/generate.py | Michiel29/ntp-release | 567bf1ca823eeef5eeb2d63bbe16023ea63af766 | [
"Apache-2.0"
] | null | null | null | """Functions for generating random data with injected relationships"""
from itertools import product
import os
import json
import re
import random
import numpy as np
from numpy import random as rd
from scipy.special import comb
from ntp.util.util_kb import load_from_list
def gen_relationships(n_pred, n_rel, body_predicates=1):
"""
Generates random relationships between predicates of the form goal predicate <-- {set of body predicates}.
Goal predicates have a higher number than body predicates.
Args:
n_pred: number of total predicates
n_rel: number of relationships
body_predicates: number of body predicates for each relationship
Returns:
Dict, entries where keys are goal predicates and values are list of body predicates
"""
relationship_dict = {}
n_rel_possible = comb(n_pred, body_predicates + 1)
pred_probs = [comb(i, body_predicates)/n_rel_possible for i in range(n_pred)]
relationship_head_array = list(rd.choice(n_pred, size=n_rel, replace=False, p=pred_probs))
relationship_body_array = [set(rd.choice(range(relationship_head_array[i]), size=body_predicates, replace=False)) for i in range(len(relationship_head_array))]
for i in range(n_rel):
relationship_dict[relationship_head_array[i]] = relationship_body_array[i]
return relationship_dict
def gen_simple(n_pred, relationship_dict, p_normal, p_relationship, n_constants, order=1):
"""
Generates random truth values for predicates for a set number of constants, and given some relationships
Args:
n_pred: number of total predicates
relationship_dict: Dict of relationships
p_normal: probability of predicate truth given no relationship/relationship body not true
p_relationship: probability of goal predicate truth given body predicate truth
n_constants: number of constants
order: order of predicate (unary, binary)
Returns:
Numpy array where value j, i corresponds to the truth value of predicate i for constant j
"""
# Checks whether body predicates for a particular relationship hold for a particular constant
data = np.zeros([n_constants] * order + [n_pred])
for predicate in range(n_pred):
for index in product(*[range(n_constants) for i in range(order)]):
if predicate in relationship_dict:
if body_holds(data, relationship_dict[predicate], index):
data[index + (predicate,)] = rd.binomial(1, p_relationship)
continue
# Set variable normally if predicate from relationship doesn't hold
data[index + (predicate,)] = rd.binomial(1, p_normal)
return data
def write_data(data):
"""Convert numpy array of data into list of strings that the ntp algorithm can read"""
shape = np.shape(data)
text_list = []
for pred in range(shape[-1]):
for index in product(*[range(dim_size) for dim_size in shape[:-1]]):
if data[index + (pred,)] == 1:
write_string = "Predicate" + str(pred) + "("
for const in index:
write_string += "Constant" + str(const) + ","
write_string = write_string[:-1] + ").\n"
text_list.append(write_string)
return text_list
def write_relationships(relationships, path):
"""write relationship dict to file"""
with open(path, "w") as f:
json.dump(relationships, f)
return
def write_simple_templates(n_rules, body_predicates=1, order=1):
"""Generate rule template of form C < A ^ B of varying size and order"""
text_list = []
const_term = "("
for i in range(order):
const_term += chr(ord('X') + i) + ","
const_term = const_term[:-1] + ")"
write_string = "{0} #1{1} :- #2{1}".format(n_rules, const_term)
if body_predicates > 1:
for i in range(body_predicates - 1):
write_string += ", #" + str(i + 3) + const_term
text_list.append(write_string)
return text_list
def gen_transitivity(n_preds, n_rules, n_constants, p_base, max_iterations=1):
"""Generate data with transitivity relationships, and also rule templates"""
# active predicate is predicate 0 WLOG
active_values = np.random.binomial(1, p_base, size=[n_constants, n_constants])
edges = [(i, j) for i in range(n_constants) for j in range(n_constants) if active_values[i, j] == 1]
closure = set(edges)
while True:
new_edges = set((x,w) for x,y in closure for q,w in closure if q == y)
closure_until_now = closure | new_edges
if closure_until_now == closure:
break
closure = closure_until_now
edges = list(closure)
active_values[tuple(np.transpose(edges))] = 1
values = np.random.binomial(1, p_base, size=[n_constants, n_constants, n_preds])
values[:, :, 0] = active_values
fact_list = write_data(values)
template = "{0} #1(X, Z) :- #1(X, Y), #1(Y, Z).".format(n_rules)
return fact_list, template
def text_to_id(fact):
"""Given a fact in text form, convert to predicate and constant numbers"""
reduced = re.sub("[^0-9\(,]", '', fact)
reduced_split = tuple(re.split("[\(,]", reduced))
predicate = int(reduced_split[0])
constants = tuple([int(constant_text) for constant_text in reduced_split[1:]])
return predicate, constants
def gen_constant_dict(train_list):
"""Convert list of facts in text form to a dictionary of predicate truth values by constant"""
constant_dict = {}
for fact in train_list:
predicate, constants = text_to_id(fact)
if not constants in constant_dict:
constant_dict[constants] = set([predicate])
else:
constant_dict[constants].add(predicate)
return constant_dict
def test_fact_active(constant_dict, constants, predicate, relationships):
"""Given relationships, determine whether the truth value of a fact could be predicted by a relationship"""
if predicate in relationships:
if all(body_pred in constant_dict[constants] for body_pred in relationships[predicate]):
return True
return False
def count_active(constant_dict, relationships):
"""Given relationships and a dataset of constants, determine for how many facts the truth value could be predicted by a relationship"""
active_facts = 0
for constants, predicates in constant_dict.items():
for predicate in relationships:
if predicate in predicates and all(body_pred in predicates for body_pred in relationships[predicate]):
active_facts += 1
return active_facts
def gen_test_kb(train_list, n_test, test_active_only=False, relationships=None):
"""Given a list of facts, choose some facts to be split off to a test dataset in such a way that there is at least one training fact left for each constant"""
constant_dict = gen_constant_dict(train_list)
random.shuffle(train_list)
constant_set = set()
new_train_list = []
test_list = []
for fact in train_list:
predicate, constants = text_to_id(fact)
if test_active_only:
if test_fact_active(constant_dict, constants, predicate, relationships) and len(test_list) < n_test:
test_list.append(fact)
continue
else:
if all(constant in constant_set for constant in constants) and len(test_list) < n_test:
test_list.append(fact)
continue
else:
for constant in constants:
constant_set.add(constant)
new_train_list.append(fact)
train_list = new_train_list
test_kb = load_from_list(test_list)
return test_kb, train_list
| 37.129032 | 163 | 0.666749 |
c2cabc8b7c10f234c2f764e400a0eb0ee368ade4 | 1,116 | py | Python | accounts/tests/test_account_views.py | borzecki/django-paymate | 960e1dcce2682e57374663d87e47c5cff0c7aae4 | [
"MIT"
] | null | null | null | accounts/tests/test_account_views.py | borzecki/django-paymate | 960e1dcce2682e57374663d87e47c5cff0c7aae4 | [
"MIT"
] | null | null | null | accounts/tests/test_account_views.py | borzecki/django-paymate | 960e1dcce2682e57374663d87e47c5cff0c7aae4 | [
"MIT"
] | null | null | null | from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from accounts.models import Account
from accounts.serializers import AccountSerializer
from .utils import create_accounts
| 32.823529 | 97 | 0.689964 |
c2cb04716bb5f1c7ce9e0998301f2ac347c3c6dd | 202 | py | Python | CTF/Pico2017/level_two/forensics/little_school_bus/solve.py | RegaledSeer/netsecnoobie | d3366937ec8c67a9742f61e47698239ae693af49 | [
"MIT"
] | null | null | null | CTF/Pico2017/level_two/forensics/little_school_bus/solve.py | RegaledSeer/netsecnoobie | d3366937ec8c67a9742f61e47698239ae693af49 | [
"MIT"
] | null | null | null | CTF/Pico2017/level_two/forensics/little_school_bus/solve.py | RegaledSeer/netsecnoobie | d3366937ec8c67a9742f61e47698239ae693af49 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
FILE_PATH = "./littleschoolbus.bmp"
with open(FILE_PATH,"rb") as f:
bytes = bytearray(f.read())
result = ""
for byte in bytes[54:]:
result += str(byte & 1)
print(result)
| 14.428571 | 35 | 0.633663 |
c2ccffa0a75618b898fb390841847aab6f871afc | 412 | py | Python | isi_mip/climatemodels/migrations/0044_auto_20170116_1626.py | ISI-MIP/isimip | c2a78c727337e38f3695031e00afd607da7d6dcb | [
"MIT"
] | 4 | 2017-07-05T08:06:18.000Z | 2021-03-01T17:23:18.000Z | isi_mip/climatemodels/migrations/0044_auto_20170116_1626.py | ISI-MIP/isimip | c2a78c727337e38f3695031e00afd607da7d6dcb | [
"MIT"
] | 4 | 2020-01-31T09:02:57.000Z | 2021-04-20T14:04:35.000Z | isi_mip/climatemodels/migrations/0044_auto_20170116_1626.py | ISI-MIP/isimip | c2a78c727337e38f3695031e00afd607da7d6dcb | [
"MIT"
] | 4 | 2017-10-12T01:48:55.000Z | 2020-04-29T13:50:03.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-16 15:26
from __future__ import unicode_literals
from django.db import migrations
| 20.6 | 53 | 0.631068 |
c2ce62208e5d0f3a5f97c461255fe7d85b8afbee | 13,528 | py | Python | custom_utils/crop4patches.py | ziming-liu/ObjectDet | 6e25fa784114b9773b052d9d5465aa6fed93468a | [
"Apache-2.0"
] | null | null | null | custom_utils/crop4patches.py | ziming-liu/ObjectDet | 6e25fa784114b9773b052d9d5465aa6fed93468a | [
"Apache-2.0"
] | null | null | null | custom_utils/crop4patches.py | ziming-liu/ObjectDet | 6e25fa784114b9773b052d9d5465aa6fed93468a | [
"Apache-2.0"
] | null | null | null | import numpy
import os
import json
import cv2
import csv
import os.path as osp
import mmcv
import numpy as np
if __name__ == '__main__':
import fire
fire.Fire()
#img_prefix = '/home/share2/VisDrone2019/TASK1/VisDrone2019-DET-val/'
#img_writen= '/home/share2/VisDrone2019/TASK1/VisDrone2019-DET-val-patches/'
#crop4patches(img_prefix=img_prefix,img_writen=img_writen,istrain=False)
| 53.05098 | 190 | 0.53563 |
c2d008457b1988d06b4f36156a0cb0305d850324 | 1,121 | py | Python | rabbitgetapi/__main__.py | Sidon/get-rabbitmq-messages | 8feff8c9b9edee863d875966f5e5f3a5eb6ab06a | [
"MIT"
] | 11 | 2022-01-10T13:49:39.000Z | 2022-01-11T05:57:45.000Z | rabbitgetapi/__main__.py | Sidon/get-rabbitmq-messages | 8feff8c9b9edee863d875966f5e5f3a5eb6ab06a | [
"MIT"
] | null | null | null | rabbitgetapi/__main__.py | Sidon/get-rabbitmq-messages | 8feff8c9b9edee863d875966f5e5f3a5eb6ab06a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyleft 2021 Sidon Duarte
#
import http
import sys
from typing import Any
import colorama
import requests
from rabbitgetapi import cli
from rabbitgetapi import exceptions
from rabbitgetapi import build_parser
if __name__ == "__main__":
sys.exit(main())
| 26.069767 | 75 | 0.674398 |
c2d017d8eee0b960215a2642618960e9f03da11f | 245 | py | Python | src/allyoucanuse/etc/hashing.py | kunlubrain/allyoucanuse | c206d53fa9948cb335b406805d52125921fb71cf | [
"MIT"
] | null | null | null | src/allyoucanuse/etc/hashing.py | kunlubrain/allyoucanuse | c206d53fa9948cb335b406805d52125921fb71cf | [
"MIT"
] | null | null | null | src/allyoucanuse/etc/hashing.py | kunlubrain/allyoucanuse | c206d53fa9948cb335b406805d52125921fb71cf | [
"MIT"
] | null | null | null | from typing import Union, Iterable
import hashlib
def hash_id(seeds:Union[str, Iterable], n:int=32)->str:
"""For the moment, use the default simple python hash func
"""
h = hashlib.sha256(''.join(seeds)).hexdigest()[:n]
return h | 30.625 | 62 | 0.681633 |
c2d2914bf2009ddae6cb71f0693560922df3f83f | 12,182 | py | Python | SST/datasets/wrapperpolicy.py | shaoshitong/torchdistill | 709ca2d59442090d73a554d363e4c5e37538c707 | [
"MIT"
] | 1 | 2022-03-25T05:05:55.000Z | 2022-03-25T05:05:55.000Z | SST/datasets/wrapperpolicy.py | shaoshitong/torchdistill | 709ca2d59442090d73a554d363e4c5e37538c707 | [
"MIT"
] | null | null | null | SST/datasets/wrapperpolicy.py | shaoshitong/torchdistill | 709ca2d59442090d73a554d363e4c5e37538c707 | [
"MIT"
] | null | null | null | import os
import numpy as np
import torch
from torch.utils.data import Dataset
import math
import torch
import torch.nn.functional as F
import random
import torchvision.datasets
from torchvision.transforms import *
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from PIL import Image, ImageEnhance, ImageOps
from torch.utils.data import Dataset
from torchdistill.datasets.wrapper import register_dataset_wrapper,BaseDatasetWrapper
def policy_classes_compute(hot):
l=hot.shape[0]
exp=torch.arange(0,l)
weight=2**exp
return (hot*weight).sum().long()
| 40.471761 | 136 | 0.613692 |
c2d3138307df728361eddc71fedd71f1bcf4a126 | 67 | py | Python | Chapter 07/ch7_1m.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | f6a4194684515495d00aa38347a725dd08f39a0c | [
"MIT"
] | null | null | null | Chapter 07/ch7_1m.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | f6a4194684515495d00aa38347a725dd08f39a0c | [
"MIT"
] | null | null | null | Chapter 07/ch7_1m.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | f6a4194684515495d00aa38347a725dd08f39a0c | [
"MIT"
] | null | null | null | print(lambda x: x*x (10))
# may give address of lambda function | 22.333333 | 38 | 0.686567 |
c2d36fb4456d02f1a3cbf08824eb8cded948400d | 3,029 | py | Python | {{cookiecutter.project_slug}}/backend/app/app/tests/crud/test_item.py | Gjacquenot/full-stack-fastapi-couchbase | 5df16af2ffcb22d141c5e689a220611005747939 | [
"MIT"
] | 353 | 2019-01-03T09:53:17.000Z | 2022-03-27T12:24:45.000Z | {{cookiecutter.project_slug}}/backend/app/app/tests/crud/test_item.py | Gjacquenot/full-stack-fastapi-couchbase | 5df16af2ffcb22d141c5e689a220611005747939 | [
"MIT"
] | 21 | 2019-01-06T21:50:40.000Z | 2021-08-19T11:33:15.000Z | {{cookiecutter.project_slug}}/backend/app/app/tests/crud/test_item.py | Gjacquenot/full-stack-fastapi-couchbase | 5df16af2ffcb22d141c5e689a220611005747939 | [
"MIT"
] | 72 | 2019-03-07T21:59:55.000Z | 2022-03-18T04:59:22.000Z | from app import crud
from app.db.database import get_default_bucket
from app.models.config import ITEM_DOC_TYPE
from app.models.item import ItemCreate, ItemUpdate
from app.tests.utils.user import create_random_user
from app.tests.utils.utils import random_lower_string
| 35.22093 | 88 | 0.719379 |
c2d3808ea07cbe15ac6fd167c1f1d94408d838e4 | 32 | py | Python | src/constants.py | argho28/Translation | 11e24df4deb29d37dfb1f48cf686cef75eb68397 | [
"MIT"
] | 15 | 2019-09-26T09:59:14.000Z | 2021-08-14T16:54:42.000Z | src/constants.py | argho28/Translation | 11e24df4deb29d37dfb1f48cf686cef75eb68397 | [
"MIT"
] | 9 | 2020-03-24T17:53:25.000Z | 2022-01-13T01:36:39.000Z | src/constants.py | argho28/Translation | 11e24df4deb29d37dfb1f48cf686cef75eb68397 | [
"MIT"
] | 3 | 2019-12-30T15:35:32.000Z | 2021-01-05T18:02:41.000Z | MODEL_PATH = "./model/model.pt"
| 16 | 31 | 0.6875 |
c2d52797a4915efe6cf6a4bf7bb065954ba40d31 | 12,271 | py | Python | 03_ML_training.py | YunxiaoRen/ML-iAMR | 6bab74b4dccb5da8bc6155a7ee7ffa9d4811b894 | [
"MIT"
] | 4 | 2021-10-10T15:31:23.000Z | 2022-02-10T00:17:55.000Z | 03_ML_training.py | YunxiaoRen/ML-iAMR | 6bab74b4dccb5da8bc6155a7ee7ffa9d4811b894 | [
"MIT"
] | null | null | null | 03_ML_training.py | YunxiaoRen/ML-iAMR | 6bab74b4dccb5da8bc6155a7ee7ffa9d4811b894 | [
"MIT"
] | 2 | 2021-12-07T22:04:54.000Z | 2022-02-10T07:14:42.000Z |
##**************************************************************************************##
## Step1. Load Packages and Input Data ##
##**************************************************************************************##
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm,metrics
from sklearn.svm import SVC,LinearSVC
from sklearn.model_selection import KFold,StratifiedKFold
from sklearn.metrics import matthews_corrcoef,auc, roc_curve,plot_roc_curve, plot_precision_recall_curve,classification_report, confusion_matrix,average_precision_score, precision_recall_curve
from pandas.core.frame import DataFrame
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
import imblearn
from collections import Counter
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
############################# Step2: input data processing #####################
## giessen data
gi_data = np.load("/gi_CIP_FCGR200/alt_cnn_input.npy")
gi_pheno = pd.read_csv("CIP_gi_pheno.csv",index_col=0)
gi_data.shape,gi_pheno.shape
gi_data2 = gi_data.reshape(900,40000)
gi_pheno2 = gi_pheno.values
gi_pheno3 = gi_pheno2.reshape(900,)
gi_data2.shape,gi_pheno3.shape
X = gi_data2
y = gi_pheno3
X.shape,y.shape
## pubdata
pub_data = np.load("/pub_CIP_FCGR200/alt_cnn_input.npy")
pub_pheno = pd.read_csv("CIP_pub_pheno.csv",index_col=0)
pub_data.shape
pub_data2 = pub_data.reshape(1496,40000)
pub_pheno2 = pub_pheno.values
pub_pheno3 = pub_pheno2.reshape(1496,)
pub_data2.shape,pub_pheno3.shape
x_test = pub_data2
y_test = pub_pheno3
undersample = RandomUnderSampler(sampling_strategy='majority')
pub_x_under,pub_y_under=undersample.fit_resample(pub_data2,pub_pheno3)
print(Counter(pub_y_under))
##**************************************************************************************##
## Step2. Training and evaluation of RF,LR, SVM ##
##**************************************************************************************##
## cross validation
cv = StratifiedKFold(n_splits=5)
rf = RandomForestClassifier(n_estimators=200, random_state=0)
lr = LogisticRegression(solver = 'lbfgs',max_iter=1000)
svm = SVC(kernel='linear', probability=True)
##*************** F1 + ROC curve
rf_tprs = []
rf_prs = []
rf_roc_aucs = []
rf_pr_aucs = []
rf_f1_matrix_out = []
rf_f1_report_out = []
rf_MCC_out = []
rf_pred_cls_out = []
rf_pred_prob_out = []
rf_y_test_out = []
rf_mean_fpr = np.linspace(0, 1, 100)
rf_mean_recall = np.linspace(0, 1, 100)
## LR
lr_tprs = []
lr_prs = []
lr_roc_aucs = []
lr_pr_aucs = []
lr_f1_matrix_out = []
lr_f1_report_out = []
lr_MCC_out = []
lr_pred_cls_out = []
lr_pred_prob_out = []
lr_y_test_out = []
lr_mean_fpr = np.linspace(0, 1, 100)
lr_mean_recall = np.linspace(0, 1, 100)
## SVM
svm_tprs = []
svm_prs = []
svm_roc_aucs = []
svm_pr_aucs = []
svm_f1_matrix_out = []
svm_f1_report_out = []
svm_MCC_out = []
svm_pred_cls_out = []
svm_pred_prob_out = []
svm_y_test_out = []
svm_mean_fpr = np.linspace(0, 1, 100)
svm_mean_recall = np.linspace(0, 1, 100)
fig,[ax1,ax2,ax3] = plt.subplots(nrows=1,ncols=3,figsize=(15, 4))
for i, (train, test) in enumerate(cv.split(X, y)):
## train the new model
rf.fit(X[train], y[train])
## roc curve
rf_viz = plot_roc_curve(rf, X[test], y[test],name='K fold {}'.format(i),alpha=0.3, lw=1,ax=ax1)
rf_interp_tpr = np.interp(rf_mean_fpr, rf_viz.fpr, rf_viz.tpr)
rf_interp_tpr[0] = 0.0
rf_tprs.append(rf_interp_tpr)
rf_roc_aucs.append(rf_viz.roc_auc)
## evaluation metrics
rf_pred_cls = rf.predict(X[test])
rf_pred_prob = rf.predict_proba(X[test])[:,1]
rf_f1_matrix = confusion_matrix(y[test],rf_pred_cls)
rf_f1_report = classification_report(y[test],rf_pred_cls)
rf_MCC = matthews_corrcoef(y[test],rf_pred_cls)
### save evalu_metrics out
rf_pred_cls_out.append(rf_pred_cls)
rf_pred_prob_out.append(rf_pred_prob)
rf_f1_matrix_out.append(rf_f1_matrix)
rf_f1_report_out.append(rf_f1_report)
rf_MCC_out.append(rf_MCC)
rf_y_test_out.append(y[test])
## LR
lr.fit(X[train], y[train])
## roc curve
lr_viz = plot_roc_curve(lr, X[test], y[test],name='K fold {}'.format(i),alpha=0.3, lw=1,ax=ax2)
lr_interp_tpr = np.interp(lr_mean_fpr, lr_viz.fpr, lr_viz.tpr)
lr_interp_tpr[0] = 0.0
lr_tprs.append(lr_interp_tpr)
lr_roc_aucs.append(lr_viz.roc_auc)
## evaluation metrics
lr_pred_cls = lr.predict(X[test])
lr_pred_prob = lr.predict_proba(X[test])[:,1]
lr_f1_matrix = confusion_matrix(y[test],lr_pred_cls)
lr_f1_report = classification_report(y[test],lr_pred_cls)
lr_MCC = matthews_corrcoef(y[test],lr_pred_cls)
### save evalu_metrics out
lr_pred_cls_out.append(lr_pred_cls)
lr_pred_prob_out.append(lr_pred_prob)
lr_f1_matrix_out.append(lr_f1_matrix)
lr_f1_report_out.append(lr_f1_report)
lr_MCC_out.append(lr_MCC)
lr_y_test_out.append(y[test])
## SVM
svm.fit(X[train], y[train])
## roc curve
svm_viz = plot_roc_curve(svm, X[test], y[test],name='K fold {}'.format(i),alpha=0.3, lw=1,ax=ax3)
svm_interp_tpr = np.interp(svm_mean_fpr, svm_viz.fpr, svm_viz.tpr)
svm_interp_tpr[0] = 0.0
svm_tprs.append(svm_interp_tpr)
svm_roc_aucs.append(svm_viz.roc_auc)
## evaluation metrics
svm_pred_cls = svm.predict(X[test])
svm_pred_prob = svm.predict_proba(X[test])[:,1]
svm_f1_matrix = confusion_matrix(y[test],svm_pred_cls)
svm_f1_report = classification_report(y[test],svm_pred_cls)
svm_MCC = matthews_corrcoef(y[test],svm_pred_cls)
### save evalu_metrics out
svm_pred_cls_out.append(svm_pred_cls)
svm_pred_prob_out.append(svm_pred_prob)
svm_f1_matrix_out.append(svm_f1_matrix)
svm_f1_report_out.append(svm_f1_report)
svm_MCC_out.append(svm_MCC)
svm_y_test_out.append(y[test])
#### save predit_prob out
np.save("CIP_gi_FCGR_RF_y_pred_prob_out.npy",rf_pred_prob_out)
np.save("CIP_gi_FCGR_RF_y_test_out.npy",rf_y_test_out)
np.save("CIP_gi_FCGR_LR_y_pred_prob_out.npy",lr_pred_prob_out)
np.save("CIP_gi_FCGR_LR_y_test_out.npy",lr_y_test_out)
np.save("CIP_gi_FCGR_SVM_y_pred_prob_out.npy",svm_pred_prob_out)
np.save("CIP_gi_FCGR_SVM_y_test_out.npy",svm_y_test_out)
#### evaluation
rf_eva_pred_prob = rf.predict_proba(pub_data2)[:,1]
lr_eva_pred_prob = lr.predict_proba(pub_data2)[:,1]
svm_eva_pred_prob = svm.predict_proba(pub_data2)[:,1]
np.save("CIP_FCGR_RF_test_y_pred_prob.npy",rf_eva_pred_prob)
np.save("CIP_FCGR_LR_test_y_pred_prob.npy",lr_eva_pred_prob)
np.save("CIP_FCGR_SVM_test_y_pred_prob.npy",svm_eva_pred_prob)
np.save("CIP_FCGR_test_y_out.npy",pub_pheno3)
#### evaluation for under sample
#pub_x_under,pub_y_under
rf_eva_under_pred_prob = rf.predict_proba(pub_x_under)[:,1]
lr_eva_under_pred_prob = lr.predict_proba(pub_x_under)[:,1]
svm_eva_under_pred_prob = svm.predict_proba(pub_x_under)[:,1]
##**************************************************************************************##
## Step3. Training and evaluation of CNN ##
##**************************************************************************************##
############################# Step1: load pacakge #####################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
from keras.utils import to_categorical
from keras.models import Sequential
from tensorflow.keras import activations
from sklearn.model_selection import KFold,StratifiedKFold
from keras.layers import Dense,Dropout, Flatten, Conv1D, Conv2D, MaxPooling1D,MaxPooling2D
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from keras.layers import BatchNormalization
############################# Step2: load metrics function #####################
### F1 score, precision, recall and accuracy metrics
############################# Step3: input data processing #####################
X.shape,y.shape,pub_data2.shape,pub_pheno3.shape
#((900, 40000),(900,), (1496, 40000), (1496,))
x_train,x_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=123)
x_train.shape,x_test.shape,y_train.shape,y_test.shape
#((720, 40000), (180, 40000), (720,), (180,))
inputs = x_train.reshape(720,200,200,1)
inputs = inputs.astype('float32')
targets = to_categorical(y_train)
inputs.shape,targets.shape
x_test2 = x_test.reshape(180,200,200,1)
x_test2 = x_test2.astype('float32')
y_test2 = to_categorical(y_test)
pub_x_test = pub_data2.reshape(1496,200,200,1)
pub_x_test = pub_x_test.astype('float32')
pub_y_test = pub_pheno3
############################# Step4: model training #####################
batch_size = 8
no_classes = 2
no_epochs = 50
verbosity = 1
num_folds = 5
# Define the K-fold Cross Validator
kfold = KFold(n_splits=num_folds, shuffle=True)
# K-fold Cross Validation model evaluation
fold_no = 1
model_history=[]
for train, test in kfold.split(inputs, targets):
model = Sequential()
model.add(Conv2D(filters=8, kernel_size=3,activation='relu', input_shape=(200,200,1)))
model.add(BatchNormalization())
model.add(Conv2D(filters=8, kernel_size=3, padding='same', activation='relu'))
#model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2)))
model.add(Conv2D(filters=16, kernel_size=3, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=16, kernel_size=3, padding='same', activation='relu'))
#model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(2,activation='softmax'))
# Compile the model
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['acc',f1_m,precision_m, recall_m])
# Generate a print
print('--------------------------------')
print(f'Training for fold {fold_no} ...')
## checkpoint for saving model
filepath="CIP_gi_FCGR_CNN_weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True,mode='max')
callbacks_list = [checkpoint]
# Fit data to model
train_model = model.fit(inputs[train], targets[train],batch_size=batch_size,epochs=no_epochs,callbacks=callbacks_list,verbose=verbosity,validation_data=(inputs[test], targets[test]))
model_history.append(train_model.history)
# Increase fold number
fold_no = fold_no + 1
########## (2) save model
model.save_weights('CIP_gi_FCGR_CNN.model.h5')
# save model history
from pandas.core.frame import DataFrame
model_out = DataFrame(model_history)
model_out.to_csv("CIP_gi_FCGR_CNN_model_history_out.csv",index=False)
############# Evaluation on pub data
### ROC
y_pred_keras = model.predict_proba(pub_x_test)
### evaluation for under-sample
undersample = RandomUnderSampler(sampling_strategy='majority')
pub_x_under,pub_y_under=undersample.fit_resample(pub_data2,pub_pheno3)
print(Counter(pub_y_under))
pub_x_under = pub_x_under.reshape(534,200,200,1)
pub_x_under = pub_x_under.astype('float32')
y_pred_keras = model.predict_proba(pub_x_under)
| 39.079618 | 193 | 0.677043 |
c2d54bc8670fa3bdf4a2db5b9a515c8fa9d07665 | 189 | py | Python | testspeed/__init__.py | sc-1123/testspeed | 0dc560f9019087275d29eba2e4dfc351ba89566e | [
"MIT"
] | 1 | 2019-07-29T03:12:10.000Z | 2019-07-29T03:12:10.000Z | testspeed/__init__.py | sc-1123/testspeed | 0dc560f9019087275d29eba2e4dfc351ba89566e | [
"MIT"
] | null | null | null | testspeed/__init__.py | sc-1123/testspeed | 0dc560f9019087275d29eba2e4dfc351ba89566e | [
"MIT"
] | null | null | null | name = "testspeed"
from time import time
from sys import argv
from os import system
tic = time()
system('python %s' % (argv[1]))
toc = time()
print('used %s seconds' % (toc - tic))
| 21 | 39 | 0.640212 |
c2d5cfe13e3252b73bc2d506fd5f87805ad7437d | 6,660 | py | Python | gdalhelpers/functions/create_points_at_angles_distance_in_direction.py | JanCaha/gdalhelpers | 925ecb2552b697b5970617484f1fc259f844ba04 | [
"MIT"
] | null | null | null | gdalhelpers/functions/create_points_at_angles_distance_in_direction.py | JanCaha/gdalhelpers | 925ecb2552b697b5970617484f1fc259f844ba04 | [
"MIT"
] | null | null | null | gdalhelpers/functions/create_points_at_angles_distance_in_direction.py | JanCaha/gdalhelpers | 925ecb2552b697b5970617484f1fc259f844ba04 | [
"MIT"
] | null | null | null | from osgeo import ogr
from typing import List, Union
import math
import os
import warnings
import numpy as np
from gdalhelpers.checks import values_checks, datasource_checks, layer_checks
from gdalhelpers.helpers import layer_helpers, datasource_helpers, geometry_helpers
def create_points_at_angles_distance_in_direction(start_points: ogr.DataSource,
main_direction_point: ogr.DataSource,
distance: Union[int, float] = 10,
angle_offset: Union[int, float] = 10,
angle_density: Union[int, float] = 1,
angles_specification_degrees: bool = True,
input_points_id_field: str = None) -> ogr.DataSource:
"""
Function that generates for every `Feature` in `start_points` set of points at specified `distance` in direction of
`main_direction_point`.
Parameters
----------
start_points : ogr.DataSource
Points to generate new points around. Can be of geometrical types: `ogr.wkbPoint, ogr.wkbPoint25D,
ogr.wkbPointM, ogr.wkbPointZM`.
main_direction_point : ogr.DataSource
Layer with single feature that specifies the direction in which the new points are generated.
distance : float or int
Distance at which the new points are generated. Default value is `10` and it is specified in units of layer
`start_points`.
angle_offset : float or int
Specification of angle offset on each side from `main_direction_point`. The points are generated in interval
`[main_angle - angle_offset, main_angle + angle_offset]`, where `main_angle` is angle between specific feature
of `start_points` and `main_direction_point`. Default value is `10`, which gives over angle width of `20`.
angle_density : float or int
How often points are generated in inverval given by `angle_offset`. Default value is `1`.
angles_specification_degrees : bool
Are the angles specified in degrees? Default values is `True`, if `False` the values are in radians.
input_points_id_field : str
Name of ID (or other) field from `input_points_ds` that should be carried over the resulting DataSource.
Returns
-------
ogr.DataSource
Virtual `ogr.DataSource` in memory with one layer (named `points`) containing the points.
Raises
------
Various Errors can be raise while checking for validity of inputs.
Warns
-------
UserWarning
If the field of given name (`input_points_id_field`) is not present or if its not of type `ogr.OFTInteger`.
"""
output_points_ds = datasource_helpers.create_temp_gpkg_datasource()
datasource_checks.check_is_ogr_datasource(start_points, "start_points")
datasource_checks.check_is_ogr_datasource(main_direction_point, "main_direction_point")
values_checks.check_value_is_zero_or_positive(distance, "distance")
values_checks.check_number(angle_offset, "angle_offset")
values_checks.check_number(angle_density, "angle_density")
if angles_specification_degrees:
angle_offset = ((2*math.pi)/360)*angle_offset
angle_density = ((2*math.pi)/360)*angle_density
input_points_layer = start_points.GetLayer()
layer_checks.check_is_layer_geometry_type(input_points_layer, "input_points_layer", [ogr.wkbPoint, ogr.wkbPoint25D,
ogr.wkbPointM, ogr.wkbPointZM])
input_points_srs = input_points_layer.GetSpatialRef()
main_point_layer = main_direction_point.GetLayer()
layer_checks.check_is_layer_geometry_type(main_point_layer, "main_point_layer", [ogr.wkbPoint, ogr.wkbPoint25D,
ogr.wkbPointM, ogr.wkbPointZM])
layer_checks.check_number_of_features(main_point_layer, "main_point_layer", 1)
if input_points_id_field is not None:
if not layer_checks.does_field_exist(input_points_layer, input_points_id_field):
input_points_id_field = None
warnings.warn(
"Field {0} does not exist in {1}. Defaulting to FID.".format(input_points_id_field,
os.path.basename(start_points.GetDescription()))
)
else:
if not layer_checks.is_field_of_type(input_points_layer, input_points_id_field, ogr.OFTInteger):
input_points_id_field = None
warnings.warn(
"Field {0} in {1} is not `Integer`. Defaulting to FID.".format(input_points_id_field,
os.path.basename(start_points.GetDescription()))
)
if input_points_id_field is None:
field_name_id = "input_point_FID"
else:
field_name_id = "input_point_ID"
field_name_angle = "angle"
layer_helpers.create_layer_points(output_points_ds, input_points_srs, "points")
output_points_layer = output_points_ds.GetLayer()
fields = {field_name_id: ogr.OFTInteger,
field_name_angle: ogr.OFTReal}
layer_helpers.add_fields_from_dict(output_points_layer, fields)
output_points_def = output_points_layer.GetLayerDefn()
for main_feature in main_point_layer:
main_geom = main_feature.GetGeometryRef()
for feature in input_points_layer:
geom = feature.GetGeometryRef()
if input_points_id_field is None:
f_id = feature.GetFID()
else:
f_id = feature.GetField(input_points_id_field)
main_angle = geometry_helpers.angle_points(geom, main_geom)
angles = np.arange(main_angle - angle_offset,
np.nextafter(main_angle + angle_offset, np.Inf),
step=angle_density)
for angle in angles:
p = geometry_helpers.point_at_angle_distance(geom, distance, angle)
output_point_feature = ogr.Feature(output_points_def)
output_point_feature.SetGeometry(p)
values = {field_name_id: f_id,
field_name_angle: angle}
layer_helpers.add_values_from_dict(output_point_feature, values)
output_points_layer.CreateFeature(output_point_feature)
return output_points_ds
| 43.815789 | 131 | 0.640841 |
c2d600c10308080eab8bee5be84ed3ffe2e71757 | 151 | py | Python | test.py | Imagio/enigma | 31c84a40cabe6ed7fc75743dbe9292a1bb622c4e | [
"MIT"
] | null | null | null | test.py | Imagio/enigma | 31c84a40cabe6ed7fc75743dbe9292a1bb622c4e | [
"MIT"
] | null | null | null | test.py | Imagio/enigma | 31c84a40cabe6ed7fc75743dbe9292a1bb622c4e | [
"MIT"
] | null | null | null | import unittest
from rotor_tests import *
from rotor_settings_tests import *
from reflector_tests import *
from enigma_tests import *
unittest.main()
| 18.875 | 34 | 0.821192 |
c2d85ba1664d0e7d0a642dbaf8af0b812fb9a534 | 320 | py | Python | forums/__init__.py | sharebears/pulsar-forums | 6c1152a181c30bb82c49556fd072f47c2eeaf1cb | [
"MIT"
] | null | null | null | forums/__init__.py | sharebears/pulsar-forums | 6c1152a181c30bb82c49556fd072f47c2eeaf1cb | [
"MIT"
] | null | null | null | forums/__init__.py | sharebears/pulsar-forums | 6c1152a181c30bb82c49556fd072f47c2eeaf1cb | [
"MIT"
] | null | null | null | from werkzeug import find_modules, import_string
from forums import routes
from forums.modifications import modify_core
modify_core()
| 21.333333 | 59 | 0.73125 |
c2d8aaeb7cd07de199497544ee9bb719305bd800 | 1,380 | py | Python | polybot/views/ingest.py | evanpcosta/IEEEPolybot | 75fd70680f4f9fec8b1b77b4e116e4869eb8c079 | [
"Apache-2.0"
] | null | null | null | polybot/views/ingest.py | evanpcosta/IEEEPolybot | 75fd70680f4f9fec8b1b77b4e116e4869eb8c079 | [
"Apache-2.0"
] | null | null | null | polybot/views/ingest.py | evanpcosta/IEEEPolybot | 75fd70680f4f9fec8b1b77b4e116e4869eb8c079 | [
"Apache-2.0"
] | 1 | 2021-03-07T20:46:43.000Z | 2021-03-07T20:46:43.000Z | """Routes related to ingesting data from the robot"""
import os
import logging
from pathlib import Path
from flask import Blueprint, request, current_app
from pydantic import ValidationError
from werkzeug.utils import secure_filename
from polybot.models import UVVisExperiment
logger = logging.getLogger(__name__)
bp = Blueprint('ingest', __name__, url_prefix='/ingest')
| 30 | 70 | 0.674638 |
c2d96673325c088ac08245ec7ce49cbb6c73160f | 405 | py | Python | novice/02-04/lat_DIModule.py | septiannurtrir/praxis-academy | 1ef7f959c372ae991d74ccd373123142c2fbc542 | [
"MIT"
] | 1 | 2019-08-27T17:06:13.000Z | 2019-08-27T17:06:13.000Z | novice/02-04/lat_DIModule.py | septiannurtrir/praxis-academy | 1ef7f959c372ae991d74ccd373123142c2fbc542 | [
"MIT"
] | null | null | null | novice/02-04/lat_DIModule.py | septiannurtrir/praxis-academy | 1ef7f959c372ae991d74ccd373123142c2fbc542 | [
"MIT"
] | null | null | null | #dependency Module
if __name__ == '__main__':
injector = Injector(AppModule())
logic = injector.get(BusinessLogic)
logic.do_stuff() | 18.409091 | 64 | 0.654321 |
c2daf1fd3438e639b7a66547964461828db43284 | 639 | py | Python | qiita_pet/uimodules/base_uimodule.py | JWDebelius/qiita | 3378e0fabe40a846691600e5de4fb72a3db70dd1 | [
"BSD-3-Clause"
] | null | null | null | qiita_pet/uimodules/base_uimodule.py | JWDebelius/qiita | 3378e0fabe40a846691600e5de4fb72a3db70dd1 | [
"BSD-3-Clause"
] | null | null | null | qiita_pet/uimodules/base_uimodule.py | JWDebelius/qiita | 3378e0fabe40a846691600e5de4fb72a3db70dd1 | [
"BSD-3-Clause"
] | null | null | null | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from tornado.web import UIModule
| 35.5 | 79 | 0.524257 |
c2db78f1fd6b3b030ac80b311ec8e5f6c6ad3962 | 1,572 | py | Python | test/test_mpdstats.py | dfc/beets | 96c5121f65b9477e9b424f166dc57369b6457e42 | [
"MIT"
] | 1 | 2017-11-15T23:24:35.000Z | 2017-11-15T23:24:35.000Z | test/test_mpdstats.py | dfc/beets | 96c5121f65b9477e9b424f166dc57369b6457e42 | [
"MIT"
] | null | null | null | test/test_mpdstats.py | dfc/beets | 96c5121f65b9477e9b424f166dc57369b6457e42 | [
"MIT"
] | null | null | null | # This file is part of beets.
# Copyright 2015
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from mock import Mock
from test._common import unittest
from test.helper import TestHelper
from beets.library import Item
from beetsplug.mpdstats import MPDStats
if __name__ == b'__main__':
unittest.main(defaultTest='suite')
| 30.230769 | 71 | 0.720738 |
c2db90e9e6960ed73fac71500e3d37978e19257c | 1,714 | py | Python | renderer/console.py | deeredman1991/CreepSmash | 566b87c6d70f3663016f1c6d41d63432f9d0e785 | [
"MIT"
] | null | null | null | renderer/console.py | deeredman1991/CreepSmash | 566b87c6d70f3663016f1c6d41d63432f9d0e785 | [
"MIT"
] | null | null | null | renderer/console.py | deeredman1991/CreepSmash | 566b87c6d70f3663016f1c6d41d63432f9d0e785 | [
"MIT"
] | null | null | null | import tools.libtcod.libtcodpy as libtcod
| 39.860465 | 157 | 0.625438 |
c2dbb6ec5c6b594157bfe877b67a7b2cb451fd8a | 48,942 | py | Python | tests/examples/minlplib/sfacloc2_3_90.py | ouyang-w-19/decogo | 52546480e49776251d4d27856e18a46f40c824a1 | [
"MIT"
] | 2 | 2021-07-03T13:19:10.000Z | 2022-02-06T10:48:13.000Z | tests/examples/minlplib/sfacloc2_3_90.py | ouyang-w-19/decogo | 52546480e49776251d4d27856e18a46f40c824a1 | [
"MIT"
] | 1 | 2021-07-04T14:52:14.000Z | 2021-07-15T10:17:11.000Z | tests/examples/minlplib/sfacloc2_3_90.py | ouyang-w-19/decogo | 52546480e49776251d4d27856e18a46f40c824a1 | [
"MIT"
] | null | null | null | # MINLP written by GAMS Convert at 04/21/18 13:54:11
#
# Equation counts
# Total E G L N X C B
# 497 61 388 48 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 292 217 75 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 1283 1148 135 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x1 = Var(within=Reals,bounds=(0,0.26351883),initialize=0)
m.x2 = Var(within=Reals,bounds=(0,0.26351883),initialize=0)
m.x3 = Var(within=Reals,bounds=(0,0.26351883),initialize=0)
m.x4 = Var(within=Reals,bounds=(0,0.22891574),initialize=0)
m.x5 = Var(within=Reals,bounds=(0,0.22891574),initialize=0)
m.x6 = Var(within=Reals,bounds=(0,0.22891574),initialize=0)
m.x7 = Var(within=Reals,bounds=(0,0.21464835),initialize=0)
m.x8 = Var(within=Reals,bounds=(0,0.21464835),initialize=0)
m.x9 = Var(within=Reals,bounds=(0,0.21464835),initialize=0)
m.x10 = Var(within=Reals,bounds=(0,0.17964414),initialize=0)
m.x11 = Var(within=Reals,bounds=(0,0.17964414),initialize=0)
m.x12 = Var(within=Reals,bounds=(0,0.17964414),initialize=0)
m.x13 = Var(within=Reals,bounds=(0,0.17402843),initialize=0)
m.x14 = Var(within=Reals,bounds=(0,0.17402843),initialize=0)
m.x15 = Var(within=Reals,bounds=(0,0.17402843),initialize=0)
m.x16 = Var(within=Reals,bounds=(0,0.15355962),initialize=0)
m.x17 = Var(within=Reals,bounds=(0,0.15355962),initialize=0)
m.x18 = Var(within=Reals,bounds=(0,0.15355962),initialize=0)
m.x19 = Var(within=Reals,bounds=(0,0.1942283),initialize=0)
m.x20 = Var(within=Reals,bounds=(0,0.1942283),initialize=0)
m.x21 = Var(within=Reals,bounds=(0,0.1942283),initialize=0)
m.x22 = Var(within=Reals,bounds=(0,0.25670555),initialize=0)
m.x23 = Var(within=Reals,bounds=(0,0.25670555),initialize=0)
m.x24 = Var(within=Reals,bounds=(0,0.25670555),initialize=0)
m.x25 = Var(within=Reals,bounds=(0,0.27088619),initialize=0)
m.x26 = Var(within=Reals,bounds=(0,0.27088619),initialize=0)
m.x27 = Var(within=Reals,bounds=(0,0.27088619),initialize=0)
m.x28 = Var(within=Reals,bounds=(0,0.28985675),initialize=0)
m.x29 = Var(within=Reals,bounds=(0,0.28985675),initialize=0)
m.x30 = Var(within=Reals,bounds=(0,0.28985675),initialize=0)
m.x31 = Var(within=Reals,bounds=(0,0.25550303),initialize=0)
m.x32 = Var(within=Reals,bounds=(0,0.25550303),initialize=0)
m.x33 = Var(within=Reals,bounds=(0,0.25550303),initialize=0)
m.x34 = Var(within=Reals,bounds=(0,0.19001726),initialize=0)
m.x35 = Var(within=Reals,bounds=(0,0.19001726),initialize=0)
m.x36 = Var(within=Reals,bounds=(0,0.19001726),initialize=0)
m.x37 = Var(within=Reals,bounds=(0,0.23803143),initialize=0)
m.x38 = Var(within=Reals,bounds=(0,0.23803143),initialize=0)
m.x39 = Var(within=Reals,bounds=(0,0.23803143),initialize=0)
m.x40 = Var(within=Reals,bounds=(0,0.23312962),initialize=0)
m.x41 = Var(within=Reals,bounds=(0,0.23312962),initialize=0)
m.x42 = Var(within=Reals,bounds=(0,0.23312962),initialize=0)
m.x43 = Var(within=Reals,bounds=(0,0.27705307),initialize=0)
m.x44 = Var(within=Reals,bounds=(0,0.27705307),initialize=0)
m.x45 = Var(within=Reals,bounds=(0,0.27705307),initialize=0)
m.x46 = Var(within=Reals,bounds=(1.92,2.02),initialize=1.92)
m.x47 = Var(within=Reals,bounds=(3.82,4.01333333333333),initialize=3.82)
m.x48 = Var(within=Reals,bounds=(4.53333333333333,4.76),initialize=4.53333333333333)
m.x49 = Var(within=Reals,bounds=(5.39333333333333,5.96),initialize=5.39333333333333)
m.x50 = Var(within=Reals,bounds=(36.3533333333333,42.0933333333333),initialize=36.3533333333333)
m.x51 = Var(within=Reals,bounds=(85.7466666666667,99.28),initialize=85.7466666666667)
m.x52 = Var(within=Reals,bounds=(6.28,6.59333333333333),initialize=6.28)
m.x53 = Var(within=Reals,bounds=(53.4333333333333,61.8666666666667),initialize=53.4333333333333)
m.x54 = Var(within=Reals,bounds=(48.6133333333333,56.2866666666667),initialize=48.6133333333333)
m.x55 = Var(within=Reals,bounds=(33.9533333333333,41.5),initialize=33.9533333333333)
m.x56 = Var(within=Reals,bounds=(53.9666666666667,62.4933333333333),initialize=53.9666666666667)
m.x57 = Var(within=Reals,bounds=(77.0533333333333,80.9066666666667),initialize=77.0533333333333)
m.x58 = Var(within=Reals,bounds=(24.9066666666667,26.1466666666667),initialize=24.9066666666667)
m.x59 = Var(within=Reals,bounds=(36.1866666666667,38),initialize=36.1866666666667)
m.x60 = Var(within=Reals,bounds=(56.3133333333333,62.24),initialize=56.3133333333333)
m.b61 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b62 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b63 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b64 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b65 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b66 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b67 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b68 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b69 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b70 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b71 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b72 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b73 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b74 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b75 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b76 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b77 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b78 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b79 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b80 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b81 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b82 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b83 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b84 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b85 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b86 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b87 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b88 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b89 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b90 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b91 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b92 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b93 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b94 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b95 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b96 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b97 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b98 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b99 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b100 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b101 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b102 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b103 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b104 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b105 = Var(within=Binary,bounds=(0,1),initialize=0)
m.x106 = Var(within=Reals,bounds=(0,0.5323080366),initialize=0)
m.x107 = Var(within=Reals,bounds=(0,0.918715169866666),initialize=0)
m.x108 = Var(within=Reals,bounds=(0,1.021726146),initialize=0)
m.x109 = Var(within=Reals,bounds=(0,1.0706790744),initialize=0)
m.x110 = Var(within=Reals,bounds=(0,7.32543671346667),initialize=0)
m.x111 = Var(within=Reals,bounds=(0,15.2453990736),initialize=0)
m.x112 = Var(within=Reals,bounds=(0,1.28061192466667),initialize=0)
m.x113 = Var(within=Reals,bounds=(0,15.8815166933333),initialize=0)
m.x114 = Var(within=Reals,bounds=(0,15.2472806811333),initialize=0)
m.x115 = Var(within=Reals,bounds=(0,12.029055125),initialize=0)
m.x116 = Var(within=Reals,bounds=(0,15.9672360214667),initialize=0)
m.x117 = Var(within=Reals,bounds=(0,15.3736631157333),initialize=0)
m.x118 = Var(within=Reals,bounds=(0,6.2237284564),initialize=0)
m.x119 = Var(within=Reals,bounds=(0,8.85892556),initialize=0)
m.x120 = Var(within=Reals,bounds=(0,17.2437830768),initialize=0)
m.x121 = Var(within=Reals,bounds=(0.25788969,0.35227087),initialize=0.25788969)
m.x122 = Var(within=Reals,bounds=(0.25788969,0.35227087),initialize=0.25788969)
m.x123 = Var(within=Reals,bounds=(0.25788969,0.35227087),initialize=0.25788969)
m.x124 = Var(within=Reals,bounds=(-0.98493628,-0.7794471),initialize=-0.7794471)
m.x125 = Var(within=Reals,bounds=(-0.98493628,-0.7794471),initialize=-0.7794471)
m.x126 = Var(within=Reals,bounds=(-0.98493628,-0.7794471),initialize=-0.7794471)
m.x127 = Var(within=Reals,bounds=(0,0.0580296499999999),initialize=0)
m.x128 = Var(within=Reals,bounds=(0,0.0580296499999999),initialize=0)
m.x129 = Var(within=Reals,bounds=(0,0.0580296499999999),initialize=0)
m.x130 = Var(within=Reals,bounds=(0,0.0546689399999999),initialize=0)
m.x131 = Var(within=Reals,bounds=(0,0.0546689399999999),initialize=0)
m.x132 = Var(within=Reals,bounds=(0,0.0546689399999999),initialize=0)
m.x133 = Var(within=Reals,bounds=(0,0.09360565),initialize=0)
m.x134 = Var(within=Reals,bounds=(0,0.09360565),initialize=0)
m.x135 = Var(within=Reals,bounds=(0,0.09360565),initialize=0)
m.x136 = Var(within=Reals,bounds=(0,0.0476880399999999),initialize=0)
m.x137 = Var(within=Reals,bounds=(0,0.0476880399999999),initialize=0)
m.x138 = Var(within=Reals,bounds=(0,0.0476880399999999),initialize=0)
m.x139 = Var(within=Reals,bounds=(0,0.05276021),initialize=0)
m.x140 = Var(within=Reals,bounds=(0,0.05276021),initialize=0)
m.x141 = Var(within=Reals,bounds=(0,0.05276021),initialize=0)
m.x142 = Var(within=Reals,bounds=(0,0.04905388),initialize=0)
m.x143 = Var(within=Reals,bounds=(0,0.04905388),initialize=0)
m.x144 = Var(within=Reals,bounds=(0,0.04905388),initialize=0)
m.x145 = Var(within=Reals,bounds=(0,0.07731692),initialize=0)
m.x146 = Var(within=Reals,bounds=(0,0.07731692),initialize=0)
m.x147 = Var(within=Reals,bounds=(0,0.07731692),initialize=0)
m.x148 = Var(within=Reals,bounds=(0,0.08211741),initialize=0)
m.x149 = Var(within=Reals,bounds=(0,0.08211741),initialize=0)
m.x150 = Var(within=Reals,bounds=(0,0.08211741),initialize=0)
m.x151 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x152 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x153 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x154 = Var(within=Reals,bounds=(0,0.08436757),initialize=0)
m.x155 = Var(within=Reals,bounds=(0,0.08436757),initialize=0)
m.x156 = Var(within=Reals,bounds=(0,0.08436757),initialize=0)
m.x157 = Var(within=Reals,bounds=(0,0.06987597),initialize=0)
m.x158 = Var(within=Reals,bounds=(0,0.06987597),initialize=0)
m.x159 = Var(within=Reals,bounds=(0,0.06987597),initialize=0)
m.x160 = Var(within=Reals,bounds=(0,0.04788831),initialize=0)
m.x161 = Var(within=Reals,bounds=(0,0.04788831),initialize=0)
m.x162 = Var(within=Reals,bounds=(0,0.04788831),initialize=0)
m.x163 = Var(within=Reals,bounds=(0,0.0668875099999999),initialize=0)
m.x164 = Var(within=Reals,bounds=(0,0.0668875099999999),initialize=0)
m.x165 = Var(within=Reals,bounds=(0,0.0668875099999999),initialize=0)
m.x166 = Var(within=Reals,bounds=(0,0.07276512),initialize=0)
m.x167 = Var(within=Reals,bounds=(0,0.07276512),initialize=0)
m.x168 = Var(within=Reals,bounds=(0,0.07276512),initialize=0)
m.x169 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x170 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x171 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x172 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x173 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x174 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x175 = Var(within=Reals,bounds=(0,0.1742468),initialize=0)
m.x176 = Var(within=Reals,bounds=(0,0.1742468),initialize=0)
m.x177 = Var(within=Reals,bounds=(0,0.1742468),initialize=0)
m.x178 = Var(within=Reals,bounds=(0,0.1210427),initialize=0)
m.x179 = Var(within=Reals,bounds=(0,0.1210427),initialize=0)
m.x180 = Var(within=Reals,bounds=(0,0.1210427),initialize=0)
m.x181 = Var(within=Reals,bounds=(0,0.1319561),initialize=0)
m.x182 = Var(within=Reals,bounds=(0,0.1319561),initialize=0)
m.x183 = Var(within=Reals,bounds=(0,0.1319561),initialize=0)
m.x184 = Var(within=Reals,bounds=(0,0.12126822),initialize=0)
m.x185 = Var(within=Reals,bounds=(0,0.12126822),initialize=0)
m.x186 = Var(within=Reals,bounds=(0,0.12126822),initialize=0)
m.x187 = Var(within=Reals,bounds=(0,0.10450574),initialize=0)
m.x188 = Var(within=Reals,bounds=(0,0.10450574),initialize=0)
m.x189 = Var(within=Reals,bounds=(0,0.10450574),initialize=0)
m.x190 = Var(within=Reals,bounds=(0,0.11691138),initialize=0)
m.x191 = Var(within=Reals,bounds=(0,0.11691138),initialize=0)
m.x192 = Var(within=Reals,bounds=(0,0.11691138),initialize=0)
m.x193 = Var(within=Reals,bounds=(0,0.17458814),initialize=0)
m.x194 = Var(within=Reals,bounds=(0,0.17458814),initialize=0)
m.x195 = Var(within=Reals,bounds=(0,0.17458814),initialize=0)
m.x196 = Var(within=Reals,bounds=(0,0.17650501),initialize=0)
m.x197 = Var(within=Reals,bounds=(0,0.17650501),initialize=0)
m.x198 = Var(within=Reals,bounds=(0,0.17650501),initialize=0)
m.x199 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x200 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x201 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x202 = Var(within=Reals,bounds=(0,0.18562706),initialize=0)
m.x203 = Var(within=Reals,bounds=(0,0.18562706),initialize=0)
m.x204 = Var(within=Reals,bounds=(0,0.18562706),initialize=0)
m.x205 = Var(within=Reals,bounds=(0,0.14212895),initialize=0)
m.x206 = Var(within=Reals,bounds=(0,0.14212895),initialize=0)
m.x207 = Var(within=Reals,bounds=(0,0.14212895),initialize=0)
m.x208 = Var(within=Reals,bounds=(0,0.17114392),initialize=0)
m.x209 = Var(within=Reals,bounds=(0,0.17114392),initialize=0)
m.x210 = Var(within=Reals,bounds=(0,0.17114392),initialize=0)
m.x211 = Var(within=Reals,bounds=(0,0.1603645),initialize=0)
m.x212 = Var(within=Reals,bounds=(0,0.1603645),initialize=0)
m.x213 = Var(within=Reals,bounds=(0,0.1603645),initialize=0)
m.x214 = Var(within=Reals,bounds=(0,0.18267189),initialize=0)
m.x215 = Var(within=Reals,bounds=(0,0.18267189),initialize=0)
m.x216 = Var(within=Reals,bounds=(0,0.18267189),initialize=0)
m.x217 = Var(within=Reals,bounds=(0,0.5323080366),initialize=0)
m.x218 = Var(within=Reals,bounds=(0,0.5323080366),initialize=0)
m.x219 = Var(within=Reals,bounds=(0,0.5323080366),initialize=0)
m.x220 = Var(within=Reals,bounds=(0,0.918715169866666),initialize=0)
m.x221 = Var(within=Reals,bounds=(0,0.918715169866666),initialize=0)
m.x222 = Var(within=Reals,bounds=(0,0.918715169866666),initialize=0)
m.x223 = Var(within=Reals,bounds=(0,1.021726146),initialize=0)
m.x224 = Var(within=Reals,bounds=(0,1.021726146),initialize=0)
m.x225 = Var(within=Reals,bounds=(0,1.021726146),initialize=0)
m.x226 = Var(within=Reals,bounds=(0,1.0706790744),initialize=0)
m.x227 = Var(within=Reals,bounds=(0,1.0706790744),initialize=0)
m.x228 = Var(within=Reals,bounds=(0,1.0706790744),initialize=0)
m.x229 = Var(within=Reals,bounds=(0,7.32543671346667),initialize=0)
m.x230 = Var(within=Reals,bounds=(0,7.32543671346667),initialize=0)
m.x231 = Var(within=Reals,bounds=(0,7.32543671346667),initialize=0)
m.x232 = Var(within=Reals,bounds=(0,15.2453990736),initialize=0)
m.x233 = Var(within=Reals,bounds=(0,15.2453990736),initialize=0)
m.x234 = Var(within=Reals,bounds=(0,15.2453990736),initialize=0)
m.x235 = Var(within=Reals,bounds=(0,1.28061192466667),initialize=0)
m.x236 = Var(within=Reals,bounds=(0,1.28061192466667),initialize=0)
m.x237 = Var(within=Reals,bounds=(0,1.28061192466667),initialize=0)
m.x238 = Var(within=Reals,bounds=(0,15.8815166933333),initialize=0)
m.x239 = Var(within=Reals,bounds=(0,15.8815166933333),initialize=0)
m.x240 = Var(within=Reals,bounds=(0,15.8815166933333),initialize=0)
m.x241 = Var(within=Reals,bounds=(0,15.2472806811333),initialize=0)
m.x242 = Var(within=Reals,bounds=(0,15.2472806811333),initialize=0)
m.x243 = Var(within=Reals,bounds=(0,15.2472806811333),initialize=0)
m.x244 = Var(within=Reals,bounds=(0,12.029055125),initialize=0)
m.x245 = Var(within=Reals,bounds=(0,12.029055125),initialize=0)
m.x246 = Var(within=Reals,bounds=(0,12.029055125),initialize=0)
m.x247 = Var(within=Reals,bounds=(0,15.9672360214667),initialize=0)
m.x248 = Var(within=Reals,bounds=(0,15.9672360214667),initialize=0)
m.x249 = Var(within=Reals,bounds=(0,15.9672360214667),initialize=0)
m.x250 = Var(within=Reals,bounds=(0,15.3736631157333),initialize=0)
m.x251 = Var(within=Reals,bounds=(0,15.3736631157333),initialize=0)
m.x252 = Var(within=Reals,bounds=(0,15.3736631157333),initialize=0)
m.x253 = Var(within=Reals,bounds=(0,6.2237284564),initialize=0)
m.x254 = Var(within=Reals,bounds=(0,6.2237284564),initialize=0)
m.x255 = Var(within=Reals,bounds=(0,6.2237284564),initialize=0)
m.x256 = Var(within=Reals,bounds=(0,8.85892556),initialize=0)
m.x257 = Var(within=Reals,bounds=(0,8.85892556),initialize=0)
m.x258 = Var(within=Reals,bounds=(0,8.85892556),initialize=0)
m.x259 = Var(within=Reals,bounds=(0,17.2437830768),initialize=0)
m.x260 = Var(within=Reals,bounds=(0,17.2437830768),initialize=0)
m.x261 = Var(within=Reals,bounds=(0,17.2437830768),initialize=0)
m.b262 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b263 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b264 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b265 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b266 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b267 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b268 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b269 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b270 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b271 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b272 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b273 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b274 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b275 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b276 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b277 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b278 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b279 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b280 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b281 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b282 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b283 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b284 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b285 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b286 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b287 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b288 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b289 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b290 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b291 = Var(within=Binary,bounds=(0,1),initialize=0)
m.obj = Objective(expr= m.x106 + m.x107 + m.x108 + m.x109 + m.x110 + m.x111 + m.x112 + m.x113 + m.x114 + m.x115
+ m.x116 + m.x117 + m.x118 + m.x119 + m.x120, sense=minimize)
m.c2 = Constraint(expr=-m.x46*m.x1*m.b61 + m.x217 >= 0)
m.c3 = Constraint(expr=-m.x46*m.x2*m.b62 + m.x218 >= 0)
m.c4 = Constraint(expr=-m.x46*m.x3*m.b63 + m.x219 >= 0)
m.c5 = Constraint(expr=-m.x47*m.x4*m.b64 + m.x220 >= 0)
m.c6 = Constraint(expr=-m.x47*m.x5*m.b65 + m.x221 >= 0)
m.c7 = Constraint(expr=-m.x47*m.x6*m.b66 + m.x222 >= 0)
m.c8 = Constraint(expr=-m.x48*m.x7*m.b67 + m.x223 >= 0)
m.c9 = Constraint(expr=-m.x48*m.x8*m.b68 + m.x224 >= 0)
m.c10 = Constraint(expr=-m.x48*m.x9*m.b69 + m.x225 >= 0)
m.c11 = Constraint(expr=-m.x49*m.x10*m.b70 + m.x226 >= 0)
m.c12 = Constraint(expr=-m.x49*m.x11*m.b71 + m.x227 >= 0)
m.c13 = Constraint(expr=-m.x49*m.x12*m.b72 + m.x228 >= 0)
m.c14 = Constraint(expr=-m.x50*m.x13*m.b73 + m.x229 >= 0)
m.c15 = Constraint(expr=-m.x50*m.x14*m.b74 + m.x230 >= 0)
m.c16 = Constraint(expr=-m.x50*m.x15*m.b75 + m.x231 >= 0)
m.c17 = Constraint(expr=-m.x51*m.x16*m.b76 + m.x232 >= 0)
m.c18 = Constraint(expr=-m.x51*m.x17*m.b77 + m.x233 >= 0)
m.c19 = Constraint(expr=-m.x51*m.x18*m.b78 + m.x234 >= 0)
m.c20 = Constraint(expr=-m.x52*m.x19*m.b79 + m.x235 >= 0)
m.c21 = Constraint(expr=-m.x52*m.x20*m.b80 + m.x236 >= 0)
m.c22 = Constraint(expr=-m.x52*m.x21*m.b81 + m.x237 >= 0)
m.c23 = Constraint(expr=-m.x53*m.x22*m.b82 + m.x238 >= 0)
m.c24 = Constraint(expr=-m.x53*m.x23*m.b83 + m.x239 >= 0)
m.c25 = Constraint(expr=-m.x53*m.x24*m.b84 + m.x240 >= 0)
m.c26 = Constraint(expr=-m.x54*m.x25*m.b85 + m.x241 >= 0)
m.c27 = Constraint(expr=-m.x54*m.x26*m.b86 + m.x242 >= 0)
m.c28 = Constraint(expr=-m.x54*m.x27*m.b87 + m.x243 >= 0)
m.c29 = Constraint(expr=-m.x55*m.x28*m.b88 + m.x244 >= 0)
m.c30 = Constraint(expr=-m.x55*m.x29*m.b89 + m.x245 >= 0)
m.c31 = Constraint(expr=-m.x55*m.x30*m.b90 + m.x246 >= 0)
m.c32 = Constraint(expr=-m.x56*m.x31*m.b91 + m.x247 >= 0)
m.c33 = Constraint(expr=-m.x56*m.x32*m.b92 + m.x248 >= 0)
m.c34 = Constraint(expr=-m.x56*m.x33*m.b93 + m.x249 >= 0)
m.c35 = Constraint(expr=-m.x57*m.x34*m.b94 + m.x250 >= 0)
m.c36 = Constraint(expr=-m.x57*m.x35*m.b95 + m.x251 >= 0)
m.c37 = Constraint(expr=-m.x57*m.x36*m.b96 + m.x252 >= 0)
m.c38 = Constraint(expr=-m.x58*m.x37*m.b97 + m.x253 >= 0)
m.c39 = Constraint(expr=-m.x58*m.x38*m.b98 + m.x254 >= 0)
m.c40 = Constraint(expr=-m.x58*m.x39*m.b99 + m.x255 >= 0)
m.c41 = Constraint(expr=-m.x59*m.x40*m.b100 + m.x256 >= 0)
m.c42 = Constraint(expr=-m.x59*m.x41*m.b101 + m.x257 >= 0)
m.c43 = Constraint(expr=-m.x59*m.x42*m.b102 + m.x258 >= 0)
m.c44 = Constraint(expr=-m.x60*m.x43*m.b103 + m.x259 >= 0)
m.c45 = Constraint(expr=-m.x60*m.x44*m.b104 + m.x260 >= 0)
m.c46 = Constraint(expr=-m.x60*m.x45*m.b105 + m.x261 >= 0)
m.c47 = Constraint(expr= m.b61 + m.b62 + m.b63 == 1)
m.c48 = Constraint(expr= m.b64 + m.b65 + m.b66 == 1)
m.c49 = Constraint(expr= m.b67 + m.b68 + m.b69 == 1)
m.c50 = Constraint(expr= m.b70 + m.b71 + m.b72 == 1)
m.c51 = Constraint(expr= m.b73 + m.b74 + m.b75 == 1)
m.c52 = Constraint(expr= m.b76 + m.b77 + m.b78 == 1)
m.c53 = Constraint(expr= m.b79 + m.b80 + m.b81 == 1)
m.c54 = Constraint(expr= m.b82 + m.b83 + m.b84 == 1)
m.c55 = Constraint(expr= m.b85 + m.b86 + m.b87 == 1)
m.c56 = Constraint(expr= m.b88 + m.b89 + m.b90 == 1)
m.c57 = Constraint(expr= m.b91 + m.b92 + m.b93 == 1)
m.c58 = Constraint(expr= m.b94 + m.b95 + m.b96 == 1)
m.c59 = Constraint(expr= m.b97 + m.b98 + m.b99 == 1)
m.c60 = Constraint(expr= m.b100 + m.b101 + m.b102 == 1)
m.c61 = Constraint(expr= m.b103 + m.b104 + m.b105 == 1)
m.c62 = Constraint(expr= 2.02*m.b61 + 4.01333333333333*m.b64 + 4.76*m.b67 + 5.96*m.b70 + 42.0933333333333*m.b73
+ 99.28*m.b76 + 6.59333333333333*m.b79 + 61.8666666666667*m.b82 + 56.2866666666667*m.b85
+ 41.5*m.b88 + 62.4933333333333*m.b91 + 80.9066666666667*m.b94 + 26.1466666666667*m.b97
+ 38*m.b100 + 62.24*m.b103 <= 213.053333333333)
m.c63 = Constraint(expr= 2.02*m.b62 + 4.01333333333333*m.b65 + 4.76*m.b68 + 5.96*m.b71 + 42.0933333333333*m.b74
+ 99.28*m.b77 + 6.59333333333333*m.b80 + 61.8666666666667*m.b83 + 56.2866666666667*m.b86
+ 41.5*m.b89 + 62.4933333333333*m.b92 + 80.9066666666667*m.b95 + 26.1466666666667*m.b98
+ 38*m.b101 + 62.24*m.b104 <= 213.053333333333)
m.c64 = Constraint(expr= 2.02*m.b63 + 4.01333333333333*m.b66 + 4.76*m.b69 + 5.96*m.b72 + 42.0933333333333*m.b75
+ 99.28*m.b78 + 6.59333333333333*m.b81 + 61.8666666666667*m.b84 + 56.2866666666667*m.b87
+ 41.5*m.b90 + 62.4933333333333*m.b93 + 80.9066666666667*m.b96 + 26.1466666666667*m.b99
+ 38*m.b102 + 62.24*m.b105 <= 213.053333333333)
m.c65 = Constraint(expr= m.x121 + m.x127 >= 0.29424122)
m.c66 = Constraint(expr= m.x122 + m.x128 >= 0.29424122)
m.c67 = Constraint(expr= m.x123 + m.x129 >= 0.29424122)
m.c68 = Constraint(expr= m.x121 + m.x130 >= 0.29760193)
m.c69 = Constraint(expr= m.x122 + m.x131 >= 0.29760193)
m.c70 = Constraint(expr= m.x123 + m.x132 >= 0.29760193)
m.c71 = Constraint(expr= m.x121 + m.x133 >= 0.35149534)
m.c72 = Constraint(expr= m.x122 + m.x134 >= 0.35149534)
m.c73 = Constraint(expr= m.x123 + m.x135 >= 0.35149534)
m.c74 = Constraint(expr= m.x121 + m.x136 >= 0.30458283)
m.c75 = Constraint(expr= m.x122 + m.x137 >= 0.30458283)
m.c76 = Constraint(expr= m.x123 + m.x138 >= 0.30458283)
m.c77 = Constraint(expr= m.x121 + m.x139 >= 0.29951066)
m.c78 = Constraint(expr= m.x122 + m.x140 >= 0.29951066)
m.c79 = Constraint(expr= m.x123 + m.x141 >= 0.29951066)
m.c80 = Constraint(expr= m.x121 + m.x142 >= 0.30694357)
m.c81 = Constraint(expr= m.x122 + m.x143 >= 0.30694357)
m.c82 = Constraint(expr= m.x123 + m.x144 >= 0.30694357)
m.c83 = Constraint(expr= m.x121 + m.x145 >= 0.33520661)
m.c84 = Constraint(expr= m.x122 + m.x146 >= 0.33520661)
m.c85 = Constraint(expr= m.x123 + m.x147 >= 0.33520661)
m.c86 = Constraint(expr= m.x121 + m.x148 >= 0.3400071)
m.c87 = Constraint(expr= m.x122 + m.x149 >= 0.3400071)
m.c88 = Constraint(expr= m.x123 + m.x150 >= 0.3400071)
m.c89 = Constraint(expr= m.x121 + m.x151 >= 0.35227087)
m.c90 = Constraint(expr= m.x122 + m.x152 >= 0.35227087)
m.c91 = Constraint(expr= m.x123 + m.x153 >= 0.35227087)
m.c92 = Constraint(expr= m.x121 + m.x154 >= 0.34225726)
m.c93 = Constraint(expr= m.x122 + m.x155 >= 0.34225726)
m.c94 = Constraint(expr= m.x123 + m.x156 >= 0.34225726)
m.c95 = Constraint(expr= m.x121 + m.x157 >= 0.32776566)
m.c96 = Constraint(expr= m.x122 + m.x158 >= 0.32776566)
m.c97 = Constraint(expr= m.x123 + m.x159 >= 0.32776566)
m.c98 = Constraint(expr= m.x121 + m.x160 >= 0.30438256)
m.c99 = Constraint(expr= m.x122 + m.x161 >= 0.30438256)
m.c100 = Constraint(expr= m.x123 + m.x162 >= 0.30438256)
m.c101 = Constraint(expr= m.x121 + m.x163 >= 0.28538336)
m.c102 = Constraint(expr= m.x122 + m.x164 >= 0.28538336)
m.c103 = Constraint(expr= m.x123 + m.x165 >= 0.28538336)
m.c104 = Constraint(expr= m.x121 + m.x166 >= 0.27950575)
m.c105 = Constraint(expr= m.x122 + m.x167 >= 0.27950575)
m.c106 = Constraint(expr= m.x123 + m.x168 >= 0.27950575)
m.c107 = Constraint(expr= - m.x121 + m.x127 >= -0.29424122)
m.c108 = Constraint(expr= - m.x122 + m.x128 >= -0.29424122)
m.c109 = Constraint(expr= - m.x123 + m.x129 >= -0.29424122)
m.c110 = Constraint(expr= - m.x121 + m.x130 >= -0.29760193)
m.c111 = Constraint(expr= - m.x122 + m.x131 >= -0.29760193)
m.c112 = Constraint(expr= - m.x123 + m.x132 >= -0.29760193)
m.c113 = Constraint(expr= - m.x121 + m.x133 >= -0.35149534)
m.c114 = Constraint(expr= - m.x122 + m.x134 >= -0.35149534)
m.c115 = Constraint(expr= - m.x123 + m.x135 >= -0.35149534)
m.c116 = Constraint(expr= - m.x121 + m.x136 >= -0.30458283)
m.c117 = Constraint(expr= - m.x122 + m.x137 >= -0.30458283)
m.c118 = Constraint(expr= - m.x123 + m.x138 >= -0.30458283)
m.c119 = Constraint(expr= - m.x121 + m.x139 >= -0.29951066)
m.c120 = Constraint(expr= - m.x122 + m.x140 >= -0.29951066)
m.c121 = Constraint(expr= - m.x123 + m.x141 >= -0.29951066)
m.c122 = Constraint(expr= - m.x121 + m.x142 >= -0.30694357)
m.c123 = Constraint(expr= - m.x122 + m.x143 >= -0.30694357)
m.c124 = Constraint(expr= - m.x123 + m.x144 >= -0.30694357)
m.c125 = Constraint(expr= - m.x121 + m.x145 >= -0.33520661)
m.c126 = Constraint(expr= - m.x122 + m.x146 >= -0.33520661)
m.c127 = Constraint(expr= - m.x123 + m.x147 >= -0.33520661)
m.c128 = Constraint(expr= - m.x121 + m.x148 >= -0.3400071)
m.c129 = Constraint(expr= - m.x122 + m.x149 >= -0.3400071)
m.c130 = Constraint(expr= - m.x123 + m.x150 >= -0.3400071)
m.c131 = Constraint(expr= - m.x121 + m.x154 >= -0.34225726)
m.c132 = Constraint(expr= - m.x122 + m.x155 >= -0.34225726)
m.c133 = Constraint(expr= - m.x123 + m.x156 >= -0.34225726)
m.c134 = Constraint(expr= - m.x121 + m.x157 >= -0.32776566)
m.c135 = Constraint(expr= - m.x122 + m.x158 >= -0.32776566)
m.c136 = Constraint(expr= - m.x123 + m.x159 >= -0.32776566)
m.c137 = Constraint(expr= - m.x121 + m.x160 >= -0.30438256)
m.c138 = Constraint(expr= - m.x122 + m.x161 >= -0.30438256)
m.c139 = Constraint(expr= - m.x123 + m.x162 >= -0.30438256)
m.c140 = Constraint(expr= - m.x121 + m.x163 >= -0.28538336)
m.c141 = Constraint(expr= - m.x122 + m.x164 >= -0.28538336)
m.c142 = Constraint(expr= - m.x123 + m.x165 >= -0.28538336)
m.c143 = Constraint(expr= - m.x121 + m.x166 >= -0.27950575)
m.c144 = Constraint(expr= - m.x122 + m.x167 >= -0.27950575)
m.c145 = Constraint(expr= - m.x123 + m.x168 >= -0.27950575)
m.c146 = Constraint(expr= - m.x121 + m.x169 >= -0.25788969)
m.c147 = Constraint(expr= - m.x122 + m.x170 >= -0.25788969)
m.c148 = Constraint(expr= - m.x123 + m.x171 >= -0.25788969)
m.c149 = Constraint(expr= m.x124 + m.x175 >= -0.9536939)
m.c150 = Constraint(expr= m.x125 + m.x176 >= -0.9536939)
m.c151 = Constraint(expr= m.x126 + m.x177 >= -0.9536939)
m.c152 = Constraint(expr= m.x124 + m.x178 >= -0.9004898)
m.c153 = Constraint(expr= m.x125 + m.x179 >= -0.9004898)
m.c154 = Constraint(expr= m.x126 + m.x180 >= -0.9004898)
m.c155 = Constraint(expr= m.x124 + m.x181 >= -0.9114032)
m.c156 = Constraint(expr= m.x125 + m.x182 >= -0.9114032)
m.c157 = Constraint(expr= m.x126 + m.x183 >= -0.9114032)
m.c158 = Constraint(expr= m.x124 + m.x184 >= -0.90071532)
m.c159 = Constraint(expr= m.x125 + m.x185 >= -0.90071532)
m.c160 = Constraint(expr= m.x126 + m.x186 >= -0.90071532)
m.c161 = Constraint(expr= m.x124 + m.x187 >= -0.88043054)
m.c162 = Constraint(expr= m.x125 + m.x188 >= -0.88043054)
m.c163 = Constraint(expr= m.x126 + m.x189 >= -0.88043054)
m.c164 = Constraint(expr= m.x124 + m.x190 >= -0.8680249)
m.c165 = Constraint(expr= m.x125 + m.x191 >= -0.8680249)
m.c166 = Constraint(expr= m.x126 + m.x192 >= -0.8680249)
m.c167 = Constraint(expr= m.x124 + m.x193 >= -0.81034814)
m.c168 = Constraint(expr= m.x125 + m.x194 >= -0.81034814)
m.c169 = Constraint(expr= m.x126 + m.x195 >= -0.81034814)
m.c170 = Constraint(expr= m.x124 + m.x196 >= -0.80843127)
m.c171 = Constraint(expr= m.x125 + m.x197 >= -0.80843127)
m.c172 = Constraint(expr= m.x126 + m.x198 >= -0.80843127)
m.c173 = Constraint(expr= m.x124 + m.x199 >= -0.7794471)
m.c174 = Constraint(expr= m.x125 + m.x200 >= -0.7794471)
m.c175 = Constraint(expr= m.x126 + m.x201 >= -0.7794471)
m.c176 = Constraint(expr= m.x124 + m.x202 >= -0.79930922)
m.c177 = Constraint(expr= m.x125 + m.x203 >= -0.79930922)
m.c178 = Constraint(expr= m.x126 + m.x204 >= -0.79930922)
m.c179 = Constraint(expr= m.x124 + m.x205 >= -0.84280733)
m.c180 = Constraint(expr= m.x125 + m.x206 >= -0.84280733)
m.c181 = Constraint(expr= m.x126 + m.x207 >= -0.84280733)
m.c182 = Constraint(expr= m.x124 + m.x208 >= -0.81379236)
m.c183 = Constraint(expr= m.x125 + m.x209 >= -0.81379236)
m.c184 = Constraint(expr= m.x126 + m.x210 >= -0.81379236)
m.c185 = Constraint(expr= m.x124 + m.x211 >= -0.82457178)
m.c186 = Constraint(expr= m.x125 + m.x212 >= -0.82457178)
m.c187 = Constraint(expr= m.x126 + m.x213 >= -0.82457178)
m.c188 = Constraint(expr= m.x124 + m.x214 >= -0.80226439)
m.c189 = Constraint(expr= m.x125 + m.x215 >= -0.80226439)
m.c190 = Constraint(expr= m.x126 + m.x216 >= -0.80226439)
m.c191 = Constraint(expr= - m.x124 + m.x172 >= 0.98493628)
m.c192 = Constraint(expr= - m.x125 + m.x173 >= 0.98493628)
m.c193 = Constraint(expr= - m.x126 + m.x174 >= 0.98493628)
m.c194 = Constraint(expr= - m.x124 + m.x175 >= 0.9536939)
m.c195 = Constraint(expr= - m.x125 + m.x176 >= 0.9536939)
m.c196 = Constraint(expr= - m.x126 + m.x177 >= 0.9536939)
m.c197 = Constraint(expr= - m.x124 + m.x178 >= 0.9004898)
m.c198 = Constraint(expr= - m.x125 + m.x179 >= 0.9004898)
m.c199 = Constraint(expr= - m.x126 + m.x180 >= 0.9004898)
m.c200 = Constraint(expr= - m.x124 + m.x181 >= 0.9114032)
m.c201 = Constraint(expr= - m.x125 + m.x182 >= 0.9114032)
m.c202 = Constraint(expr= - m.x126 + m.x183 >= 0.9114032)
m.c203 = Constraint(expr= - m.x124 + m.x184 >= 0.90071532)
m.c204 = Constraint(expr= - m.x125 + m.x185 >= 0.90071532)
m.c205 = Constraint(expr= - m.x126 + m.x186 >= 0.90071532)
m.c206 = Constraint(expr= - m.x124 + m.x187 >= 0.88043054)
m.c207 = Constraint(expr= - m.x125 + m.x188 >= 0.88043054)
m.c208 = Constraint(expr= - m.x126 + m.x189 >= 0.88043054)
m.c209 = Constraint(expr= - m.x124 + m.x190 >= 0.8680249)
m.c210 = Constraint(expr= - m.x125 + m.x191 >= 0.8680249)
m.c211 = Constraint(expr= - m.x126 + m.x192 >= 0.8680249)
m.c212 = Constraint(expr= - m.x124 + m.x193 >= 0.81034814)
m.c213 = Constraint(expr= - m.x125 + m.x194 >= 0.81034814)
m.c214 = Constraint(expr= - m.x126 + m.x195 >= 0.81034814)
m.c215 = Constraint(expr= - m.x124 + m.x196 >= 0.80843127)
m.c216 = Constraint(expr= - m.x125 + m.x197 >= 0.80843127)
m.c217 = Constraint(expr= - m.x126 + m.x198 >= 0.80843127)
m.c218 = Constraint(expr= - m.x124 + m.x202 >= 0.79930922)
m.c219 = Constraint(expr= - m.x125 + m.x203 >= 0.79930922)
m.c220 = Constraint(expr= - m.x126 + m.x204 >= 0.79930922)
m.c221 = Constraint(expr= - m.x124 + m.x205 >= 0.84280733)
m.c222 = Constraint(expr= - m.x125 + m.x206 >= 0.84280733)
m.c223 = Constraint(expr= - m.x126 + m.x207 >= 0.84280733)
m.c224 = Constraint(expr= - m.x124 + m.x208 >= 0.81379236)
m.c225 = Constraint(expr= - m.x125 + m.x209 >= 0.81379236)
m.c226 = Constraint(expr= - m.x126 + m.x210 >= 0.81379236)
m.c227 = Constraint(expr= - m.x124 + m.x211 >= 0.82457178)
m.c228 = Constraint(expr= - m.x125 + m.x212 >= 0.82457178)
m.c229 = Constraint(expr= - m.x126 + m.x213 >= 0.82457178)
m.c230 = Constraint(expr= - m.x124 + m.x214 >= 0.80226439)
m.c231 = Constraint(expr= - m.x125 + m.x215 >= 0.80226439)
m.c232 = Constraint(expr= - m.x126 + m.x216 >= 0.80226439)
m.c233 = Constraint(expr= m.x1 - m.x127 - m.x172 == 0)
m.c234 = Constraint(expr= m.x2 - m.x128 - m.x173 == 0)
m.c235 = Constraint(expr= m.x3 - m.x129 - m.x174 == 0)
m.c236 = Constraint(expr= m.x4 - m.x130 - m.x175 == 0)
m.c237 = Constraint(expr= m.x5 - m.x131 - m.x176 == 0)
m.c238 = Constraint(expr= m.x6 - m.x132 - m.x177 == 0)
m.c239 = Constraint(expr= m.x7 - m.x133 - m.x178 == 0)
m.c240 = Constraint(expr= m.x8 - m.x134 - m.x179 == 0)
m.c241 = Constraint(expr= m.x9 - m.x135 - m.x180 == 0)
m.c242 = Constraint(expr= m.x10 - m.x136 - m.x181 == 0)
m.c243 = Constraint(expr= m.x11 - m.x137 - m.x182 == 0)
m.c244 = Constraint(expr= m.x12 - m.x138 - m.x183 == 0)
m.c245 = Constraint(expr= m.x13 - m.x139 - m.x184 == 0)
m.c246 = Constraint(expr= m.x14 - m.x140 - m.x185 == 0)
m.c247 = Constraint(expr= m.x15 - m.x141 - m.x186 == 0)
m.c248 = Constraint(expr= m.x16 - m.x142 - m.x187 == 0)
m.c249 = Constraint(expr= m.x17 - m.x143 - m.x188 == 0)
m.c250 = Constraint(expr= m.x18 - m.x144 - m.x189 == 0)
m.c251 = Constraint(expr= m.x19 - m.x145 - m.x190 == 0)
m.c252 = Constraint(expr= m.x20 - m.x146 - m.x191 == 0)
m.c253 = Constraint(expr= m.x21 - m.x147 - m.x192 == 0)
m.c254 = Constraint(expr= m.x22 - m.x148 - m.x193 == 0)
m.c255 = Constraint(expr= m.x23 - m.x149 - m.x194 == 0)
m.c256 = Constraint(expr= m.x24 - m.x150 - m.x195 == 0)
m.c257 = Constraint(expr= m.x25 - m.x151 - m.x196 == 0)
m.c258 = Constraint(expr= m.x26 - m.x152 - m.x197 == 0)
m.c259 = Constraint(expr= m.x27 - m.x153 - m.x198 == 0)
m.c260 = Constraint(expr= m.x28 - m.x154 - m.x199 == 0)
m.c261 = Constraint(expr= m.x29 - m.x155 - m.x200 == 0)
m.c262 = Constraint(expr= m.x30 - m.x156 - m.x201 == 0)
m.c263 = Constraint(expr= m.x31 - m.x157 - m.x202 == 0)
m.c264 = Constraint(expr= m.x32 - m.x158 - m.x203 == 0)
m.c265 = Constraint(expr= m.x33 - m.x159 - m.x204 == 0)
m.c266 = Constraint(expr= m.x34 - m.x160 - m.x205 == 0)
m.c267 = Constraint(expr= m.x35 - m.x161 - m.x206 == 0)
m.c268 = Constraint(expr= m.x36 - m.x162 - m.x207 == 0)
m.c269 = Constraint(expr= m.x37 - m.x163 - m.x208 == 0)
m.c270 = Constraint(expr= m.x38 - m.x164 - m.x209 == 0)
m.c271 = Constraint(expr= m.x39 - m.x165 - m.x210 == 0)
m.c272 = Constraint(expr= m.x40 - m.x166 - m.x211 == 0)
m.c273 = Constraint(expr= m.x41 - m.x167 - m.x212 == 0)
m.c274 = Constraint(expr= m.x42 - m.x168 - m.x213 == 0)
m.c275 = Constraint(expr= m.x43 - m.x169 - m.x214 == 0)
m.c276 = Constraint(expr= m.x44 - m.x170 - m.x215 == 0)
m.c277 = Constraint(expr= m.x45 - m.x171 - m.x216 == 0)
m.c278 = Constraint(expr= m.b269 + m.b270 >= 1)
m.c279 = Constraint(expr= m.b267 + m.b272 >= 1)
m.c280 = Constraint(expr= m.b266 + m.b270 >= 1)
m.c281 = Constraint(expr= m.b266 + m.b269 + m.b271 >= 1)
m.c282 = Constraint(expr= m.b266 + m.b268 + m.b272 >= 1)
m.c283 = Constraint(expr= m.b266 + m.b267 >= 1)
m.c284 = Constraint(expr= m.b265 + m.b272 >= 1)
m.c285 = Constraint(expr= m.b265 + m.b269 >= 1)
m.c286 = Constraint(expr= m.b264 + m.b271 >= 1)
m.c287 = Constraint(expr= m.b264 + m.b269 + m.b272 >= 1)
m.c288 = Constraint(expr= m.b264 + m.b268 >= 1)
m.c289 = Constraint(expr= m.b264 + m.b266 + m.b272 >= 1)
m.c290 = Constraint(expr= m.b264 + m.b266 + m.b269 >= 1)
m.c291 = Constraint(expr= m.b264 + m.b265 >= 1)
m.c292 = Constraint(expr= m.b263 + m.b271 >= 1)
m.c293 = Constraint(expr= m.b263 + m.b269 + m.b272 >= 1)
m.c294 = Constraint(expr= m.b263 + m.b268 >= 1)
m.c295 = Constraint(expr= m.b263 + m.b266 >= 1)
m.c296 = Constraint(expr= m.b263 + m.b264 >= 1)
m.c297 = Constraint(expr= m.b262 + m.b271 >= 1)
m.c298 = Constraint(expr= m.b262 + m.b269 + m.b272 >= 1)
m.c299 = Constraint(expr= m.b262 + m.b268 >= 1)
m.c300 = Constraint(expr= m.b262 + m.b266 + m.b272 >= 1)
m.c301 = Constraint(expr= m.b262 + m.b266 + m.b269 >= 1)
m.c302 = Constraint(expr= m.b262 + m.b265 >= 1)
m.c303 = Constraint(expr= m.b262 + m.b264 >= 1)
m.c304 = Constraint(expr= m.b262 + m.b263 >= 1)
m.c305 = Constraint(expr= m.b272 + m.b277 >= 1)
m.c306 = Constraint(expr= m.b272 + m.b276 + m.b278 >= 1)
m.c307 = Constraint(expr= m.b272 + m.b275 + m.b279 >= 1)
m.c308 = Constraint(expr= m.b272 + m.b274 >= 1)
m.c309 = Constraint(expr= m.b272 + m.b273 + m.b279 >= 1)
m.c310 = Constraint(expr= m.b272 + m.b273 + m.b276 >= 1)
m.c311 = Constraint(expr= m.b271 + m.b278 >= 1)
m.c312 = Constraint(expr= m.b271 + m.b276 + m.b279 >= 1)
m.c313 = Constraint(expr= m.b271 + m.b275 >= 1)
m.c314 = Constraint(expr= m.b271 + m.b273 >= 1)
m.c315 = Constraint(expr= m.b270 + m.b279 >= 1)
m.c316 = Constraint(expr= m.b270 + m.b276 >= 1)
m.c317 = Constraint(expr= m.b270 + m.b273 >= 1)
m.c318 = Constraint(expr= m.b269 + m.b277 >= 1)
m.c319 = Constraint(expr= m.b269 + m.b276 + m.b278 >= 1)
m.c320 = Constraint(expr= m.b269 + m.b275 + m.b279 >= 1)
m.c321 = Constraint(expr= m.b269 + m.b274 >= 1)
m.c322 = Constraint(expr= m.b269 + m.b273 + m.b279 >= 1)
m.c323 = Constraint(expr= m.b269 + m.b273 + m.b276 >= 1)
m.c324 = Constraint(expr= m.b269 + m.b272 + m.b278 >= 1)
m.c325 = Constraint(expr= m.b269 + m.b272 + m.b276 + m.b279 >= 1)
m.c326 = Constraint(expr= m.b269 + m.b272 + m.b275 >= 1)
m.c327 = Constraint(expr= m.b269 + m.b272 + m.b273 >= 1)
m.c328 = Constraint(expr= m.b269 + m.b271 + m.b279 >= 1)
m.c329 = Constraint(expr= m.b269 + m.b271 + m.b276 >= 1)
m.c330 = Constraint(expr= m.b269 + m.b271 + m.b273 >= 1)
m.c331 = Constraint(expr= m.b268 + m.b278 >= 1)
m.c332 = Constraint(expr= m.b268 + m.b276 + m.b279 >= 1)
m.c333 = Constraint(expr= m.b268 + m.b275 >= 1)
m.c334 = Constraint(expr= m.b268 + m.b273 >= 1)
m.c335 = Constraint(expr= m.b268 + m.b272 + m.b279 >= 1)
m.c336 = Constraint(expr= m.b268 + m.b272 + m.b276 >= 1)
m.c337 = Constraint(expr= m.b268 + m.b272 + m.b273 >= 1)
m.c338 = Constraint(expr= m.b268 + m.b271 + m.b279 >= 1)
m.c339 = Constraint(expr= m.b268 + m.b271 + m.b276 >= 1)
m.c340 = Constraint(expr= m.b268 + m.b271 + m.b273 >= 1)
m.c341 = Constraint(expr= m.b267 + m.b279 >= 1)
m.c342 = Constraint(expr= m.b267 + m.b276 >= 1)
m.c343 = Constraint(expr= m.b267 + m.b273 >= 1)
m.c344 = Constraint(expr= m.b266 + m.b277 >= 1)
m.c345 = Constraint(expr= m.b266 + m.b276 + m.b278 >= 1)
m.c346 = Constraint(expr= m.b266 + m.b275 + m.b279 >= 1)
m.c347 = Constraint(expr= m.b266 + m.b274 >= 1)
m.c348 = Constraint(expr= m.b266 + m.b273 + m.b279 >= 1)
m.c349 = Constraint(expr= m.b266 + m.b273 + m.b276 >= 1)
m.c350 = Constraint(expr= m.b266 + m.b272 + m.b278 >= 1)
m.c351 = Constraint(expr= m.b266 + m.b272 + m.b276 + m.b279 >= 1)
m.c352 = Constraint(expr= m.b266 + m.b272 + m.b275 >= 1)
m.c353 = Constraint(expr= m.b266 + m.b272 + m.b273 >= 1)
m.c354 = Constraint(expr= m.b266 + m.b271 + m.b279 >= 1)
m.c355 = Constraint(expr= m.b266 + m.b271 + m.b276 >= 1)
m.c356 = Constraint(expr= m.b266 + m.b271 + m.b273 >= 1)
m.c357 = Constraint(expr= m.b266 + m.b269 + m.b278 >= 1)
m.c358 = Constraint(expr= m.b266 + m.b269 + m.b276 + m.b279 >= 1)
m.c359 = Constraint(expr= m.b266 + m.b269 + m.b275 >= 1)
m.c360 = Constraint(expr= m.b266 + m.b269 + m.b273 >= 1)
m.c361 = Constraint(expr= m.b266 + m.b269 + m.b272 + m.b279 >= 1)
m.c362 = Constraint(expr= m.b266 + m.b269 + m.b272 + m.b276 >= 1)
m.c363 = Constraint(expr= m.b266 + m.b269 + m.b272 + m.b273 >= 1)
m.c364 = Constraint(expr= m.b266 + m.b268 + m.b279 >= 1)
m.c365 = Constraint(expr= m.b266 + m.b268 + m.b276 >= 1)
m.c366 = Constraint(expr= m.b266 + m.b268 + m.b273 >= 1)
m.c367 = Constraint(expr= m.b265 + m.b279 >= 1)
m.c368 = Constraint(expr= m.b265 + m.b276 >= 1)
m.c369 = Constraint(expr= m.b265 + m.b273 >= 1)
m.c370 = Constraint(expr= m.b264 + m.b278 >= 1)
m.c371 = Constraint(expr= m.b264 + m.b276 + m.b279 >= 1)
m.c372 = Constraint(expr= m.b264 + m.b275 >= 1)
m.c373 = Constraint(expr= m.b264 + m.b273 >= 1)
m.c374 = Constraint(expr= m.b264 + m.b272 + m.b279 >= 1)
m.c375 = Constraint(expr= m.b264 + m.b272 + m.b276 >= 1)
m.c376 = Constraint(expr= m.b264 + m.b272 + m.b273 >= 1)
m.c377 = Constraint(expr= m.b264 + m.b269 + m.b279 >= 1)
m.c378 = Constraint(expr= m.b264 + m.b269 + m.b276 >= 1)
m.c379 = Constraint(expr= m.b264 + m.b269 + m.b273 >= 1)
m.c380 = Constraint(expr= m.b264 + m.b266 + m.b279 >= 1)
m.c381 = Constraint(expr= m.b264 + m.b266 + m.b276 >= 1)
m.c382 = Constraint(expr= m.b264 + m.b266 + m.b273 >= 1)
m.c383 = Constraint(expr= m.b263 + m.b278 >= 1)
m.c384 = Constraint(expr= m.b263 + m.b276 + m.b279 >= 1)
m.c385 = Constraint(expr= m.b263 + m.b275 >= 1)
m.c386 = Constraint(expr= m.b263 + m.b273 >= 1)
m.c387 = Constraint(expr= m.b263 + m.b272 + m.b279 >= 1)
m.c388 = Constraint(expr= m.b263 + m.b272 + m.b276 >= 1)
m.c389 = Constraint(expr= m.b263 + m.b272 + m.b273 >= 1)
m.c390 = Constraint(expr= m.b263 + m.b269 + m.b279 >= 1)
m.c391 = Constraint(expr= m.b263 + m.b269 + m.b276 >= 1)
m.c392 = Constraint(expr= m.b263 + m.b269 + m.b273 >= 1)
m.c393 = Constraint(expr= m.b262 + m.b278 >= 1)
m.c394 = Constraint(expr= m.b262 + m.b276 + m.b279 >= 1)
m.c395 = Constraint(expr= m.b262 + m.b275 >= 1)
m.c396 = Constraint(expr= m.b262 + m.b273 >= 1)
m.c397 = Constraint(expr= m.b262 + m.b272 + m.b279 >= 1)
m.c398 = Constraint(expr= m.b262 + m.b272 + m.b276 >= 1)
m.c399 = Constraint(expr= m.b262 + m.b272 + m.b273 >= 1)
m.c400 = Constraint(expr= m.b262 + m.b269 + m.b279 >= 1)
m.c401 = Constraint(expr= m.b262 + m.b269 + m.b276 >= 1)
m.c402 = Constraint(expr= m.b262 + m.b269 + m.b273 >= 1)
m.c403 = Constraint(expr= m.b262 + m.b266 + m.b279 >= 1)
m.c404 = Constraint(expr= m.b262 + m.b266 + m.b276 >= 1)
m.c405 = Constraint(expr= m.b262 + m.b266 + m.b273 >= 1)
m.c406 = Constraint(expr= m.x46 - 2.02*m.b262 >= 0)
m.c407 = Constraint(expr= m.x47 - 4.01333333333333*m.b263 >= 0)
m.c408 = Constraint(expr= m.x48 - 4.76*m.b264 >= 0)
m.c409 = Constraint(expr= m.x49 - 5.68*m.b265 >= 0)
m.c410 = Constraint(expr= m.x49 - 5.96*m.b266 >= 0)
m.c411 = Constraint(expr= m.x50 - 38.2666666666667*m.b267 >= 0)
m.c412 = Constraint(expr= m.x50 - 40.18*m.b268 >= 0)
m.c413 = Constraint(expr= m.x50 - 42.0933333333333*m.b269 >= 0)
m.c414 = Constraint(expr= m.x51 - 90.2533333333333*m.b270 >= 0)
m.c415 = Constraint(expr= m.x51 - 94.7666666666667*m.b271 >= 0)
m.c416 = Constraint(expr= m.x51 - 99.28*m.b272 >= 0)
m.c417 = Constraint(expr= m.x52 - 6.59333333333333*m.b273 >= 0)
m.c418 = Constraint(expr= m.x53 - 56.24*m.b274 >= 0)
m.c419 = Constraint(expr= m.x53 - 59.0533333333333*m.b275 >= 0)
m.c420 = Constraint(expr= m.x53 - 61.8666666666667*m.b276 >= 0)
m.c421 = Constraint(expr= m.x54 - 51.1733333333333*m.b277 >= 0)
m.c422 = Constraint(expr= m.x54 - 53.7333333333333*m.b278 >= 0)
m.c423 = Constraint(expr= m.x54 - 56.2866666666667*m.b279 >= 0)
m.c424 = Constraint(expr= m.x55 - 35.84*m.b280 >= 0)
m.c425 = Constraint(expr= m.x55 - 37.7266666666667*m.b281 >= 0)
m.c426 = Constraint(expr= m.x55 - 39.6133333333333*m.b282 >= 0)
m.c427 = Constraint(expr= m.x55 - 41.5*m.b283 >= 0)
m.c428 = Constraint(expr= m.x56 - 56.8066666666667*m.b284 >= 0)
m.c429 = Constraint(expr= m.x56 - 59.6466666666667*m.b285 >= 0)
m.c430 = Constraint(expr= m.x56 - 62.4933333333333*m.b286 >= 0)
m.c431 = Constraint(expr= m.x57 - 80.9066666666667*m.b287 >= 0)
m.c432 = Constraint(expr= m.x58 - 26.1466666666667*m.b288 >= 0)
m.c433 = Constraint(expr= m.x59 - 38*m.b289 >= 0)
m.c434 = Constraint(expr= m.x60 - 59.2733333333333*m.b290 >= 0)
m.c435 = Constraint(expr= m.x60 - 62.24*m.b291 >= 0)
m.c436 = Constraint(expr= - m.x106 + m.x217 <= 0)
m.c437 = Constraint(expr= - m.x106 + m.x218 <= 0)
m.c438 = Constraint(expr= - m.x106 + m.x219 <= 0)
m.c439 = Constraint(expr= - m.x107 + m.x220 <= 0)
m.c440 = Constraint(expr= - m.x107 + m.x221 <= 0)
m.c441 = Constraint(expr= - m.x107 + m.x222 <= 0)
m.c442 = Constraint(expr= - m.x108 + m.x223 <= 0)
m.c443 = Constraint(expr= - m.x108 + m.x224 <= 0)
m.c444 = Constraint(expr= - m.x108 + m.x225 <= 0)
m.c445 = Constraint(expr= - m.x109 + m.x226 <= 0)
m.c446 = Constraint(expr= - m.x109 + m.x227 <= 0)
m.c447 = Constraint(expr= - m.x109 + m.x228 <= 0)
m.c448 = Constraint(expr= - m.x110 + m.x229 <= 0)
m.c449 = Constraint(expr= - m.x110 + m.x230 <= 0)
m.c450 = Constraint(expr= - m.x110 + m.x231 <= 0)
m.c451 = Constraint(expr= - m.x111 + m.x232 <= 0)
m.c452 = Constraint(expr= - m.x111 + m.x233 <= 0)
m.c453 = Constraint(expr= - m.x111 + m.x234 <= 0)
m.c454 = Constraint(expr= - m.x112 + m.x235 <= 0)
m.c455 = Constraint(expr= - m.x112 + m.x236 <= 0)
m.c456 = Constraint(expr= - m.x112 + m.x237 <= 0)
m.c457 = Constraint(expr= - m.x113 + m.x238 <= 0)
m.c458 = Constraint(expr= - m.x113 + m.x239 <= 0)
m.c459 = Constraint(expr= - m.x113 + m.x240 <= 0)
m.c460 = Constraint(expr= - m.x114 + m.x241 <= 0)
m.c461 = Constraint(expr= - m.x114 + m.x242 <= 0)
m.c462 = Constraint(expr= - m.x114 + m.x243 <= 0)
m.c463 = Constraint(expr= - m.x115 + m.x244 <= 0)
m.c464 = Constraint(expr= - m.x115 + m.x245 <= 0)
m.c465 = Constraint(expr= - m.x115 + m.x246 <= 0)
m.c466 = Constraint(expr= - m.x116 + m.x247 <= 0)
m.c467 = Constraint(expr= - m.x116 + m.x248 <= 0)
m.c468 = Constraint(expr= - m.x116 + m.x249 <= 0)
m.c469 = Constraint(expr= - m.x117 + m.x250 <= 0)
m.c470 = Constraint(expr= - m.x117 + m.x251 <= 0)
m.c471 = Constraint(expr= - m.x117 + m.x252 <= 0)
m.c472 = Constraint(expr= - m.x118 + m.x253 <= 0)
m.c473 = Constraint(expr= - m.x118 + m.x254 <= 0)
m.c474 = Constraint(expr= - m.x118 + m.x255 <= 0)
m.c475 = Constraint(expr= - m.x119 + m.x256 <= 0)
m.c476 = Constraint(expr= - m.x119 + m.x257 <= 0)
m.c477 = Constraint(expr= - m.x119 + m.x258 <= 0)
m.c478 = Constraint(expr= - m.x120 + m.x259 <= 0)
m.c479 = Constraint(expr= - m.x120 + m.x260 <= 0)
m.c480 = Constraint(expr= - m.x120 + m.x261 <= 0)
m.c481 = Constraint(expr= m.b265 - m.b266 >= 0)
m.c482 = Constraint(expr= m.b267 - m.b268 >= 0)
m.c483 = Constraint(expr= m.b268 - m.b269 >= 0)
m.c484 = Constraint(expr= m.b270 - m.b271 >= 0)
m.c485 = Constraint(expr= m.b271 - m.b272 >= 0)
m.c486 = Constraint(expr= m.b274 - m.b275 >= 0)
m.c487 = Constraint(expr= m.b275 - m.b276 >= 0)
m.c488 = Constraint(expr= m.b277 - m.b278 >= 0)
m.c489 = Constraint(expr= m.b278 - m.b279 >= 0)
m.c490 = Constraint(expr= m.b280 - m.b281 >= 0)
m.c491 = Constraint(expr= m.b281 - m.b282 >= 0)
m.c492 = Constraint(expr= m.b282 - m.b283 >= 0)
m.c493 = Constraint(expr= m.b284 - m.b285 >= 0)
m.c494 = Constraint(expr= m.b285 - m.b286 >= 0)
m.c495 = Constraint(expr= m.b290 - m.b291 >= 0)
m.c496 = Constraint(expr= m.x124 - m.x125 >= 0)
m.c497 = Constraint(expr= m.x125 - m.x126 >= 0)
| 37.077273 | 113 | 0.650954 |
c2dc055259ce8bd609c68240256323675bd4a1ec | 1,236 | py | Python | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/cloudsign/models/StampInfo.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/cloudsign/models/StampInfo.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/cloudsign/models/StampInfo.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
| 34.333333 | 107 | 0.706311 |
c2de7d7431503150ac6343d65fe89abecb277cb0 | 3,462 | py | Python | authors/apps/likedislike/tests/test_likedislike.py | andela/ah-code-titans | 4f1fc77c2ecdf8ca15c24327d39fe661eac85785 | [
"BSD-3-Clause"
] | null | null | null | authors/apps/likedislike/tests/test_likedislike.py | andela/ah-code-titans | 4f1fc77c2ecdf8ca15c24327d39fe661eac85785 | [
"BSD-3-Clause"
] | 20 | 2018-11-26T16:22:46.000Z | 2018-12-21T10:08:25.000Z | authors/apps/likedislike/tests/test_likedislike.py | andela/ah-code-titans | 4f1fc77c2ecdf8ca15c24327d39fe661eac85785 | [
"BSD-3-Clause"
] | 3 | 2019-01-24T15:39:42.000Z | 2019-09-25T17:57:08.000Z | from rest_framework import status
from django.urls import reverse
from authors.apps.articles.models import Article
from authors.base_test_config import TestConfiguration
slug = None
| 28.61157 | 76 | 0.593299 |
c2dfd049645c43b5bbb9f0aae0f7145cf2d53a0b | 6,843 | py | Python | start_gui.py | NIC-VICOROB/sub-cortical_segmentation | 324b8f998a666cee6ef94944acd85e2bcd503701 | [
"BSD-3-Clause"
] | 7 | 2018-06-17T02:48:49.000Z | 2021-02-16T05:38:10.000Z | start_gui.py | NIC-VICOROB/sub-cortical_segmentation | 324b8f998a666cee6ef94944acd85e2bcd503701 | [
"BSD-3-Clause"
] | null | null | null | start_gui.py | NIC-VICOROB/sub-cortical_segmentation | 324b8f998a666cee6ef94944acd85e2bcd503701 | [
"BSD-3-Clause"
] | 4 | 2017-09-22T08:52:36.000Z | 2019-07-15T14:44:51.000Z | # ------------------------------------------------------------
# Training script example for Keras implementation
#
# Kaisar Kushibar (2019)
# kaisar.kushibar@udg.edu
# ------------------------------------------------------------
import os
import sys
import numpy as np
from functools import partial
from tkinter import filedialog
from tkinter import *
from tkinter.ttk import *
import Queue
import ConfigParser
import nibabel as nib
from cnn_cort.load_options import *
from keras.utils import np_utils
CURRENT_PATH = os.getcwd()
user_config = ConfigParser.RawConfigParser()
user_config.read(os.path.join(CURRENT_PATH, 'configuration.cfg'))
options = load_options(user_config)
from cnn_cort.base import load_data, generate_training_set, testing
from cnn_cort.keras_net import get_callbacks, get_model
from train_test_task import TestTask, TrainTask
root = Tk()
app = Application(master=root)
app.mainloop()
| 37.80663 | 136 | 0.650592 |
c2dfea80584df5547d3541ae560b3208410a1788 | 3,875 | py | Python | source/yahoo_finance.py | mengwangk/myinvestor-toolkit | 3dca9e1accfccf1583dcdbec80d1a0fe9dae2e81 | [
"MIT"
] | 7 | 2019-10-13T18:58:33.000Z | 2021-08-07T12:46:22.000Z | source/yahoo_finance.py | mengwangk/myinvestor-toolkit | 3dca9e1accfccf1583dcdbec80d1a0fe9dae2e81 | [
"MIT"
] | 7 | 2019-12-16T21:25:34.000Z | 2022-02-10T00:11:22.000Z | source/yahoo_finance.py | mengwangk/myinvestor-toolkit | 3dca9e1accfccf1583dcdbec80d1a0fe9dae2e81 | [
"MIT"
] | 4 | 2020-02-01T11:23:51.000Z | 2021-12-13T12:27:18.000Z | """
=======================
Yahoo Finance source
=======================
"""
import re
import requests
import time
from json import loads
from bs4 import BeautifulSoup
from yahoofinancials import YahooFinancials
# Yahoo Finance data source
# Private method to get time interval code
def _build_historical_dividend_url(self, ticker, hist_oj, filter='div'):
url = self._BASE_YAHOO_URL + ticker + '/history?period1=' + str(hist_oj['start']) + '&period2=' + \
str(hist_oj['end']) + '&interval=' + hist_oj['interval'] + '&filter=' + filter + '&frequency=' + \
hist_oj['interval']
return url
# Private Method to take scrapped data and build a data dictionary with
# Public Method for user to get historical stock dividend data
# Public Method to get stock data
| 40.789474 | 112 | 0.627097 |
c2e0b6b1770d351e8357e3bd5c3075735bda47ee | 695 | py | Python | django_monitor/price_monitor/spider/enterprise/enterprise/items.py | jasonljc/enterprise-price-monitor | 616396243e909d3584f4cfcc53d4e156510da4bb | [
"MIT"
] | null | null | null | django_monitor/price_monitor/spider/enterprise/enterprise/items.py | jasonljc/enterprise-price-monitor | 616396243e909d3584f4cfcc53d4e156510da4bb | [
"MIT"
] | null | null | null | django_monitor/price_monitor/spider/enterprise/enterprise/items.py | jasonljc/enterprise-price-monitor | 616396243e909d3584f4cfcc53d4e156510da4bb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
| 26.730769 | 51 | 0.684892 |
c2e195ab4b278f23e01854b0146790e6742d3324 | 26,510 | py | Python | photoz.py | martinkilbinger/shapepipe_photoz | da4547774f6d599fb0106273eb8ab9819b7fd9eb | [
"MIT"
] | null | null | null | photoz.py | martinkilbinger/shapepipe_photoz | da4547774f6d599fb0106273eb8ab9819b7fd9eb | [
"MIT"
] | null | null | null | photoz.py | martinkilbinger/shapepipe_photoz | da4547774f6d599fb0106273eb8ab9819b7fd9eb | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 12 10:02:58 2020
@author: Xavier Jimenez
"""
#------------------------------------------------------------------#
# # # # # Imports # # # # #
#------------------------------------------------------------------#
import numpy as np
import os
import shutil
import glob
import pandas as pd
import importlib
from joblib import Parallel, delayed
from tqdm import tqdm
import argparse
import warnings
warnings.filterwarnings('ignore')
from functions import *
#------------------------------------------------------------------#
# # # # # Create catalog # # # # #
#------------------------------------------------------------------#
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--nodes", required=False, type=int, nargs="?", const=1)
parser.add_argument("-s", "--survey", required=False, type=str, nargs="?", const='test')
parser.add_argument("-c", "--clean", required=False, type=bool, nargs="?", const=False)
parser.add_argument("-m", "--make", required=False, type=bool, nargs="?", const=False)
parser.add_argument("-j", "--join", required=False, type=bool, nargs="?", const=False)
parser.add_argument("-g", "--generate_plots", required=False, type=bool, nargs="?", const=False)
parser.add_argument("-p", "--preprocess", required=False, type=str, nargs="?", const=None)
parser.add_argument("-l", "--learning", required=False, type=bool, nargs="?", const=False)
parser.add_argument("-o", "--optimize", required=False, type=str, nargs="?", const=None)
parser.add_argument("-a", "--algorithm", required=False, type=str, nargs="?", const='RF')
parser.add_argument("-i", "--input", required=False, type=str)
args = parser.parse_args()
#------------------------------------------------------------------#
# # # # # PS3PI # # # # #
#------------------------------------------------------------------#
path = os.getcwd() + '/'
if args.input is None:
import params
else:
params = importlib.import_module(args.input)
if args.nodes is None:
args.nodes = 1
if args.algorithm is None:
args.algorithm = 'RF'
if args.survey is None:
args.survey = 'test'
if args.survey == 'test':
print('Modules loaded properly')
if args.preprocess is None:
args.preprocess = 'drop'
elif args.survey == 'ps3pi_cfis' or args.survey == 'unions':
bands = params.bands
output_path = params.output_path
output_name = params.output_name
temp_path = params.temp_path
#------------------------------------------------------------------#
# # # # # CLEAN # # # # #
#------------------------------------------------------------------#
if args.clean == True:
GenFiles = GenerateFiles(args.survey, bands, temp_path, output_name, output_path)
GenFiles.clean_temp_directories()
GenFiles.make_directories()
#------------------------------------------------------------------#
# # # # # MAKE INDIVIDUAL TILE CATALOGS # # # # #
#------------------------------------------------------------------#
if args.make == True:
spectral_path = params.spectral_path
spectral_names = params.spectral_names
path_to_tile_run = params.path_to_tile_run
spectral_surveys = params.spectral_surveys
vignet = params.vignet
cat = MakeCatalogs(args.survey, bands, temp_path, output_name, output_path)
for i in range(len(spectral_names)):
cat.make_survey_catalog(spectral_path, spectral_names[i])
if params.input_path == None:
out_dir = os.listdir(path_to_tile_run + args.survey + '/%s/output/'%(spectral_surveys[i]))[-1]
input_path = path_to_tile_run + args.survey + '/%s/output/%s/paste_cat_runner/output/'%(spectral_surveys[i], out_dir)
else:
input_path = params.input_path
paste_dir = os.listdir(input_path)
Parallel(n_jobs=args.nodes)(delayed(cat.make_catalog)(p, paste_dir, input_path, spectral_names[i], vignet=vignet) for p in tqdm(range(len(paste_dir))))
#------------------------------------------------------------------#
# # # # # JOIN INDIVIDUAL TILE CATALOGS # # # # #
#------------------------------------------------------------------#
if args.join == True:
vignet = params.vignet
cat = MakeCatalogs(args.survey, bands, temp_path, output_name, output_path)
cat.merge_catalogs(vignet=vignet)
#------------------------------------------------------------------#
# # # # # SAVE FIGURES # # # # #
#------------------------------------------------------------------#
if args.generate_plots == True:
spectral_names = params.spectral_names
GenPlot = GeneratePlots(args.survey, bands, temp_path, output_name=output_name, spectral_names=spectral_names, output_path=output_path)
GenPlot.plot_matched_z_spec_hist()
GenPlot.plot_unmatched_z_spec_hist()
#------------------------------------------------------------------#
# # # # # MACHINE LEARNING ALGORITHMS # # # # #
#------------------------------------------------------------------#
if args.learning == True:
GenFiles = GenerateFiles(args.survey, bands, path, output_name, output_path=output_path)
GenFiles.make_directories(output=True)
path_to_csv = params.path_to_csv
spectral_names = params.spectral_names
weights = params.weights
cv = params.cv
max_evals = params.max_evals
feature_engineering = params.feature_engineering
feature_importance = params.feature_importance
plot = params.plot
if path_to_csv is None:
if args.survey == 'ps3pi_cfis':
path_to_csv = output_path + 'output/' + args.survey + '/' + output_name + '/files/' + output_name + '.csv'
ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name, output_path=output_path, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
df, df_unmatched = ML.merge_cfis_r_cfht_u_medium_deep_i_g_z()
if feature_engineering == True:
# df_list = ML.feature_engineering(df, bands=['r', 'u', 'i', 'z', 'g'])
df_list = ML.feature_engineering(df, bands=['r', 'u', 'i', 'z', 'g'], color_order=['i', 'g' , 'r', 'z', 'u'])
else:
df_list = [df]
# print(df.head(10))
if plot == True:
ML.plot_corrmat(df)
GenPlot = GeneratePlots(args.survey, bands, temp_path, output_name=output_name, output_path=output_path, spectral_names=spectral_names)
# GenPlot.plot_mags(df, df_unmatched)
elif args.survey == 'unions':
path_to_csv = output_path + 'output/' + args.survey + '/' + output_name + '/files/' + output_name + '.csv'
ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name, output_path=output_path, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
df = ML.dataframe()
df_unmatched = ML.unmatched_dataframe()
df = ML.gal_g()
if plot == True:
ML.plot_corrmat(df)
GenPlot = GeneratePlots(args.survey, bands, temp_path, output_name=output_name, output_path=output_path, spectral_names=spectral_names)
GenPlot.plot_mags(df, df_unmatched)
else:
raise TypeError("--survey needs to be set to 'unions' or 'ps3pi_cfis', please specify the full path to your DataFrame")
elif path_to_csv is not None:
ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
df = ML.dataframe()
# ML.plot_corrmat(df)
algs = {'RF': RandomForest, 'ANN': ArtificialNeuralNetwork, 'LASSO': LassoRegression, 'ENET': ElasticNetRegression,
'XGB':XGBoost, 'KRR':KernelRidgeRegression, 'SVR': SupportVectorRegression, 'LGB': LightGBM, 'GBR': GradientBoostingRegression}
if args.algorithm == 'BEST':
algs = {'RF': RandomForest, 'ANN': ArtificialNeuralNetwork, 'SVR': SupportVectorRegression, 'GBR': GradientBoostingRegression}
best_score = 1
best_alg = 'none'
# alg_names = np.array(list(algs.items()))[:,1]
if weights == True:
cat = MakeCatalogs(args.survey, bands, temp_path, output_name, output_path)
weights = cat.compute_weights(df, column = 'r')
elif type(weights) == str:
weights = np.load(weights)
else:
weights = None
global_score = 1
best_dict = pd.DataFrame(data={}, index=['score', 'score std'])
y_pred_dict = {}
y_test_dict = {}
for alg_name in algs:
best_score= 1
alg = algs[alg_name]
print('[Feature engineering]')
print('---------------------------------------------------------------')
for df in df_list:
method = alg(survey = args.survey, bands = bands, output_name = output_name, temp_path=temp_path, dataframe=df, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
score = method.score()
print(list(df.columns))
print('[preprocess] %s'%score[4])
print('[%s '%alg_name +'score] {:.3f} {:.3f}'.format(score[5], score[6]))
if score[5] < best_score:
print('[NEW BEST]')
print("%s: "%alg_name + "Sigma: {:.3f} {:.4f}, outlier rate: {:.3f} {:.3f} % ".format(score[0], score[1], score[2]*100, score[3]*100), end='\r')
best_score = score[5]
best_score_std = score[6]
bscore = score
df_best = df
best_columns = df.columns
best_preprocess = score[4]
best_dict[alg_name] = [best_score, best_score_std]
method = alg(survey = args.survey, bands = bands, output_name = output_name, temp_path=temp_path, dataframe=df_best, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=best_preprocess, n_jobs=args.nodes)
_, y_pred, y_test = method.model()
y_pred_dict[alg] = y_pred
y_test_dict[alg] = y_test
break
best_dict.to_cs(path + 'output/%s/%s/files/'%(args.survey, output_name) + 'Best_scores_' + output_name + '.csv', index=False)
# score = method.score()
print('---------------------------------------------------------------')
print("%s: "%alg_name + "Sigma: {:.3f} {:.4f}, outlier rate: {:.3f} {:.3f} % ".format(bscore[0], bscore[1], bscore[2]*100, bscore[3]*100))
if best_score < global_score:
global_score = best_score
global_score_std = best_score_std
gscore = bscore
best_alg = alg_name
df_global = df_best
global_columns = best_columns
global_preprocess = best_preprocess
print('[NEW BEST] %s'%best_alg + ' score: {:.3f} {:.3f}'.format(global_score, global_score_std))
print('---------------------------------------------------------------')
best_dict.sort_values(by = 'score', axis = 1, inplace=True)
print(best_dict.head())
df_best = df_global
alg = algs[best_alg]
method = alg(survey = args.survey, bands = bands, output_name = output_name, temp_path=temp_path, dataframe=df_best, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
if feature_importance == True:
if best_alg != 'ANN':
method.permutation()
if plot == True:
method.plot(lim=1.8)
print('---------------------------------------------------------------')
print('[BEST] preprocess: %s'%global_preprocess)
print('[BEST] score: {:.3f} {:.3f}'.format(global_score, global_score_std))
print(list(global_columns))
print("[%s] "%args.algorithm + "%s: "%best_alg + "Sigma: {:.3f} {:.4f}, outlier rate: {:.3f} {:.3f} % ".format(gscore[0], gscore[1], gscore[2]*100, bscore[3]*100))
print('---------------------------------------------------------------')
else:
try:
alg = algs[args.algorithm]
except:
raise TypeError('MLM is not defined')
if weights == True:
cat = MakeCatalogs(args.survey, bands, temp_path, output_name, output_path)
weights = cat.compute_weights(df, column = 'r')
elif type(weights) == str:
weights = np.load(weights)
else:
weights = None
best_score = 1
print('[Feature engineering]')
print('---------------------------------------------------------------')
for df in df_list:
method = alg(survey = args.survey, bands = bands, output_name = output_name, temp_path=temp_path, dataframe=df, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
# method.plot(lim=1.8)
# method.permutation()
# df = method.filter()
# df.drop(columns=['r-z'], inplace=True)
score = method.score(df)
print(list(df.columns))
print('[preprocess] %s'%score[4])
print('[%s '%args.algorithm + 'score] {:.3f} {:.3f}'.format(score[5], score[6]))
if score[5] < best_score:
print('[NEW BEST]')
print("%s: "%args.algorithm + "Sigma: {:.3f} {:.4f}, outlier rate: {:.3f} {:.3f} % ".format(score[0], score[1], score[2]*100, score[3]*100))
best_score = score[5]
best_score_std = score[6]
bscore = score
df_best = df
best_columns = df.columns
best_preprocess = score[4]
# break
method = alg(survey = args.survey, bands = bands, output_name = output_name, temp_path=temp_path, dataframe=df_best, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
if feature_importance == True:
if args.algorithm != 'ANN':
method.permutation()
if plot == True:
method.plot(lim=1.5)
if params.morph_importance == True and params.weights == False and args.algorithm == 'RF':
method.morph_importance(df_best)
print('---------------------------------------------------------------')
print('[BEST] preprocess: %s'%best_preprocess)
print('[BEST] score: {:.3f} {:.3f}'.format(best_score, best_score_std))
print(list(best_columns))
print("%s: "%args.algorithm + "Sigma: {:.3f} {:.4f}, outlier rate: {:.3f} {:.3f} % ".format(bscore[0], bscore[1], bscore[2]*100, bscore[3]*100))
print('---------------------------------------------------------------')
#------------------------------------------------------------------#
# # # # # OPTIMIZE LEARNING ALGORITHMS # # # # #
#------------------------------------------------------------------#
if args.optimize == 'HyperOpt' or args.optimize == 'RandomSearch' or args.optimize == 'GridSearch':
# GenFiles = GenerateFiles(args.survey, bands, path, output_name, output_path=output_path)
# GenFiles.make_directories(output=True)
# path_to_csv = params.path_to_csv
# max_evals = params.max_evals
weights = params.weights
# cv = params.cv
algs = {'RF': RandomForestOptimizer, 'SVR': SVROptimizer, 'XGB': XGBoostOptimizer, 'KRR': KRROptimizer, 'ANN': ANNOptimizer}
try:
alg = algs[args.algorithm]
except:
raise ValueError('Method does not have an optimization algorithm')
if weights == True:
cat = MakeCatalogs(args.survey, bands, temp_path, output_name, output_path)
weights = cat.compute_weights(df_best, column = 'r')
elif type(weights) == str:
weights = np.load(weights)
else:
weights = None
print('[%s] optimization'%args.optimize)
# if args.algorithm == 'ANN':
# ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name, validation_set=True)
# X_train, X_val, X_test, Y_train, Y_val, Y_test = ML.data()
# X_train, Y_train, X_val, Y_val = data()
# trials = Trials()
# _, best_model = optim.minimize(model=model,data=data,algo=tpe.suggest, max_evals=max_evals, trials=trials)
# Y_pred = best_model.predict(X_test, verbose = 0)
# print(type(Y_pred), type(Y_test))
# sigma, eta = sigma_eta(Y_test.to_numpy().flatten(), Y_pred.flatten())
# print("%s Opt : "%args.algorithm + "Sigma: {:.3f}, outlier rate: {:.3f} % ".format(sigma, eta*100))
# ML.plot_zphot_zspec(Y_pred.flatten(), method='ANN_Opt', lim=1.8)
# ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
# df = ML.dataframe()
# ML.plot_corrmat(df)
# ModelOptimizer = alg(survey = args.survey, bands = bands, output_name = output_name, dataframe=df, path_to_csv=None, validation_set=False)
# _, sigma, eta = ModelOptimizer.best_params(max_evals=10)
# print("%s Opt : "%args.algorithm + "Sigma: {:.3f}, outlier rate: {:.3f} % ".format(sigma, eta*100))
# if path_to_csv is None:
# path_to_csv = output_path + 'output/' + args.survey + '/' + output_name + '/files/' + output_name + '.csv'
# ML = LearningAlgorithms(survey = args.survey, bands = bands, dataframe=df_best, output_name = output_name, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
# df, df_unmatched = ML.merge_cfis_r_cfht_u_medium_deep_i_g_z()
# ML.plot_corrmat(df_best, figure_name=args.algorithm+'_best_corrmat')
ModelOptimizer = alg(survey = args.survey, bands = bands, output_name = output_name, dataframe=df_best, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=best_preprocess, n_jobs=args.nodes)
# ModelOptimizer.debug()
_, sigma, eta, score = ModelOptimizer.best_params(max_evals=max_evals, method=args.optimize)
print('---------------------------------------------------------------')
print('[BEST OPT] score: {:.3f}'.format(score))
print("%s %s : "%(args.algorithm, args.optimize) + "Sigma: {:.3f}, outlier rate: {:.3f} % ".format(sigma, eta*100))
print('---------------------------------------------------------------')
# elif path_to_csv is not None:
# ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
# df = ML.dataframe()
# ML.plot_corrmat(df)
# ModelOptimizer = alg(survey = args.survey, bands = bands, output_name = output_name, dataframe=df, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
# _, sigma, eta = ModelOptimizer.best_params(max_evals=max_evals, method=args.optimize)
# print("%s %s : "%(args.algorithm, args.optimize) + "Sigma: {:.3f}, outlier rate: {:.3f} % ".format(sigma, eta*100))
# else:
# ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name)
# df = ML.dataframe()
# df = ML.preprocess(df, method = args.preprocess)
# ML.plot_corrmat(df)
# ModelOptimizer = alg(survey = args.survey, bands = bands, output_name = output_name, dataframe=df, path_to_csv=False, validation_set=False)
# _, sigma, eta = ModelOptimizer.best_params(max_evals=max_evals, method=args.optimize)
# print("%s %s : "%(args.algorithm, args.optimize) + "Sigma: {:.3f}, outlier rate: {:.3f} % ".format(sigma, eta*100))
#------------------------------------------------------------------#
# # # # # UNIONS # # # # #
#------------------------------------------------------------------#
elif args.survey == 'unions_deprecated':
spectral_path = '/home/mkilbing/astro/data/CFIS/spectro_surveys/'
spectral_names = ['data_DR14_LRG_N', 'data_DR14_LRG_S', 'galaxy_DR12v5_CMASSLOWZTOT_North', 'galaxy_DR12v5_CMASSLOWZTOT_South','sdss_main_gal']
# spectral_names = ['sdss_main_gal']
spectral_surveys = ['SDSS', 'SDSS', 'eBOSS', 'eBOSS', 'SDSS_2']
# spectral_surveys = ['SDSS_2']
output_name = 'CFIS_matched_eBOSS_SDSS_catalog_RUIZ'
# output_name = 'CFIS_matched_SDSS_2_catalog_RUIZ'
output_path = path
temp_path = '/n17data/jimenez/temp/'
bands = ['R', 'U', 'I', 'Z']
# out_dir = os.listdir("/n17data/jimenez/shaperun_unions/output_%s/"%(spectral_surveys[i]))[-1]
# path_to_tile_run = '/n17data/jimenez/shaperun/'
# input_path = path_to_tile_run + args.survey + '/%s/output/%s/paste_cat_runner/output/'%(spectral_surveys[i], out_dir)
# paste_dir = os.listdir(input_path)
if args.clean == True:
GenFiles = GenerateFiles(args.survey, bands, temp_path)
GenFiles.clean_temp_directories()
GenFiles.make_directories()
elif args.make == True:
cat = MakeCatalogs(args.survey, bands, temp_path)
# vignet = [False, False, False, False, False]
for i in range(len(spectral_names)):
cat.make_survey_catalog(spectral_path, spectral_names[i])
out_dir = os.listdir("/n17data/jimenez/shaperun_unions/output_%s/"%(spectral_surveys[i]))[-1]
paste_dir = os.listdir('/n17data/jimenez/shaperun_unions/output_%s/%s/paste_cat_runner/output/'%(spectral_surveys[i], out_dir))
input_path = '/n17data/jimenez/shaperun_unions/output_%s/%s/paste_cat_runner/output/'%(spectral_surveys[i], out_dir)
Parallel(n_jobs=args.nodes)(delayed(cat.make_catalog)(p, paste_dir, input_path, spectral_names[i], vignet=False) for p in tqdm(range(len(paste_dir))))
elif args.join == True:
cat = MakeCatalogs(args.survey, bands, temp_path)
cat.merge_catalogs(output_name, vignet=False)
elif args.generate_plots == True:
GenPlot = GeneratePlots(args.survey, bands, temp_path, csv_name=output_name, spectral_names=spectral_names)
# GenPlot.plot_d2d()
GenPlot.plot_matched_r_i_i_z()
GenPlot.plot_matched_u_r_r_i()
GenPlot.plot_matched_z_spec_hist()
# GenPlot.plot_unmatched_r_i_i_z()
# GenPlot.plot_unmatched_u_r_r_i()
GenPlot.plot_unmatched_z_spec_hist()
# if args.survey != 'unions' or args.survey != 'ps3pi_cfis':
# print("Survey must either be 'unions' or 'ps3pi_cfis'")
# raise SyntaxError("Survey must either be 'unions' or 'ps3pi_cfis'")
| 54.102041 | 289 | 0.515805 |
c2e28399830065a80ecde0af2720320c90368d6c | 776 | py | Python | units/prefix/metactl/bin/bin.py | hoefkensj/BTRWin | 1432868ad60155f5ae26f33903a890497e089480 | [
"MIT"
] | null | null | null | units/prefix/metactl/bin/bin.py | hoefkensj/BTRWin | 1432868ad60155f5ae26f33903a890497e089480 | [
"MIT"
] | null | null | null | units/prefix/metactl/bin/bin.py | hoefkensj/BTRWin | 1432868ad60155f5ae26f33903a890497e089480 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import betterwin
G=betterwin.confcfg.load_global_config()
if __name__ == '__main__':
| 19.897436 | 123 | 0.666237 |
c2e55b26934d85e03276f6736007bed25c578301 | 1,348 | py | Python | network/fs_net_repo/PoseTs.py | lolrudy/GPV_pose | f326a623b3e45e6edfc1963b068e8e7aaea2bfff | [
"MIT"
] | 10 | 2022-03-16T02:14:56.000Z | 2022-03-31T19:01:34.000Z | network/fs_net_repo/PoseTs.py | lolrudy/GPV_pose | f326a623b3e45e6edfc1963b068e8e7aaea2bfff | [
"MIT"
] | 1 | 2022-03-18T06:43:16.000Z | 2022-03-18T06:56:35.000Z | network/fs_net_repo/PoseTs.py | lolrudy/GPV_pose | f326a623b3e45e6edfc1963b068e8e7aaea2bfff | [
"MIT"
] | 2 | 2022-03-19T13:06:28.000Z | 2022-03-19T16:08:18.000Z | import torch.nn as nn
import torch
import torch.nn.functional as F
import absl.flags as flags
from absl import app
FLAGS = flags.FLAGS
# Point_center encode the segmented point cloud
# one more conv layer compared to original paper
if __name__ == "__main__":
app.run(main)
| 25.433962 | 55 | 0.58457 |
c2e64fced5d7c9dff05319da1da37700db19293c | 2,653 | py | Python | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/analysis/exportXGBoostNode.py | t-triobox/gQuant | 6ee3ba104ce4c6f17a5755e7782298902d125563 | [
"Apache-2.0"
] | null | null | null | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/analysis/exportXGBoostNode.py | t-triobox/gQuant | 6ee3ba104ce4c6f17a5755e7782298902d125563 | [
"Apache-2.0"
] | null | null | null | gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/analysis/exportXGBoostNode.py | t-triobox/gQuant | 6ee3ba104ce4c6f17a5755e7782298902d125563 | [
"Apache-2.0"
] | null | null | null | from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.portsSpecSchema import (ConfSchema,
PortsSpecSchema)
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.util import get_file_path
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
| 31.211765 | 74 | 0.547682 |
c2e989f1d471ff586e3048f193d3b0ec35055cc5 | 623 | py | Python | Python/main.py | mrn4344/Mandelbrot | 8958b6453b3feafa1329fa18dc2822ab8985cb41 | [
"MIT"
] | null | null | null | Python/main.py | mrn4344/Mandelbrot | 8958b6453b3feafa1329fa18dc2822ab8985cb41 | [
"MIT"
] | null | null | null | Python/main.py | mrn4344/Mandelbrot | 8958b6453b3feafa1329fa18dc2822ab8985cb41 | [
"MIT"
] | null | null | null | import mandelbrot as mand
from PIL import Image
width = 1280
height = 720
scale = 2
if __name__ == "__main__":
main()
| 22.25 | 67 | 0.5313 |
c2ea645b92efeff22da8081f24ec4c1af5469ade | 1,699 | py | Python | blockformer/position/relative_position_bias.py | colinski/blockformer | 56be6abc08dc25ab97c526384e9c69f6c814c3ed | [
"MIT"
] | null | null | null | blockformer/position/relative_position_bias.py | colinski/blockformer | 56be6abc08dc25ab97c526384e9c69f6c814c3ed | [
"MIT"
] | null | null | null | blockformer/position/relative_position_bias.py | colinski/blockformer | 56be6abc08dc25ab97c526384e9c69f6c814c3ed | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn.utils.weight_init import trunc_normal_
#adapted from open-mmlab implementation of swin transformer
| 38.613636 | 78 | 0.638611 |
c2ec07613b902faccf5658ff9af13a51b5b0ec16 | 6,065 | py | Python | __main__.py | vEnhance/dragon | ada173a05e986941f20002ca726041a698eb8a1d | [
"MIT"
] | null | null | null | __main__.py | vEnhance/dragon | ada173a05e986941f20002ca726041a698eb8a1d | [
"MIT"
] | null | null | null | __main__.py | vEnhance/dragon | ada173a05e986941f20002ca726041a698eb8a1d | [
"MIT"
] | null | null | null | #Import some stuff
import os
import zipfile
import ConfigParser
import string
import argparse
from xml.etree.ElementTree import ElementTree
from constants import SHORT_NAME, VERSION_NUMBER, FULL_NAME, GEOGEBRA_XML_LOCATION
from diagram import AsyDiagram, doCompileDiagramObjects, drawDiagram
# Argument parser {{{
parser = argparse.ArgumentParser(
description = "%s %s, by v_Enhance: %s" %(SHORT_NAME, VERSION_NUMBER, FULL_NAME),
epilog = "Note: This is, and probably always will be, an unfinished work. It may not always produce large, in-scale, clearly labelled diagram made with drawing instruments (compass, ruler, protractor, graph paper, carbon paper)."
)
parser.add_argument("FILENAME",
action = "store",
metavar = "FILE",
help = "The .ggb file to be converted. Obviously,this argument is required."
)
#Non-bool arguments
parser.add_argument('--size', '-s',
action = "store",
dest = "IMG_SIZE",
metavar = "SIZE",
default = "11cm",
help = "The size of the image to be produce. Defaults to 11cm."
)
parser.add_argument('--linescale',
action = "store",
dest = "LINE_SCALE_FACTOR",
metavar = "FACTOR",
default = 2011,
help = "Defines the constant by which lines are extended. The image may break if this is too small, since interesecting lines may return errors. Default is 2011."
)
parser.add_argument('--labelscale',
action = "store",
dest = "LABEL_SCALE_FACTOR",
metavar = "FACTOR",
default = 0.8,
help = "Defines the constant LSF which is used when labelling points. This is 0.4 by default."
)
parser.add_argument('--fontsize',
action = "store",
dest = "FONT_SIZE",
metavar = "SIZE",
default = "10pt",
help = "Default font size, in arbitrary units. Defaults to \'10pt\'."
)
parser.add_argument('--config',
action = "store",
dest = "CONFIG_FILENAME",
metavar = "FILENAME",
default = "",
help = "If specified, uses the specified .cfg files for this diagram only. Defaults to FILENAME.cfg"
)
#Bool arguments
parser.add_argument("--xml",
action = "store_const",
dest = "DO_XML_ONLY",
const = 1,
default = 0,
help = "Prints the XML of the input file and exits. Mainly for debugging"
)
parser.add_argument('--clip',
action = "store_const",
dest = "CLIP_IMG",
const = 1,
default = 0,
help = "If true, clips the image according to the viewport specified in Geogebra. Defaults to false."
)
parser.add_argument('--concise',
action = "store_const",
dest = "CONCISE_MODE",
const = 1,
default = 0,
help = "Turns on concise mode, which shortens the code. By default, this is turned off."
)
parser.add_argument('--cse', '--cse5',
action = "store_const",
dest = "CSE_MODE",
const = 1,
default = 0,
help = "Allows the usage of CSE5 whenever possible."
)
parser.add_argument('--verbose',
action = "store_const",
dest = "CONCISE_MODE",
const = 0,
default = 0,
help = "Turns off concise mode. This is the default."
)
parser.add_argument('--nocse',
action = "store_const",
dest = "CSE_MODE",
const = 1,
default = 0,
help = "Forbids the usage of CSE5 except when necessary. This is the default."
)
parser.add_argument('--csecolors',
action = "store_const",
dest = "CSE_COLORS",
const = 1,
default = 0,
help = "When using CSE5, use the default pathpen and pointpen (blue/red). This is off by default."
)
parser.add_argument('--version',
action = "version",
version = "DRAGON %s, by v_Enhance" %VERSION_NUMBER
)
# }}}
opts = vars(parser.parse_args())
opts['LINE_SCALE_FACTOR'] = float(opts['LINE_SCALE_FACTOR'])
opts['LABEL_SCALE_FACTOR'] = float(opts['LABEL_SCALE_FACTOR'])
if __name__ == "__main__":
#Get the desired file and parse it
FILENAME = opts['FILENAME']
if not "." in FILENAME:
#Extension isn't given, let's assume it was omitted
FILENAME += ".ggb"
elif FILENAME[-1] == ".":
#Last character is ".", add in "ggb"
FILENAME += "ggb"
ggb = zipfile.ZipFile(FILENAME)
xmlFile = ggb.open(GEOGEBRA_XML_LOCATION)
#Read configuration file
config_filename = opts['CONFIG_FILENAME']
if config_filename.strip() == "":
config_filename = FILENAME[:FILENAME.find('.')] + '.cfg'
label_dict = {}
if os.path.isfile(config_filename):
config = ConfigParser.RawConfigParser()
config.optionxform = str # makes names case-sensitive
config.read(config_filename)
var_cfg = config.items("var") if config.has_section("var") else {}
for key, val in var_cfg:
try:
opts[string.upper(key)] = eval(val)
except (NameError, SyntaxError):
opts[string.upper(key)] = val
label_cfg = config.items("label") if config.has_section("label") else {}
for key, val in label_cfg:
label_dict[key] = "lsf * " + val
# Print XML file only, then exit
if opts['DO_XML_ONLY']:
print ''.join(xmlFile.readlines())
exit()
#Convert to tree
ggb_tree = ElementTree()
ggb_tree.parse(xmlFile)
#Retrieve the provided values of the viewport {{{
window_width = float(ggb_tree.find("euclidianView").find("size").attrib["width"])
window_height = float(ggb_tree.find("euclidianView").find("size").attrib["height"])
xzero = float(ggb_tree.find("euclidianView").find("coordSystem").attrib["xZero"])
yzero = float(ggb_tree.find("euclidianView").find("coordSystem").attrib["yZero"])
xscale = float(ggb_tree.find("euclidianView").find("coordSystem").attrib["scale"])
yscale = float(ggb_tree.find("euclidianView").find("coordSystem").attrib["yscale"])
#Compute the viewport coordinates from this information
xmin = -xzero/float(xscale)
xmax = (window_width - xzero)/float(xscale)
ymin = -(window_height -yzero)/float(yscale)
ymax = yzero/float(yscale)
view = (xmin, xmax, ymin, ymax)
# }}}
#Do the construction
construct_tree = ggb_tree.find("construction")
theMainDiagram = AsyDiagram()
doCompileDiagramObjects(construct_tree, theMainDiagram)
if opts['CLIP_IMG'] == 0:
print drawDiagram(theMainDiagram, label_dict, opts=opts).replace(u"\u03B1", "alpha")
else:
print drawDiagram(theMainDiagram, label_dict, view=view, opts=opts).replace(u"\u03B1", "alpha")
| 32.783784 | 232 | 0.697939 |
c2ecefbb6392e5044c1bce089bc79ba2086836e6 | 1,714 | py | Python | ka_model.py | ycjing/AmalgamateGNN.PyTorch | f99a60b374d23002d53385f23da2d540d964c7c2 | [
"MIT"
] | 15 | 2021-06-25T05:02:37.000Z | 2022-03-20T08:34:15.000Z | ka_model.py | ycjing/AmalgamateGNN.PyTorch | f99a60b374d23002d53385f23da2d540d964c7c2 | [
"MIT"
] | 2 | 2022-01-21T05:14:17.000Z | 2022-03-23T09:24:45.000Z | ka_model.py | ycjing/AmalgamateGNN.PyTorch | f99a60b374d23002d53385f23da2d540d964c7c2 | [
"MIT"
] | 1 | 2021-08-18T06:28:58.000Z | 2021-08-18T06:28:58.000Z | import torch
from utils import get_teacher1, get_teacher2, get_student
def collect_model(args, data_info_s, data_info_t1, data_info_t2):
"""This is the function that constructs the dictionary containing the models and the corresponding optimizers
Args:
args (parse_args): parser arguments
data_info_s (dict): the dictionary containing the data information of the student
data_info_t1 (dict): the dictionary containing the data information of teacher #1
data_info_t2 (dict): the dictionary containing the data information of teacher #2
Returns:
dict: model dictionary ([model_name][model/optimizer])
"""
device = torch.device("cpu") if args.gpu < 0 else torch.device("cuda:" + str(args.gpu))
# initialize the two teacher GNNs and the student GNN
s_model = get_student(args, data_info_s)
s_model.to(device)
t1_model = get_teacher1(args, data_info_t1)
t1_model.to(device)
t2_model = get_teacher2(args, data_info_t2)
t2_model.to(device)
# define the corresponding optimizers of the teacher GNNs and the student GNN
params = s_model.parameters()
s_model_optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
t1_model_optimizer = None
t2_model_optimizer = None
# construct the model dictionary containing the models and the corresponding optimizers
model_dict = {}
model_dict['s_model'] = {'model':s_model, 'optimizer':s_model_optimizer}
model_dict['t1_model'] = {'model':t1_model, 'optimizer':t1_model_optimizer}
model_dict['t2_model'] = {'model':t2_model, 'optimizer':t2_model_optimizer}
return model_dict
| 41.804878 | 113 | 0.713536 |
c2ed8ec4755fb9cd0f0e90d7dcf10e9cf020ad38 | 8,759 | py | Python | core/migrations/0001_initial.py | vlafranca/stream_framework_example | 3af636c591d4a278f3720f64118d86aeb8091714 | [
"MIT"
] | 102 | 2015-01-18T15:02:34.000Z | 2021-12-07T17:22:12.000Z | core/migrations/0001_initial.py | vlafranca/stream_framework_example | 3af636c591d4a278f3720f64118d86aeb8091714 | [
"MIT"
] | 11 | 2015-01-04T14:42:11.000Z | 2022-01-13T04:58:10.000Z | core/migrations/0001_initial.py | vlafranca/stream_framework_example | 3af636c591d4a278f3720f64118d86aeb8091714 | [
"MIT"
] | 53 | 2015-01-12T07:11:10.000Z | 2021-07-28T08:40:02.000Z | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| 56.509677 | 187 | 0.553488 |
c2edaf37adb691a52b1dfd785bf639490dc75f3a | 3,309 | py | Python | desicos/abaqus/conecyl/__init__.py | saullocastro/desicos | 922db8ac4fb0fb4d09df18ce2a14011f207f6fa8 | [
"BSD-3-Clause"
] | 1 | 2020-10-22T22:15:24.000Z | 2020-10-22T22:15:24.000Z | desicos/abaqus/conecyl/__init__.py | saullocastro/desicos | 922db8ac4fb0fb4d09df18ce2a14011f207f6fa8 | [
"BSD-3-Clause"
] | 1 | 2020-10-09T12:42:02.000Z | 2020-10-09T12:42:02.000Z | desicos/abaqus/conecyl/__init__.py | saullocastro/desicos | 922db8ac4fb0fb4d09df18ce2a14011f207f6fa8 | [
"BSD-3-Clause"
] | 2 | 2020-07-14T07:45:31.000Z | 2020-12-29T00:22:41.000Z | r"""
===================================================
ConeCyl (:mod:`desicos.abaqus.conecyl`)
===================================================
.. currentmodule:: desicos.abaqus.conecyl
Cone/Cylinder Model
=====================
Figure 1 provides a schematic view of the typical model created using this
module. Two coordinate systems are defined: one rectangular with axes `X_1`,
`X_2`, `X_3` and a cylindrical with axes `R`, `Th`, `Z`.
.. _figure_conecyl:
.. figure:: ../../../figures/modules/abaqus/conecyl/conecyl_model.png
:width: 400
Figure 1: Cone/Cylinder Model
The complexity of the actual model created in Abaqus goes beyond the
simplification above
Boundary Conditions
===================
Based on the coordinate systems shown in Figure 1 the following boundary
condition parameters can be controlled:
- constraint for radial and circumferential displacement (`u_R` and `v`) at
the bottom and top edges
- simply supported or clamped bottom and top edges, consisting in the
rotational constraint along the meridional coordinate, called `\phi_x`.
- use of resin rings as described in :ref:`the next section <resin_rings>`
- the use of distributed or concentrated load at the top edge will be
automatically determined depending on the attributes of the current
:class:`.ConeCyl` object
- application of shims at the top edge as detailed in
:meth:`.ImpConf.add_shim_top_edge`, following this example::
from desicos.abaqus.conecyl import ConeCyl
cc = ConeCyl()
cc.from_DB('castro_2014_c02')
cc.impconf.add_shim(thetadeg, thick, width)
- application of uneven top edges as detailed in
:meth:`.UnevenTopEdge.add_measured_u3s`, following this example::
thetadegs = [0.0, 22.5, 45.0, 67.5, 90.0, 112.5, 135.0, 157.5, 180.0,
202.5, 225.0, 247.5, 270.0, 292.5, 315.0, 337.5, 360.0]
u3s = [0.0762, 0.0508, 0.1270, 0.0000, 0.0000, 0.0762, 0.2794, 0.1778,
0.0000, 0.0000, 0.0762, 0.0000, 0.1016, 0.2032, 0.0381, 0.0000,
0.0762]
cc.impconf.add_measured_u3s_top_edge(thetadegs, u3s)
.. _resin_rings:
Resin Rings
===========
When resin rings are used the actual boundary condition will be determined by
the parameters defining the resin rings (cf. Figure 2), and therefore no clamped conditions
will be applied in the shell edges.
.. figure:: ../../../figures/modules/abaqus/conecyl/resin_rings.png
:width: 400
Figure 2: Resin Rings
Defining resin rings can be done following the example below, where each
attribute is detailed in the :class:`.ConeCyl` class description::
from desicos.abaqus.conecyl import ConeCyl
cc = ConeCyl()
cc.from_DB('castro_2014_c02')
cc.resin_add_BIR = False
cc.resin_add_BOR = True
cc.resin_add_TIR = False
cc.resin_add_TOR = True
cc.resin_E = 2454.5336
cc.resin_nu = 0.3
cc.resin_numel = 3
cc.resin_bot_h = 25.4
cc.resin_top_h = 25.4
cc.resin_bir_w1 = 25.4
cc.resin_bir_w2 = 25.4
cc.resin_bor_w1 = 25.4
cc.resin_bor_w2 = 25.4
cc.resin_tir_w1 = 25.4
cc.resin_tir_w2 = 25.4
cc.resin_tor_w1 = 25.4
cc.resin_tor_w2 = 25.4
The ConeCyl Class
=================
.. automodule:: desicos.abaqus.conecyl.conecyl
:members:
"""
from __future__ import absolute_import
from .conecyl import *
| 30.925234 | 91 | 0.676337 |
c2f022d833125248ec963e921ce8841d3ad389bf | 1,230 | py | Python | rpython/jit/backend/ppc/regname.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 381 | 2018-08-18T03:37:22.000Z | 2022-02-06T23:57:36.000Z | rpython/jit/backend/ppc/regname.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 16 | 2018-09-22T18:12:47.000Z | 2022-02-22T20:03:59.000Z | rpython/jit/backend/ppc/regname.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 55 | 2015-08-16T02:41:30.000Z | 2022-03-20T20:33:35.000Z |
r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, \
r13, r14, r15, r16, r17, r18, r19, r20, r21, r22, \
r23, r24, r25, r26, r27, r28, r29, r30, r31 = map(_R, range(32))
fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7, fr8, fr9, fr10, fr11, fr12, \
fr13, fr14, fr15, fr16, fr17, fr18, fr19, fr20, fr21, fr22, \
fr23, fr24, fr25, fr26, fr27, fr28, fr29, fr30, fr31 = map(_F, range(32))
vr0, vr1, vr2, vr3, vr4, vr5, vr6, vr7, vr8, vr9, vr10, vr11, vr12, vr13, \
vr14, vr15, vr16, vr17, vr18, vr19, vr20, vr21, vr22, vr23, vr24, vr25, \
vr26, vr27, vr28, vr29, vr30, vr31, vr32, vr33, vr34, vr35, vr36, vr37, \
vr38, vr39, vr40, vr41, vr42, vr43, vr44, vr45, vr46, vr47, vr48, \
vr49, vr50, vr51, vr52, vr53, vr54, vr55, vr56, vr57, vr58, vr59, vr60, \
vr61, vr62, vr63 = map(_V, range(64))
crf0, crf1, crf2, crf3, crf4, crf5, crf6, crf7 = range(8)
| 41 | 78 | 0.591057 |
c2f080fa5d08bb1269862977727df7460da362c1 | 445 | py | Python | probs/prob9.py | mattrid93/ProjectEuler | 3e1cf1bad9581e526b37d17e20b5fe8af837c1c6 | [
"MIT"
] | null | null | null | probs/prob9.py | mattrid93/ProjectEuler | 3e1cf1bad9581e526b37d17e20b5fe8af837c1c6 | [
"MIT"
] | null | null | null | probs/prob9.py | mattrid93/ProjectEuler | 3e1cf1bad9581e526b37d17e20b5fe8af837c1c6 | [
"MIT"
] | null | null | null | """Problem 9: Special Pythagorean triplet.
Brute force."""
import unittest
def find_triple(s):
"""Returns abc where a^2+b^2=c^2 with a+b+c=s."""
a, b, c = 998, 1, 1
while b < 999:
if a**2 + b**2 == c**2:
return a*b*c
if a == 1:
c += 1
b = 1
a = 1000 - b - c
else:
b += 1
a -= 1
if __name__ == "__main__":
print(find_triple(1000))
| 20.227273 | 53 | 0.440449 |
c2f186277f31c8ec4b6c844878711153981d3676 | 920 | py | Python | common/utilities/message_utilities.py | uk-gov-mirror/nhsconnect.integration-adaptor-mhs | bf090a17659da738401667997a10695d8b75b94b | [
"Apache-2.0"
] | 15 | 2019-08-06T16:08:12.000Z | 2021-05-24T13:14:39.000Z | common/utilities/message_utilities.py | uk-gov-mirror/nhsconnect.integration-adaptor-mhs | bf090a17659da738401667997a10695d8b75b94b | [
"Apache-2.0"
] | 75 | 2019-04-25T13:59:02.000Z | 2021-09-15T06:05:36.000Z | common/utilities/message_utilities.py | uk-gov-mirror/nhsconnect.integration-adaptor-mhs | bf090a17659da738401667997a10695d8b75b94b | [
"Apache-2.0"
] | 7 | 2019-11-12T15:26:34.000Z | 2021-04-11T07:23:56.000Z | import uuid
import datetime
import utilities.file_utilities as file_utilities
EBXML_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
def get_uuid():
"""Generate a UUID suitable for sending in messages to Spine.
:return: A string representation of the UUID.
"""
return str(uuid.uuid4()).upper()
def get_timestamp():
"""Generate a timestamp in a format suitable for sending in ebXML messages.
:return: A string representation of the timestamp
"""
current_utc_time = datetime.datetime.utcnow()
return current_utc_time.strftime(EBXML_TIMESTAMP_FORMAT)
| 27.878788 | 97 | 0.738043 |
c2f1a3b7771e7491e2a518b145a7443aeabf7658 | 81 | py | Python | corehq/util/tests/__init__.py | bglar/commcare-hq | 972129fc26864c08c7bef07874bd2a7218550bff | [
"BSD-3-Clause"
] | 1 | 2017-02-10T03:14:51.000Z | 2017-02-10T03:14:51.000Z | corehq/util/tests/__init__.py | bglar/commcare-hq | 972129fc26864c08c7bef07874bd2a7218550bff | [
"BSD-3-Clause"
] | null | null | null | corehq/util/tests/__init__.py | bglar/commcare-hq | 972129fc26864c08c7bef07874bd2a7218550bff | [
"BSD-3-Clause"
] | null | null | null | from test_couch import *
from test_toggle import *
from test_quickcache import *
| 20.25 | 29 | 0.814815 |
c2f1d876ec603c325d5fd840f0aed40ac0a43ab5 | 998 | py | Python | cleanup.py | DuncteBot/tf2-transformer-chatbot | 0e364da0537717de025314d40c5b0423891f9dc4 | [
"MIT"
] | null | null | null | cleanup.py | DuncteBot/tf2-transformer-chatbot | 0e364da0537717de025314d40c5b0423891f9dc4 | [
"MIT"
] | null | null | null | cleanup.py | DuncteBot/tf2-transformer-chatbot | 0e364da0537717de025314d40c5b0423891f9dc4 | [
"MIT"
] | null | null | null | import sqlite3
# Maintenance script: purge unusable comment pairs from every per-timeframe
# SQLite database produced by the chatbot data-preparation step.
from helpers import get_db_path, get_timeframes
# NOTE(review): print_tb is imported but never used in this script.
from traceback import print_tb

timeframes = get_timeframes()
print(timeframes)
for timeframe in timeframes:
    # One database file per timeframe.
    with sqlite3.connect(get_db_path(timeframe)) as connection:
        try:
            c = connection.cursor()
            print("Cleanin up!")
            c.execute('BEGIN TRANSACTION')
            # Remove values that we don't want
            sql = "DELETE FROM parent_reply WHERE parent IS NULL OR parent == 'False' OR parent == '0'"
            c.execute(sql)
            connection.commit()
            # c.execute("VACUUM")
            # connection.commit()
            # Report how many usable parent/reply pairs remain after cleanup.
            sql = "SELECT COUNT(comment_id) FROM parent_reply"
            c.execute(sql)
            result = c.fetchone()
            if result is not None:
                res = result[0]
                print(f'Cleanup done, paired rows: {res}')
        except Exception as e:
            # Best-effort: report the failure and continue with the next DB.
            print('Something broke')
            print(e)
print('Done')
| 28.514286 | 103 | 0.576152 |
c2f341062556abc813aaebd4a88c681a262c4eb7 | 8,059 | py | Python | visualization/plots.py | yc14600/beta3_IRT | 7c3d87b2f04fc9ad7bf59db5d60166df5ca47dc6 | [
"MIT"
] | 7 | 2019-06-26T15:23:14.000Z | 2021-12-28T14:16:24.000Z | visualization/plots.py | yc14600/beta3_IRT | 7c3d87b2f04fc9ad7bf59db5d60166df5ca47dc6 | [
"MIT"
] | null | null | null | visualization/plots.py | yc14600/beta3_IRT | 7c3d87b2f04fc9ad7bf59db5d60166df5ca47dc6 | [
"MIT"
] | 4 | 2019-08-29T19:07:35.000Z | 2021-12-28T19:22:11.000Z | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
import seaborn as sns
import pandas as pd
import glob
import re
from itertools import combinations
import matplotlib
matplotlib.rcParams['text.usetex'] = True
| 36.631818 | 173 | 0.618191 |
c2f46b42f5c6546a78b91fb417eafbf47943fbf3 | 10,125 | py | Python | kitchensink/data/catalog.py | hhuuggoo/kitchensink | 1f81050fec7eace52e0b4e1b47851b649a4e4d33 | [
"BSD-3-Clause"
] | 2 | 2015-03-17T05:02:42.000Z | 2016-04-07T15:02:28.000Z | kitchensink/data/catalog.py | hhuuggoo/kitchensink | 1f81050fec7eace52e0b4e1b47851b649a4e4d33 | [
"BSD-3-Clause"
] | null | null | null | kitchensink/data/catalog.py | hhuuggoo/kitchensink | 1f81050fec7eace52e0b4e1b47851b649a4e4d33 | [
"BSD-3-Clause"
] | 1 | 2015-10-07T21:50:44.000Z | 2015-10-07T21:50:44.000Z | from os.path import join, exists, isdir, relpath, abspath, dirname
import datetime as dt
import posixpath
import logging
import tempfile
from os import stat, makedirs, remove
import random
import uuid
from cStringIO import StringIO
import time
from six import string_types
try:
import gevent
except:
gevent = None
from ..clients.http import Client
from .. import settings
from ..serialization import deserializer, serializer
from ..errors import KitchenSinkError
from ..utils.pathutils import urlsplit, dirsplit, urljoin
from .funcs import get_info_bulk, hosts
logger = logging.getLogger(__name__)
| 35.904255 | 95 | 0.599407 |
c2f4c885d5dce7315988e496badae91eba3b1efc | 982 | py | Python | nadine/migrations/0034_stripebillingprofile.py | alvienzo720/Dep_Nadine | b23688aa87ba3cfe138f9b243eed3f50a74e1486 | [
"Apache-2.0"
] | null | null | null | nadine/migrations/0034_stripebillingprofile.py | alvienzo720/Dep_Nadine | b23688aa87ba3cfe138f9b243eed3f50a74e1486 | [
"Apache-2.0"
] | null | null | null | nadine/migrations/0034_stripebillingprofile.py | alvienzo720/Dep_Nadine | b23688aa87ba3cfe138f9b243eed3f50a74e1486 | [
"Apache-2.0"
] | 1 | 2020-02-24T08:23:45.000Z | 2020-02-24T08:23:45.000Z | # Generated by Django 2.0.3 on 2018-04-06 18:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 37.769231 | 139 | 0.665988 |
c2f563aefb1bf16c5c0e403fa207c966d043272b | 5,212 | py | Python | compete.py | ChristopherKlix/sorting_algorithms | 5586393fb8e66d41c29c3a1a1a100fe323b6e1b6 | [
"MIT"
] | null | null | null | compete.py | ChristopherKlix/sorting_algorithms | 5586393fb8e66d41c29c3a1a1a100fe323b6e1b6 | [
"MIT"
] | null | null | null | compete.py | ChristopherKlix/sorting_algorithms | 5586393fb8e66d41c29c3a1a1a100fe323b6e1b6 | [
"MIT"
] | null | null | null | from generate import generate
from datetime import datetime
from time import sleep
# sorting algorithms
# merge_sort helper functions
def split(full_list):
'''
get length of list
initialize both halves
'''
list_len = len(full_list)
left_half, right_half = list(), list()
'''
iterate over each item in full_list
and append to left half until i is greater than length / 2
'''
for i in range(list_len):
if i < list_len / 2:
left_half.append(full_list[i])
else:
right_half.append(full_list[i])
return left_half, right_half
# print function
# main function
main()
| 25.54902 | 95 | 0.585955 |
c2f6e7fc941847d3304a0d5ca32647ac0c95ed2a | 251 | py | Python | controller/index.py | YunYinORG/social | 5020e980cacd8eca39fccc36faabc584f3c3e15f | [
"Apache-2.0"
] | 4 | 2015-12-20T14:57:57.000Z | 2021-01-23T12:54:20.000Z | controller/index.py | YunYinORG/social | 5020e980cacd8eca39fccc36faabc584f3c3e15f | [
"Apache-2.0"
] | 1 | 2016-03-13T15:19:02.000Z | 2016-03-18T03:11:18.000Z | controller/index.py | YunYinORG/social | 5020e980cacd8eca39fccc36faabc584f3c3e15f | [
"Apache-2.0"
] | 4 | 2015-12-21T02:26:29.000Z | 2016-09-03T02:57:07.000Z | #!/usr/bin/env python
# coding=utf-8
import web
import lib.user as user
"""[done]"""
| 16.733333 | 59 | 0.59761 |
c2f74385f195f0884b6d65f78882d41fbb6267cb | 19,448 | py | Python | models/transformer/transformer.py | lsgai/selene | ad23904cad2a5a292732ff350e7689c0b9e511f4 | [
"BSD-3-Clause-Clear"
] | null | null | null | models/transformer/transformer.py | lsgai/selene | ad23904cad2a5a292732ff350e7689c0b9e511f4 | [
"BSD-3-Clause-Clear"
] | null | null | null | models/transformer/transformer.py | lsgai/selene | ad23904cad2a5a292732ff350e7689c0b9e511f4 | [
"BSD-3-Clause-Clear"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import numpy as np
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from pytorch_transformers import WEIGHTS_NAME, CONFIG_NAME, BertConfig
from pytorch_transformers.modeling_bert import *
from pytorch_transformers.tokenization_bert import BertTokenizer
import pytorch_transformers.optimization
def criterion():
    """Return the training loss: element-wise binary cross-entropy."""
    loss_fn = nn.BCELoss()
    return loss_fn
def get_optimizer(lr):
    """Return the optimizer class and its constructor kwargs.

    The caller instantiates the optimizer itself, so this hands back a
    (class, kwargs) pair rather than a ready-made instance.

    :param lr: learning rate forwarded to AdamW.
    :return: ``(AdamW, {"lr": lr, "weight_decay": 1e-6})``
    """
    # AdamW (Adam with decoupled weight decay) from pytorch_transformers.
    # Earlier experiments, kept for reference:
    #   plain Adam:  (torch.optim.Adam, {"lr": lr, "weight_decay": 1e-6})
    #   DeepSEA SGD: (torch.optim.SGD, {"lr": lr, "weight_decay": 1e-6, "momentum": 0.9})
    # A grouped no-weight-decay scheme for bias/LayerNorm params (as in
    # https://github.com/datduong/BertGOAnnotation) could be applied instead.
    kwargs = {"lr": lr, "weight_decay": 1e-6}
    return (pytorch_transformers.optimization.AdamW, kwargs)
c2f98c67de6fff06f026a352c43e196aef39bfda | 1,166 | py | Python | setup.py | jackschultz/dbactor | 57ca01bb257d92b32d6003b56cec69e930b6ea73 | [
"MIT"
] | 2 | 2021-11-18T09:35:42.000Z | 2021-11-18T14:46:30.000Z | setup.py | jackschultz/dbactor | 57ca01bb257d92b32d6003b56cec69e930b6ea73 | [
"MIT"
] | null | null | null | setup.py | jackschultz/dbactor | 57ca01bb257d92b32d6003b56cec69e930b6ea73 | [
"MIT"
] | null | null | null | from setuptools import setup
__version__ = '0.0.3'
REQUIRES = ['psycopg2-binary']
EXTRAS_REQUIRE = {
'sqlalchemy': ['sqlalchemy'],
'jinjasql': ['jinjasql'],
'pandas': ['jinjasql', 'pandas'],
}
extras_lists = [vals for k, vals in EXTRAS_REQUIRE.items()]
# flattening the values in EXTRAS_REQUIRE from popular stack overflow question 952914
all_extras_require = list(set([item for sublist in extras_lists for item in sublist]))
EXTRAS_REQUIRE['all'] = all_extras_require
TESTS_REQUIRE = REQUIRES + all_extras_require + ['pytest', 'testing.postgresql']
setup_dict = dict(name='dbactor',
version=__version__,
description='DBActor: ORM helper and alternative',
long_description=open('README.md').read(),
url='http://github.com/jackschultz/dbactor',
author='Jack Schultz',
author_email='jackschultz23@gmail.com',
license='MIT',
install_requires=REQUIRES,
extras_require=EXTRAS_REQUIRE,
tests_require=TESTS_REQUIRE,
packages=['dbactor'])
setup(**setup_dict)
| 36.4375 | 86 | 0.628645 |
c2fa5f8b735606c6ff049842620445e8616d3b41 | 603 | py | Python | figur/__init__.py | severinsimmler/figur | d42cf6d150cc1b8effe1b4e7093bafd8975377b3 | [
"MIT"
] | 1 | 2019-04-29T20:29:15.000Z | 2019-04-29T20:29:15.000Z | figur/__init__.py | severinsimmler/figur | d42cf6d150cc1b8effe1b4e7093bafd8975377b3 | [
"MIT"
] | 2 | 2019-03-13T14:30:08.000Z | 2019-05-28T15:41:27.000Z | figur/__init__.py | severinsimmler/figur | d42cf6d150cc1b8effe1b4e7093bafd8975377b3 | [
"MIT"
] | null | null | null | """
Figurenerkennung for German literary texts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
`figur` is very easy to use:
```
>>> import figur
>>> text = "Der Gärtner entfernte sich eilig, und Eduard folgte bald."
>>> figur.tag(text)
SentenceId Token Tag
0 0 Der _
1 0 Gärtner AppTdfW
2 0 entfernte _
3 0 sich Pron
4 0 eilig, _
5 0 und _
6 0 Eduard Core
7 0 folgte _
8 0 bald. _
```
"""
from .api import tag
| 24.12 | 70 | 0.41791 |
c2fb06c89af3c0d869e1710b20eb4d1e629dd002 | 725 | py | Python | CV0101EN-09.02-frames_to_video.py | reddyprasade/Computer-Vision-with-Python | 8eebec61f0fdacb05e122460d6845a32ae506c8f | [
"Apache-2.0"
] | null | null | null | CV0101EN-09.02-frames_to_video.py | reddyprasade/Computer-Vision-with-Python | 8eebec61f0fdacb05e122460d6845a32ae506c8f | [
"Apache-2.0"
] | null | null | null | CV0101EN-09.02-frames_to_video.py | reddyprasade/Computer-Vision-with-Python | 8eebec61f0fdacb05e122460d6845a32ae506c8f | [
"Apache-2.0"
] | null | null | null | import cv2
import numpy as np
import os
inputpath = 'folder path'
outpath = 'video file path/video.mp4'
fps = 29
frames_to_video(inputpath,outpath,fps)
| 29 | 75 | 0.66069 |
c2febe7880974ca6e91553584ed0bba9eac9b426 | 5,303 | py | Python | pbt/estimator_worker.py | Octavian-ai/mac-graph | 3ef978e8a6f79f2dcc46783d34f01934aabf7f19 | [
"Unlicense"
] | 116 | 2018-07-11T13:19:56.000Z | 2021-07-26T17:22:44.000Z | pbt/estimator_worker.py | Octavian-ai/mac-graph | 3ef978e8a6f79f2dcc46783d34f01934aabf7f19 | [
"Unlicense"
] | 1 | 2019-02-11T02:25:02.000Z | 2019-02-11T17:05:19.000Z | pbt/estimator_worker.py | Octavian-ai/mac-graph | 3ef978e8a6f79f2dcc46783d34f01934aabf7f19 | [
"Unlicense"
] | 21 | 2018-10-11T23:03:22.000Z | 2021-07-14T22:42:08.000Z |
import tensorflow as tf
import numpy as np
import traceback
import os.path
from .worker import Worker
from .param import *
from .params import *
import logging
logger = logging.getLogger(__name__)
def resize_and_load(var, val, sess):
    """Load numpy value ``val`` into TF variable ``var``, resizing along axis 1.

    If the shapes disagree on axis 1, ``val`` is truncated (when too wide)
    or reflect-padded (when too narrow) before loading.  Other axes are
    assumed to already match.

    Args:
        var: the tf.Variable to assign into.
        val: numpy array holding the candidate value.
        sess: the tf.Session used for the assignment.
    """
    o_shape = var.get_shape().as_list()
    i_shape = list(val.shape)
    if o_shape != i_shape:
        resize_dim = 1  # may not always hold true, assumption for now
        delta = o_shape[resize_dim] - i_shape[resize_dim]
        if delta != 0:
            # BUG FIX: was "deta" -- a NameError whenever a resize was logged.
            tf.logging.info("reshape var {} by {}".format(var.name, delta))
        if delta < 0:
            # Incoming value is wider than the variable: truncate.
            val = val[:, :o_shape[1]]
        elif delta > 0:
            # Incoming value is narrower: reflect-pad up to the target width.
            val = np.pad(val, ((0, 0), (0, delta)), 'reflect')
    # BUG FIX: was "v.load(val, self.sess)" -- neither "v" nor "self" exists
    # in this free function; load into the given variable via the session.
    var.load(val, sess)
| 24.896714 | 103 | 0.666227 |
6c01243ea6bcaf63004fe1fe3e588e8eca1e226b | 4,064 | py | Python | tracer/main.py | LzVv123456/Deep-Reinforced-Tree-Traversal | 8e117590c8cd51c9fc9c033232658876160fa638 | [
"MIT"
] | 20 | 2021-07-08T08:33:27.000Z | 2022-01-14T03:27:35.000Z | tracer/main.py | abcxubu/Deep-Reinforced-Tree-Traversal | 8e117590c8cd51c9fc9c033232658876160fa638 | [
"MIT"
] | 1 | 2021-10-01T12:39:11.000Z | 2021-10-01T13:19:43.000Z | tracer/main.py | abcxubu/Deep-Reinforced-Tree-Traversal | 8e117590c8cd51c9fc9c033232658876160fa638 | [
"MIT"
] | 3 | 2021-07-08T07:34:48.000Z | 2022-01-10T11:41:59.000Z | import os
import glob
import yaml
import torch
import argparse
from addict import Dict
from dataset import *
from init import *
from utilities import *
from train import *
if __name__ == '__main__':
args = parse_args()
main(args) | 37.62963 | 137 | 0.615404 |
6c0605d359e470dbd90558cdc9d674b331db2e65 | 181 | py | Python | tests/acceptance/__init__.py | datphan/moviecrab | e3bcff700b994388f1ded68d268a960b10d57a81 | [
"BSD-3-Clause"
] | null | null | null | tests/acceptance/__init__.py | datphan/moviecrab | e3bcff700b994388f1ded68d268a960b10d57a81 | [
"BSD-3-Clause"
] | null | null | null | tests/acceptance/__init__.py | datphan/moviecrab | e3bcff700b994388f1ded68d268a960b10d57a81 | [
"BSD-3-Clause"
] | null | null | null | """acceptance tests"""
import unittest
from nose.plugins.attrib import attr
| 15.083333 | 44 | 0.729282 |
6c0675ff607912b34920445802ae59f9d31371c8 | 4,222 | py | Python | test/functional/bsv-protoconf.py | bxlkm1/yulecoin | 3605faf2ff2e3c7bd381414613fc5c0234ad2936 | [
"OML"
] | 8 | 2019-08-02T02:49:42.000Z | 2022-01-17T15:51:48.000Z | test/functional/bsv-protoconf.py | bxlkm1/yulecoin | 3605faf2ff2e3c7bd381414613fc5c0234ad2936 | [
"OML"
] | null | null | null | test/functional/bsv-protoconf.py | bxlkm1/yulecoin | 3605faf2ff2e3c7bd381414613fc5c0234ad2936 | [
"OML"
] | 4 | 2019-08-02T02:50:44.000Z | 2021-05-28T03:21:38.000Z | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin SV developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time, math
from test_framework.blocktools import create_block, create_coinbase
if __name__ == '__main__':
BsvProtoconfTest().main()
| 43.979167 | 143 | 0.709853 |
6c079626dca82782593d5d2bd1f2fb59f4206ddc | 1,022 | py | Python | apps/goods/migrations/0063_auto_20200108_1555.py | lianxiaopang/camel-store-api | b8021250bf3d8cf7adc566deebdba55225148316 | [
"Apache-2.0"
] | 12 | 2020-02-01T01:52:01.000Z | 2021-04-28T15:06:43.000Z | apps/goods/migrations/0063_auto_20200108_1555.py | lianxiaopang/camel-store-api | b8021250bf3d8cf7adc566deebdba55225148316 | [
"Apache-2.0"
] | 5 | 2020-02-06T08:07:58.000Z | 2020-06-02T13:03:45.000Z | apps/goods/migrations/0063_auto_20200108_1555.py | lianxiaopang/camel-store-api | b8021250bf3d8cf7adc566deebdba55225148316 | [
"Apache-2.0"
] | 11 | 2020-02-03T13:07:46.000Z | 2020-11-29T01:44:06.000Z | # Generated by Django 2.1.8 on 2020-01-08 07:55
from django.db import migrations, models
| 29.2 | 103 | 0.589041 |
6c09b1ff084d1e9df9670c57209d4a2a65e97d3c | 9,838 | py | Python | actor_critic/trainer.py | zamlz/dlcampjeju2018-I2A-cube | 85ae7a2084ca490ea685ff3d30e82720fb58c0ea | [
"MIT"
] | 14 | 2018-07-19T03:56:45.000Z | 2019-10-01T12:09:01.000Z | actor_critic/trainer.py | zamlz/dlcampjeju2018-I2A-cube | 85ae7a2084ca490ea685ff3d30e82720fb58c0ea | [
"MIT"
] | null | null | null | actor_critic/trainer.py | zamlz/dlcampjeju2018-I2A-cube | 85ae7a2084ca490ea685ff3d30e82720fb58c0ea | [
"MIT"
] | null | null | null |
import gym
import numpy as np
import tensorflow as tf
import time
from actor_critic.policy import A2CBuilder
from actor_critic.util import discount_with_dones, cat_entropy, fix_tf_name
from common.model import NetworkBase
from common.multiprocessing_env import SubprocVecEnv
from tqdm import tqdm
# The function that trains the a2c model
def train(env_fn = None,
          spectrum = False,
          a2c_arch = None,
          nenvs = 16,
          nsteps = 100,
          max_iters = 1e6,
          gamma = 0.99,
          pg_coeff = 1.0,
          vf_coeff = 0.5,
          ent_coeff = 0.01,
          max_grad_norm = 0.5,
          lr = 7e-4,
          alpha = 0.99,
          epsilon = 1e-5,
          log_interval = 100,
          summarize = True,
          load_path = None,
          log_path = None,
          cpu_cores = 1):
    """Train an A2C actor-critic over ``nenvs`` parallel copies of ``env_fn``.

    Each iteration rolls all environments forward ``nsteps`` steps, computes
    discounted returns (bootstrapping from the critic when an episode has not
    finished), and performs one synchronous update via ``ActorCritic.train``.
    Checkpoints are saved to ``log_path`` every ``log_interval`` iterations
    and once more at the end.

    Args:
        env_fn: zero-argument callable building one environment instance.
        spectrum: if True, switch the vectorized envs into spectrum mode.
        a2c_arch: network architecture spec forwarded to ActorCritic.
        nenvs: number of parallel environments.
        nsteps: rollout length per update.
        max_iters: total number of update iterations.
        gamma: discount factor for returns.
        pg_coeff, vf_coeff, ent_coeff: policy / value / entropy loss weights.
        max_grad_norm: gradient clipping threshold.
        lr, alpha, epsilon: optimizer hyper-parameters.
        log_interval: iterations between checkpoint saves.
        summarize: if True, also write TensorBoard summaries.
        load_path: optional checkpoint to restore before training.
        log_path: directory for checkpoints and summaries.
        cpu_cores: intra/inter-op thread parallelism for the TF session.
    """
    # Construct the vectorized parallel environments
    envs = [ env_fn for _ in range(nenvs) ]
    envs = SubprocVecEnv(envs)
    # Set some random seeds for the environment
    envs.seed(0)
    if spectrum:
        envs.spectrum()
    ob_space = envs.observation_space.shape
    nw, nh, nc = ob_space
    ac_space = envs.action_space
    obs = envs.reset()
    tf_config = tf.ConfigProto(
        inter_op_parallelism_threads=cpu_cores,
        intra_op_parallelism_threads=cpu_cores )
    tf_config.gpu_options.allow_growth = True
    with tf.Session(config=tf_config) as sess:
        actor_critic = ActorCritic(sess, a2c_arch, ob_space, ac_space,
                                   pg_coeff, vf_coeff, ent_coeff, max_grad_norm,
                                   lr, alpha, epsilon, summarize)
        load_count = 0
        if load_path is not None:
            actor_critic.load(load_path)
            print('Loaded a2c')
        summary_op = tf.summary.merge_all()
        writer = tf.summary.FileWriter(log_path, graph=sess.graph)
        sess.run(tf.global_variables_initializer())
        batch_ob_shape = (-1, nw, nh, nc)
        dones = [False for _ in range(nenvs)]
        # Per-env reward bookkeeping: episode_rewards accumulates the running
        # episode, final_rewards keeps the last finished episode's return.
        episode_rewards = np.zeros((nenvs, ))
        final_rewards = np.zeros((nenvs, ))
        print('a2c Training Start!')
        print('Model will be saved on intervals of %i' % (log_interval))
        for i in tqdm(range(load_count + 1, int(max_iters) + 1), ascii=True, desc='ActorCritic'):
            # Create the minibatch lists
            mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_depth = [], [], [], [], [], []
            total_reward = 0
            for n in range(nsteps):
                # Get the actions and values from the actor critic, we don't need neglogp
                actions, values, neglogp = actor_critic.act(obs)
                mb_obs.append(np.copy(obs))
                mb_actions.append(actions)
                mb_values.append(values)
                mb_dones.append(dones)
                obs, rewards, dones, info = envs.step(actions)
                total_reward += np.sum(rewards)
                episode_rewards += rewards
                # masks is 0 where an episode just finished, 1 elsewhere.
                masks = 1 - np.array(dones)
                final_rewards *= masks
                final_rewards += (1 - masks) * episode_rewards
                episode_rewards *= masks
                mb_rewards.append(rewards)
                mb_depth.append(np.array([ info_item['scramble_depth'] for info_item in info ]))
            # One extra dones entry so the list holds nsteps+1 flags per env.
            mb_dones.append(dones)
            # Convert batch steps to batch rollouts
            mb_obs = np.asarray(mb_obs, dtype=np.float32).swapaxes(1,0).reshape(batch_ob_shape)
            mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1,0)
            mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1,0)
            mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1,0)
            mb_dones = np.asarray(mb_dones, dtype=np.float32).swapaxes(1,0)
            mb_depth = np.asarray(mb_depth, dtype=np.int32).swapaxes(1,0)
            # Split the nsteps+1 flags into masks (before each step) and
            # dones (after each step).
            mb_masks = mb_dones[:, :-1]
            mb_dones = mb_dones[:, 1:]
            last_values = actor_critic.critique(obs).tolist()
            # discounting
            for n, (rewards, d, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
                rewards = rewards.tolist()
                d = d.tolist()
                if d[-1] == 0:
                    # Rollout ended mid-episode: bootstrap with the critic's value.
                    rewards = discount_with_dones(rewards+[value], d+[0], gamma)[:-1]
                else:
                    rewards = discount_with_dones(rewards, d, gamma)
                mb_rewards[n] = rewards
            # Flatten the whole minibatch
            mb_rewards = mb_rewards.flatten()
            mb_actions = mb_actions.flatten()
            mb_values = mb_values.flatten()
            mb_masks = mb_masks.flatten()
            mb_depth = mb_depth.flatten()
            # Save the information to tensorboard
            if summarize:
                loss, policy_loss, value_loss, policy_ent, mrew, mdp, _, summary = actor_critic.train(
                    mb_obs, mb_rewards, mb_masks, mb_actions, mb_values, mb_depth, i, summary_op)
                writer.add_summary(summary, i)
            else:
                loss, policy_loss, value_loss, policy_ent, mrew, mdp, _ = actor_critic.train(
                    mb_obs, mb_rewards, mb_masks, mb_actions, mb_values, mb_depth, i)
            if i % log_interval == 0:
                actor_critic.save(log_path, i)
        actor_critic.save(log_path, 'final')
        print('a2c model is finished training')
| 37.838462 | 102 | 0.593617 |
6c0bbff19246f88fe29603b2519f950e3178d9cc | 23,504 | py | Python | src/model_ode.py | fkhiro/kws-ode | 5751f9b665511908b26e77f6ea5a97bf87823aab | [
"MIT"
] | 5 | 2020-08-12T07:24:12.000Z | 2022-02-23T14:04:16.000Z | src/model_ode.py | fkhiro/kws-ode | 5751f9b665511908b26e77f6ea5a97bf87823aab | [
"MIT"
] | null | null | null | src/model_ode.py | fkhiro/kws-ode | 5751f9b665511908b26e77f6ea5a97bf87823aab | [
"MIT"
] | 1 | 2020-09-03T07:28:19.000Z | 2020-09-03T07:28:19.000Z | from enum import Enum
import hashlib
import math
import os
import random
import re
from chainmap import ChainMap
from torch.autograd import Variable
import librosa
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from .manage_audio import AudioPreprocessor
from torchdiffeq import odeint_adjoint as odeint
import pickle
def find_model(conf):
    """Map a configuration name (or ConfigType member) to its model class.

    Returns SpeechOdeTCNNModel or SpeechOdeTDNNModel depending on the
    "ode-tcnn" / "ode-tdnn" prefix, or None when neither matches.
    """
    name = conf.value if isinstance(conf, ConfigType) else conf
    if name.startswith("ode-tcnn"):
        print("ODE-TCNN")
        return SpeechOdeTCNNModel
    if name.startswith("ode-tdnn"):
        print("ODE-TDNN")
        return SpeechOdeTDNNModel
    print("model is not specified.")
    return None
def find_config(conf):
    """Look up the hyper-parameter dict for a configuration name or ConfigType."""
    key = conf.value if isinstance(conf, ConfigType) else conf
    return _configs[key]
def truncated_normal(tensor, std_dev=0.01):
    """Fill ``tensor`` in place with N(0, std_dev) samples truncated to +/- 2*std_dev.

    Entries falling outside two standard deviations are resampled (around
    zero) until every element is in range, mimicking TF's truncated normal
    initializer.
    """
    tensor.zero_()
    tensor.normal_(std=std_dev)
    cutoff = 2 * std_dev
    while torch.sum(torch.abs(tensor) > cutoff) > 0:
        mask = torch.abs(tensor) > cutoff
        centers = tensor[mask]       # copy of the offending entries
        centers.zero_()              # resample around zero
        tensor[mask] = torch.normal(centers, std=std_dev)
def complement_run_bn(data, max_t, t):
    """Linearly interpolate a missing BN statistic at time step ``t``.

    ``data`` is a list indexed by time step in which recorded entries are
    tensors and unrecorded slots hold placeholders.  The nearest recorded
    tensors below and above ``t`` are blended linearly; when only one side
    exists it is returned as-is, and when neither exists the process aborts.
    """
    # Scan downwards for the closest recorded entry strictly below t.
    tl = t - 1
    while tl >= 0 and type(data[tl]) != torch.Tensor:
        tl -= 1
    low = data[tl] if tl >= 0 else None
    # Scan upwards for the closest recorded entry strictly above t.
    th = t + 1
    while th < max_t and type(data[th]) != torch.Tensor:
        th += 1
    high = data[th] if th < max_t else None
    if type(low) != torch.Tensor:
        if type(high) != torch.Tensor:
            print("Complement failed ({} {}) ...".format(tl, th))
            exit()
        print("low is not found, and thus high ({}) is used in stead.".format(th))
        return high
    if type(high) != torch.Tensor:
        print("high is not found, and thus low ({}) is used in stead.".format(tl))
        return low
    return low + (high-low)*(float(t-tl)/float(th-tl))
def complement_simple(norm, bn_statistics, tm):
    """Install the recorded BN running statistics for scaled time ``tm``.

    ``tm`` is mapped onto a discrete slot via round(tm * 100); indices past
    the table are clamped to the last slot.  Slots never recorded are filled
    by linear interpolation over their recorded neighbours and cached before
    being copied onto ``norm``.
    """
    mean_t = bn_statistics.mean_t
    var_t = bn_statistics.var_t
    t = round(tm.item() * 100)
    if t >= len(mean_t):
        # Clamp out-of-range indices to the last recorded slot.
        print("t is too large ({} >= {})".format(t, len(mean_t)))
        t = len(mean_t) - 1
    if type(mean_t[t]) != torch.Tensor:
        # Nothing recorded at this step: interpolate and cache it.
        print("complement at t = {}".format(t))
        max_t = len(mean_t)
        mean_t[t] = complement_run_bn(mean_t, max_t, t)
        var_t[t] = complement_run_bn(var_t, max_t, t)
    norm.running_mean = mean_t[t]
    norm.running_var = var_t[t]
def calc_poly_coeff(data):
    """Fit per-feature quadratics to the recorded BN statistics.

    ``data`` is a list indexed by time step; tensor entries hold a
    per-feature statistic recorded at time index / 100, other entries are
    placeholders and are skipped.

    Returns:
        A (3, num_features) tensor of np.polyfit coefficients (highest
        degree first), cast to the dtype/device of the recorded tensors.
    """
    dtype = None
    device = None
    xs = []
    ys = []
    for i, entry in enumerate(data):
        if type(entry) == torch.Tensor:
            dtype = entry.dtype
            device = entry.device
            xs.append(i / 100.0)
            ys.append(entry.cpu().numpy())
    # Stack once at the end instead of np.vstack inside the loop (O(n^2)).
    x = np.array(xs)
    y = np.vstack(ys)
    coef = np.polyfit(x, y, 2)
    # (Removed a dead R^2 diagnostic: y_pred / y_bar / r2 were computed here
    # but never used or returned.)
    t_coef = torch.from_numpy(coef)
    if type(device) == torch.device:
        t_coef = t_coef.to(device)
    if type(dtype) == torch.dtype:
        t_coef = t_coef.to(dtype)
    return t_coef
def complement_polyfit2(norm, bn_statistics, t):
    """Set BN running statistics at time ``t`` from a cached quadratic fit.

    The polynomial coefficients are computed lazily on first use and cached
    on ``bn_statistics``.
    """
    if type(bn_statistics.poly_coeff_mean) != torch.Tensor:
        # Lazy one-time quadratic fit over all recorded statistics.
        print("Calculating polynomial coefficients...")
        bn_statistics.poly_coeff_mean = calc_poly_coeff(bn_statistics.mean_t)
        bn_statistics.poly_coeff_var = calc_poly_coeff(bn_statistics.var_t)
    # Evaluate the degree-2 polynomials at t (coefficients highest degree first).
    norm.running_mean = bn_statistics.poly_coeff_mean[0]*(t**2) + bn_statistics.poly_coeff_mean[1]*t + bn_statistics.poly_coeff_mean[2]
    norm.running_var = bn_statistics.poly_coeff_var[0]*(t**2) + bn_statistics.poly_coeff_var[1]*t + bn_statistics.poly_coeff_var[2]
    # NOTE(review): complement_simple unconditionally overwrites running_mean
    # and running_var from the recorded tables, clobbering the polynomial
    # values assigned just above -- confirm whether this is intended.
    complement_simple(norm, bn_statistics, t)
def collect_statistics(norm, mean_t, var_t, count, tm):
    """Accumulate the BN running statistics of ``norm`` into the time tables.

    ``tm`` is a scalar tensor; it is discretized to ``t = round(tm * 100)``.
    ``mean_t[t]`` / ``var_t[t]`` hold the running *sums* of the statistics seen
    for that time step and ``count[t]`` the number of contributions (so the
    mean can be formed later).  Out-of-range time steps are ignored with a
    warning.
    """
    t = round(tm.item() * 100)
    if t >= len(mean_t):
        # Message fixed to ">=": t == len(mean_t) is already out of range.
        print("list index out of range: {} >= {}".format(t, len(mean_t)))
        return
    if type(mean_t[t]) != torch.Tensor:
        # First contribution for this slot: allocate accumulators matching the
        # device/dtype of the layer's buffers (plain torch.zeros would always
        # live on the CPU and break when the model runs on GPU).
        mean_t[t] = torch.zeros_like(norm.running_mean)
        var_t[t] = torch.zeros_like(norm.running_var)
    mean_t[t] += norm.running_mean
    var_t[t] += norm.running_var
    count[t] += 1
def run_norm(x, t, norm, bn_statistics, training, bForward, complement_statistics_func=complement_simple):
    """Apply the normalization layer ``norm`` with per-time-step statistics.

    During a training forward pass the running buffers are reset so the batch
    statistics of this step can be recorded into ``bn_statistics`` afterwards.
    During training with ``bForward`` false (presumably the reverse/adjoint
    pass -- confirm against the caller) the previously recorded statistics for
    time ``t`` are restored via ``complement_statistics_func`` first.  Outside
    training the layer is applied as-is.
    """
    recording = training and bForward
    if recording:
        # Fresh buffers: zero mean, unit variance, zero batch counter.
        norm.running_mean.zero_()
        norm.running_var.fill_(1)
        norm.num_batches_tracked.zero_()
    elif training:
        # Restore (or interpolate) the statistics recorded for this time step.
        complement_statistics_func(norm, bn_statistics, t)
        norm.num_batches_tracked.zero_()
    out = norm(x)
    if recording:
        collect_statistics(norm, bn_statistics.mean_t, bn_statistics.var_t, bn_statistics.count, t)
    return out
bn_complement_func = { "complement": complement_simple, "polyfit2": complement_polyfit2 }
# TDNN is based on the following implementation:
# https://github.com/cvqluu/TDNN
# Hyper-parameter presets keyed by model config name; consumed wherever the
# ODE TCNN/TDNN models are constructed (ConfigType is defined elsewhere in
# this module).
_configs = {
    ConfigType.ODE_TCNN.value: dict(n_labels=12, n_feature_maps=20, res_pool=(4, 1), use_dilation=False),
    ConfigType.ODE_TDNN.value: dict(n_labels=12, n_feature_maps=32, sub_sample_window=3, sub_sample_stride=3, tdnn_window=3),
}
| 35.185629 | 200 | 0.592282 |
6c0d6af23938ca6fed73a619af2c2521273b4c43 | 7,642 | py | Python | tests/test_snapshot.py | arkadiam/virt-backup | b3e8703ae3ab0f792f5d68913ecf5e7270acea46 | [
"BSD-2-Clause-FreeBSD"
] | 54 | 2019-06-21T23:29:02.000Z | 2022-03-28T14:30:44.000Z | tests/test_snapshot.py | arkadiam/virt-backup | b3e8703ae3ab0f792f5d68913ecf5e7270acea46 | [
"BSD-2-Clause-FreeBSD"
] | 28 | 2019-08-18T01:01:25.000Z | 2021-07-14T17:39:42.000Z | tests/test_snapshot.py | arkadiam/virt-backup | b3e8703ae3ab0f792f5d68913ecf5e7270acea46 | [
"BSD-2-Clause-FreeBSD"
] | 12 | 2019-07-12T10:16:03.000Z | 2022-03-09T05:33:30.000Z | import json
import os
import arrow
import libvirt
import pytest
from virt_backup.backups import DomBackup
from virt_backup.domains import get_xml_block_of_disk
from virt_backup.backups.snapshot import DomExtSnapshot, DomExtSnapshotCallbackRegistrer
from virt_backup.exceptions import DiskNotFoundError, SnapshotNotStarted
from helper.virt_backup import MockSnapshot
| 36.390476 | 88 | 0.634258 |
6c0dd11197119baf2f7c1d5775874b54734c6eff | 554 | py | Python | assets/tuned/daemon/tuned/profiles/functions/function_regex_search_ternary.py | sjug/cluster-node-tuning-operator | 8654d1c9558d0d5ef03d14373c877ebc737f9736 | [
"Apache-2.0"
] | 53 | 2018-11-13T07:02:03.000Z | 2022-03-25T00:00:04.000Z | assets/tuned/daemon/tuned/profiles/functions/function_regex_search_ternary.py | sjug/cluster-node-tuning-operator | 8654d1c9558d0d5ef03d14373c877ebc737f9736 | [
"Apache-2.0"
] | 324 | 2018-10-02T14:18:54.000Z | 2022-03-31T23:47:33.000Z | assets/tuned/daemon/tuned/profiles/functions/function_regex_search_ternary.py | sjug/cluster-node-tuning-operator | 8654d1c9558d0d5ef03d14373c877ebc737f9736 | [
"Apache-2.0"
] | 54 | 2018-10-01T16:55:09.000Z | 2022-03-28T13:56:53.000Z | import re
from . import base
| 25.181818 | 74 | 0.725632 |
6c0ebaf57bf48ef4c5911547b83ac2a6a45fa5e9 | 905 | py | Python | craft_ai/__init__.py | craft-ai/craft-ai-client-python | 3d8b3d9a49c0c70964deaeb9645130dd54f9a0b3 | [
"BSD-3-Clause"
] | 14 | 2016-08-26T07:06:57.000Z | 2020-09-22T07:41:21.000Z | craft_ai/__init__.py | craft-ai/craft-ai-client-python | 3d8b3d9a49c0c70964deaeb9645130dd54f9a0b3 | [
"BSD-3-Clause"
] | 94 | 2016-08-02T14:07:59.000Z | 2021-10-06T11:50:52.000Z | craft_ai/__init__.py | craft-ai/craft-ai-client-python | 3d8b3d9a49c0c70964deaeb9645130dd54f9a0b3 | [
"BSD-3-Clause"
] | 8 | 2017-02-07T12:05:57.000Z | 2021-10-14T09:45:30.000Z | __version__ = "2.4.3"
from . import errors
from .client import Client
from .interpreter import Interpreter
from .time import Time
from .formatters import format_property, format_decision_rules
from .reducer import reduce_decision_rules
from .tree_utils import (
extract_decision_paths_from_tree,
extract_decision_path_neighbors,
extract_output_tree,
)
import nest_asyncio
# this is to patch asyncio to allow a nested asyncio loop
# nested asyncio loop allow the client to use websocket call inside jupyter
# and other webbrowser based IDE
nest_asyncio.apply()
# Defining what will be imported when doing `from craft_ai import *`
__all__ = [
"Client",
"errors",
"Interpreter",
"Time",
"format_property",
"format_decision_rules",
"reduce_decision_rules",
"extract_output_tree",
"extract_decision_paths_from_tree",
"extract_decision_path_neighbors",
]
| 25.857143 | 75 | 0.764641 |
6c0f4bbb43f54fa43e4df577a49de96ebd810921 | 969 | py | Python | bitshares/aio/block.py | silverchen0402/python-bitshares | aafbcf5cd09e7bca99dd156fd60b9df8ba508630 | [
"MIT"
] | 102 | 2018-04-08T23:05:00.000Z | 2022-03-31T10:10:03.000Z | bitshares/aio/block.py | silverchen0402/python-bitshares | aafbcf5cd09e7bca99dd156fd60b9df8ba508630 | [
"MIT"
] | 246 | 2018-04-03T12:35:49.000Z | 2022-02-28T10:44:28.000Z | bitshares/aio/block.py | silverchen0402/python-bitshares | aafbcf5cd09e7bca99dd156fd60b9df8ba508630 | [
"MIT"
] | 128 | 2018-04-14T01:39:12.000Z | 2022-03-25T08:56:51.000Z | # -*- coding: utf-8 -*-
from .instance import BlockchainInstance
from ..block import Block as SyncBlock, BlockHeader as SyncBlockHeader
from graphenecommon.aio.block import (
Block as GrapheneBlock,
BlockHeader as GrapheneBlockHeader,
)
| 25.5 | 75 | 0.721362 |
6c0ff50a90211a83518224c4a9e7cb96da0fbca0 | 1,015 | py | Python | DongbinNa/17/pt.py | wonnerky/coteMaster | 360e491e6342c1ee42ff49750b838a2ead865613 | [
"Apache-2.0"
] | null | null | null | DongbinNa/17/pt.py | wonnerky/coteMaster | 360e491e6342c1ee42ff49750b838a2ead865613 | [
"Apache-2.0"
] | null | null | null | DongbinNa/17/pt.py | wonnerky/coteMaster | 360e491e6342c1ee42ff49750b838a2ead865613 | [
"Apache-2.0"
] | null | null | null | # NxN , ,
# (for) . matrix stop.
#
n, k = map(int, input().split())
matrix = []
for _ in range(n):
matrix.append(list(map(int, input().split())))
s, x, y = map(int, input().split())
#
dx = [-1, 1, 0, 0] #
dy = [0, 0, -1, 1] #
# dict. initial
virus = {}
for i in range(k):
virus[i+1] = []
for i in range(n):
for j in range(n):
if matrix[i][j] != 0:
virus[matrix[i][j]].append((i,j))
for _ in range(s):
for idx in sorted(virus.keys()):
for cord in virus[idx]:
move(cord)
# answer. initial cord = (1,1)
print(matrix[x-1][y-1]) | 23.604651 | 57 | 0.519212 |
6c11ff715822a78e65219cb047fa20aeb18248ac | 7,843 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/pavelib/i18n.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/pavelib/i18n.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/pavelib/i18n.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2022-02-06T10:48:15.000Z | 2022-02-06T10:48:15.000Z | """
Internationalization tasks
"""
import re
import subprocess
import sys
from path import Path as path
from paver.easy import cmdopts, needs, sh, task
from .utils.cmd import django_cmd
from .utils.envs import Env
from .utils.timer import timed
try:
from pygments.console import colorize
except ImportError:
colorize = lambda color, text: text
DEFAULT_SETTINGS = Env.DEVSTACK_SETTINGS
def find_release_resources():
"""
Validate the .tx/config file for release files, returning the resource names.
For working with release files, the .tx/config file should have exactly
two resources defined named "release-*". Check that this is true. If
there's a problem, print messages about it.
Returns a list of resource names, or raises ValueError if .tx/config
doesn't have two resources.
"""
# An entry in .tx/config for a release will look like this:
#
# [edx-platform.release-dogwood]
# file_filter = conf/locale/<lang>/LC_MESSAGES/django.po
# source_file = conf/locale/en/LC_MESSAGES/django.po
# source_lang = en
# type = PO
#
# [edx-platform.release-dogwood-js]
# file_filter = conf/locale/<lang>/LC_MESSAGES/djangojs.po
# source_file = conf/locale/en/LC_MESSAGES/djangojs.po
# source_lang = en
# type = PO
rx_release = r"^\[([\w-]+\.release-[\w-]+)\]$"
with open(".tx/config") as tx_config:
resources = re.findall(rx_release, tx_config.read(), re.MULTILINE)
if len(resources) == 2:
return resources
if not resources: # lint-amnesty, pylint: disable=no-else-raise
raise ValueError("You need two release-* resources defined to use this command.")
else:
msg = "Strange Transifex config! Found these release-* resources:\n" + "\n".join(resources)
raise ValueError(msg)
| 23.694864 | 99 | 0.651409 |
6c1262e89c4802e8d7e590c6c84ac0e62c5a4169 | 2,020 | py | Python | sympy/parsing/autolev/test-examples/ruletest9.py | Michal-Gagala/sympy | 3cc756c2af73b5506102abaeefd1b654e286e2c8 | [
"MIT"
] | null | null | null | sympy/parsing/autolev/test-examples/ruletest9.py | Michal-Gagala/sympy | 3cc756c2af73b5506102abaeefd1b654e286e2c8 | [
"MIT"
] | null | null | null | sympy/parsing/autolev/test-examples/ruletest9.py | Michal-Gagala/sympy | 3cc756c2af73b5506102abaeefd1b654e286e2c8 | [
"MIT"
] | null | null | null | import sympy.physics.mechanics as _me
import sympy as _sm
import math as m
import numpy as _np
frame_n = _me.ReferenceFrame('n')
frame_a = _me.ReferenceFrame('a')
a = 0
d = _me.inertia(frame_a, 1, 1, 1)
point_po1 = _me.Point('po1')
point_po2 = _me.Point('po2')
particle_p1 = _me.Particle('p1', _me.Point('p1_pt'), _sm.Symbol('m'))
particle_p2 = _me.Particle('p2', _me.Point('p2_pt'), _sm.Symbol('m'))
c1, c2, c3 = _me.dynamicsymbols('c1 c2 c3')
c1_d, c2_d, c3_d = _me.dynamicsymbols('c1_ c2_ c3_', 1)
body_r_cm = _me.Point('r_cm')
body_r_cm.set_vel(frame_n, 0)
body_r_f = _me.ReferenceFrame('r_f')
body_r = _me.RigidBody('r', body_r_cm, body_r_f, _sm.symbols('m'), (_me.outer(body_r_f.x,body_r_f.x),body_r_cm))
point_po2.set_pos(particle_p1.point, c1*frame_a.x)
v = 2*point_po2.pos_from(particle_p1.point)+c2*frame_a.y
frame_a.set_ang_vel(frame_n, c3*frame_a.z)
v = 2*frame_a.ang_vel_in(frame_n)+c2*frame_a.y
body_r_f.set_ang_vel(frame_n, c3*frame_a.z)
v = 2*body_r_f.ang_vel_in(frame_n)+c2*frame_a.y
frame_a.set_ang_acc(frame_n, (frame_a.ang_vel_in(frame_n)).dt(frame_a))
v = 2*frame_a.ang_acc_in(frame_n)+c2*frame_a.y
particle_p1.point.set_vel(frame_a, c1*frame_a.x+c3*frame_a.y)
body_r_cm.set_acc(frame_n, c2*frame_a.y)
v_a = _me.cross(body_r_cm.acc(frame_n), particle_p1.point.vel(frame_a))
x_b_c = v_a
x_b_d = 2*x_b_c
a_b_c_d_e = x_b_d*2
a_b_c = 2*c1*c2*c3
a_b_c += 2*c1
a_b_c = 3*c1
q1, q2, u1, u2 = _me.dynamicsymbols('q1 q2 u1 u2')
q1_d, q2_d, u1_d, u2_d = _me.dynamicsymbols('q1_ q2_ u1_ u2_', 1)
x, y = _me.dynamicsymbols('x y')
x_d, y_d = _me.dynamicsymbols('x_ y_', 1)
x_dd, y_dd = _me.dynamicsymbols('x_ y_', 2)
yy = _me.dynamicsymbols('yy')
yy = x*x_d**2+1
m = _sm.Matrix([[0]])
m[0] = 2*x
m = m.row_insert(m.shape[0], _sm.Matrix([[0]]))
m[m.shape[0]-1] = 2*y
a = 2*m[0]
m = _sm.Matrix([1,2,3,4,5,6,7,8,9]).reshape(3, 3)
m[0,1] = 5
a = m[0, 1]*2
force_ro = q1*frame_n.x
torque_a = q2*frame_n.z
force_ro = q1*frame_n.x + q2*frame_n.y
f = force_ro*2
| 36.071429 | 113 | 0.688119 |
6c137c12cabff00b49311cbc274302f573ef641a | 3,830 | py | Python | tests/test_asm_stats.py | hall-lab/tenx-gcp | f204e60cc5efb543a524df9cdbd44d0a8c590673 | [
"MIT"
] | null | null | null | tests/test_asm_stats.py | hall-lab/tenx-gcp | f204e60cc5efb543a524df9cdbd44d0a8c590673 | [
"MIT"
] | null | null | null | tests/test_asm_stats.py | hall-lab/tenx-gcp | f204e60cc5efb543a524df9cdbd44d0a8c590673 | [
"MIT"
] | null | null | null | import filecmp, os, tempfile, unittest
from click.testing import CliRunner
from tenx.asm_stats import asm_stats_cmd, get_contig_lengths, get_scaffold_and_contig_lengths, get_stats, length_buckets
# -- AsmStatsTest
if __name__ == '__main__':
unittest.main(verbosity=2)
#-- __main__
| 40.315789 | 120 | 0.629243 |
6c1416cedaf37318b018aae01bda9b0f41f3ed30 | 3,435 | py | Python | utils.py | zexihuang/raft-blockchain | a2f7365e10f5a5334c59bac6b551648bae04e2e8 | [
"Apache-2.0"
] | 1 | 2021-06-04T03:05:06.000Z | 2021-06-04T03:05:06.000Z | utils.py | zexihuang/raft-blockchain | a2f7365e10f5a5334c59bac6b551648bae04e2e8 | [
"Apache-2.0"
] | null | null | null | utils.py | zexihuang/raft-blockchain | a2f7365e10f5a5334c59bac6b551648bae04e2e8 | [
"Apache-2.0"
] | null | null | null | import socket
import pickle
import random
import string
import time
import hashlib
import os
BUFFER_SIZE = 65536
| 32.714286 | 115 | 0.616885 |
6c14181d8879fcc2609ab9415e7fe2cdbb328098 | 3,850 | py | Python | api/data_refinery_api/test/test_dataset_stats.py | AlexsLemonade/refinebio | 52f44947f902adedaccf270d5f9dbd56ab47e40a | [
"BSD-3-Clause"
] | 106 | 2018-03-05T16:24:47.000Z | 2022-03-19T19:12:25.000Z | api/data_refinery_api/test/test_dataset_stats.py | AlexsLemonade/refinebio | 52f44947f902adedaccf270d5f9dbd56ab47e40a | [
"BSD-3-Clause"
] | 1,494 | 2018-02-27T17:02:21.000Z | 2022-03-24T15:10:30.000Z | api/data_refinery_api/test/test_dataset_stats.py | AlexsLemonade/refinebio | 52f44947f902adedaccf270d5f9dbd56ab47e40a | [
"BSD-3-Clause"
] | 15 | 2019-02-03T01:34:59.000Z | 2022-03-29T01:59:13.000Z | import json
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from data_refinery_api.test.test_api_general import API_VERSION
from data_refinery_common.models import (
Experiment,
ExperimentOrganismAssociation,
ExperimentSampleAssociation,
Organism,
Sample,
)
| 32.627119 | 100 | 0.647013 |
6c15bfba4b8c0e66ef69eb440d0dc33cc1bed1d7 | 4,804 | py | Python | hetzner_fix_report/hetzner_fix_report.py | flxai/hetzner-fix-report | ab484a3463ed0efc6f14ebd7b45d1b2c1281fb0b | [
"MIT"
] | 2 | 2020-06-20T21:50:38.000Z | 2020-06-22T08:37:11.000Z | hetzner_fix_report/hetzner_fix_report.py | flxai/hetzner-fix-report | ab484a3463ed0efc6f14ebd7b45d1b2c1281fb0b | [
"MIT"
] | 4 | 2020-07-01T21:59:08.000Z | 2020-07-05T11:33:59.000Z | hetzner_fix_report/hetzner_fix_report.py | flxai/hetzner-fix-report | ab484a3463ed0efc6f14ebd7b45d1b2c1281fb0b | [
"MIT"
] | null | null | null | import pdftotext
import sys
import numpy as np
import pandas as pd
import regex as re
def get_server_type(server_type_str):
"""Check wether string is contained"""
server_type_list = server_type_str.split(' ')
if len(server_type_list) < 2:
if server_type_str == 'Backup':
return 'backup'
else:
return 'unknown'
return server_type_list[1].split('-')[0]
def regex_match(server_type_str, regex, ret_id=1):
"""Applies a regular expression and returns a match """
m = re.match(regex, server_type_str)
return np.NaN if m is None else m.group(ret_id)
def regex_search(server_type_str, regex, ret_id=1):
"""Applies a regular expression and returns a match """
m = re.search(regex, server_type_str)
return np.NaN if m is None else m.group(ret_id)
| 32.90411 | 118 | 0.59159 |
6c16620f0a89c9e70bfae221558f9859765dc5b0 | 3,705 | py | Python | src/random_forest.py | rrozema12/Data-Mining-Final-Project | 4848f3daed4b75879b626c5dc460e8dbd70ae861 | [
"MIT"
] | 1 | 2018-02-04T01:10:20.000Z | 2018-02-04T01:10:20.000Z | src/random_forest.py | rrozema12/Data-Mining-Final-Project | 4848f3daed4b75879b626c5dc460e8dbd70ae861 | [
"MIT"
] | null | null | null | src/random_forest.py | rrozema12/Data-Mining-Final-Project | 4848f3daed4b75879b626c5dc460e8dbd70ae861 | [
"MIT"
] | null | null | null | # random_forest.py
# does the random forest calcutlaions
import decision_tree
import partition
import heapq
import table_utils
import classifier_util
from homework_util import strat_folds
def run_a_table(table, indexes, class_index, N, M, F):
    """ Takes a table, splits it into a training and test set. Creates a
        random forest for the training set. Then tests the forest off of
        the test set
    :param table: a table of values
    :param indexes: The indexes to partition on
    :param class_index: The index of the label to predict
    :param N: Number of trees to produce
    :param M: Number of the best trees to choose
    :param F: Subset size of random attributes
    :return: Returns a list of tuples of the actual and predicted label,
        plus the training and test sets:
        [(actual1, predicted1), (actual2, predicted2), ...], training, test
    """
    domains = table_utils.get_domains(table, indexes)
    # Two folds form the training set, the third is held out for testing.
    folds = strat_folds(table, class_index, 3)
    training = folds[0]
    training.extend(folds[1])
    test = folds[2]
    # BUG FIX: the forest must be grown from the *training* set; it was
    # previously built from `test`, leaking the evaluation data into training.
    forest = _random_forest(training, indexes, class_index, domains, N, M, F)
    return [(row[class_index], predict_label(forest, row)) for row in test], \
        training, test
def _random_forest(table, indexes, class_index, att_domains, N, M, F):
    """ Generates a random forest classifier for a given table
    :param table: a table
    :param indexes: a list of indexes to partition on
    :param class_index: the index of the class label to predict
    :param N: Number of trees to produce
    :param M: Number of the best trees to choose
    :param F: Subset size of random attributes
    :return: A list of (accuracy, tree) tuples for the M best trees:
        [(accuracy1, tree1), ... , (accuracyM, treeM)]
    """
    # We store the accuracies and trees in a min-heap: lower accuracy sits
    # at the root, so the worst trees can be popped off first.
    # NOTE(review): heapq compares whole tuples -- if two trees tie on
    # accuracy, Python 3 falls back to comparing the tree objects themselves
    # and raises TypeError; consider an (accuracy, index, tree) tiebreaker.
    priority_queue = [] # see: https://docs.python.org/3/library/heapq.html#basic-examples
    attributes = indexes
    # Uses a training and remainder set from bootstrapping to create each tree
    bags = partition.bagging(table, N)
    for bag_set in bags:
        # Grow a tree on the bootstrap sample, score it on the out-of-bag rows.
        tree = decision_tree.tdidt_RF(bag_set[0], attributes, att_domains, class_index, F)
        acc = _accuracy_for_tree(tree,class_index, bag_set[1])
        heapq.heappush(priority_queue, (acc, tree))
    # Discard the N - M lowest-accuracy trees (heappop removes the smallest),
    # leaving the M best in the heap.
    for i in range(N - M):
        heapq.heappop(priority_queue)
    # The surviving heap *is* the forest we return.
    return priority_queue
def predict_label(forest, instance):
    """ predicts the label of an instance given a forest using weighted
        voting with accuracies
    :param forest: a list of (accuracy, tree) tuples in the form returned by
        _random_forest()
    :param instance: a row to have a class label predicted
    :return: the class label with the highest accuracy-weighted vote
        (0 if the forest casts no positive-weight votes)
    """
    # Tally each predicted label, weighted by the accuracy of the tree that
    # produced it.
    labels = {}
    for acc, tree in forest:
        prediction = decision_tree.get_label(tree, instance)
        labels[prediction] = labels.get(prediction, 0) + acc
    # Pick the label with the largest accuracy total.
    # BUG FIX: highest_value was never updated inside this loop, so the
    # function previously returned the *last* label with a positive total
    # rather than the one with the maximum total.
    highest_value = 0
    highest_label = 0
    for current_label, value in labels.items():
        if value > highest_value:
            highest_value = value
            highest_label = current_label
    return highest_label
6c1838a55b525f71872539fcbbf11141e0709474 | 5,682 | py | Python | model_converter/test_freeze_pb.py | zhangmifigo/MobileDeepPill | 270538494488767a7fb36e237b72212be5cf4f45 | [
"MIT"
] | 4 | 2020-03-23T20:27:24.000Z | 2021-08-12T20:23:53.000Z | model_converter/test_freeze_pb.py | zhangmifigo/MobileDeepPill | 270538494488767a7fb36e237b72212be5cf4f45 | [
"MIT"
] | null | null | null | model_converter/test_freeze_pb.py | zhangmifigo/MobileDeepPill | 270538494488767a7fb36e237b72212be5cf4f45 | [
"MIT"
] | 3 | 2019-10-14T07:56:05.000Z | 2020-03-23T20:27:27.000Z | import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.contrib.slim.python.slim.nets import alexnet
from tensorflow.python.ops import random_ops
from tensorflow.python.tools import optimize_for_inference_lib
from tensorflow.python.framework import dtypes
from tensorflow.core.framework import graph_pb2
import tensorflow.contrib.slim as slim
import network
import os
batch_size = 128
height = width = 224
num_classes = 1000
if __name__ == '__main__':
tf.app.run() | 36.896104 | 129 | 0.707673 |
6c184b2174364e3e55c83631e166cd7d528e99e1 | 60 | py | Python | app/comic/container_exec/__init__.py | EYRA-Benchmark/grand-challenge.org | 8264c19fa1a30ffdb717d765e2aa2e6ceccaab17 | [
"Apache-2.0"
] | 2 | 2019-06-28T09:23:55.000Z | 2020-03-18T05:52:13.000Z | app/comic/container_exec/__init__.py | EYRA-Benchmark/comic | 8264c19fa1a30ffdb717d765e2aa2e6ceccaab17 | [
"Apache-2.0"
] | 112 | 2019-08-12T15:13:27.000Z | 2022-03-21T15:49:40.000Z | app/comic/container_exec/__init__.py | EYRA-Benchmark/grand-challenge.org | 8264c19fa1a30ffdb717d765e2aa2e6ceccaab17 | [
"Apache-2.0"
] | 1 | 2020-03-19T14:19:57.000Z | 2020-03-19T14:19:57.000Z | default_app_config = "comic.container_exec.apps.CoreConfig"
| 30 | 59 | 0.85 |
6c186e241fa2559c5801595eef7a0db1d8af608a | 18,320 | py | Python | run.py | RafaelCenzano/Corona-Virus-Email-Updater | 2d5bc071ab21fe8df358689862a019d400c73cd5 | [
"MIT"
] | 3 | 2020-03-10T13:52:37.000Z | 2020-03-15T17:19:39.000Z | run.py | RafaelCenzano/Corona-Virus-Email-Updater | 2d5bc071ab21fe8df358689862a019d400c73cd5 | [
"MIT"
] | null | null | null | run.py | RafaelCenzano/Corona-Virus-Email-Updater | 2d5bc071ab21fe8df358689862a019d400c73cd5 | [
"MIT"
] | 2 | 2020-03-10T13:52:29.000Z | 2022-01-13T19:58:28.000Z | import requests
import json
import os
from bs4 import BeautifulSoup as bs
from secret import *
from smtplib import SMTP
from datetime import datetime
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
if __name__ == '__main__':
scraper()
| 41.922197 | 177 | 0.593177 |
6c19d7164f4d767fbe5d4431bf900ccb1c4a00d6 | 6,494 | py | Python | Machine_Learning/Feature_Tutorials/04-tensorflow-ai-optimizer/files/application/app_mt.py | dankernel/Vitis-Tutorials | 558791a2350327ea275917db890797a895d0fac2 | [
"Apache-2.0"
] | null | null | null | Machine_Learning/Feature_Tutorials/04-tensorflow-ai-optimizer/files/application/app_mt.py | dankernel/Vitis-Tutorials | 558791a2350327ea275917db890797a895d0fac2 | [
"Apache-2.0"
] | null | null | null | Machine_Learning/Feature_Tutorials/04-tensorflow-ai-optimizer/files/application/app_mt.py | dankernel/Vitis-Tutorials | 558791a2350327ea275917db890797a895d0fac2 | [
"Apache-2.0"
] | null | null | null | '''
Copyright 2020 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Author: Mark Harvey, Xilinx Inc
'''
from ctypes import *
import cv2
import numpy as np
import runner
import os
import xir.graph
import pathlib
import xir.subgraph
import threading
import time
import sys
import argparse
divider = '-----------------------------------------------'
def preprocess_fn(image_path):
'''
Image pre-processing.
Rearranges from BGR to RGB then normalizes to range 0:1
input arg: path of image file
return: numpy array
'''
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = image/255.0
return image
def get_subgraph (g):
'''
interrogate model file to return subgraphs
Returns a list of subgraph objects
'''
sub = []
root = g.get_root_subgraph()
sub = [ s for s in root.children
if s.metadata.get_attr_str ("device") == "DPU"]
return sub
def runDPU(id,start,dpu,img):
'''
DPU execution - called in thread from app function.
Arguments:
id: integer to identify thread - not currently used
start: Start index for writes to out_q.
dpu: runner
img: list of pre-processed images to pass into DPU
'''
''' input/output tensor information
get_input_tensors() and get_output_tensors() return lists of tensors objects.
The lists will contain one element for each input or output of the network.
The shape of each tensor object is (batch,height,width,channels)
For Edge DPU, batchsize is always 1.
'''
inputTensors = dpu.get_input_tensors()
outputTensors = dpu.get_output_tensors()
#print('Input tensor :',inputTensors[0].name,inputTensors[0].shape)
#print('Output Tensor:',outputTensors[0].name,outputTensors[0].shape)
outputSize = outputTensors[0].dims[1]*outputTensors[0].dims[2]*outputTensors[0].dims[3]
shapeIn = inputTensors[0].shape
shapeOut = outputTensors[0].shape
for i in range(len(img)):
'''prepare lists of np arrays to hold input & output tensors '''
inputData = []
inputData.append(img[i].reshape(shapeIn))
outputData = []
outputData.append(np.empty((shapeOut), dtype = np.float32, order = 'C'))
'''start DPU, wait until it finishes '''
job_id = dpu.execute_async(inputData,outputData)
dpu.wait(job_id)
''' output data shape is currently (batch,height,width,channels)
so flatten it into (batch,height*width*channels)'''
outputData[0] = outputData[0].reshape(1, outputSize)
''' store results in global lists '''
out_q[start+i] = outputData[0][0]
return
def app(image_dir,threads,model):
'''
main application function
'''
listimage=os.listdir(image_dir)
runTotal = len(listimage[:2500])
print('Found',len(listimage),'images - processing',runTotal,'of them')
''' global list that all threads can write results to '''
global out_q
out_q = [None] * runTotal
''' get a list of subgraphs from the compiled model file '''
g = xir.graph.Graph.deserialize(pathlib.Path(model))
subgraphs = get_subgraph (g)
print('Found',len(subgraphs),'subgraphs in',model)
''' preprocess images '''
print('Pre-processing',runTotal,'images...')
img = []
for i in range(runTotal):
path = os.path.join(image_dir,listimage[i])
img.append(preprocess_fn(path))
''' create dpu runners
Each thread receives a dpu runner.
Each dpu runner executes a subgraph
'''
all_dpu_runners = []
for i in range(threads):
all_dpu_runners.append(runner.Runner(subgraphs[0], "run"))
''' create threads
Each thread receives a section of the preprocessed images list as input and
will write results into the corresponding section of the global out_q list.
'''
threadAll = []
start=0
for i in range(threads):
if (i==threads-1):
end = len(img)
else:
end = start+(len(img)//threads)
in_q = img[start:end]
t1 = threading.Thread(target=runDPU, args=(i,start,all_dpu_runners[i], in_q))
threadAll.append(t1)
start=end
'''run threads '''
print('Starting',threads,'threads...')
time1 = time.time()
for x in threadAll:
x.start()
for x in threadAll:
x.join()
time2 = time.time()
threads_time = time2 - time1
''' post-processing '''
classes = ['dog','cat']
correct = 0
wrong = 0
for i in range(len(out_q)):
argmax = np.argmax((out_q[i]))
prediction = classes[argmax]
ground_truth, _ = listimage[i].split('.',1)
if (ground_truth==prediction):
correct += 1
else:
wrong += 1
accuracy = correct/len(out_q)
print (divider)
print('Correct:',correct,'Wrong:',wrong,'Accuracy:', accuracy)
print (divider)
fps = float(runTotal / threads_time)
print('FPS: %.2f, total frames: %.0f, total time: %.3f seconds' %(fps,runTotal,threads_time))
print (divider)
return
# only used if script is run as 'main' from command line
if __name__ == '__main__':
main()
| 29.788991 | 172 | 0.647213 |
6c1b6ee9b212e08f8648b06179fcaaa04a11d3e2 | 545 | py | Python | photos/migrations/0008_image_post.py | adriankiprono/imstragram_project | c4935ad745987fb53b62d116c3bc2faff20927ce | [
"MIT"
] | null | null | null | photos/migrations/0008_image_post.py | adriankiprono/imstragram_project | c4935ad745987fb53b62d116c3bc2faff20927ce | [
"MIT"
] | 4 | 2020-06-06T00:31:39.000Z | 2022-03-12T00:10:52.000Z | photos/migrations/0008_image_post.py | adriankiprono/instragram_project | c4935ad745987fb53b62d116c3bc2faff20927ce | [
"MIT"
] | null | null | null | # Generated by Django 3.0 on 2020-01-07 07:56
import datetime
from django.db import migrations
from django.utils.timezone import utc
import tinymce.models
| 23.695652 | 113 | 0.638532 |