hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
42e77bb6f8a615aa18b12b83385ee014877a332f | 340 | py | Python | fdp/__init__.py | cffbots/fairdatapoint | 6142b31408b5746d1a7e9f59e61735b7ad8bfde9 | [
"Apache-2.0"
] | 9 | 2020-03-27T12:58:51.000Z | 2021-01-21T16:22:46.000Z | fdp/__init__.py | MaastrichtU-IDS/fairdatapoint | f9f38903a629acbdb74a6a20014ac424cc3d3206 | [
"Apache-2.0"
] | 26 | 2016-05-26T22:22:34.000Z | 2020-02-13T07:12:37.000Z | fdp/__init__.py | MaastrichtU-IDS/fairdatapoint | f9f38903a629acbdb74a6a20014ac424cc3d3206 | [
"Apache-2.0"
] | 4 | 2020-06-09T18:37:33.000Z | 2020-12-16T08:05:01.000Z | # -*- coding: utf-8 -*-
import logging
from .__version__ import __version__

# Attach a NullHandler so that applications which have not configured logging
# do not get "No handlers could be found" warnings from this library
# (standard practice for library packages).
logging.getLogger(__name__).addHandler(logging.NullHandler())

# Package metadata used by packaging tools and documentation.
__author__ = "Rajaram Kaliyaperumal, Arnold Kuzniar, Cunliang Geng, Carlos Martinez-Ortiz"
__email__ = 'c.martinez@esciencecenter.nl'
__status__ = 'beta'
__license__ = 'Apache License, Version 2.0'
| 26.153846 | 90 | 0.770588 |
42e78c22b92189ac4df049c1a2d85684f40079f9 | 358 | py | Python | my_site/blog/admin.py | sidharth-lucy/Blog | 33afd31faf5a1da44e050b13e3364b419f108c7f | [
"MIT"
] | null | null | null | my_site/blog/admin.py | sidharth-lucy/Blog | 33afd31faf5a1da44e050b13e3364b419f108c7f | [
"MIT"
] | null | null | null | my_site/blog/admin.py | sidharth-lucy/Blog | 33afd31faf5a1da44e050b13e3364b419f108c7f | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Post,Author,Tag
# Register your models here.
# Register the blog models with the Django admin site.
# The original code passed ``PostAdmin`` to register(), but no such
# ModelAdmin was defined or imported anywhere, which raised NameError as
# soon as this module was imported. Register Post with the default
# ModelAdmin until a custom one is actually written.
admin.site.register(Post)
admin.site.register(Author)
admin.site.register(Tag)
| 23.866667 | 43 | 0.72905 |
42e8e15830841aa965ec225fd7e1715fe1c14fdd | 60,795 | py | Python | fluids/flow_meter.py | rddaz2013/fluids | acde6a6edc2110c152c59341574739b24a2f1bad | [
"MIT"
] | null | null | null | fluids/flow_meter.py | rddaz2013/fluids | acde6a6edc2110c152c59341574739b24a2f1bad | [
"MIT"
] | null | null | null | fluids/flow_meter.py | rddaz2013/fluids | acde6a6edc2110c152c59341574739b24a2f1bad | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2018 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
from math import cos, sin, tan, atan, pi, radians, exp, acos, log10
import numpy as np
from fluids.friction import friction_factor
from fluids.core import Froude_densimetric
from scipy.optimize import newton, brenth
from scipy.constants import g, inch
# Public API of this module.
__all__ = ['C_Reader_Harris_Gallagher',
           'differential_pressure_meter_solver',
           'differential_pressure_meter_dP',
           'orifice_discharge', 'orifice_expansibility',
           'Reader_Harris_Gallagher_discharge',
           'discharge_coefficient_to_K', 'K_to_discharge_coefficient',
           'dP_orifice', 'velocity_of_approach_factor',
           'flow_coefficient', 'nozzle_expansibility',
           'C_long_radius_nozzle', 'C_ISA_1932_nozzle', 'C_venturi_nozzle',
           'orifice_expansibility_1989', 'dP_venturi_tube',
           'diameter_ratio_cone_meter', 'diameter_ratio_wedge_meter',
           'cone_meter_expansibility_Stewart', 'dP_cone_meter',
           'C_wedge_meter_Miller',
           'C_Reader_Harris_Gallagher_wet_venturi_tube',
           'dP_Reader_Harris_Gallagher_wet_venturi_tube'
           ]

# Orifice plate bore geometries.
CONCENTRIC_ORIFICE = 'concentric'
ECCENTRIC_ORIFICE = 'eccentric'
SEGMENTAL_ORIFICE = 'segmental'
CONDITIONING_4_HOLE_ORIFICE = 'Rosemount 4 hole self conditioing'
ORIFICE_HOLE_TYPES = [CONCENTRIC_ORIFICE, ECCENTRIC_ORIFICE, SEGMENTAL_ORIFICE,
                      CONDITIONING_4_HOLE_ORIFICE]

# Pressure-tap arrangements for orifice plates.
ORIFICE_CORNER_TAPS = 'corner'
ORIFICE_FLANGE_TAPS = 'flange'
ORIFICE_D_AND_D_2_TAPS = 'D and D/2'

# Differential-pressure meter type identifiers used to select correlations.
ISO_5167_ORIFICE = 'ISO 5167 orifice'
LONG_RADIUS_NOZZLE = 'long radius nozzle'
ISA_1932_NOZZLE = 'ISA 1932 nozzle'
# NOTE(review): 'venuri' looks like a typo for 'venturi', but this string is
# the public constant's value; changing it would break existing callers that
# pass the literal string -- confirm before renaming.
VENTURI_NOZZLE = 'venuri nozzle'
AS_CAST_VENTURI_TUBE = 'as cast convergent venturi tube'
MACHINED_CONVERGENT_VENTURI_TUBE = 'machined convergent venturi tube'
ROUGH_WELDED_CONVERGENT_VENTURI_TUBE = 'rough welded convergent venturi tube'
CONE_METER = 'cone meter'
WEDGE_METER = 'wedge meter'
# The meter-type identifiers are part of the public API as well.
__all__.extend(['ISO_5167_ORIFICE', 'LONG_RADIUS_NOZZLE', 'ISA_1932_NOZZLE',
                'VENTURI_NOZZLE', 'AS_CAST_VENTURI_TUBE',
                'MACHINED_CONVERGENT_VENTURI_TUBE',
                'ROUGH_WELDED_CONVERGENT_VENTURI_TUBE', 'CONE_METER',
                'WEDGE_METER'])
def orifice_discharge(D, Do, P1, P2, rho, C, expansibility=1.0):
    r'''Compute the mass flow rate through an orifice plate from the plate
    geometry, the measured tap pressures, and the upstream fluid density.

    .. math::
        m = \left(\frac{\pi D_o^2}{4}\right) C \frac{\sqrt{2\Delta P \rho_1}}
        {\sqrt{1 - \beta^4}}\cdot \epsilon

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    C : float
        Coefficient of discharge of the orifice, [-]
    expansibility : float, optional
        Expansibility factor (1 for incompressible fluids, less than 1 for
        real fluids), [-]

    Returns
    -------
    m : float
        Mass flow rate of fluid, [kg/s]

    Notes
    -----
    This is formula 1-12 in [1]_ and also appears in [2]_.

    Examples
    --------
    >>> orifice_discharge(D=0.0739, Do=0.0222, P1=1E5, P2=9.9E4, rho=1.1646,
    ... C=0.5988, expansibility=0.9975)
    0.01120390943807026

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 2: Orifice Plates.
    '''
    beta_ratio = Do/D
    orifice_area = 0.25*pi*Do*Do
    # Velocity-of-approach factor accounts for the kinetic energy of the
    # fluid already moving in the pipe upstream of the restriction.
    velocity_of_approach = (1.0 - beta_ratio*beta_ratio*beta_ratio*beta_ratio)**-0.5
    return (orifice_area*C*expansibility*velocity_of_approach
            *(2.0*rho*(P1 - P2))**0.5)
def orifice_expansibility(D, Do, P1, P2, k):
    r'''Compute the expansibility (expansion) factor for an orifice plate
    from the plate geometry, the two tap pressures, and the isentropic
    exponent of the fluid.

    .. math::
        \epsilon = 1 - (0.351 + 0.256\beta^4 + 0.93\beta^8)
        \left[1-\left(\frac{P_2}{P_1}\right)^{1/\kappa}\right]

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    k : float
        Isentropic exponent of fluid, [-]

    Returns
    -------
    expansibility : float, optional
        Expansibility factor (1 for incompressible fluids, less than 1 for
        real fluids), [-]

    Notes
    -----
    Correlated over the range P2/P1 >= 0.80 using air, steam, and natural
    gas; applying it to other fluids is considered acceptable.

    Examples
    --------
    >>> orifice_expansibility(D=0.0739, Do=0.0222, P1=1E5, P2=9.9E4, k=1.4)
    0.9974739057343425

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 2: Orifice Plates.
    '''
    beta_ratio = Do/D
    beta4 = beta_ratio*beta_ratio*beta_ratio*beta_ratio
    pressure_term = 1.0 - (P2/P1)**(1.0/k)
    return 1.0 - (0.351 + 0.256*beta4 + 0.93*beta4*beta4)*pressure_term
def orifice_expansibility_1989(D, Do, P1, P2, k):
    r'''Compute the expansibility factor for an orifice plate using the
    older 1989 ASME correlation, from the plate geometry, the two tap
    pressures, and the isentropic exponent of the fluid.

    .. math::
        \epsilon = 1- (0.41 + 0.35\beta^4)\Delta P/\kappa/P_1

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    k : float
        Isentropic exponent of fluid, [-]

    Returns
    -------
    expansibility : float
        Expansibility factor (1 for incompressible fluids, less than 1 for
        real fluids), [-]

    Notes
    -----
    Correlated over the range P2/P1 >= 0.75 using air, steam, and natural
    gas; applying it to other fluids is considered acceptable.

    This is an older formula, superseded by the Reader-Harris form, and is
    kept for reference. [1]_ also gives an equivalent transformation in terms
    of the downstream pressure:

    .. math::
        \epsilon_2 = \sqrt{1 + \frac{\Delta P}{P_2}} - (0.41 + 0.35\beta^4)
        \frac{\Delta P}{\kappa P_2 \sqrt{1 + \frac{\Delta P}{P_2}}}

    [2]_ recommends this formulation for wedge meters as well.

    Examples
    --------
    >>> orifice_expansibility_1989(D=0.0739, Do=0.0222, P1=1E5, P2=9.9E4, k=1.4)
    0.9970510687411718

    References
    ----------
    .. [1] American Society of Mechanical Engineers. MFC-3M-1989 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2005.
    .. [2] Miller, Richard W. Flow Measurement Engineering Handbook. 3rd
       edition. New York: McGraw-Hill Education, 1996.
    '''
    beta_ratio = Do/D
    beta4 = beta_ratio*beta_ratio*beta_ratio*beta_ratio
    return 1.0 - (0.41 + 0.35*beta4)*(P1 - P2)/(k*P1)
def C_Reader_Harris_Gallagher(D, Do, rho, mu, m, taps='corner'):
    r'''Calculates the coefficient of discharge of the orifice based on the
    geometry of the plate, measured pressures of the orifice, mass flow rate
    through the orifice, and the density and viscosity of the fluid.

    .. math::
        C = 0.5961 + 0.0261\beta^2 - 0.216\beta^8 + 0.000521\left(\frac{
        10^6\beta}{Re_D}\right)^{0.7}\\
        + (0.0188 + 0.0063A)\beta^{3.5} \left(\frac{10^6}{Re_D}\right)^{0.3} \\
        +(0.043 + 0.080\exp(-10L_1) -0.123\exp(-7L_1))(1-0.11A)\frac{\beta^4}
        {1-\beta^4} \\
        -  0.031(M_2' - 0.8M_2'^{1.1})\beta^{1.3}

    .. math::
        M_2' = \frac{2L_2'}{1-\beta}

        A = \left(\frac{19000\beta}{Re_{D}}\right)^{0.8}

        Re_D = \frac{\rho v D}{\mu}

    If D < 71.12 mm (2.8 in.), the following correction is added (note the
    coefficient is 0.011, not 0.11 as misprinted in some sources):

    .. math::
        C += 0.011(0.75-\beta)\left(2.8-\frac{D}{0.0254}\right)

    If the orifice has corner taps:

    .. math::
        L_1 = L_2' = 0

    If the orifice has D and D/2 taps:

    .. math::
        L_1 = 1

        L_2' = 0.47

    If the orifice has Flange taps:

    .. math::
        L_1 = L_2' = \frac{0.0254}{D}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    mu : float
        Viscosity of fluid at `P1`, [Pa*s]
    m : float
        Mass flow rate of fluid through the orifice, [kg/s]
    taps : str
        The orientation of the taps; one of 'corner', 'flange', 'D', or 'D/2',
        [-]

    Returns
    -------
    C : float
        Coefficient of discharge of the orifice, [-]

    Raises
    ------
    ValueError
        If `taps` is not one of the supported tap locations.

    Notes
    -----
    The following limits apply to the orifice plate standard [1]_:

    The measured pressure difference for the orifice plate should be under
    250 kPa. Roughness should be under 6 micrometers, although many more
    conditions are given in [1]_.

    For orifice plates with D and D/2 or corner pressure taps:

    * Orifice bore diameter muse be larger than 12.5 mm (0.5 inches)
    * Pipe diameter between 50 mm and 1 m (2 to 40 inches)
    * Beta between 0.1 and 0.75 inclusive
    * Reynolds number larger than 5000 (for :math:`0.10 \le \beta \le 0.56`)
      or for :math:`\beta \ge 0.56, Re_D \ge 16000\beta^2`

    For orifice plates with flange pressure taps:

    * Orifice bore diameter muse be larger than 12.5 mm (0.5 inches)
    * Pipe diameter between 50 mm and 1 m (2 to 40 inches)
    * Beta between 0.1 and 0.75 inclusive
    * Reynolds number larger than 5000 and also larger than
      :math:`170000\beta^2 D`.

    This is also presented in Crane's TP410 (2009) publication, whereas the
    1999 and 1982 editions showed only a graph for discharge coefficients.

    Examples
    --------
    >>> C_Reader_Harris_Gallagher(D=0.07391, Do=0.0222, rho=1.165, mu=1.85E-5,
    ... m=0.12, taps='flange')
    0.5990326277163659

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 2: Orifice Plates.
    .. [3] Reader-Harris, M. J., "The Equation for the Expansibility Factor for
       Orifice Plates," Proceedings of FLOMEKO 1998, Lund, Sweden, 1998:
       209-214.
    .. [4] Reader-Harris, Michael. Orifice Plates and Venturi Tubes. Springer,
       2015.
    '''
    A_pipe = pi/4.*D*D
    v = m/(A_pipe*rho)
    Re_D = rho*v*D/mu
    beta = Do/D
    if taps == 'corner':
        L1, L2_prime = 0.0, 0.0
    elif taps == 'D' or taps == 'D/2':
        L1 = 1.0
        L2_prime = 0.47
    elif taps == 'flange':
        L1 = L2_prime = 0.0254/D
    else:
        # ValueError is the idiomatic exception for a bad argument value;
        # it remains a subclass of Exception so existing handlers still work.
        raise ValueError('Unsupported tap location')
    beta2 = beta*beta
    beta4 = beta2*beta2
    beta8 = beta4*beta4

    A = (19000.0*beta/Re_D)**0.8
    M2_prime = 2*L2_prime/(1.0 - beta)

    delta_C_upstream = ((0.043 + 0.080*exp(-1E1*L1) - 0.123*exp(-7.0*L1))
                        *(1.0 - 0.11*A)*beta4/(1.0 - beta4))
    # The max term is a low-Reynolds extension from [4]_; it is not in the
    # ISO standard.
    delta_C_downstream = (-0.031*(M2_prime - 0.8*M2_prime**1.1)*beta**1.3
                          *(1.0 + 8*max(log10(3700./Re_D), 0.0)))
    # C_inf is the discharge coefficient with corner taps for infinite Re;
    # Cs, the slope term, increases the coefficient at lower Reynolds numbers.
    # The max term is not in the ISO standard.
    C_inf_C_s = (0.5961 + 0.0261*beta2 - 0.216*beta8
                 + 0.000521*(1E6*beta/Re_D)**0.7
                 + (0.0188 + 0.0063*A)*beta**3.5*(
                 max((1E6/Re_D)**0.3, 22.7 - 4700.0*(Re_D/1E6))))
    C = (C_inf_C_s + delta_C_upstream + delta_C_downstream)
    if D < 0.07112:
        # Limit is 2.8 inches, .1 inches smaller than the internal diameter
        # of a sched. 80 pipe. Required not because of any effect of small
        # diameters themselves, but because of edge radius differences.
        # The max term is given in [4]_ (Reader-Harris, Michael).
        delta_C_diameter = 0.011*(0.75 - beta)*max((2.8 - D/0.0254), 0.0)
        C += delta_C_diameter
    return C
def Reader_Harris_Gallagher_discharge(D, Do, P1, P2, rho, mu, k, taps='corner'):
    r'''Calculates the mass flow rate of fluid through an orifice based on the
    geometry of the plate, measured pressures of the orifice, and the density,
    viscosity, and isentropic exponent of the fluid. This solves an equation
    iteratively to obtain the correct flow rate.

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    mu : float
        Viscosity of fluid at `P1`, [Pa*s]
    k : float
        Isentropic exponent of fluid, [-]
    taps : str
        The orientation of the taps; one of 'corner', 'flange', 'D', or 'D/2',
        [-]

    Returns
    -------
    m : float
        Mass flow rate of fluid through the orifice, [kg/s]

    Notes
    -----
    The mass flow rate appears on both sides of the flow equation (the
    discharge coefficient itself depends on the Reynolds number and hence on
    the flow rate), so the equation must be solved numerically.

    Examples
    --------
    >>> Reader_Harris_Gallagher_discharge(D=0.07366, Do=0.05, P1=200000.0,
    ... P2=183000.0, rho=999.1, mu=0.0011, k=1.33, taps='D')
    7.702338035732167

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 2: Orifice Plates.
    '''
    # The original body called ``newton(to_solve, 2.81)`` without ever
    # defining ``to_solve``, raising NameError on every call; the objective
    # function is restored here.
    def to_solve(m):
        # Residual between the guessed flow rate and the flow rate implied
        # by the discharge coefficient evaluated at that guess.
        C = C_Reader_Harris_Gallagher(D=D, Do=Do, rho=rho, mu=mu, m=m,
                                      taps=taps)
        epsilon = orifice_expansibility(D, Do, P1, P2, k)
        m_calc = orifice_discharge(D=D, Do=Do, P1=P1, P2=P2, rho=rho, C=C,
                                   expansibility=epsilon)
        return m - m_calc
    # 2.81 kg/s is an arbitrary initial guess for the secant iteration.
    return newton(to_solve, 2.81)
def discharge_coefficient_to_K(D, Do, C):
    r'''Convert a discharge coefficient into a standard loss coefficient,
    for use in computing the actual pressure drop of an orifice or other
    differential pressure device.

    .. math::
        K = \left[\frac{\sqrt{1-\beta^4(1-C^2)}}{C\beta^2} - 1\right]^2

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    C : float
        Coefficient of discharge of the orifice, [-]

    Returns
    -------
    K : float
        Loss coefficient with respect to the velocity and density of the fluid
        just upstream of the orifice, [-]

    Notes
    -----
    If expansibility is used in the orifice calculation, the result will not
    match the pressure drop formula in [1]_; it can almost be matched by
    dividing the calculated mass flow by the expansibility factor and using
    that flow with the loss coefficient.

    Examples
    --------
    >>> discharge_coefficient_to_K(D=0.07366, Do=0.05, C=0.61512)
    5.2314291729754

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 2: Orifice Plates.
    '''
    beta_sq = (Do/D)*(Do/D)
    beta4 = beta_sq*beta_sq
    root_term = (1.0 - beta4*(1.0 - C*C))**0.5
    ratio = root_term/(C*beta_sq) - 1.0
    return ratio*ratio
def K_to_discharge_coefficient(D, Do, K):
    r'''Convert a standard loss coefficient into a discharge coefficient.

    .. math::
        C = \sqrt{\frac{1}{2 \sqrt{K} \beta^{4} + K \beta^{4}}
        - \frac{\beta^{4}}{2 \sqrt{K} \beta^{4} + K \beta^{4}} }

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    K : float
        Loss coefficient with respect to the velocity and density of the fluid
        just upstream of the orifice, [-]

    Returns
    -------
    C : float
        Coefficient of discharge of the orifice, [-]

    Notes
    -----
    If expansibility is used in the orifice calculation, the result will not
    match the pressure drop formula in [1]_; it can almost be matched by
    dividing the calculated mass flow by the expansibility factor and using
    that flow with the loss coefficient.

    This expression was derived with SymPy and checked numerically; the
    other three roots of the underlying quartic are incorrect.

    Examples
    --------
    >>> K_to_discharge_coefficient(D=0.07366, Do=0.05, K=5.2314291729754)
    0.6151200000000001

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 2: Orifice Plates.
    '''
    beta_r = Do/D
    beta4 = beta_r*beta_r*beta_r*beta_r
    # Common denominator (2*sqrt(K) + K)*beta^4 appears in both terms.
    denom = 2.0*K**0.5*beta4 + K*beta4
    return ((1.0 - beta4)/denom)**0.5
def dP_orifice(D, Do, P1, P2, C):
    r'''Compute the non-recoverable pressure drop of an orifice plate from
    the measured pressure drop, the plate geometry, and the discharge
    coefficient.

    .. math::
        \Delta\bar w = \frac{\sqrt{1-\beta^4(1-C^2)}-C\beta^2}
        {\sqrt{1-\beta^4(1-C^2)}+C\beta^2} (P_1 - P_2)

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    C : float
        Coefficient of discharge of the orifice, [-]

    Returns
    -------
    dP : float
        Non-recoverable pressure drop of the orifice plate, [Pa]

    Notes
    -----
    This formula is well approximated by:

    .. math::
        \Delta\bar w = \left(1 - \beta^{1.9}\right)(P_1 - P_2)

    The recoverable portion of the pressure drop is regained by about 6 pipe
    diameters downstream of the plate.

    Examples
    --------
    >>> dP_orifice(D=0.07366, Do=0.05, P1=200000.0, P2=183000.0, C=0.61512)
    9069.474705745388

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 2: Orifice Plates.
    '''
    beta_sq = (Do/D)*(Do/D)
    beta4 = beta_sq*beta_sq
    root_term = (1.0 - beta4*(1.0 - C*C))**0.5
    # Fraction of the measured differential pressure that is permanently lost.
    loss_fraction = (root_term - C*beta_sq)/(root_term + C*beta_sq)
    return loss_fraction*(P1 - P2)
def velocity_of_approach_factor(D, Do):
    r'''Compute the `velocity of approach` factor used in orifice plate
    design; it corrects for the kinetic energy of the fluid approaching the
    restriction.

    .. math::
        \text{Velocity of approach} = \frac{1}{\sqrt{1 - \beta^4}}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]

    Returns
    -------
    velocity_of_approach : float
        Coefficient of discharge of the orifice, [-]

    Examples
    --------
    >>> velocity_of_approach_factor(D=0.0739, Do=0.0222)
    1.0040970074165514

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    '''
    beta_r = Do/D
    return 1.0/(1.0 - beta_r*beta_r*beta_r*beta_r)**0.5
def flow_coefficient(D, Do, C):
    r'''Compute the `flow coefficient` of a differential pressure flow
    meter. Not to be confused with the flow coefficient commonly used when
    discussing valves.

    .. math::
        \text{Flow coefficient} = \frac{C}{\sqrt{1 - \beta^4}}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of flow meter characteristic dimension at flow conditions, [m]
    C : float
        Coefficient of discharge of the flow meter, [-]

    Returns
    -------
    flow_coefficient : float
        Differential pressure flow meter flow coefficient, [-]

    Notes
    -----
    Applies to all differential pressure flow meters, not just orifices
    [2]_. Sometimes given the symbol K; equal to the product of the
    discharge coefficient and the velocity of approach factor [2]_.

    Examples
    --------
    >>> flow_coefficient(D=0.0739, Do=0.0222, C=0.6)
    0.6024582044499308

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] Miller, Richard W. Flow Measurement Engineering Handbook. 3rd
       edition. New York: McGraw-Hill Education, 1996.
    '''
    beta_r = Do/D
    return C/(1.0 - beta_r*beta_r*beta_r*beta_r)**0.5
def nozzle_expansibility(D, Do, P1, P2, k):
    r'''Compute the expansibility factor for a nozzle or venturi nozzle from
    the geometry, the two tap pressures, and the isentropic exponent of the
    fluid.

    .. math::
        \epsilon = \left\{\left(\frac{\kappa \tau^{2/\kappa}}{\kappa-1}\right)
        \left(\frac{1 - \beta^4}{1 - \beta^4 \tau^{2/\kappa}}\right)
        \left[\frac{1 - \tau^{(\kappa-1)/\kappa}}{1 - \tau}
        \right] \right\}^{0.5}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice of the venturi or nozzle, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    k : float
        Isentropic exponent of fluid, [-]

    Returns
    -------
    expansibility : float
        Expansibility factor (1 for incompressible fluids, less than 1 for
        real fluids), [-]

    Notes
    -----
    Correlated over the range P2/P1 >= 0.75.

    Examples
    --------
    >>> nozzle_expansibility(D=0.0739, Do=0.0222, P1=1E5, P2=9.9E4, k=1.4)
    0.9945702344566746

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-3:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 3: Nozzles and Venturi Nozzles.
    '''
    beta_r = Do/D
    beta4 = beta_r*beta_r*beta_r*beta_r
    tau = P2/P1
    tau_2k = tau**(2.0/k)
    # Three bracketed factors of the ISO expansibility expression.
    kinetic_term = k*tau_2k/(k - 1.0)
    area_term = (1.0 - beta4)/(1.0 - beta4*tau_2k)
    enthalpy_term = (1.0 - tau**((k - 1.0)/k))/(1.0 - tau)
    return (kinetic_term*area_term*enthalpy_term)**0.5
def C_long_radius_nozzle(D, Do, rho, mu, m):
    r'''Compute the discharge coefficient of a long radius nozzle from its
    geometry, the mass flow rate through it, and the density and viscosity
    of the fluid.

    .. math::
        C = 0.9965 - 0.00653\beta^{0.5} \left(\frac{10^6}{Re_D}\right)^{0.5}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of long radius nozzle orifice at flow conditions, [m]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    mu : float
        Viscosity of fluid at `P1`, [Pa*s]
    m : float
        Mass flow rate of fluid through the nozzle, [kg/s]

    Returns
    -------
    C : float
        Coefficient of discharge of the long radius nozzle orifice, [-]

    Examples
    --------
    >>> C_long_radius_nozzle(D=0.07391, Do=0.0422, rho=1.2, mu=1.8E-5, m=0.1)
    0.9805503704679863

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-3:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 3: Nozzles and Venturi Nozzles.
    '''
    # Re_D = rho*v*D/mu with v = m/(A*rho) simplifies to 4*m/(pi*D*mu).
    Re_D = 4.0*m/(pi*D*mu)
    beta_r = Do/D
    return 0.9965 - 0.00653*(1E6*beta_r/Re_D)**0.5
def C_ISA_1932_nozzle(D, Do, rho, mu, m):
    r'''Compute the discharge coefficient of an ISA 1932 style nozzle from
    its geometry, the mass flow rate through it, and the density and
    viscosity of the fluid.

    .. math::
        C = 0.9900 - 0.2262\beta^{4.1} - (0.00175\beta^2 - 0.0033\beta^{4.15})
        \left(\frac{10^6}{Re_D}\right)^{1.15}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of nozzle orifice at flow conditions, [m]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    mu : float
        Viscosity of fluid at `P1`, [Pa*s]
    m : float
        Mass flow rate of fluid through the nozzle, [kg/s]

    Returns
    -------
    C : float
        Coefficient of discharge of the nozzle orifice, [-]

    Examples
    --------
    >>> C_ISA_1932_nozzle(D=0.07391, Do=0.0422, rho=1.2, mu=1.8E-5, m=0.1)
    0.9635849973250495

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-3:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 3: Nozzles and Venturi Nozzles.
    '''
    # Re_D = rho*v*D/mu with v = m/(A*rho) simplifies to 4*m/(pi*D*mu).
    Re_D = 4.0*m/(pi*D*mu)
    beta_r = Do/D
    Re_term = (1E6/Re_D)**1.15
    return (0.9900 - 0.2262*beta_r**4.1
            - (0.00175*beta_r*beta_r - 0.0033*beta_r**4.15)*Re_term)
def C_venturi_nozzle(D, Do):
    r'''Calculates the coefficient of discharge of an Venturi style nozzle
    used for measuring flow rate of fluid, based on the geometry of the nozzle.

    .. math::
        C = 0.9858 - 0.196\beta^{4.5}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of nozzle orifice at flow conditions, [m]

    Returns
    -------
    C : float
        Coefficient of discharge of the nozzle orifice, [-]

    Notes
    -----
    The coefficient on the beta term was previously implemented as 0.198;
    ISO 5167-3 and [1]_ specify 0.196, which is used here.

    Examples
    --------
    >>> C_venturi_nozzle(D=0.07391, Do=0.0422)
    0.97006025...

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-3:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 3: Nozzles and Venturi Nozzles.
    '''
    beta = Do/D
    # ISO 5167-3 venturi nozzle correlation; constant-only, no Reynolds
    # number dependence.
    return 0.9858 - 0.196*beta**4.5
# Relative pressure loss as a function of beta ratio for venturi nozzles.
# Venturi nozzles should be between 65 mm and 500 mm; there are high and low
# loss-ratio curves, with the high losses corresponding to small diameters
# and the low losses corresponding to large diameters.
# Interpolation can be performed between the two curves.

# Beta ratios at which the two loss-fraction curves below are tabulated.
venturi_tube_betas = np.array(
    [0.299160, 0.299470, 0.312390, 0.319010, 0.326580, 0.337290,
     0.342020, 0.347060, 0.359030, 0.365960, 0.372580, 0.384870,
     0.385810, 0.401250, 0.405350, 0.415740, 0.424250, 0.434010,
     0.447880, 0.452590, 0.471810, 0.473090, 0.493540, 0.499240,
     0.516530, 0.523800, 0.537630, 0.548060, 0.556840, 0.573890,
     0.582350, 0.597820, 0.601560, 0.622650, 0.626490, 0.649480,
     0.650990, 0.668700, 0.675870, 0.688550, 0.693180, 0.706180,
     0.713330, 0.723510, 0.749540, 0.749650])

# Loss fraction (dP_permanent/dP_measured) for small pipes (~65 mm bound).
venturi_tube_dP_high = np.array(
    [0.164534, 0.164504, 0.163591, 0.163508, 0.163439,
     0.162652, 0.162224, 0.161866, 0.161238, 0.160786,
     0.160295, 0.159280, 0.159193, 0.157776, 0.157467,
     0.156517, 0.155323, 0.153835, 0.151862, 0.151154,
     0.147840, 0.147613, 0.144052, 0.143050, 0.140107,
     0.138981, 0.136794, 0.134737, 0.132847, 0.129303,
     0.127637, 0.124758, 0.124006, 0.119269, 0.118449,
     0.113605, 0.113269, 0.108995, 0.107109, 0.103688,
     0.102529, 0.099567, 0.097791, 0.095055, 0.087681,
     0.087648])

# Loss fraction (dP_permanent/dP_measured) for large pipes (~500 mm bound).
venturi_tube_dP_low = np.array(
    [0.089232, 0.089218, 0.088671, 0.088435, 0.088206,
     0.087853, 0.087655, 0.087404, 0.086693, 0.086241,
     0.085813, 0.085142, 0.085102, 0.084446, 0.084202,
     0.083301, 0.082470, 0.081650, 0.080582, 0.080213,
     0.078509, 0.078378, 0.075989, 0.075226, 0.072700,
     0.071598, 0.069562, 0.068128, 0.066986, 0.064658,
     0.063298, 0.060872, 0.060378, 0.057879, 0.057403,
     0.054091, 0.053879, 0.051726, 0.050931, 0.049362,
     0.048675, 0.046522, 0.045381, 0.043840, 0.039913,
     0.039896])

#ratios_average = 0.5*(ratios_high + ratios_low)

# Pipe-diameter bounds [m] between which the two curves are interpolated.
D_bound_venturi_tube = np.array([0.065, 0.5])
def dP_venturi_tube(D, Do, P1, P2):
    r'''Compute the permanent (non-recoverable) pressure loss of a venturi
    tube differential pressure meter from its geometry and the measured
    pressure drop.

    .. math::
        \epsilon = \frac{\Delta\bar w }{\Delta P}

    The loss ratio :math:`\epsilon` is interpolated from tabulated data as a
    function of the beta ratio; a second interpolation on the upstream pipe
    diameter blends the curves for small (65 mm) and large (500 mm) pipes,
    which differ because of relative roughness.

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of venturi tube at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of venturi tube at the cross-section
        of the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of venturi tube at the
        cross-section of the pressure tap, [Pa]

    Returns
    -------
    dP : float
        Non-recoverable pressure drop of the venturi tube, [Pa]

    Notes
    -----
    The recoverable portion of the pressure drop is regained roughly six pipe
    diameters downstream of the venturi tube.
    [1]_ and [2]_ also document a Reynolds number effect: from 1E5-6E5 the
    loss carries a multiplier decreasing from 1.75 to 1, and the multiplier
    is 1 at higher Reynolds numbers. That effect is not modeled here.

    Examples
    --------
    >>> dP_venturi_tube(D=0.07366, Do=0.05, P1=200000.0, P2=183000.0)
    1788.5717754177406

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-4:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 4: Venturi Tubes.
    '''
    beta = Do/D
    # Loss ratio at each bounding pipe size, interpolated on beta
    eps_small_pipe = np.interp(beta, venturi_tube_betas, venturi_tube_dP_high)
    eps_large_pipe = np.interp(beta, venturi_tube_betas, venturi_tube_dP_low)
    # Blend on the upstream diameter between the 65 mm and 500 mm bounds
    epsilon = np.interp(D, D_bound_venturi_tube,
                        [eps_small_pipe, eps_large_pipe])
    return epsilon*(P1 - P2)
def diameter_ratio_cone_meter(D, Dc):
    r'''Compute the diameter ratio `beta` used to characterize a cone
    flow meter.

    .. math::
        \beta = \sqrt{1 - \frac{d_c^2}{D^2}}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Dc : float
        Diameter of the largest end of the cone meter, [m]

    Returns
    -------
    beta : float
        Cone meter diameter ratio, [-]

    Examples
    --------
    >>> diameter_ratio_cone_meter(D=0.2575, Dc=0.184)
    0.6995709873957624

    References
    ----------
    .. [1] Hollingshead, Colter. "Discharge Coefficient Performance of Venturi,
       Standard Concentric Orifice Plate, V-Cone, and Wedge Flow Meters at
       Small Reynolds Numbers." May 1, 2011.
       https://digitalcommons.usu.edu/etd/869.
    '''
    # The open annular fraction of the pipe area is 1 - (Dc/D)^2;
    # beta is its square root.
    open_fraction = 1.0 - (Dc/D)*(Dc/D)
    return open_fraction**0.5
def cone_meter_expansibility_Stewart(D, Dc, P1, P2, k):
    r'''Compute the expansibility factor of a cone flow meter from its
    geometry, the two measured pressures, and the isentropic exponent of the
    fluid. Developed in [1]_; also given in [2]_.

    .. math::
        \epsilon = 1 - (0.649 + 0.696\beta^4) \frac{\Delta P}{\kappa P_1}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Dc : float
        Diameter of the largest end of the cone meter, [m]
    P1 : float
        Static pressure of fluid upstream of cone meter at the cross-section
        of the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid at the end of the center of the cone pressure
        tap, [Pa]
    k : float
        Isentropic exponent of fluid, [-]

    Returns
    -------
    expansibility : float
        Expansibility factor (1 for incompressible fluids, less than 1 for
        real fluids), [-]

    Notes
    -----
    The correlation was fitted over the range P2/P1 >= 0.75, with air as the
    only test gas.

    Examples
    --------
    >>> cone_meter_expansibility_Stewart(D=1, Dc=0.9, P1=1E6, P2=8.5E5, k=1.2)
    0.9157343

    References
    ----------
    .. [1] Stewart, D. G., M. Reader-Harris, and NEL Dr RJW Peters. "Derivation
       of an Expansibility Factor for the V-Cone Meter." In Flow Measurement
       International Conference, Peebles, Scotland, UK, 2001.
    .. [2] ISO 5167-5:2016 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 5: Cone meters.
    '''
    beta = diameter_ratio_cone_meter(D, Dc)
    correction = 0.649 + 0.696*beta**4
    return 1.0 - correction*(P1 - P2)/(k*P1)
def dP_cone_meter(D, Dc, P1, P2):
    r'''Compute the permanent (non-recoverable) pressure loss of a cone meter
    from the pressures measured upstream and at the cone end, and the meter
    geometry, according to [1]_.

    .. math::
        \Delta \bar \omega = (1.09 - 0.813\beta)\Delta P

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Dc : float
        Diameter of the largest end of the cone meter, [m]
    P1 : float
        Static pressure of fluid upstream of cone meter at the cross-section
        of the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid at the end of the center of the cone pressure
        tap, [Pa]

    Returns
    -------
    dP : float
        Non-recoverable pressure drop of the cone meter, [Pa]

    Notes
    -----
    The recoverable portion of the pressure drop is regained roughly six pipe
    diameters downstream of the cone meter.

    Examples
    --------
    >>> dP_cone_meter(1, .7, 1E6, 9.5E5)
    25470.093437973323

    References
    ----------
    .. [1] ISO 5167-5:2016 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 5: Cone meters.
    '''
    beta = diameter_ratio_cone_meter(D, Dc)
    loss_fraction = 1.09 - 0.813*beta
    return loss_fraction*(P1 - P2)
def diameter_ratio_wedge_meter(D, H):
    r'''Compute the equivalent diameter ratio `beta` characterizing a wedge
    flow meter, as given in [1]_ and [2]_.

    .. math::
        \beta = \left(\frac{1}{\pi}\left\{\arccos\left[1 - \frac{2H}{D}
        \right] - 2 \left[1 - \frac{2H}{D}
        \right]\left(\frac{H}{D} - \left[\frac{H}{D}\right]^2
        \right)^{0.5}\right\}\right)^{0.5}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    H : float
        Portion of the diameter of the clear segment of the pipe up to the
        wedge blocking flow; the height of the pipe up to the wedge, [m]

    Returns
    -------
    beta : float
        Wedge meter diameter ratio, [-]

    Notes
    -----
    `beta` is defined so that its square equals the open circular-segment
    fraction of the pipe's cross-sectional area.

    Examples
    --------
    >>> diameter_ratio_wedge_meter(D=0.2027, H=0.0608)
    0.5022531424646643

    References
    ----------
    .. [1] Hollingshead, Colter. "Discharge Coefficient Performance of Venturi,
       Standard Concentric Orifice Plate, V-Cone, and Wedge Flow Meters at
       Small Reynolds Numbers." May 1, 2011.
       https://digitalcommons.usu.edu/etd/869.
    .. [2] IntraWedge WEDGE FLOW METER Type: IWM. January 2011.
       http://www.intra-automation.com/download.php?file=pdf/products/technical_information/en/ti_iwm_en.pdf
    '''
    h_ratio = H/D
    cos_term = 1.0 - 2.0*h_ratio
    # Circular-segment area fraction: (acos(1-2h) - 2(1-2h)*sqrt(h - h^2))/pi
    segment_angle = acos(cos_term)
    chord_term = 2.0*cos_term*(h_ratio - h_ratio*h_ratio)**0.5
    return ((1.0/pi)*(segment_angle - chord_term))**0.5
def C_wedge_meter_Miller(D, H):
    r'''Compute the discharge coefficient of a wedge flow meter from its
    geometry, using the line-size-dependent correlations of [1]_.

    For half-inch lines:

    .. math::
        C = 0.7883 + 0.107(1 - \beta^2)

    For 1 to 1.5 inch lines:

    .. math::
        C = 0.6143 + 0.718(1 - \beta^2)

    For 1.5 to 24 inch lines:

    .. math::
        C = 0.5433 + 0.2453(1 - \beta^2)

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    H : float
        Portion of the diameter of the clear segment of the pipe up to the
        wedge blocking flow; the height of the pipe up to the wedge, [m]

    Returns
    -------
    C : float
        Coefficient of discharge of the wedge flow meter, [-]

    Notes
    -----
    There is an ISO standard being developed to cover wedge meters as of 2018.
    Wedge meters can have varying angles; 60 and 90 degree wedge meters have
    been reported. Tap locations 1 or 2 diameters (upstream and downstream),
    and 2D upstream/1D downstream have been used. Some wedges are sharp;
    some are smooth. [2]_ gives some experimental values.

    Examples
    --------
    >>> C_wedge_meter_Miller(D=0.1524, H=0.3*0.1524)
    0.7267069372687651

    References
    ----------
    .. [1] Miller, Richard W. Flow Measurement Engineering Handbook. 3rd
       edition. New York: McGraw-Hill Education, 1996.
    .. [2] Seshadri, V., S. N. Singh, and S. Bhargava. "Effect of Wedge Shape
       and Pressure Tap Locations on the Characteristics of a Wedge Flowmeter."
       IJEMS Vol.01(5), October 1994.
    '''
    beta = diameter_ratio_wedge_meter(D, H)
    open_fraction = 1 - beta*beta
    if D <= 0.7*inch:
        # Half-inch correlation; suggested limit is 0.5 inch, applied to 0.7
        return 0.7883 + 0.107*open_fraction
    if D <= 1.4*inch:
        # 1 to 1.5 inch correlation; suggested limit is under 1.5 inches
        return 0.6143 + 0.718*open_fraction
    # 1.5 to 24 inch correlation
    return 0.5433 + 0.2453*open_fraction
def C_Reader_Harris_Gallagher_wet_venturi_tube(mg, ml, rhog, rhol, D, Do, H=1):
    r'''Calculates the coefficient of discharge of the wet gas venturi tube
    based on the geometry of the tube, mass flow rates of liquid and vapor
    through the tube, and the density of the liquid and gas phases.

    .. math::
        C = 1 - 0.0463\exp(-0.05Fr_{gas, th}) \cdot \min\left(1,
            \sqrt{\frac{X}{0.016}}\right)

        Fr_{gas, th} = \frac{Fr_{\text{gas, densiometric }}}{\beta^{2.5}}

        X = \left(\frac{m_l}{m_g}\right) \sqrt{\frac{\rho_{1,g}}{\rho_l}}

        {Fr_{\text{gas, densiometric}}} = \frac{v_{gas}}{\sqrt{gD}}
        \sqrt{\frac{\rho_{1,g}}{\rho_l - \rho_{1,g}}}
        = \frac{4m_g}{\rho_{1,g} \pi D^2 \sqrt{gD}}
        \sqrt{\frac{\rho_{1,g}}{\rho_l - \rho_{1,g}}}

    Parameters
    ----------
    mg : float
        Mass flow rate of gas through the venturi tube, [kg/s]
    ml : float
        Mass flow rate of liquid through the venturi tube, [kg/s]
    rhog : float
        Density of gas at `P1`, [kg/m^3]
    rhol : float
        Density of liquid at `P1`, [kg/m^3]
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of venturi tube at flow conditions, [m]
    H : float, optional
        A surface-tension effect coefficient used to adjust for different
        fluids, (1 for a hydrocarbon liquid, 1.35 for water, 0.79 for water in
        steam); retained for API compatibility but not used by the discharge
        coefficient correlation itself, [-]

    Returns
    -------
    C : float
        Coefficient of discharge of the wet gas venturi tube flow meter
        (includes flow rate of gas ONLY), [-]

    Notes
    -----
    This model has more error than single phase differential pressure meters.
    The model was first published in [1]_, and became ISO 11583 later.
    [1]_ also defines the :math:`\phi`, :math:`C_{Ch}` and :math:`n` terms
    used in the over-reading correction; they do not enter the discharge
    coefficient, and dead computations of them have been removed from this
    function.

    The limits of this correlation according to [2]_ are as follows:

    .. math::
        0.4 \le \beta \le 0.75

        0 < X \le 0.3

        Fr_{gas, th} > 3

        \frac{\rho_g}{\rho_l} > 0.02

        D \ge 50 \text{ mm}

    Examples
    --------
    >>> C_Reader_Harris_Gallagher_wet_venturi_tube(mg=5.31926, ml=5.31926/2,
    ... rhog=50.0, rhol=800., D=.1, Do=.06, H=1)
    0.9754210845876333

    References
    ----------
    .. [1] Reader-harris, Michael, and Tuv Nel. An Improved Model for
       Venturi-Tube Over-Reading in Wet Gas, 2009.
    .. [2] ISO/TR 11583:2012 Measurement of Wet Gas Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits.
    '''
    # Superficial gas velocity in the upstream pipe
    V = 4*mg/(rhog*pi*D**2)
    # Densiometric gas Froude number based on the upstream pipe diameter
    Frg = Froude_densimetric(V, L=D, rho1=rhol, rho2=rhog, heavy=False)
    beta = Do/D
    # Throat densiometric gas Froude number
    Fr_gas_th = Frg*beta**-2.5
    # Lockhart-Martinelli parameter
    X = ml/mg*(rhog/rhol)**0.5
    C = 1.0 - 0.0463*exp(-0.05*Fr_gas_th)*min(1.0, (X/0.016)**0.5)
    return C
def dP_Reader_Harris_Gallagher_wet_venturi_tube(D, Do, P1, P2, ml, mg, rhol,
                                                rhog, H=1):
    r'''Calculates the non-recoverable pressure drop of a wet gas venturi
    nozzle based on the pressure drop and the geometry of the venturi nozzle,
    the mass flow rates of liquid and gas through it, the densities of the
    vapor and liquid phase, and an adjustable coefficient `H`.

    .. math::
        Y = \frac{\Delta \bar \omega}{\Delta P} - 0.0896 - 0.48\beta^9

        Y_{max} = 0.61\exp\left[-11\frac{\rho_{1,g}}{\rho_l}
        - 0.045 \frac{Fr_{gas}}{H}\right]

        \frac{Y}{Y_{max}} = 1 - \exp\left[-35 X^{0.75} \exp
        \left( \frac{-0.28Fr_{gas}}{H}\right)\right]

        X = \left(\frac{m_l}{m_g}\right) \sqrt{\frac{\rho_{1,g}}{\rho_l}}

        {Fr_{\text{gas, densiometric}}} = \frac{v_{gas}}{\sqrt{gD}}
        \sqrt{\frac{\rho_{1,g}}{\rho_l - \rho_{1,g}}}
        = \frac{4m_g}{\rho_{1,g} \pi D^2 \sqrt{gD}}
        \sqrt{\frac{\rho_{1,g}}{\rho_l - \rho_{1,g}}}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of venturi tube at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of venturi tube at the cross-section
        of the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of venturi tube at the cross-
        section of the pressure tap, [Pa]
    ml : float
        Mass flow rate of liquid through the venturi tube, [kg/s]
    mg : float
        Mass flow rate of gas through the venturi tube, [kg/s]
    rhol : float
        Density of liquid at `P1`, [kg/m^3]
    rhog : float
        Density of gas at `P1`, [kg/m^3]
    H : float, optional
        A surface-tension effect coefficient used to adjust for different
        fluids, (1 for a hydrocarbon liquid, 1.35 for water, 0.79 for water in
        steam) [-]

    Returns
    -------
    dP : float
        Non-recoverable pressure drop of the wet gas venturi tube, [Pa]

    Notes
    -----
    The model was first published in [1]_, and became ISO 11583 later.

    Examples
    --------
    >>> dP_Reader_Harris_Gallagher_wet_venturi_tube(D=.1, Do=.06, H=1,
    ... P1=6E6, P2=6E6-5E4, ml=5.31926/2, mg=5.31926, rhog=50.0, rhol=800.,)
    16957.43843129572

    References
    ----------
    .. [1] Reader-harris, Michael, and Tuv Nel. An Improved Model for
       Venturi-Tube Over-Reading in Wet Gas, 2009.
    .. [2] ISO/TR 11583:2012 Measurement of Wet Gas Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits.
    '''
    dP = P1 - P2
    beta = Do/D
    # Lockhart-Martinelli parameter
    X = ml/mg*(rhog/rhol)**0.5
    # Superficial gas velocity in the upstream pipe
    V = 4*mg/(rhog*pi*D**2)
    Frg = Froude_densimetric(V, L=D, rho1=rhol, rho2=rhog, heavy=False)
    Y_ratio = 1.0 - exp(-35.0*X**0.75*exp(-0.28*Frg/H))
    Y_max = 0.61*exp(-11.0*rhog/rhol - 0.045*Frg/H)
    Y = Y_max*Y_ratio
    # Y is defined as dw/dP - 0.0896 - 0.48*beta^9; invert for the
    # permanent loss dw
    rhs = -0.0896 - 0.48*beta**9
    dw = dP*(Y - rhs)
    return dw
# Venturi tube discharge coefficients as a function of Re.
# NOTE(review): the first as-cast entry was 4E5, which broke the ascending
# order an interpolation abscissa requires (4E5 > 6E4); corrected to 4E4,
# consistent with the rough-welded table's range -- confirm against the
# standard.
as_cast_convergent_venturi_Res = [4E4, 6E4, 1E5, 1.5E5]
as_cast_convergent_venturi_Cs = [0.957, 0.966, 0.976, 0.982]

machined_convergent_venturi_Res = [5E4, 1E5, 2E5, 3E5,
                     7.5E5, # 5E5 to 1E6
                     1.5E6, # 1E6 to 2E6
                     5E6] # 2E6 to 1E8
machined_convergent_venturi_Cs = [0.970, 0.977, 0.992, 0.998, 0.995, 1.000, 1.010]

rough_welded_convergent_venturi_Res = [4E4, 6E4, 1E5]
rough_welded_convergent_venturi_Cs = [0.96, 0.97, 0.98]

as_cast_convergent_entrance_machined_venturi_Res = [1E4, 6E4, 1E5, 1.5E5,
                                                    3.5E5, # 2E5 to 5E5
                                                    3.2E6] # 5E5 to 3.2E6
as_cast_convergent_entrance_machined_venturi_Cs = [0.963, 0.978, 0.98, 0.987, 0.992, 0.995]

# Constant discharge coefficients for meter types modeled without a Re
# dependence
CONE_METER_C = 0.82
ROUGH_WELDED_CONVERGENT_VENTURI_TUBE_C = 0.985
MACHINED_CONVERGENT_VENTURI_TUBE_C = 0.995
AS_CAST_VENTURI_TUBE_C = 0.984
def _differential_pressure_C_epsilon(D, D2, m, P1, P2, rho, mu, k, meter_type,
                                     taps=None):
    '''Helper function only: compute the (expansibility, discharge
    coefficient) pair for one differential pressure meter type.

    Parameters mirror `differential_pressure_meter_solver`; `D2` is the
    orifice/throat diameter (or the wedge height, for wedge meters).
    Returns the tuple ``(epsilon, C)``. Raises ValueError for an
    unrecognized `meter_type` (previously an unknown type fell through to
    an UnboundLocalError).
    '''
    if meter_type == ISO_5167_ORIFICE:
        C = C_Reader_Harris_Gallagher(D=D, Do=D2, rho=rho, mu=mu, m=m, taps=taps)
        epsilon = orifice_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
    elif meter_type == LONG_RADIUS_NOZZLE:
        epsilon = nozzle_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
        C = C_long_radius_nozzle(D=D, Do=D2, rho=rho, mu=mu, m=m)
    elif meter_type == ISA_1932_NOZZLE:
        epsilon = nozzle_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
        C = C_ISA_1932_nozzle(D=D, Do=D2, rho=rho, mu=mu, m=m)
    elif meter_type == VENTURI_NOZZLE:
        epsilon = nozzle_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
        C = C_venturi_nozzle(D=D, Do=D2)
    elif meter_type in (AS_CAST_VENTURI_TUBE, MACHINED_CONVERGENT_VENTURI_TUBE,
                        ROUGH_WELDED_CONVERGENT_VENTURI_TUBE):
        # All venturi tubes share the nozzle expansibility; only the constant
        # discharge coefficient differs by manufacturing method.
        epsilon = nozzle_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
        if meter_type == AS_CAST_VENTURI_TUBE:
            C = AS_CAST_VENTURI_TUBE_C
        elif meter_type == MACHINED_CONVERGENT_VENTURI_TUBE:
            C = MACHINED_CONVERGENT_VENTURI_TUBE_C
        else:
            C = ROUGH_WELDED_CONVERGENT_VENTURI_TUBE_C
    elif meter_type == CONE_METER:
        epsilon = cone_meter_expansibility_Stewart(D=D, Dc=D2, P1=P1, P2=P2, k=k)
        C = CONE_METER_C
    elif meter_type == WEDGE_METER:
        epsilon = orifice_expansibility_1989(D=D, Do=D2, P1=P1, P2=P2, k=k)
        C = C_wedge_meter_Miller(D=D, H=D2)
    else:
        raise ValueError('Unsupported meter type: %r' % (meter_type,))
    return epsilon, C
def differential_pressure_meter_solver(D, rho, mu, k, D2=None, P1=None, P2=None,
                                       m=None, meter_type=ISO_5167_ORIFICE,
                                       taps=None):
    r'''Calculates either the mass flow rate, the upstream pressure, the second
    pressure value, or the orifice diameter for a differential
    pressure flow meter based on the geometry of the meter, measured pressures
    of the meter, and the density, viscosity, and isentropic exponent of the
    fluid. This solves an equation iteratively to obtain the correct flow rate.

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    mu : float
        Viscosity of fluid at `P1`, [Pa*s]
    k : float
        Isentropic exponent of fluid, [-]
    D2 : float, optional
        Diameter of orifice, or venturi meter orifice, or flow tube orifice,
        or cone meter end diameter, or wedge meter fluid flow height, [m]
    P1 : float, optional
        Static pressure of fluid upstream of differential pressure meter at the
        cross-section of the pressure tap, [Pa]
    P2 : float, optional
        Static pressure of fluid downstream of differential pressure meter or
        at the prescribed location (varies by type of meter) [Pa]
    m : float, optional
        Mass flow rate of fluid through the flow meter, [kg/s]
    meter_type : str, optional
        One of ('ISO 5167 orifice', 'long radius nozzle', 'ISA 1932 nozzle',
        'venuri nozzle', 'as cast convergent venturi tube',
        'machined convergent venturi tube',
        'rough welded convergent venturi tube', 'cone meter',
        'wedge meter'), [-]
    taps : str, optional
        The orientation of the taps; one of 'corner', 'flange', 'D', or 'D/2';
        applies for orifice meters only, [-]

    Returns
    -------
    ans : float
        One of `m`, the mass flow rate of the fluid; `P1`, the pressure
        upstream of the flow meter; `P2`, the second pressure
        tap's value; and `D2`, the diameter of the measuring device; units
        of respectively, [kg/s], [Pa], [Pa], or [m]

    Notes
    -----
    See the appropriate functions for the documentation for the formulas and
    references used in each method.

    The solvers make some assumptions about the range of values answers may be
    in.

    Note that the solver for the upstream pressure uses the provided values of
    density, viscosity and isentropic exponent; whereas these values all
    depend on pressure (albeit to a small extent). An outer loop should be
    added with pressure-dependent values calculated in it for maximum accuracy.

    It would be possible to solve for the upstream pipe diameter, but there is
    no use for that functionality.

    Examples
    --------
    >>> differential_pressure_meter_solver(D=0.07366, D2=0.05, P1=200000.0,
    ... P2=183000.0, rho=999.1, mu=0.0011, k=1.33,
    ... meter_type='ISO 5167 orifice', taps='D')
    7.702338035732168

    >>> differential_pressure_meter_solver(D=0.07366, m=7.702338, P1=200000.0,
    ... P2=183000.0, rho=999.1, mu=0.0011, k=1.33,
    ... meter_type='ISO 5167 orifice', taps='D')
    0.04999999990831885
    '''
    # NOTE(review): `to_solve` is not defined anywhere in this excerpt; the
    # residual closure(s) the root finders call were presumably defined
    # locally here in the original source -- confirm before relying on this
    # function as shown.
    if m is None:
        # Solve for the mass flow rate with Newton's method; 2.81 kg/s is the
        # initial guess (presumably tuned for typical meter sizes -- verify).
        return newton(to_solve, 2.81)
    elif D2 is None:
        # Bracket the orifice/throat diameter between just under D and
        # 5E-3*D for Brent's method (bounds given high-to-low).
        return brenth(to_solve, D*(1-1E-9), D*5E-3)
    elif P2 is None:
        # Downstream pressure bracketed between just under P1 and 0.7*P1
        return brenth(to_solve, P1*(1-1E-9), P1*0.7)
    elif P1 is None:
        # Upstream pressure bracketed between just above P2 and 1.4*P2
        return brenth(to_solve, P2*(1+1E-9), P2*1.4)
    else:
        raise Exception('Solver is capable of solving for one of P2, D2, or m only.')
def differential_pressure_meter_dP(D, D2, P1, P2, C=None,
                                   meter_type=ISO_5167_ORIFICE):
    r'''Calculates the non-recoverable pressure drop of a differential
    pressure flow meter based on the geometry of the meter, measured pressures
    of the meter, and for most models the meter discharge coefficient.

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    D2 : float
        Diameter of orifice, or venturi meter orifice, or flow tube orifice,
        or cone meter end diameter, or wedge meter fluid flow height, [m]
    P1 : float
        Static pressure of fluid upstream of differential pressure meter at the
        cross-section of the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of differential pressure meter or
        at the prescribed location (varies by type of meter) [Pa]
    C : float, optional
        Coefficient of discharge of the meter; required for the orifice and
        nozzle types only, [-]
    meter_type : str, optional
        One of ('ISO 5167 orifice', 'long radius nozzle', 'ISA 1932 nozzle',
        'as cast convergent venturi tube',
        'machined convergent venturi tube',
        'rough welded convergent venturi tube', 'cone meter'), [-]

    Returns
    -------
    dP : float
        Non-recoverable pressure drop of the differential pressure flow
        meter, [Pa]

    Raises
    ------
    NotImplementedError
        For venturi nozzles and wedge meters, which have no standard
        pressure-loss formula available.
    ValueError
        If `meter_type` is not recognized.

    Notes
    -----
    See the appropriate functions for the documentation for the formulas and
    references used in each method.

    Examples
    --------
    >>> differential_pressure_meter_dP(D=0.07366, D2=0.05, P1=200000.0,
    ... P2=183000.0, meter_type='as cast convergent venturi tube')
    1788.5717754177406
    '''
    if meter_type in (ISO_5167_ORIFICE, LONG_RADIUS_NOZZLE, ISA_1932_NOZZLE):
        # Orifices and nozzles share the ISO 5167 orifice loss formula
        dP = dP_orifice(D=D, Do=D2, P1=P1, P2=P2, C=C)
    elif meter_type in (AS_CAST_VENTURI_TUBE, MACHINED_CONVERGENT_VENTURI_TUBE,
                        ROUGH_WELDED_CONVERGENT_VENTURI_TUBE):
        # All venturi tube variants share the tabulated loss-ratio model
        dP = dP_venturi_tube(D=D, Do=D2, P1=P1, P2=P2)
    elif meter_type == CONE_METER:
        dP = dP_cone_meter(D=D, Dc=D2, P1=P1, P2=P2)
    elif meter_type in (VENTURI_NOZZLE, WEDGE_METER):
        # Was `raise Exception(NotImplemented)`; NotImplementedError (an
        # Exception subclass) is the correct idiom and remains catchable by
        # existing `except Exception` handlers.
        raise NotImplementedError(
            'No standard non-recoverable pressure drop formula is available '
            'for meter type %r' % (meter_type,))
    else:
        raise ValueError('Unsupported meter type: %r' % (meter_type,))
    return dP
| 35.407688 | 108 | 0.607385 |
42ead0688f656228fb0df39a2d45d3c1dd001507 | 532 | py | Python | iaso/migrations/0115_auto_20220124_1120.py | BLSQ/iaso-copy | 85fb17f408c15e8c2d730416d1312f58f8db39b7 | [
"MIT"
] | null | null | null | iaso/migrations/0115_auto_20220124_1120.py | BLSQ/iaso-copy | 85fb17f408c15e8c2d730416d1312f58f8db39b7 | [
"MIT"
] | null | null | null | iaso/migrations/0115_auto_20220124_1120.py | BLSQ/iaso-copy | 85fb17f408c15e8c2d730416d1312f58f8db39b7 | [
"MIT"
] | 1 | 2022-03-23T16:44:12.000Z | 2022-03-23T16:44:12.000Z | # Generated by Django 3.1.14 on 2022-01-24 11:20
from django.db import migrations, models
| 22.166667 | 58 | 0.565789 |
42eb0db02ed2cdde4c36688526176ef0796f32f2 | 1,370 | py | Python | git_plan/cli/commands/delete.py | synek/git-plan | 4cf5429348a71fb5ea8110272fb89d20bfa38c38 | [
"MIT"
] | 163 | 2021-03-06T12:01:06.000Z | 2022-03-01T22:52:36.000Z | git_plan/cli/commands/delete.py | synek/git-plan | 4cf5429348a71fb5ea8110272fb89d20bfa38c38 | [
"MIT"
] | 61 | 2021-03-06T07:00:39.000Z | 2021-04-13T10:25:58.000Z | git_plan/cli/commands/delete.py | synek/git-plan | 4cf5429348a71fb5ea8110272fb89d20bfa38c38 | [
"MIT"
] | 9 | 2021-03-07T17:52:57.000Z | 2021-10-18T21:35:23.000Z | """Delete command
Author: Rory Byrne <rory@rory.bio>
"""
from typing import Any
from git_plan.cli.commands.command import Command
from git_plan.service.plan import PlanService
from git_plan.util.decorators import requires_initialized, requires_git_repository
| 31.136364 | 92 | 0.687591 |
42ebcdfbf6dd3a3f1a79b5af4ed661e3aa7d93c1 | 347 | py | Python | newspaper2/newspaper2/news/admin.py | luisfer85/newspaper2 | 8522bc29e5597113af9f9714e510548057e19315 | [
"Apache-2.0"
] | null | null | null | newspaper2/newspaper2/news/admin.py | luisfer85/newspaper2 | 8522bc29e5597113af9f9714e510548057e19315 | [
"Apache-2.0"
] | null | null | null | newspaper2/newspaper2/news/admin.py | luisfer85/newspaper2 | 8522bc29e5597113af9f9714e510548057e19315 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from newspaper2.news.models import News, Event
admin.site.register(News, NewsAdmin)
admin.site.register(Event, NewsAdmin)
| 23.133333 | 46 | 0.752161 |
42ef38196b7af8975b40694b6eb1954f2a48845e | 1,926 | py | Python | vision_module.py | seongdong2/GRADUATION | c38b13a2dd82a58bdba7673916408daa0d9b471e | [
"Unlicense"
] | 2 | 2021-09-19T13:52:05.000Z | 2021-10-04T01:09:21.000Z | vision_module.py | seongdong2/graduation | c38b13a2dd82a58bdba7673916408daa0d9b471e | [
"Unlicense"
] | 1 | 2021-10-14T06:19:44.000Z | 2021-10-14T06:19:44.000Z | vision_module.py | seongdong2/graduation | c38b13a2dd82a58bdba7673916408daa0d9b471e | [
"Unlicense"
] | null | null | null | import numpy as np
import cv2
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
net = cv2.dnn.readNetFromCaffe(
"MobileNetSSD_deploy.prototxt.txt", "MobileNetSSD_deploy.caffemodel")
BLACK_CRITERIA = 60
| 30.571429 | 106 | 0.555556 |
42efd3e55b344db382180d65f36b45d066baab96 | 618 | py | Python | riccipy/metrics/lewis_papapetrou.py | cjayross/riccipy | 2cc0ca5e1aa4af91b203b3ff2bb1effd7d2f4846 | [
"MIT"
] | 4 | 2019-08-17T04:28:06.000Z | 2021-01-02T15:19:18.000Z | riccipy/metrics/lewis_papapetrou.py | grdbii/riccipy | 2cc0ca5e1aa4af91b203b3ff2bb1effd7d2f4846 | [
"MIT"
] | 3 | 2019-08-02T04:07:43.000Z | 2020-06-18T07:49:38.000Z | riccipy/metrics/lewis_papapetrou.py | grdbii/riccipy | 2cc0ca5e1aa4af91b203b3ff2bb1effd7d2f4846 | [
"MIT"
] | null | null | null | """
Name: Lewis Papapetrou
References: Ernst, Phys. Rev., v167, p1175, (1968)
Coordinates: Cartesian
"""
from sympy import Function, Rational, exp, symbols, zeros
coords = symbols("t x y z", real=True)
variables = ()
functions = symbols("k r s w", cls=Function)
t, x, y, z = coords
k, r, s, w = functions
metric = zeros(4)
metric[0, 0] = -exp(2 * s(x, y))
metric[3, 3] = (exp(-s(x, y)) * r(x, y) - w(x, y) * exp(s(x, y))) * (
exp(-s(x, y)) * r(x, y) + w(x, y) * exp(s(x, y))
)
metric[0, 3] = metric[3, 0] = -w(x, y) * exp(2 * s(x, y))
metric[1, 2] = metric[2, 1] = Rational(1, 2) * exp(2 * k(x, y) - 2 * s(x, y))
| 30.9 | 77 | 0.553398 |
42efdd1edf57c5e0230ae9edaa82d469b2ef9074 | 2,591 | py | Python | product/admin.py | NarminSH/e-commerce-sellshop-project | a753038c8265473021e21f75b6b095bdc25f43d6 | [
"MIT"
] | null | null | null | product/admin.py | NarminSH/e-commerce-sellshop-project | a753038c8265473021e21f75b6b095bdc25f43d6 | [
"MIT"
] | null | null | null | product/admin.py | NarminSH/e-commerce-sellshop-project | a753038c8265473021e21f75b6b095bdc25f43d6 | [
"MIT"
] | null | null | null | from django.contrib import admin
from modeltranslation.admin import TranslationAdmin
from product.models import (Category, Discount, Review, Product, Properity, ProperityOption,
Image, ShoppingCart, Tag,Wishlist,Color)
admin.site.register(Review, ReviewAdmin)
admin.site.register(Category, CategoryAdmin)
| 31.987654 | 93 | 0.703589 |
42eff7b73d4d9e9bde660bd60b5a65140cceb73c | 3,009 | py | Python | aikatsu_ranking.py | yokky21/aikatsu-ranking | 10d8e4d827414120e721640d42874c26f25c4811 | [
"MIT"
] | null | null | null | aikatsu_ranking.py | yokky21/aikatsu-ranking | 10d8e4d827414120e721640d42874c26f25c4811 | [
"MIT"
] | null | null | null | aikatsu_ranking.py | yokky21/aikatsu-ranking | 10d8e4d827414120e721640d42874c26f25c4811 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.6
# vim: ts=4 sw=4
import requests, lxml.html, json, sys, os, configparser, re
from datetime import datetime
from mastodon import *
## Initializing
host = 'https://bpnavi.jp'
ua = 'Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1'
url_main = host + '/s/elec/aikatsu_p5/ranking'
url_ajax = host + '/s/elec/aikatsu_p5/item_rankings/more'
rank = []
name = []
post_summary = datetime.now().strftime("%Y-%m-%d %H:%M") + ' '
post_data = post_summary + "\n"
conf_select = 'aikatsu8'
csvfile = 'aikatsu8.csv'
inifile = configparser.ConfigParser()
inifile.read(os.path.dirname(os.path.abspath(__file__)) + '/mastodon.ini', 'UTF-8')
## Getting main page (CSRF Token)
headers = {'User-Agent': ua}
resp = requests.get(url_main, timeout=30, headers=headers)
main_html = resp.text
cookies = resp.cookies
root = lxml.html.fromstring(main_html)
csrf_token_data = root.xpath('/html/head/meta[@name="csrf-token"]')
csrf_token = csrf_token_data[0].attrib['content']
## Getting ranking data
headers = {'User-Agent': ua,
'Accept': '*/*',
'Origin': host,
'Referer': host + '/s/elec/aikatsu_p5/item_rankings',
'X-CSRF-Token': csrf_token,
'X-Requested-With': 'XMLHttpRequest'}
for page in range(4):
obj = {'page': str(page+1)}
resp = requests.post(url_ajax, timeout=30,
headers=headers, cookies=cookies, data=obj)
if resp.status_code != 200:
sys.exit()
data = json.loads(resp.text)
rank_html = data['attachmentPartial']
root = lxml.html.fromstring(rank_html)
for row in range(3):
for col in range(3):
rank_data = root.xpath('//tr['+ str(row+1) +']/td['+ str(col+1) +']/p["rank"]/font[1]')
name_data = root.xpath('//tr['+ str(row+1) +']/td['+ str(col+1) +']/p["name_vote"]/a[1]')
try:
rank.append(rank_data[0].text.strip())
name.append(name_data[0].text.strip())
except IndexError:
break
else:
continue
break
for num in range(len(rank)):
post_data += rank[num] + name[num] + "\n"
## Create CSV file
csv = re.sub(',*$', '', post_data.replace('\n',',')) + "\n"
try:
f = open(os.path.dirname(os.path.abspath(__file__)) + '/' + csvfile, mode='a', encoding='utf-8')
f.write(csv)
f.close()
except:
pass
# print(post_data)
# print(post_summary)
# sys.exit()
## Posting to Mastodon
mastodon = Mastodon(client_id = inifile.get(conf_select, 'id'),
client_secret = inifile.get(conf_select, 'secret'),
access_token = inifile.get(conf_select, 'token'),
api_base_url = inifile.get(conf_select, 'url'))
# mastodon.toot(post_data)
mastodon.status_post(
post_data,
spoiler_text=post_summary)
| 33.065934 | 154 | 0.599535 |
42f0f632b463ffb1c555335ca23b1393342b2700 | 1,091 | py | Python | L11-LP-farm-example.py | jdherman/eci273 | 86828b2e075258afdd528e86295170e162cc99e3 | [
"MIT"
] | 10 | 2018-12-23T02:59:06.000Z | 2021-12-07T11:55:21.000Z | L11-LP-farm-example.py | jdherman/eci273 | 86828b2e075258afdd528e86295170e162cc99e3 | [
"MIT"
] | null | null | null | L11-LP-farm-example.py | jdherman/eci273 | 86828b2e075258afdd528e86295170e162cc99e3 | [
"MIT"
] | 7 | 2018-12-21T02:06:51.000Z | 2021-12-11T02:36:47.000Z | import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
# Lecture 11 2-user water allocation example
# First approach: scipy.optimize.linprog
# need matrix form: minimize c^T * x, subject to Ax <= b
c = [-5, -3] # negative to maximize
A = [[10,5], [1,1.5], [2,2], [-1,0], [0,-1]]
b = [20, 3, 4.5, 0, 0]
sol = optimize.linprog(c, A, b)
print('Scipy Output:')
print(sol)
# Second approach: cxvpy
# this import is easy but also could be confusing
# because it overwrites common functions (sum, mean, etc) with cvxpy functions
# from cvxpy import *
# xc = Variable(name='xc')
# xb = Variable(name='xb')
# pc = 5
# pb = 3
# obj = Maximize(pc*xc + pb*xb)
# constraints = [10*xc + 5*xb <= 20,
# xc + 1.5*xb <= 3,
# 2*xc + 2*xb <= 4.5,
# xc >= 0,
# xb >= 0]
# prob = Problem(obj, constraints)
# prob.solve()
# print('\ncvxpy Output:')
# print('Objective = %f' % obj.value)
# print('xc = %f' % xc.value)
# print('xb = %f' % xb.value)
# for c in constraints:
# print('Dual (%s) = %f' % (c, c.dual_value))
| 23.717391 | 78 | 0.582035 |
42f12d3200ce4d7e07aaba09b537e0ff03fb831a | 1,471 | py | Python | prev_ob_models/exclude/GilraBhalla2015/synapses/synapseConstantsMinimal.py | fameshpatel/olfactorybulb | 8d7a644b4560309ef177c0590ff73ed4c2432604 | [
"MIT"
] | null | null | null | prev_ob_models/exclude/GilraBhalla2015/synapses/synapseConstantsMinimal.py | fameshpatel/olfactorybulb | 8d7a644b4560309ef177c0590ff73ed4c2432604 | [
"MIT"
] | null | null | null | prev_ob_models/exclude/GilraBhalla2015/synapses/synapseConstantsMinimal.py | fameshpatel/olfactorybulb | 8d7a644b4560309ef177c0590ff73ed4c2432604 | [
"MIT"
] | null | null | null | ## This file used to be programmatically generated for converging to best fit Activity Dependent Inhibition curve.
## But that doesn't give decent result, so set by hand.
import sys
sys.path.extend(["../networks"])
## do not import networkConstants as that imports this file, and it's circular then!!!
from networkConstantsMinimal import *
## STRONG_SYNAPSES is defined in networkConstants, but can't import it due to reason above,
## so duplicating the directed and frac_directed check below again.
## For STRONG_SYNAPSES i.e differential connectivity set mitral -> granule base excitation to 0.2nS
## else, for random / uniform connectivity, set the base value to 0.3nS
## This is to get the same amount of activity dependent inhibition (Arevian et al)
## for the different network connectivities...
if directed and frac_directed>0.0:
mitral_granule_AMPA_Gbar = 0.2e-9 # Siemens
granule_mitral_GABA_Gbar = 1.0e-9#12.0e-09 # Siemens
else: #### confirm ADI for 0% frac_directed setting below
## 0.3e-9 for 3% frac_directed, _mod mitral,
## but 0.2e-9 for 1% frac_directed, _mod_spikeinit mitral
mitral_granule_AMPA_Gbar = 0.2e-9#0.3e-9 # Siemens
granule_mitral_GABA_Gbar = 1.5e-9#12.0e-09 # Siemens
## For the _mod mitral with _spikeinit,
## self Gbar below must be reduced to 5 pS, else huge self-inhibition
## For the _mod mitral, 50 pS is fine, it doesn't get affected much by inhibition!
self_mitral_GABA_Gbar = 5e-12#5e-12#50e-12 # Siemens
| 54.481481 | 114 | 0.755948 |
42f674ee12a896bdc6fefab4b830b689f09ef5e4 | 499 | py | Python | agoge/__init__.py | Nintorac/agoge | 0abe66e41e4fcd865854cc009374e2a52ef5671c | [
"MIT"
] | null | null | null | agoge/__init__.py | Nintorac/agoge | 0abe66e41e4fcd865854cc009374e2a52ef5671c | [
"MIT"
] | null | null | null | agoge/__init__.py | Nintorac/agoge | 0abe66e41e4fcd865854cc009374e2a52ef5671c | [
"MIT"
] | null | null | null | from .utils import defaults_f
DEFAULTS = defaults_f({
'ARTIFACTS_ROOT': '~/agoge/artifacts',
'TQDM_DISABLED': False,
'TRIAL_ROOT': 'Worker',
'BUCKET': 'nintorac_model_serving',
'BASE_URL': 'https://github.com/Nintorac/NeuralDX7-weights/raw/master'
})
from .data_handler import DataHandler
from .model import AbstractModel
from .solver import AbstractSolver
from .train_worker import TrainWorker
from .inference_worker import InferenceWorker
from .lmdb_helper import LMDBDataset | 31.1875 | 74 | 0.771543 |
42f8e8791025cfd39e8878d6744a088d9902c8a3 | 1,206 | py | Python | test/variable_type.py | bourne7/demo-python | 0c4dd12475bcada4e5826b7117bd4c4bdcedfd9f | [
"MIT"
] | null | null | null | test/variable_type.py | bourne7/demo-python | 0c4dd12475bcada4e5826b7117bd4c4bdcedfd9f | [
"MIT"
] | null | null | null | test/variable_type.py | bourne7/demo-python | 0c4dd12475bcada4e5826b7117bd4c4bdcedfd9f | [
"MIT"
] | null | null | null |
# *
# **
# Python3
if __name__ == '__main__':
print('Start test as main.')
show_type()
test_mutable()
| 19.451613 | 74 | 0.543118 |
42f979541235624972aa7beb6b4040036e613c33 | 951 | py | Python | scrapystsytem/spiders/doubanmoviespider.py | mezhou887/ScrapySystem2017 | 888ac42bba36b541845244596db1644e332bf291 | [
"Apache-2.0"
] | null | null | null | scrapystsytem/spiders/doubanmoviespider.py | mezhou887/ScrapySystem2017 | 888ac42bba36b541845244596db1644e332bf291 | [
"Apache-2.0"
] | null | null | null | scrapystsytem/spiders/doubanmoviespider.py | mezhou887/ScrapySystem2017 | 888ac42bba36b541845244596db1644e332bf291 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
from scrapystsytem.misc.commonspider import CommonSpider
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor as sle
logger = logging.getLogger(__name__) | 31.7 | 89 | 0.648791 |
42faa478c98edc7e43520c1e76c93b612e769679 | 560 | py | Python | hurricane/base.py | ericflo/hurricane | c192b711b2b1c06a386d1a1a47f538b13a659cde | [
"BSD-3-Clause"
] | 8 | 2015-02-21T17:59:41.000Z | 2021-01-07T20:57:39.000Z | hurricane/base.py | ericflo/hurricane | c192b711b2b1c06a386d1a1a47f538b13a659cde | [
"BSD-3-Clause"
] | null | null | null | hurricane/base.py | ericflo/hurricane | c192b711b2b1c06a386d1a1a47f538b13a659cde | [
"BSD-3-Clause"
] | 2 | 2016-07-09T16:06:23.000Z | 2016-08-02T18:44:20.000Z | import uuid
| 23.333333 | 59 | 0.542857 |
42fb56f78da3eca5f6dfd2e9de1258342401faa4 | 469 | py | Python | nbexchange/handlers/__init__.py | jgwerner/nbexchange | 510aa8fdff04b0873cec5dd75d3dfb0eac820c1b | [
"BSD-3-Clause"
] | 7 | 2020-04-30T20:16:18.000Z | 2021-09-11T20:31:51.000Z | nbexchange/handlers/__init__.py | jgwerner/nbexchange | 510aa8fdff04b0873cec5dd75d3dfb0eac820c1b | [
"BSD-3-Clause"
] | 86 | 2020-03-06T15:34:55.000Z | 2022-03-07T11:58:06.000Z | nbexchange/handlers/__init__.py | jgwerner/nbexchange | 510aa8fdff04b0873cec5dd75d3dfb0eac820c1b | [
"BSD-3-Clause"
] | 1 | 2020-07-25T23:04:51.000Z | 2020-07-25T23:04:51.000Z | from nbexchange.handlers.assignment import Assignment, Assignments
from nbexchange.handlers.collection import Collection, Collections
from nbexchange.handlers.feedback import FeedbackHandler
from nbexchange.handlers.pages import HomeHandler
from nbexchange.handlers.submission import Submission, Submissions
# Request-handler classes this package exposes by default; presumably
# registered with the nbexchange web application at startup (TODO confirm
# against the app setup, which is outside this module).
default_handlers = [
    Assignment,
    Assignments,
    Collection,
    Collections,
    Submission,
    Submissions,
    HomeHandler,
    FeedbackHandler,
]
| 27.588235 | 66 | 0.803838 |
42fe26b4d9e2cf96a145d2ebd3a33d07d37ab54e | 2,476 | py | Python | 09/09b.py | thejoeejoee/aoc-2021 | 1ae7650aea42b5fbb60e891687cf7bc84c81bd66 | [
"MIT"
] | 1 | 2021-12-01T17:43:38.000Z | 2021-12-01T17:43:38.000Z | 09/09b.py | thejoeejoee/aoc-2021 | 1ae7650aea42b5fbb60e891687cf7bc84c81bd66 | [
"MIT"
] | null | null | null | 09/09b.py | thejoeejoee/aoc-2021 | 1ae7650aea42b5fbb60e891687cf7bc84c81bd66 | [
"MIT"
] | null | null | null | #!/bin/env python3
import operator
from _operator import attrgetter, itemgetter
from collections import defaultdict, Counter
from functools import reduce, partial
from itertools import chain
from aocd import get_data
EMPTY = type('EMPTY', (int,), dict(__repr__=(f := lambda s: 'EMPTY'), __str__=f))(10)
heights = get_data().strip().splitlines()
HEIGHT = len(heights) + 2
WIDTH = len(heights[0]) + 2
def main():
    """Solve AoC 2021 day 9 part 2: return the product of the sizes of
    the three largest basins.

    The height map is flattened into a 1-D tuple with a full border of
    EMPTY sentinels so neighbour lookups never fall off the grid.
    """
    data = tuple(chain(
        (EMPTY for _ in range(WIDTH)),
        *(((EMPTY,) + tuple(int(c) for c in line) + (EMPTY,)) for line in heights),
        (EMPTY for _ in range(WIDTH)),
    ))
    # basin low point -> number of cells flooded from it
    basins = Counter()
    # find_low_points / get_neighbors are defined elsewhere in this file.
    for low_point in find_low_points(data):
        known = set()
        to_explore = {low_point}
        # not BFS, dot DFS? just JoeFS
        while to_explore:
            exploring = to_explore.pop()
            known.add(exploring)
            r, c = exploring
            current = data[r * WIDTH + c]
            for neighbor, level in get_neighbors(data, exploring):
                # NOTE(review): `level in known` compares a height against a
                # set of coordinates and is always False -- likely meant
                # `neighbor in known`. Appears harmless (the strict
                # `level > current` test prevents cycling) but does
                # redundant work; confirm before changing.
                if level in known:
                    continue
                # Flood uphill, stopping at height 9 and at the border.
                if level > current and level not in (EMPTY, 9):
                    to_explore.add(neighbor)
            basins[low_point] = len(known)
    return reduce(
        operator.mul,
        map(itemgetter(1), basins.most_common(3))
    )
if __name__ == '__main__':
print(main())
| 26.340426 | 91 | 0.560582 |
42ff0390633d326bb027aa10d5b16efa20802940 | 1,343 | py | Python | tests/test_window.py | yogeshkumarpilli/detectron2 | f4f276dc8239b2c5a1bbbf6ed234acd25c75a522 | [
"Apache-2.0"
] | null | null | null | tests/test_window.py | yogeshkumarpilli/detectron2 | f4f276dc8239b2c5a1bbbf6ed234acd25c75a522 | [
"Apache-2.0"
] | null | null | null | tests/test_window.py | yogeshkumarpilli/detectron2 | f4f276dc8239b2c5a1bbbf6ed234acd25c75a522 | [
"Apache-2.0"
] | 3 | 2021-12-17T04:28:02.000Z | 2022-02-22T18:18:03.000Z | from detectron2.engine import DefaultPredictor
from detectron2.data import MetadataCatalog
from detectron2.config import get_cfg
from detectron2.utils.visualizer import ColorMode, Visualizer
from detectron2 import model_zoo
import cv2
import numpy as np
import requests
# Load an image
res = requests.get("https://thumbor.forbes.com/thumbor/fit-in/1200x0/filters%3Aformat%28jpg%29/https%3A%2F%2Fspecials-images.forbesimg.com%2Fimageserve%2F5f15af31465263000625ce08%2F0x0.jpg")
image = np.asarray(bytearray(res.content), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
config_file = 'COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml'
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(config_file))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.75 # Threshold
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(config_file)
cfg.MODEL.DEVICE = "cuda" # cpu or cuda
# Create predictor
predictor = DefaultPredictor(cfg)
# Make prediction
output = predictor(image)
print(output)
v = Visualizer(image[:, :, ::-1],
scale=0.8,
metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
instance_mode=ColorMode.IMAGE
)
v = v.draw_instance_predictions(output["instances"].to("cpu"))
cv2.imshow('images', v.get_image()[:, :, ::-1])
cv2.waitKey(0) | 37.305556 | 191 | 0.737156 |
42ff644535c1107deafd0fab424dd9161db0897b | 9,920 | py | Python | hydra/cli.py | albertoa/hydra | 8161e75829e4e76cb91ce516bbf03c258a87ce9e | [
"Apache-2.0"
] | 28 | 2020-11-05T16:04:51.000Z | 2021-02-16T22:58:10.000Z | hydra/cli.py | albertoa/hydra | 8161e75829e4e76cb91ce516bbf03c258a87ce9e | [
"Apache-2.0"
] | 43 | 2020-11-06T19:21:39.000Z | 2021-02-25T19:04:42.000Z | hydra/cli.py | albertoa/hydra | 8161e75829e4e76cb91ce516bbf03c258a87ce9e | [
"Apache-2.0"
] | 4 | 2020-11-06T08:54:57.000Z | 2021-01-18T03:26:00.000Z | import os
import yaml
import json
import click
import hydra.utils.constants as const
from hydra.utils.git import check_repo
from hydra.utils.utils import dict_to_string, inflate_options
from hydra.cloud.local_platform import LocalPlatform
from hydra.cloud.fast_local_platform import FastLocalPlatform
from hydra.cloud.google_cloud_platform import GoogleCloudPlatform
from hydra.cloud.aws_platform import AWSPlatform
from hydra.version import __version__
| 44.684685 | 200 | 0.674698 |
6e001fac10af046d03ee8754375ce8c560a47171 | 773 | py | Python | _estudoPython_solid/requests/request.py | carlos-freitas-gitHub/Python_Analise_De_Dados | 74a72772179f45684f4f12acd4ad607c99ed8107 | [
"Apache-2.0"
] | null | null | null | _estudoPython_solid/requests/request.py | carlos-freitas-gitHub/Python_Analise_De_Dados | 74a72772179f45684f4f12acd4ad607c99ed8107 | [
"Apache-2.0"
] | null | null | null | _estudoPython_solid/requests/request.py | carlos-freitas-gitHub/Python_Analise_De_Dados | 74a72772179f45684f4f12acd4ad607c99ed8107 | [
"Apache-2.0"
] | null | null | null | '''requests
biblioteca beaultiful solp para pginas web.
'''
from builtins import print
import requests
'''compartilhando o cabeario http, vem junto com requesio
cabecalho = {'User-agent': 'Windows 12',
'Referer': 'https://google.com.br'}
meus_cookies = {'Ultima-visita': '10-10-2020'}
meus_dados = {'Username': 'Guigui',
'Password': '12345'}
headers=cabecalho, cookies=meus_cookies, data=meus_dados
'''
# Defaults so the summary below still prints when the request fails.
# (Bug fix: `status`/`text` were previously only bound inside the try
# block, so any request error crashed with NameError at the print calls.)
status = None
text = ''
try:
    # Send the POST request (no payload here; pass data=... to submit fields).
    requisicao = requests.post('http://uniesp.edu.br/sites/maua/')
    status = requisicao.status_code
    text = requisicao.text
except Exception as err:
    print('Erro', err)
print('+=' *30)
print('Status:', status)
print('+=' *30)
print(text)
| 28.62963 | 67 | 0.641656 |
6e01596134dc9f1610c5e8f76e5d30c43961114c | 23,738 | py | Python | Tac Tac Toe/ttt_mobile_1080p.py | promitbasak/TicTacToe-Pygame | 6114cee9498d70942f48a0b6eb360f02bcf72df0 | [
"MIT"
] | 3 | 2020-06-15T13:50:51.000Z | 2021-08-18T05:10:17.000Z | Tac Tac Toe/ttt_mobile_1080p.py | promitbasak/TicTacToe-Pygame | 6114cee9498d70942f48a0b6eb360f02bcf72df0 | [
"MIT"
] | null | null | null | Tac Tac Toe/ttt_mobile_1080p.py | promitbasak/TicTacToe-Pygame | 6114cee9498d70942f48a0b6eb360f02bcf72df0 | [
"MIT"
] | 1 | 2020-06-15T13:52:49.000Z | 2020-06-15T13:52:49.000Z | import pygame
import random
import sys
# Scaling factors -- not referenced in the visible code; TODO confirm use.
Xfactor = 1.35
Yfactor = 3.2
CELLS = 9
PLAYERS = 2
CORNERS = [1, 3, 7, 9]          # cell numbers of the four corner squares
NON_CORNERS = [2, 4, 6, 8]      # cell numbers of the four edge squares
# Board state: cell number (1-9) -> 0 empty, 1 X, 2 O.
board = {}
for i in range(9):
    board[i + 1] = 0
# Printable representation of each mark value.
signs = {0: " ", 1: "X", 2: "O"}
winner = None
# Top-left pixel origin of the grid image on screen.
boardX = 10
boardY = 464
icon = pygame.image.load("ttticon2.png")
pygame.display.set_icon(icon)
fpsClock = pygame.time.Clock()
# Pre-loaded sprites for the board, the marks and the UI screens.
boardimg = pygame.image.load("board3dr.png")
crossimg = pygame.image.load("cross3dr.png")
roundimg = pygame.image.load("cuber.png")
bannerimg = pygame.image.load("tttbannerr.png")
winimg = pygame.image.load("winsmallr.png")
loseimg = pygame.image.load("losesmallr.png")
drawimg = pygame.image.load("drawsmallr.png")
markerimg = pygame.image.load("markerr.png")
diffimg = pygame.image.load("difficultyr.png")
backimg = pygame.image.load("backr.png")
clickimg = pygame.image.load("clickr.png")
def getemptycells():
    """Return the numbers (1-9) of all board cells that are still unmarked."""
    empty = []
    for cell in range(1, 10):
        if not board[cell]:
            empty.append(cell)
    return empty
def cellvalidator(cell):
    """Return *cell* unchanged if that board position is free.

    Raises a bare Exception when the cell is already occupied --
    presumably the caller catches it and re-prompts (TODO confirm;
    callers are not visible here).
    """
    if board[cell] == 0:
        return cell
    else:
        # print(f"Cell {cell} is occupied!!!")
        raise Exception()
def getadjacentcorners(cell):
    """Return the two corners that are not *cell* and not diagonally
    opposite it (the mirror corner is CELLS + 1 - cell).

    Like the original, raises ValueError if *cell* is not a corner.
    """
    remaining = list(CORNERS)
    for taken in (cell, CELLS + 1 - cell):
        remaining.remove(taken)
    return remaining
def getadjacentcells(cell):
    """Map a corner cell to two edge cells via the original arithmetic.

    For corners 1, 3 and 9 the result is the pair of grid neighbours;
    NOTE(review): for cell 7 it yields [8, 6], where 6 is not actually
    adjacent -- behavior preserved as-is, confirm the intent upstream.
    """
    if cell < 5:
        neighbours = (cell * 2, 5 - cell)
    else:
        neighbours = (15 - cell, cell - 1)
    return list(neighbours)
def solve():
    """Scan the board for a finished game.

    Returns the winning mark (1 or 2) if any row, column or diagonal is
    filled with the same non-zero mark, -1 if the board is full with no
    winner (draw), or None while the game is still in progress.
    """
    for i in range(3):
        # Row i: cells i*3+1 .. i*3+3.
        if board[i * 3 + 1] == board[i * 3 + 2] == board[i * 3 + 3] and board[i * 3 + 1] != 0:
            return board[i * 3 + 1]
        # Column i: cells i+1, i+4, i+7.
        elif board[i + 1] == board[i + 4] == board[i + 7] and board[i + 1] != 0:
            return board[i + 1]
    # The two diagonals.
    if board[1] == board[5] == board[9] and board[1] != 0:
        return board[1]
    elif board[3] == board[5] == board[7] and board[3] != 0:
        return board[3]
    try:
        # .index(0) raises ValueError when no empty cell remains -> draw.
        list(board.values()).index(0)
    except:
        return -1
    return None
def marker(cell, mark):
    """Place *mark* (1 for X, 2 for O) into board cell *cell* (1-9).

    Raises Exception when the cell number is out of range or the cell
    is already occupied; otherwise mutates the module-level ``board``.
    """
    # Bug fix: the original guard was ``if 1 > cell > 10`` -- that chains to
    # ``cell < 1 and cell > 10`` which is always False, so out-of-range
    # cells fell through and crashed with KeyError on the board lookup
    # instead of raising the intended Exception.
    if cell < 1 or cell > 9:
        print(f"Cell: {cell} not exist!!!")
        raise Exception()
    elif board[cell] != 0:
        print(f"Cell: {cell} is occupied!!!")
        raise Exception()
    else:
        board[cell] = mark
def getinput():
    """Block until the player selects a free cell and return its number.

    Polls pygame events: closing the window exits the program; mouse
    clicks and key presses are translated through keytonum(), which
    yields a truthy cell number only when a free cell was chosen.
    """
    begin = True
    key = None
    while begin:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.MOUSEBUTTONUP:
                key = keytonum(pygame.mouse.get_pos())
                if key:
                    begin = False
            if event.type == pygame.KEYDOWN:
                key = keytonum(event.key)
                if key:
                    begin = False
        pygame.display.update()
        showboard()
    return key
def getwinner(winner):
    """Show the end-of-game banner, wait for a click/keypress, then
    return a fresh (board, winner) pair for the next round.

    *winner* is -1 for a draw, otherwise a player's mark; the banner
    depends on whether it matches the human player's mark. Note that a
    NEW local board dict is built here -- the caller rebinds the
    module-level ``board`` with the returned value.
    """
    begin = True
    showboard()
    while begin:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.MOUSEBUTTONUP:
                begin = False
            if event.type == pygame.KEYDOWN:
                begin = False
        if winner == -1:
            screen.blit(drawimg, (245, 15))
        elif winner == human.mark:
            screen.blit(winimg, (245, 15))
        else:
            screen.blit(loseimg, (245, 15))
        screen.blit(clickimg, (332, 1600))
        pygame.display.update()
        fpsClock.tick(30)
    # Reset state for the next game.
    board = {}
    for i in range(9):
        board[i + 1] = 0
    winner = None
    return board, winner
def headline():
    """Show the title banner, then the X/O marker-selection screen.

    Returns the player's chosen mark: 1 for X, 2 for O (picked by
    clicking one of the on-screen buttons or pressing the 1/2 keys).
    """
    begin1 = True
    begin2 = True
    mark = None
    # First screen: splash banner, dismissed by any click or key press.
    while begin1:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.MOUSEBUTTONUP:
                begin1 = False
            if event.type == pygame.KEYDOWN:
                begin1 = False
        screen.blit(bannerimg, (0, 0))
        pygame.display.update()
        fpsClock.tick(30)
    # Second screen: pick X (left button / key 1) or O (right button / key 2).
    while begin2:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.MOUSEBUTTONUP:
                x, y = pygame.mouse.get_pos()
                if 212 <= x <= 552 and 924 <= y <= 1376:
                    mark = 1
                    begin2 = False
                if 584 <= x <= 916 and 924 <= y <= 1376:
                    mark = 2
                    begin2 = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_1:
                    mark = 1
                    begin2 = False
                if event.key == pygame.K_2:
                    mark = 2
                    begin2 = False
        screen.blit(markerimg, (0, 0))
        pygame.display.update()
        fpsClock.tick(30)
    return mark
def init():
    """Show the difficulty-selection screen and return the choice (1-4).

    The four on-screen buttons (or keys 1-4) map to the AI levels:
    1 easy, 2 medium, 3 hard, 4 deadly.
    """
    begin = True
    diff = None
    while begin:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.MOUSEBUTTONUP:
                # Button hit boxes: two columns x two rows.
                x, y = pygame.mouse.get_pos()
                if 148 <= x <= 548 and 1008 <= y <= 1288:
                    diff = 1
                    begin = False
                if 592 <= x <= 988 and 1008 <= y <= 1288:
                    diff = 2
                    begin = False
                if 148 <= x <= 548 and 1376 <= y <= 1648:
                    diff = 3
                    begin = False
                if 592 <= x <= 988 and 1376 <= y <= 1648:
                    diff = 4
                    begin = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_1:
                    diff = 1
                    begin = False
                if event.key == pygame.K_2:
                    diff = 2
                    begin = False
                if event.key == pygame.K_3:
                    diff = 3
                    begin = False
                if event.key == pygame.K_4:
                    diff = 4
                    begin = False
        screen.blit(diffimg, (0, 0))
        pygame.display.update()
        fpsClock.tick(30)
    return diff
def showboard():
    """Redraw the background, the grid image and every placed mark."""
    screen.blit(backimg, (0, 0))
    screen.blit(boardimg, (boardX, boardY))
    for i in range(1, 10):
        if board[i]:
            putmark(i, board[i])
    pygame.display.update()
def putmark(num, sign):
    """Blit the X (sign == 1) or O (sign == 2) sprite into cell *num*.

    The cell's pixel origin is derived from the board origin: 365 px
    per column, 500 px per row, plus a 30 px inset.
    """
    markX = boardX + 30 + (num - 1) % 3 * 365
    markY = boardY + 30 + (num - 1) // 3 * 500
    if sign == 1:
        screen.blit(crossimg, (markX, markY))
    elif sign == 2:
        screen.blit(roundimg, (markX, markY))
    else:
        print("Invalid Sign!")
def keytonum(key):
    """Map a mouse position (tuple) or a pygame key constant to a free
    board cell number (1-9).

    Returns None when the input misses the grid, names an occupied
    cell, or is not a digit key.
    """
    if isinstance(key, tuple):
        x, y = key
        # Pixel bounds of the three columns and three rows of the grid.
        col_bounds = ((24, 348), (400, 696), (748, 1044))
        row_bounds = ((484, 900), (968, 1360), (1432, 1840))
        for row, (top, bottom) in enumerate(row_bounds):
            if not top <= y <= bottom:
                continue
            for col, (left, right) in enumerate(col_bounds):
                if left <= x <= right:
                    cell = row * 3 + col + 1
                    if not board[cell]:
                        return cell
        return None
    # Keyboard input: digit keys 1-9 select the matching cell.
    digit_keys = {
        pygame.K_1: 1, pygame.K_2: 2, pygame.K_3: 3,
        pygame.K_4: 4, pygame.K_5: 5, pygame.K_6: 6,
        pygame.K_7: 7, pygame.K_8: 8, pygame.K_9: 9,
    }
    cell = digit_keys.get(key)
    if cell is not None and not board[cell]:
        return cell
pygame.init()
screen = pygame.display.set_mode((1080, 1920))
pygame.display.set_caption("TicTacToe", "tic-tac-toe.png")
screen.fill((20, 50, 80))
key = None
running = True
# Title banner + marker selection: 1 = X, 2 = O.
mark = headline()
while running:
    # One iteration of the outer loop is one full game round.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
            running2 = False
            sys.exit()
    diff = init()
    for i in range(100):
        showboard()
    running2 = True
    # user/easy/medium/hard/deadly are player classes defined elsewhere in
    # this file; the computer gets the opposite mark (mark % 2 + 1).
    human = user("You", mark)
    if diff == 1:
        comp = easy("Computer", mark % 2 + 1)
    elif diff == 2:
        comp = medium("Computer", mark % 2 + 1)
    elif diff == 3:
        comp = hard("Computer", mark % 2 + 1)
    else:
        comp = deadly("Computer", mark % 2 + 1)
    # Randomly decide who moves first.
    if random.randint(0, 1):
        players = [human, comp]
    else:
        players = [comp, human]
    while running2:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
                running2 = False
                sys.exit()
        showboard()
        for p in players:
            pygame.display.update()
            showboard()
            # Each player object supplies its chosen cell via getturn().
            marker(p.getturn(), p.mark)
            winner = solve()
            if winner:
                break
        if winner:
            # Game over: show the result screen and reset the board.
            running2 = False
            board, winner = getwinner(winner)
        pygame.display.update()
        showboard()
    pygame.display.update()
    fpsClock.tick(30)
6e0596f60ea2aacca4a2e542940c06bbc4f394b7 | 25,458 | py | Python | utils/dataset_utils.py | Daipuwei/YOLO-tf2 | 1b2e7133c99507573f419c8a367a8dba4abeae5b | [
"MIT"
] | null | null | null | utils/dataset_utils.py | Daipuwei/YOLO-tf2 | 1b2e7133c99507573f419c8a367a8dba4abeae5b | [
"MIT"
] | null | null | null | utils/dataset_utils.py | Daipuwei/YOLO-tf2 | 1b2e7133c99507573f419c8a367a8dba4abeae5b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2021/9/18 11:23
# @Author : DaiPuWei
# @Email : 771830171@qq.com
# @File : dataset_utils.py
# @Software: PyCharm
"""
YOLO
"""
import cv2
import numpy as np
from PIL import Image
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
from utils.model_utils import get_classes
from utils.model_utils import get_anchors
def resize_keep_aspect_ratio(image_src, dst_size, value=[128, 128, 128]):
    '''
    Pad an OpenCV image (H, W, C ndarray) to a square with a constant
    border colour, then resize it to dst_size.
    Args:
        image_src: source image as an ndarray of shape (H, W, C)
        dst_size: target size, passed straight to cv2.resize and thus
            interpreted as (width, height) -- NOTE(review): the
            (dst_h, dst_w) unpacking below suggests (height, width) was
            intended; identical for the square sizes typically used,
            TODO confirm for non-square targets
        value: border colour for the padding, one value per channel
    Returns:
        the padded and resized image
    '''
    src_h, src_w, _ = np.shape(image_src)
    dst_h, dst_w = dst_size
    # NOTE(review): dst_h/dst_w are never used after this unpacking.
    if src_h < src_w:  # wide image: pad top and bottom to make it square
        delta = src_w - src_h  # total padding needed
        top = int(delta // 2)
        down = delta - top
        left = 0
        right = 0
    else:  # tall image: pad left and right
        delta = src_h - src_w  # total padding needed
        top = 0
        down = 0
        left = int(delta // 2)
        right = delta - left
    borderType = cv2.BORDER_CONSTANT
    image_dst = cv2.copyMakeBorder(image_src, top, down, left, right, borderType, None, value)
    image_dst = cv2.resize(image_dst, dst_size)
    return image_dst
def letterbox_image(image, size):
    '''
    Resize a PIL image to *size* while keeping its aspect ratio,
    filling the leftover area with grey (128, 128, 128).
    Args:
        image: source PIL.Image
        size: target (width, height)
    Returns:
        a new RGB PIL.Image of exactly *size* with the scaled source
        image centred in it
    '''
    iw, ih = image.size
    w, h = size
    # Largest scale that fits the whole image inside the target box.
    scale = min(w/iw, h/ih)
    nw = int(iw*scale)
    nh = int(ih*scale)
    image = image.resize((nw,nh), Image.BICUBIC)
    new_image = Image.new('RGB', size, (128,128,128))
    # Paste centred; the uncovered borders keep the grey fill.
    new_image.paste(image, ((w-nw)//2, (h-nh)//2))
    return new_image
6e080db2602e0c90c09249fc8d6eeaeabeabd005 | 750 | py | Python | caesar_cipher.py | DomirScire/Basic_Ciphers | 7425b306f8d0ce9ceb5ba3a59e73a52892bee5ca | [
"MIT"
] | 1 | 2021-03-31T23:29:00.000Z | 2021-03-31T23:29:00.000Z | caesar_cipher.py | DomirScire/Ciphers_Py | 127c82b14c9bd5595f924bc267b6bf238f654c22 | [
"MIT"
] | null | null | null | caesar_cipher.py | DomirScire/Ciphers_Py | 127c82b14c9bd5595f924bc267b6bf238f654c22 | [
"MIT"
] | null | null | null | import string
if __name__ == "__main__":
print(caesar_cipher("meetMeAtOurHideOutAtTwo", 10))
print(caesar_cipher("woodWoKdYebRsnoYedKdDgy", 10, decrypt=True))
| 27.777778 | 70 | 0.630667 |
6e0977041deef6fa7bf74e2fadd3b0a89bcf73e3 | 6,953 | py | Python | hume/hume/app.py | megacorpincorporated/HOME | 0eb8009b028fabf64abb03acc0a081b2b8207eb0 | [
"MIT"
] | 1 | 2018-02-18T15:51:57.000Z | 2018-02-18T15:51:57.000Z | hume/hume/app.py | megacorpincorporated/HOME | 0eb8009b028fabf64abb03acc0a081b2b8207eb0 | [
"MIT"
] | null | null | null | hume/hume/app.py | megacorpincorporated/HOME | 0eb8009b028fabf64abb03acc0a081b2b8207eb0 | [
"MIT"
] | null | null | null | import json
import logging
from app.abc import StartError
from app.device import DeviceApp, DeviceMessage
from app.device.models import Device
from app.hint import HintApp
from app.hint.defs import HintMessage
from util.storage import DataStore
LOGGER = logging.getLogger(__name__)
| 38.414365 | 78 | 0.576154 |
6e0c62be30176a8297c1bf84eb84e82bffd0d9ee | 3,281 | py | Python | scripts/generate_demo_requests.py | onedata/onezone-gui-plugin-ecrin | 2bf38b0994d1c0bf8148b1b8c5990bcf0aa4a62b | [
"MIT"
] | null | null | null | scripts/generate_demo_requests.py | onedata/onezone-gui-plugin-ecrin | 2bf38b0994d1c0bf8148b1b8c5990bcf0aa4a62b | [
"MIT"
] | null | null | null | scripts/generate_demo_requests.py | onedata/onezone-gui-plugin-ecrin | 2bf38b0994d1c0bf8148b1b8c5990bcf0aa4a62b | [
"MIT"
] | null | null | null | #
# Author: Micha Borzcki
#
# This script creates empty files with study and data object metadata in
# specified space and Oneprovider. It uses JSON files located in directories
# `studies_dir` (= studies) and `data_object_dir` (= data_objects). Positional
# arguments:
# 1. Oneprovider location (IP address or domain).
# 2. Space name (it must be supported by passed Oneprovider).
# 3. Access token (can be obtained via Onezone).
# 4. Number of files metadata to upload ("100" means 100 studies and 100 data
# objects)
# 5. Name of a directory (in space), where files with metadata should be
# uploaded. Warning: if that directory already exists, it will be removed.
# Example of usage:
# python3 generate_demo_requests.py 172.17.0.16 s1 MDAzMvY...ZlOGCg 1000 ecrin1
#
# Example studies and data objects can be found at
# https://github.com/beatmix92/ct.gov_updated
#
import os
import sys
import subprocess
import json
from natsort import natsorted
provider = sys.argv[1]
space = sys.argv[2]
token = sys.argv[3]
files = int(sys.argv[4])
directory = sys.argv[5]
studies_dir = 'studies'
data_object_dir = 'data_objects'
FNULL = open(os.devnull, 'w')
curl = [
'curl',
'-k',
'-H', 'X-Auth-Token: ' + token,
'-H', 'X-CDMI-Specification-Version: 1.1.1',
'-H', 'Content-Type: application/cdmi-container',
'-X', 'DELETE',
'https://' + provider + '/cdmi/' + space + '/' + directory + '/'
]
remove_dir_proc = subprocess.Popen(curl, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
remove_dir_proc.wait()
curl = [
'curl',
'-k',
'-H', 'X-Auth-Token: ' + token,
'-H', 'X-CDMI-Specification-Version: 1.1.1',
'-H', 'Content-Type: application/cdmi-container',
'-X', 'PUT',
'https://' + provider + '/cdmi/' + space + '/' + directory + '/'
]
create_dir_proc = subprocess.Popen(curl, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
create_dir_proc.wait()
# Fan out one background curl per metadata file; the Popen handles are
# collected so all uploads can be awaited at the end.
processes = []
for source in [studies_dir, data_object_dir]:
    index = 0
    # NOTE(review): `index` is never used below.
    for (dirpath, _, filenames) in os.walk(source):
        filenames = natsorted(filenames)
        # Upload only the first `files` entries, in natural sort order.
        for filename in filenames[:files]:
            path = dirpath + '/' + filename
            with open(path, 'r') as json_file:
                metadata = json_file.read()
                metadata_json = json.loads(metadata)
                # Pad the cross-reference lists to 20 ids so every study /
                # data object links to a block of consecutive counterparts.
                if metadata_json['object_type'] == 'study':
                    linked_data_objects = metadata_json['linked_data_objects']
                    start_id = linked_data_objects[0]['id']
                    for i in range(1, 20):
                        linked_data_objects.append({ 'id': start_id + i })
                else:
                    related_studies = metadata_json['related_studies']
                    start_id = related_studies[0]['id']
                    for i in range(1, 20):
                        related_studies.append({ 'id': start_id - i })
            # Create an empty CDMI object whose user metadata carries the JSON.
            curl = [
                'curl',
                '-k',
                '-H', 'X-Auth-Token: ' + token,
                '-H', 'X-CDMI-Specification-Version: 1.1.1',
                '-H', 'Content-Type: application/cdmi-object',
                '-X', 'PUT',
                '-d', '{"metadata": {"onedata_json": ' + json.dumps(metadata_json) + '}}',
                'https://' + provider + '/cdmi/' + space + '/' + directory + '/' + filename
            ]
            processes.append(subprocess.Popen(curl, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL))
for proc in processes:
    proc.wait()
| 33.824742 | 102 | 0.643401 |
6e0cbccdccc4307ec0cd8efe2c3cb65f9c612951 | 1,925 | py | Python | backend/routes/user.py | mradzikowski/flask-trackerproductivity | 029103b80e21b6c64801816fe8dc27585317cb02 | [
"MIT"
] | null | null | null | backend/routes/user.py | mradzikowski/flask-trackerproductivity | 029103b80e21b6c64801816fe8dc27585317cb02 | [
"MIT"
] | null | null | null | backend/routes/user.py | mradzikowski/flask-trackerproductivity | 029103b80e21b6c64801816fe8dc27585317cb02 | [
"MIT"
] | null | null | null | from flask import jsonify, request
import backend.services.user as user_services
from . import bp
| 27.112676 | 81 | 0.628052 |
6e0cf115db4bb95a08b1d4ece55fa11c8d6418e1 | 222 | py | Python | src/mot/motion_models/__init__.py | neer201/Multi-Object-Tracking-for-Automotive-Systems-in-python | 886cd9e87283982381713dbf2e4ef695030f81de | [
"Apache-2.0"
] | 6 | 2021-11-21T10:47:01.000Z | 2022-03-17T01:14:53.000Z | src/mot/motion_models/__init__.py | neer201/Multi-Object-Tracking-for-Automotive-Systems-in-python | 886cd9e87283982381713dbf2e4ef695030f81de | [
"Apache-2.0"
] | 3 | 2021-04-12T12:37:41.000Z | 2021-04-30T14:29:53.000Z | src/mot/motion_models/__init__.py | neer201/Multi-Object-Tracking-for-Automotive-Systems-in-python | 886cd9e87283982381713dbf2e4ef695030f81de | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
from mot.motion_models.base_motion_model import MotionModel
from mot.motion_models.CT_motion_model import CoordinateTurnMotionModel
from mot.motion_models.CV_motion_model import ConstantVelocityMotionModel
| 37 | 73 | 0.891892 |
6e0db8ed1374b74b17dc4c64dad644332a33ce07 | 7,205 | py | Python | src/modu/editable/datatypes/date.py | philchristensen/modu | 795f3bc413956b98522ac514dafe35cbab0d57a3 | [
"MIT"
] | null | null | null | src/modu/editable/datatypes/date.py | philchristensen/modu | 795f3bc413956b98522ac514dafe35cbab0d57a3 | [
"MIT"
] | null | null | null | src/modu/editable/datatypes/date.py | philchristensen/modu | 795f3bc413956b98522ac514dafe35cbab0d57a3 | [
"MIT"
] | null | null | null | # modu
# Copyright (c) 2006-2010 Phil Christensen
# http://modu.bubblehouse.org
#
#
# See LICENSE for details
"""
Datatypes for managing stringlike data.
"""
import time, datetime
from zope.interface import implements
from modu.editable import IDatatype, define
from modu.util import form, tags, date
from modu.persist import sql
from modu import persist, assets
DAY = 86400
MONTH = DAY * 31
YEAR = DAY * 365
| 28.82 | 106 | 0.658015 |
6e0dc799717432679f99b12ed1cdbf0dbbf71f58 | 829 | py | Python | calculator.py | MateusLinharesDeAelencarLima/Calculator | 44e836aa92fd76d21b4c5f0edfcb5419886f1df6 | [
"CC0-1.0"
] | null | null | null | calculator.py | MateusLinharesDeAelencarLima/Calculator | 44e836aa92fd76d21b4c5f0edfcb5419886f1df6 | [
"CC0-1.0"
] | 1 | 2021-09-10T21:13:16.000Z | 2021-09-23T16:13:08.000Z | calculator.py | MateusLinharesDeAelencarLima/Calculator | 44e836aa92fd76d21b4c5f0edfcb5419886f1df6 | [
"CC0-1.0"
] | null | null | null | from functions.summation import summation
from functions.subtraction import subtraction
from functions.multiplication import multiplication
from functions.division import division
from functions.exponential import exponential
from functions.root import root
num1 = float(input('nmero 1: '))
num2 = float(input('nmero 2: '))
operation_1 = summation(num1, num2)
operation_2 = subtraction(num1, num2)
operation_3 = multiplication(num1, num2)
operation_4 = division(num1, num2)
operation_5 = exponential(num1, num2)
operation_6 = root(num1, num2)
print("A soma dos nmeros :", operation_1)
print("A diferena dos nmeros :", operation_2)
print("O produto dos nmeros :", operation_3)
print("O quociente dos nmeros :", operation_4)
print("A potncia dos nmeros :", operation_5)
print("A raiz dos nmeros :", operation_6)
| 34.541667 | 51 | 0.784077 |
6e0f3ad7fb4aa74ebb70351b2ab8036b7bfa68b3 | 2,949 | py | Python | tests.py | suetAndTie/ekho | fbf8a19e1babc3fc0f11220ec9440a7f05f4bfcd | [
"MIT"
] | 1 | 2019-01-31T19:17:01.000Z | 2019-01-31T19:17:01.000Z | tests.py | suetAndTie/ekho | fbf8a19e1babc3fc0f11220ec9440a7f05f4bfcd | [
"MIT"
] | null | null | null | tests.py | suetAndTie/ekho | fbf8a19e1babc3fc0f11220ec9440a7f05f4bfcd | [
"MIT"
] | null | null | null | ! pip install -q librosa nltk
import torch
import numpy as np
import librosa
import librosa.display
import IPython
from IPython.display import Audio
# need this for English text processing frontend
import nltk
! python -m nltk.downloader cmudict
preset = "20180505_deepvoice3_ljspeech.json"
checkpoint_path = "20180505_deepvoice3_checkpoint_step000640000.pth"
if not exists(preset):
!curl -O -L "https://www.dropbox.com/s/0ck82unm0bo0rxd/20180505_deepvoice3_ljspeech.json"
if not exists(checkpoint_path):
!curl -O -L "https://www.dropbox.com/s/5ucl9remrwy5oeg/20180505_deepvoice3_checkpoint_step000640000.pth"
import hparams
import json
# Load parameters from preset
with open(preset) as f:
hparams.hparams.parse_json(f.read())
# Inject frontend text processor
import synthesis
import train
from deepvoice3_pytorch import frontend
synthesis._frontend = getattr(frontend, "en")
train._frontend = getattr(frontend, "en")
# alises
fs = hparams.hparams.sample_rate
hop_length = hparams.hparams.hop_size
from train import build_model
from train import restore_parts, load_checkpoint
model = build_model()
model = load_checkpoint(checkpoint_path, model, None, True)
# Try your favorite senteneces:)
texts = [
"Scientists at the CERN laboratory say they have discovered a new particle.",
"There's a way to measure the acute emotional intelligence that has never gone out of style.",
"President Trump met with other leaders at the Group of 20 conference.",
"The Senate's bill to repeal and replace the Affordable Care Act is now imperiled.",
"Generative adversarial network or variational auto-encoder.",
"The buses aren't the problem, they actually provide a solution.",
"peter piper picked a peck of pickled peppers how many peppers did peter piper pick.",
"Some have accepted this as a miracle without any physical explanation.",
]
for idx, text in enumerate(texts):
print(idx, text)
tts(model, text, figures=False)
# With attention plot
text = "Generative adversarial network or variational auto-encoder."
tts(model, text, figures=True)
| 32.406593 | 106 | 0.758901 |
6e10c0ea90829d65558f7e100bd54ed82664fe76 | 405 | py | Python | lib/utils/checks.py | Matt-cloud/Discord.py-Template | 4b2ac9f0897bb44dfd799d821e536fc34ef3064e | [
"MIT"
] | null | null | null | lib/utils/checks.py | Matt-cloud/Discord.py-Template | 4b2ac9f0897bb44dfd799d821e536fc34ef3064e | [
"MIT"
] | null | null | null | lib/utils/checks.py | Matt-cloud/Discord.py-Template | 4b2ac9f0897bb44dfd799d821e536fc34ef3064e | [
"MIT"
] | null | null | null | from discord.ext import commands
from lib import exceptions
import os
import json
configFile = os.path.join(os.getcwd(), "data", "config.json")
with open(configFile, "rb") as f:
config = json.load(f)
| 22.5 | 61 | 0.681481 |
6e11308aa80bc676e3ca2d21a4edcb18f890e752 | 1,649 | py | Python | envs/fetch/interval.py | malikasng/Bbox_HGG_with_CTR_and_RRTstarFND | 2b1aae6c347f544fefface0c9f26dc4ecde51108 | [
"MIT"
] | 1 | 2020-09-16T06:15:17.000Z | 2020-09-16T06:15:17.000Z | envs/fetch/interval.py | malikasng/Bbox_HGG_with_CTR_and_RRTstarFND | 2b1aae6c347f544fefface0c9f26dc4ecde51108 | [
"MIT"
] | 5 | 2020-09-26T01:30:01.000Z | 2022-01-13T03:15:42.000Z | envs/fetch/interval.py | malikasng/Bbox_HGG_with_CTR_and_RRTstarFND | 2b1aae6c347f544fefface0c9f26dc4ecde51108 | [
"MIT"
] | null | null | null | import gym
import numpy as np
from torchvision.utils import save_image
from .fixobj import FixedObjectGoalEnv
| 37.477273 | 149 | 0.726501 |
6e11fb05adb494991b86d4b22a22f936a7c8a876 | 1,908 | py | Python | cactusbot/commands/magic/alias.py | CactusBot/CactusBot | 6d035bf74bdc8f7fb3ee1e79f8d443f5b17e7ea5 | [
"MIT"
] | 23 | 2016-02-16T05:09:11.000Z | 2016-09-20T14:22:51.000Z | cactusbot/commands/magic/alias.py | Alkali-Metal/CactusBot | 6d035bf74bdc8f7fb3ee1e79f8d443f5b17e7ea5 | [
"MIT"
] | 190 | 2016-09-30T05:31:59.000Z | 2018-12-22T08:46:49.000Z | cactusbot/commands/magic/alias.py | Alkali-Metal/CactusBot | 6d035bf74bdc8f7fb3ee1e79f8d443f5b17e7ea5 | [
"MIT"
] | 16 | 2016-10-09T16:51:48.000Z | 2017-10-25T05:29:10.000Z | """Alias command."""
from . import Command
from ...packets import MessagePacket
| 32.338983 | 78 | 0.545597 |
6e13a8102a55ae649fda3dcfedbae946ebff32c0 | 2,828 | py | Python | explorer/util.py | brianhouse/rlab | 4d878abd2299fd340a645ebd8b92a68c2b48f41e | [
"MIT"
] | null | null | null | explorer/util.py | brianhouse/rlab | 4d878abd2299fd340a645ebd8b92a68c2b48f41e | [
"MIT"
] | null | null | null | explorer/util.py | brianhouse/rlab | 4d878abd2299fd340a645ebd8b92a68c2b48f41e | [
"MIT"
] | null | null | null | import numpy as np
def normalize(signal, minimum=None, maximum=None):
    """Scale *signal* into the range [0, 1].

    The lower/upper bounds default to the observed data minimum/maximum
    when not supplied explicitly; after scaling, values falling outside
    [0, 1] are clipped.
    """
    scaled = np.array(signal, dtype=float)
    lo = np.min(scaled) if minimum is None else minimum
    hi = np.max(scaled) if maximum is None else maximum
    scaled = (scaled - lo) / (hi - lo)
    return np.clip(scaled, 0.0, 1.0)
def resample(ts, values, num_samples):
    """Resample (ts, values) pairs onto *num_samples* evenly spaced points.

    Timestamps must be strictly increasing (asserted); they are normalized
    to [0, 1] and the values linearly interpolated onto a uniform grid.
    """
    assert np.all(np.diff(ts) > 0)
    grid = np.linspace(0.0, 1.0, num_samples)
    return np.interp(grid, normalize(ts), values)
def smooth(signal, size=10, window='blackman'):
    """Low-pass filter *signal* with a width-*size* moving average.

    The signal is mirror-extended at both ends before convolving so the
    output keeps the original length.  Signals shorter than 3 samples are
    returned unchanged.  (The *window* argument is accepted but unused.)
    """
    data = np.array(signal)
    if size < 3:
        return data
    head = 2 * data[0] - data[size:1:-1]
    tail = 2 * data[-1] - data[-1:-size:-1]
    padded = np.r_[head, data, tail]
    kernel = np.ones(size, 'd')
    smoothed = np.convolve(kernel / kernel.sum(), padded, mode='same')
    return smoothed[size - 1:-size + 1]
def detect_peaks(signal, lookahead=10, delta=0):
    """Detect the local maxima and minima in a signal.

    Parameters
    ----------
    signal : array-like
        Sequence of values to scan.
    lookahead : int
        Number of samples to look ahead of a candidate extreme to check
        whether a more extreme value follows before committing it.
    delta : number
        Minimum difference between a candidate and the surrounding points
        for it to count as a peak/valley (filters small "hills").

    Returns
    -------
    tuple(list, list)
        ``(peaks, valleys)``, each a list of ``[index, value]`` pairs.

    Note: be careful with flat regions — they interact with *lookahead*.
    """
    signal = np.array(signal)
    peaks = []
    valleys = []
    # np.inf replaces np.Inf (the capitalised alias was removed in NumPy 2.0).
    min_value, max_value = np.inf, -np.inf
    for index, value in enumerate(signal[:-lookahead]):
        # Track the running extremes since the last committed peak/valley.
        if value > max_value:
            max_value = value
            max_pos = index
        if value < min_value:
            min_value = value
            min_pos = index
        # Candidate peak: we have dropped at least `delta` below the maximum
        # and no higher value occurs within the lookahead window.
        if value < max_value - delta and max_value != np.inf:
            if signal[index:index + lookahead].max() < max_value:
                peaks.append([max_pos, max_value])
                # Inf sentinels mean "now searching for the next valley".
                # (Two dead `drop_first_*` flag assignments removed here.)
                max_value = np.inf
                min_value = np.inf
                if index + lookahead >= signal.size:
                    break
                continue
        # Candidate valley: symmetric to the peak case above.
        if value > min_value + delta and min_value != -np.inf:
            if signal[index:index + lookahead].min() > min_value:
                valleys.append([min_pos, min_value])
                min_value = -np.inf
                max_value = -np.inf
                if index + lookahead >= signal.size:
                    break
    return peaks, valleys
6e14c71363bc33135f20b63aec47306b9531737a | 2,839 | py | Python | dooly/converters/kobart_utils.py | jinmang2/DOOLY | 961c7b43b06dffa98dc8a39e72e417502e89470c | [
"Apache-2.0"
] | 17 | 2022-03-06T05:06:14.000Z | 2022-03-31T00:25:06.000Z | dooly/converters/kobart_utils.py | jinmang2/DOOLY | 961c7b43b06dffa98dc8a39e72e417502e89470c | [
"Apache-2.0"
] | 6 | 2022-03-27T18:18:40.000Z | 2022-03-31T17:35:34.000Z | dooly/converters/kobart_utils.py | jinmang2/DOOLY | 961c7b43b06dffa98dc8a39e72e417502e89470c | [
"Apache-2.0"
] | 1 | 2022-03-31T13:07:41.000Z | 2022-03-31T13:07:41.000Z | import os
import sys
import hashlib
import importlib
if is_available_boto3():
import boto3
from botocore import UNSIGNED
from botocore.client import Config
else:
raise ModuleNotFoundError("Please install boto3 with: `pip install boto3`.")
| 31.898876 | 80 | 0.610426 |
6e154f31690fe2c1e126dc21483f4d1d4a667900 | 348 | py | Python | Python_Files/murach/book_apps/ch13/factorial_recursion.py | Interloper2448/BCGPortfolio | c4c160a835c64c8d099d44c0995197f806ccc824 | [
"MIT"
] | null | null | null | Python_Files/murach/book_apps/ch13/factorial_recursion.py | Interloper2448/BCGPortfolio | c4c160a835c64c8d099d44c0995197f806ccc824 | [
"MIT"
] | null | null | null | Python_Files/murach/book_apps/ch13/factorial_recursion.py | Interloper2448/BCGPortfolio | c4c160a835c64c8d099d44c0995197f806ccc824 | [
"MIT"
] | null | null | null |
if __name__ == "__main__":
main()
| 20.470588 | 39 | 0.514368 |
6e15e9506e9a75c167124e23e066dc0069217190 | 1,565 | py | Python | tests/uv/util/test_env.py | hartikainen/uv-metrics | 7b47b8ce1dff5fc41cdd540f816ea41a0cd27c21 | [
"ECL-2.0",
"Apache-2.0"
] | 9 | 2020-06-17T17:33:05.000Z | 2022-03-30T17:32:05.000Z | tests/uv/util/test_env.py | hartikainen/uv-metrics | 7b47b8ce1dff5fc41cdd540f816ea41a0cd27c21 | [
"ECL-2.0",
"Apache-2.0"
] | 28 | 2020-06-16T18:32:08.000Z | 2020-11-12T17:51:20.000Z | tests/uv/util/test_env.py | hartikainen/uv-metrics | 7b47b8ce1dff5fc41cdd540f816ea41a0cd27c21 | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2020-08-07T20:05:49.000Z | 2021-10-21T01:43:00.000Z | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uv.util.env as ue
| 29.528302 | 78 | 0.705431 |
6e1651dd40e1ae6c43644b4a77456f4eb701c53a | 1,054 | py | Python | models/fleet.py | gnydick/qairon | e67af1f88ac6c614ae33adc4f42ab2ec3cc5b257 | [
"MIT"
] | null | null | null | models/fleet.py | gnydick/qairon | e67af1f88ac6c614ae33adc4f42ab2ec3cc5b257 | [
"MIT"
] | null | null | null | models/fleet.py | gnydick/qairon | e67af1f88ac6c614ae33adc4f42ab2ec3cc5b257 | [
"MIT"
] | null | null | null | from sqlalchemy import *
from sqlalchemy.orm import relationship
from db import db
def __update_id__(fleet):
    """Set fleet.id to "<deployment_target_id>:<fleet_type_id>:<name>"."""
    key_parts = (fleet.deployment_target_id, fleet.fleet_type_id, fleet.name)
    fleet.id = ':'.join(key_parts)
| 30.114286 | 89 | 0.736243 |
6e17097d88bd49914581f2dfe02ed8fa34bee9d4 | 254 | py | Python | backend/authentication/admin.py | jklewis99/hypertriviation | e12be87e978505fb3a73f4fc606173f41a3aee81 | [
"MIT"
] | 1 | 2022-03-27T19:39:07.000Z | 2022-03-27T19:39:07.000Z | backend/authentication/admin.py | jklewis99/hypertriviation | e12be87e978505fb3a73f4fc606173f41a3aee81 | [
"MIT"
] | 5 | 2022-03-27T19:32:54.000Z | 2022-03-31T23:25:44.000Z | backend/authentication/admin.py | jklewis99/hypertriviation | e12be87e978505fb3a73f4fc606173f41a3aee81 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import HypertriviationUser
# Register your models here.
admin.site.register(HypertriviationUser, HypertriviationUserAdmin) | 25.4 | 66 | 0.838583 |
6e1773f3e2177f91fdf46e022af55af83edbbcb5 | 1,568 | py | Python | logs/followup_email.py | vreyespue/Movie_Bot | 192c74be62afcfda77a0984ff4da3014226c3432 | [
"Apache-2.0"
] | 26 | 2019-02-04T04:55:09.000Z | 2021-09-22T14:58:46.000Z | logs/followup_email.py | vreyespue/Movie_Bot | 192c74be62afcfda77a0984ff4da3014226c3432 | [
"Apache-2.0"
] | 2 | 2019-05-07T16:33:09.000Z | 2021-02-13T18:25:35.000Z | logs/followup_email.py | vreyespue/Movie_Bot | 192c74be62afcfda77a0984ff4da3014226c3432 | [
"Apache-2.0"
] | 27 | 2018-12-10T12:13:50.000Z | 2020-10-11T17:43:22.000Z | ###################################################################
######## Follow up email #############
###################################################################
"""
followup_email.py
This is special use case code written to assist bot developers. It consolidates topics that are not familiar to the bot
and sends it in a nicely formatted email to the developers team.
"""
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
import smtplib
import os,string,sys
sys.path.append(os.path.normpath(os.getcwd()))
from config import location
SERVER = " "  # SMTP relay host; placeholder — must be configured before use.
FROM = ["xxxx@gmail.com"]
TO = ["xxxx@gmail.com"]  # must be a list
SUBJECT = "Follow up questions email"
TEXT = """Hello,
Here are the various questions users asked me today which I have no idea about. Could you help me learn these topics?
Regards,
Kelly
"""

# Build the multipart message: a plain-text body plus the follow-up file
# attached as a base64-encoded octet-stream.
msg = MIMEMultipart()
msg['From'] = ", ".join(FROM)
msg['To'] = ", ".join(TO)
msg['Subject'] = SUBJECT
body = TEXT
msg.attach(MIMEText(body, 'plain'))

filename = 'followup_file.TXT'
# Context manager closes the attachment even on error (the original left
# the handle open), and `filename` is reused instead of repeating the
# literal path component.
with open(location + filename, "rb") as attachment:
    part = MIMEBase('application', 'octet-stream')
    part.set_payload(attachment.read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', "attachment; filename= %s" % filename)
msg.attach(part)
message = msg.as_string()

# Send and disconnect.
server = smtplib.SMTP(SERVER)
server.sendmail(FROM, TO, message)
server.quit()
6e18dbf82c0ab208ca098975575465ec97248c7b | 269 | py | Python | backend/validators/authorization_val.py | NelsonM9/senaSoft | d72b5ed32b86a53aac962ec440d84ecce4555780 | [
"Apache-2.0"
] | null | null | null | backend/validators/authorization_val.py | NelsonM9/senaSoft | d72b5ed32b86a53aac962ec440d84ecce4555780 | [
"Apache-2.0"
] | null | null | null | backend/validators/authorization_val.py | NelsonM9/senaSoft | d72b5ed32b86a53aac962ec440d84ecce4555780 | [
"Apache-2.0"
] | null | null | null | from marshmallow import validate, fields, Schema
| 38.428571 | 74 | 0.758364 |
6e1b6e602b092d059fb5b4b96bb130aa002770f4 | 1,213 | py | Python | wiwo/sender.py | CoreSecurity/wiwo | 44bd44b8ebea7e33105a7f4dac6480493cbb9623 | [
"Apache-1.1"
] | 76 | 2015-08-01T23:24:43.000Z | 2018-07-02T11:13:16.000Z | wiwo/sender.py | 6e726d/wiwo | 44bd44b8ebea7e33105a7f4dac6480493cbb9623 | [
"Apache-1.1"
] | 1 | 2016-01-28T22:11:17.000Z | 2016-02-03T22:14:46.000Z | wiwo/sender.py | 6e726d/wiwo | 44bd44b8ebea7e33105a7f4dac6480493cbb9623 | [
"Apache-1.1"
] | 27 | 2015-08-11T07:24:42.000Z | 2018-10-05T11:09:54.000Z | #!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
#
# Copyright 2003-2015 CORE Security Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Andres Blanco (6e726d)
# Andres Gazzoli
#
import ethernet
import pcapy
| 28.880952 | 74 | 0.678483 |
6e1d05dba9a266286addc73ec4950cdeada8c0b4 | 1,581 | py | Python | config.py | juanjtov/Twitter_PNL_PUBLIC | 473eea0e7b030c8358aa86f6d3ff9d787c94abe6 | [
"MIT"
] | null | null | null | config.py | juanjtov/Twitter_PNL_PUBLIC | 473eea0e7b030c8358aa86f6d3ff9d787c94abe6 | [
"MIT"
] | null | null | null | config.py | juanjtov/Twitter_PNL_PUBLIC | 473eea0e7b030c8358aa86f6d3ff9d787c94abe6 | [
"MIT"
] | null | null | null | import os
| 39.525 | 127 | 0.697027 |
6e1de2b972d3bacd17bc4fe230cc40342951d8ec | 130 | py | Python | code/helpers/__init__.py | briandesilva/discovery-of-physics-from-data | b79c34317f049c9b47aaf2cc4c54c5ec7219f3d7 | [
"MIT"
] | 11 | 2020-07-02T01:48:27.000Z | 2022-03-29T18:23:32.000Z | code/helpers/__init__.py | briandesilva/discovery-of-physics-from-data | b79c34317f049c9b47aaf2cc4c54c5ec7219f3d7 | [
"MIT"
] | null | null | null | code/helpers/__init__.py | briandesilva/discovery-of-physics-from-data | b79c34317f049c9b47aaf2cc4c54c5ec7219f3d7 | [
"MIT"
] | 3 | 2020-11-21T09:11:21.000Z | 2022-03-29T18:23:58.000Z | from .library import *
from .differentiation import *
from .sindy_ball import SINDyBall
from .tests import *
from .utils import *
| 21.666667 | 33 | 0.776923 |
6e1fd593ca8661737d9d161ba6774b763dcdbb57 | 341 | py | Python | users/models.py | diogor/desafio-backend | 4264a843503cc51f635bcfb31a009d53ebe671d8 | [
"MIT"
] | null | null | null | users/models.py | diogor/desafio-backend | 4264a843503cc51f635bcfb31a009d53ebe671d8 | [
"MIT"
] | null | null | null | users/models.py | diogor/desafio-backend | 4264a843503cc51f635bcfb31a009d53ebe671d8 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractUser
| 21.3125 | 63 | 0.609971 |
6e1ff72ebc4c23799d24fd64dfc337c27cbb1d44 | 151 | py | Python | python/glob/glob1.py | jtraver/dev | c7cd2181594510a8fa27e7325566ed2d79371624 | [
"MIT"
] | null | null | null | python/glob/glob1.py | jtraver/dev | c7cd2181594510a8fa27e7325566ed2d79371624 | [
"MIT"
] | null | null | null | python/glob/glob1.py | jtraver/dev | c7cd2181594510a8fa27e7325566ed2d79371624 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import glob
main()
| 13.727273 | 36 | 0.569536 |
6e201007363380e4d643bfc71a7961525d34bdc2 | 4,073 | py | Python | email_scrapper/readers/gmail_reader.py | datmellow/email-scrapper | 614e99a4b33f3a0d3d85d5eb9c359818991673a6 | [
"MIT"
] | 2 | 2018-01-07T23:12:28.000Z | 2018-01-10T00:58:17.000Z | email_scrapper/readers/gmail_reader.py | LucasCoderT/email-scrapper | 614e99a4b33f3a0d3d85d5eb9c359818991673a6 | [
"MIT"
] | null | null | null | email_scrapper/readers/gmail_reader.py | LucasCoderT/email-scrapper | 614e99a4b33f3a0d3d85d5eb9c359818991673a6 | [
"MIT"
] | 1 | 2019-12-09T17:01:08.000Z | 2019-12-09T17:01:08.000Z | import base64
import datetime
import email
import logging
import os
import typing
from email.message import Message
from googleapiclient import errors
from email_scrapper.models import Stores
from email_scrapper.readers.base_reader import BaseReader
logger = logging.getLogger(__name__)
| 41.141414 | 117 | 0.615762 |
6e218f16003cae78a4d29f7eb9e696aa4c77eb3e | 187 | py | Python | ClassCode/P2/HW - Copy.py | tsyet12/ClassCode | db1db97f71a6f31769d58739c6687863bc6b88c4 | [
"MIT"
] | null | null | null | ClassCode/P2/HW - Copy.py | tsyet12/ClassCode | db1db97f71a6f31769d58739c6687863bc6b88c4 | [
"MIT"
] | null | null | null | ClassCode/P2/HW - Copy.py | tsyet12/ClassCode | db1db97f71a6f31769d58739c6687863bc6b88c4 | [
"MIT"
] | null | null | null | a=[1,2,3]
b=[1,1,1]
# Element-wise sum of lists a (defined just above) and b; d below was a
# number-to-word lookup left commented out.
#d={1:"ONE", 2:"TWO", 3:"THREE", 4:"FOUR", 5:"FIVE", 6:"SIX"}
f=[a[0]+b[0],a[1]+b[1],a[2]+b[2]]
# Replace only the first element with its English word (1 or 2 handled).
if f[0]==1:
    f[0]="ONE"
elif f[0]==2:
    f[0]="TWO"
print(f)
| 11.6875 | 61 | 0.417112 |
6e2255b8f77a18ad6776515831039d97cfa15e3a | 748 | py | Python | Advanced_algorithm/oj_test/test04.py | mndream/MyOJ | ee92fb657475d998e6c201f222cb20bcbc2bfd64 | [
"Apache-2.0"
] | 1 | 2018-12-27T08:06:38.000Z | 2018-12-27T08:06:38.000Z | Advanced_algorithm/oj_test/test04.py | mndream/MyPythonOJ | ee92fb657475d998e6c201f222cb20bcbc2bfd64 | [
"Apache-2.0"
] | null | null | null | Advanced_algorithm/oj_test/test04.py | mndream/MyPythonOJ | ee92fb657475d998e6c201f222cb20bcbc2bfd64 | [
"Apache-2.0"
] | null | null | null | '''
A+B for Input-Output Practice (IV)
Your task is to Calculate the sum of some integers.
Input contains multiple test cases. Each test case contains a integer N,
and then N integers follow in the same line.
A test case starting with 0 terminates the input and this test case is not to be processed.
For each group of input integers you should output their sum in one line,
and with one line of output for each line in input.
4 1 2 3 4
5 1 2 3 4 5
0
10
15
'''
while(True):
    # Read one test case: the first integer is N, followed by N integers.
    input_list = list(map(int, input().split()))
    # split() with no argument splits on any whitespace (spaces, \n, \t);
    # split(" ") would split on single spaces only.
    n = input_list[0]
    # A test case starting with 0 terminates the input.
    if n == 0:
        break
    # NOTE(review): `sum` shadows the builtin of the same name.
    sum = 0
    for i in range(n):
        sum = sum + input_list[i + 1]
    print(sum) | 24.933333 | 91 | 0.669786 |
6e22c62fbf96771a37ae5b157b23776e81cda2c5 | 2,421 | py | Python | pre-processing/obtain_audio_spectrogram.py | GeWu-Lab/OGM-GE_CVPR2022 | 08b3f2498dd3e89f57fe9a12b5bf0c162eba1fbf | [
"MIT"
] | 4 | 2022-03-06T17:57:24.000Z | 2022-03-24T04:26:32.000Z | pre-processing/obtain_audio_spectrogram.py | GeWu-Lab/OGM-GE_CVPR2022 | 08b3f2498dd3e89f57fe9a12b5bf0c162eba1fbf | [
"MIT"
] | null | null | null | pre-processing/obtain_audio_spectrogram.py | GeWu-Lab/OGM-GE_CVPR2022 | 08b3f2498dd3e89f57fe9a12b5bf0c162eba1fbf | [
"MIT"
] | 1 | 2022-03-31T08:12:15.000Z | 2022-03-31T08:12:15.000Z | import multiprocessing
import os
import os.path
import pickle
import librosa
import numpy as np
from scipy import signal
if __name__ == '__main__':
    # Producer/consumer pipeline: queue one task per audio file and let one
    # worker process per CPU drain the queue.  `Consumer` is defined
    # elsewhere in the original file (not visible here) — TODO confirm.
    # Establish communication queues
    tasks = multiprocessing.JoinableQueue()
    # Start consumers
    num_consumers = multiprocessing.cpu_count()
    print('Creating {} consumers'.format(num_consumers))
    consumers = [
        Consumer(tasks)
        for i in range(num_consumers)
    ]
    for w in consumers:
        w.start()
    # path='data/'
    # Output directory for the spectrograms (created if missing).
    save_dir = '/home/xiaokang_peng/data/AVE_av/audio_spec'
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    path_origin = '/home/xiaokang_peng/data/AVE_av/audio'
    audios = os.listdir(path_origin)
    for audio in audios:
        audio_name = audio
        audio_path = os.path.join(path_origin, audio)
        # Task = [output dir, file stem, input path]; [:-4] strips the last
        # four characters — assumes 3-letter extensions like .wav, confirm.
        tasks.put([save_dir, audio_name[:-4], audio_path])
    # Add a poison pill for each consumer
    for i in range(num_consumers):
        tasks.put(None)
    # Wait for all of the tasks to finish
    tasks.join()
    print("ok")
| 28.482353 | 106 | 0.646014 |
6e237945177ee47426cc1fcc873291dbba403f32 | 3,317 | py | Python | src/protean/core/event_handler.py | mpsiva89/protean | 315fa56da3f64178bbbf0edf1995af46d5eb3da7 | [
"BSD-3-Clause"
] | null | null | null | src/protean/core/event_handler.py | mpsiva89/protean | 315fa56da3f64178bbbf0edf1995af46d5eb3da7 | [
"BSD-3-Clause"
] | null | null | null | src/protean/core/event_handler.py | mpsiva89/protean | 315fa56da3f64178bbbf0edf1995af46d5eb3da7 | [
"BSD-3-Clause"
] | null | null | null | import inspect
import logging
from protean.container import Element, OptionsMixin
from protean.core.event import BaseEvent
from protean.exceptions import IncorrectUsageError
from protean.utils import DomainObjects, derive_element_class, fully_qualified_name
from protean.utils.mixins import HandlerMixin
logger = logging.getLogger(__name__)
| 36.855556 | 114 | 0.619234 |
6e246664f07a32e8eef7dfd24b7f3cda19fa9734 | 7,508 | py | Python | read_prepare_data.py | jlu-ilr-hydro/IPCC-Repots-Focus-Overview | bf631975eb6c3ea2cf2f8fe9382e3361ad700a6e | [
"Apache-2.0"
] | null | null | null | read_prepare_data.py | jlu-ilr-hydro/IPCC-Repots-Focus-Overview | bf631975eb6c3ea2cf2f8fe9382e3361ad700a6e | [
"Apache-2.0"
] | null | null | null | read_prepare_data.py | jlu-ilr-hydro/IPCC-Repots-Focus-Overview | bf631975eb6c3ea2cf2f8fe9382e3361ad700a6e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 17 10:12:26 2021
@author: Florian Jehn
"""
import os
import pandas as pd
import numpy as np
def read_ipcc_counts_temp():
    """Read every per-report temperature count file and combine them.

    Each semicolon-separated CSV under Results/temperatures holds one
    report's counts; the file name minus its last four characters
    (the extension) becomes that report's row label.  Returns a DataFrame
    with one row per report.
    """
    files = os.listdir(os.getcwd()+os.sep+"Results"+ os.sep + "temperatures")
    all_df = pd.DataFrame()
    for file in files:
        # One column per report, then transpose so reports become rows.
        file_df = pd.read_csv("Results" + os.sep + "temperatures" + os.sep + file, sep=";", index_col=0)
        file_df.columns = [file[:-4]]
        all_df = pd.concat([all_df, file_df], axis=1)
    return all_df.transpose()
def read_ipcc_counts_rfc():
    """Read every per-report "reasons for concern" count file and combine them.

    Mirrors read_ipcc_counts_temp(), but over Results/reasons_for_concern.
    Returns a DataFrame with one row per report.
    """
    files = os.listdir(os.getcwd()+os.sep+"Results"+ os.sep + "reasons_for_concern")
    all_df = pd.DataFrame()
    for file in files:
        # One column per report, then transpose so reports become rows.
        file_df = pd.read_csv("Results" + os.sep + "reasons_for_concern" + os.sep + file, sep=";", index_col=0)
        file_df.columns = [file[:-4]]
        all_df = pd.concat([all_df, file_df], axis=1)
    return all_df.transpose()
def read_false_positive():
    """Read the counted false/true positives for the temperatures in the
    IPCC reports and compute a true-positive rate for each entry.

    Only files whose name contains "results" are read.  Each row gains a
    "True Positive Rate [%]" column and a "Temperature [C]" column taken
    from the original index; rows from all files are concatenated.
    """
    files = os.listdir(os.getcwd()+os.sep+"Results"+ os.sep + "false_positive_check_files")
    all_df = pd.DataFrame()
    for file in files:
        # only read those files that contain the counting results
        if "results" not in file:
            continue
        file_df = pd.read_csv("Results" + os.sep + "false_positive_check_files" + os.sep + file, sep=",", index_col=0)
        # calculate the true positive rate
        file_df["True Positive Rate [%]"] = (file_df["n true positive"]/(file_df["n true positive"]+file_df["n false positive"]))*100
        # Arrange the df in long form (index moved into a column) for seaborn
        file_df["Temperature [C]"] = file_df.index
        file_df.reset_index(inplace=True, drop=True)
        all_df = pd.concat([all_df, file_df])
    return all_df
def scale_counts(ipcc_counts):
    """Rescale each row of counts so it sums to 100 (percentages), in place.

    Row totals are computed once up front, then every column is divided by
    them.  The (mutated) input frame is returned.
    """
    row_totals = ipcc_counts.sum(axis=1)
    for column in ipcc_counts.columns:
        ipcc_counts[column] = ipcc_counts[column].div(row_totals).mul(100)
    return ipcc_counts
def read_meta():
    """Read the tab-separated report metadata table.

    The "Year" column is cast to string so it can be used as a label.
    """
    meta = pd.read_csv("Reports" + os.sep + "meta_data_reports.tsv", sep="\t")
    meta["Year"] = meta["Year"].astype("str")
    return meta
def group_temps(ipcc_counts):
    """Aggregate the per-temperature counts into three coarse bands (in place).

    Bands: "0.5C - 2C", "2.5C - 4C", and everything from 4.5C upward
    accumulated into the existing " 4.5C" column.  Returns the columns
    from position 20 onward.

    NOTE(review): with the 20 original temperature columns, iloc[:, 20:]
    returns only the two newly appended band columns and drops the
    aggregated " 4.5C" column — confirm this offset against the callers.
    """
    ipcc_counts["0.5C - 2C"] = ipcc_counts[" 0.5C"] + ipcc_counts[" 1C"] + ipcc_counts[" 1.5C"] +ipcc_counts[" 2C"]
    ipcc_counts["2.5C - 4C"] = ipcc_counts[" 2.5C"] + ipcc_counts[" 3C"] + ipcc_counts[" 3.5C"] +ipcc_counts[" 4C"]
    ipcc_counts[" 4.5C"] = ipcc_counts[" 4.5C"] + ipcc_counts[" 5C"] + ipcc_counts[" 5.5C"] +ipcc_counts[" 6C"] +ipcc_counts[" 6.5C"] + ipcc_counts[" 7C"] + ipcc_counts[" 7.5C"] +ipcc_counts[" 8C"] + ipcc_counts[" 8.5C"] + ipcc_counts[" 9C"] + ipcc_counts[" 9.5C"] +ipcc_counts[" 10C"]
    return ipcc_counts.iloc[:,20:]
def merge_counts_meta(ipcc_counts, meta):
    """Join the report metadata onto the counted temperatures/rfcs.

    meta["count_names"] is matched against the index of *ipcc_counts*
    (inner join); the metadata columns come first in the result.
    """
    return meta.merge(ipcc_counts, left_on="count_names", right_index=True)
def lookup_names():
    """Return the report-PDF-name -> counts-file-name lookup dict.

    Keys are the report file stems as distributed by the IPCC; values are
    the corresponding per-report count files produced by the parser.
    """
    lookup_dict = {
        "IPCC_AR6_WGI_Full_Report":"counts_IPCC_AR6_WGI_Full_Report_parsed",
        "SROCC_FullReport_FINAL":"counts_SROCC_FullReport_FINAL_parsed",
        "210714-IPCCJ7230-SRCCL-Complete-BOOK-HRES":"counts_210714-IPCCJ7230-SRCCL-Complete-BOOK-HRES_parsed",
        "SR15_Full_Report_Low_Res":"counts_SR15_Full_Report_Low_Res_parsed",
        "SYR_AR5_FINAL_full":"counts_SYR_AR5_FINAL_full_wcover_parsed",
        "ipcc_wg3_ar5_full":"counts_ipcc_wg3_ar5_full_parsed",
        "WGIIAR5-PartA_FINAL":"counts_WGIIAR5-PartA_FINAL_parsed",
        "WGIIAR5-PartB_FINAL":"counts_WGIIAR5-PartB_FINAL_parsed",
        "WG1AR5_all_final":"counts_WG1AR5_all_final_parsed",
        "SREX_Full_Report-1":"counts_SREX_Full_Report-1_parsed",
        "SRREN_Full_Report-1":"counts_SRREN_Full_Report-1_parsed",
        "ar4_syr_full_report":"counts_ar4_syr_full_report_parsed",
        "ar4_wg2_full_report":"counts_ar4_wg2_full_report_parsed",
        "ar4_wg1_full_report-1":"counts_ar4_wg1_full_report-1_parsed",
        "ar4_wg3_full_report-1":"counts_ar4_wg3_full_report-1_parsed",
        "sroc_full-1":"counts_sroc_full-1_parsed",
        "srccs_wholereport-1":"counts_srccs_wholereport-1_parsed",
        "SYR_TAR_full_report":"counts_SYR_TAR_full_report_parsed",
        "WGII_TAR_full_report-2":"counts_WGII_TAR_full_report-2_parsed",
        "WGI_TAR_full_report":"counts_WGI_TAR_full_report_parsed",
        "WGIII_TAR_full_report":"counts_WGIII_TAR_full_report_parsed",
        "srl-en-1":"counts_srl-en-1_parsed",
        # NOTE(review): "_parsedd" below looks like a typo for "_parsed" —
        # verify against the actual count file on disk before changing it.
        "srtt-en-1":"counts_srtt-en-1_parsedd",
        "emissions_scenarios-1":"counts_emissions_scenarios-1_parsed",
        "av-en-1":"counts_av-en-1_parsed",
        "The-Regional-Impact":"counts_The-Regional-Impact_parsed",
        "2nd-assessment-en-1":"counts_2nd-assessment-en-1_parsed",
        "ipcc_sar_wg_III_full_report":"counts_ipcc_sar_wg_III_full_report_parsed",
        "ipcc_sar_wg_II_full_report":"counts_ipcc_sar_wg_II_full_report_parsed",
        "ipcc_sar_wg_I_full_report":"counts_ipcc_sar_wg_I_full_report_parsed",
        "climate_change_1994-2":"counts_climate_change_1994-2_parsed",
        # "ipcc-technical-guidelines-1994n-1":"", # could not read in, but also contains no temp mentions
        "ipcc_wg_I_1992_suppl_report_full_report":"counts_ipcc_wg_I_1992_suppl_report_full_report_parsed",
        "ipcc_wg_II_1992_suppl_report_full_report":"counts_ipcc_wg_II_1992_suppl_report_full_report_parsed",
        "ipcc_90_92_assessments_far_full_report":"counts_ipcc_90_92_assessments_far_full_report_parsed",
        "ipcc_far_wg_III_full_report":"counts_ipcc_far_wg_III_full_report_parsed",
        "ipcc_far_wg_II_full_report":"counts_ipcc_far_wg_II_full_report_parsed",
        "ipcc_far_wg_I_full_report":"counts_ipcc_far_wg_I_full_report_parsed",
    }
    return lookup_dict
def create_temp_keys():
    """Build the temperature labels " 0.5C" through " 10C" in half-degree steps.

    Whole-degree values are rendered without a ".0" suffix (" 1C", " 2C", ...).
    """
    labels = []
    for idx, temp in enumerate(np.arange(0.5, 10.1, 0.5)):
        if idx % 2 == 0:
            # Even positions hold the half-degree values (0.5, 1.5, ...).
            labels.append(" " + str(temp) + "C")
        else:
            # Odd positions are whole degrees; drop the fractional part.
            labels.append(" " + str(int(temp)) + "C")
    return labels
def combine_all_raw_strings():
    """Concatenate the first column of every raw-string CSV into one big file.

    Reads each *.csv under "Raw IPCC Strings" (tab-separated, first column
    only), joins the cells with spaces, and writes the accumulated text to
    "all_ipcc_strings.csv" in the same directory.
    """
    reports = [file for file in os.listdir(os.getcwd() + os.sep + "Raw IPCC Strings") if file[-4:] == ".csv" ]
    all_reports = " "
    for report in reports:
        print("Starting with " + report)
        report_df = pd.read_csv(os.getcwd() + os.sep + "Raw IPCC Strings" + os.sep + report, sep="\t", usecols=[0])
        report_list = report_df[report_df.columns[0]].tolist()
        # str() guards against non-string cells (e.g. NaN) before joining.
        report_str = " ".join([str(item) for item in report_list])
        all_reports += report_str
    with open(os.getcwd() + os.sep + "Raw IPCC Strings" + os.sep + "all_ipcc_strings.csv", 'w', encoding='utf-8') as f:
        # this file is not included in the repository, as it is too large for Github
        f.write(all_reports)
if __name__ == "__main__":
    # Script entry point: build the combined raw-strings file for searching.
    combine_all_raw_strings()
| 48.128205 | 300 | 0.683404 |
6e25342e23a32ed5b961b03bb3584a54058a2d5c | 156 | py | Python | tests/test_get_filesize.py | zevaverbach/zev | 7330718f4eee28695fe57fb1107e506e6b0c9e4e | [
"MIT"
] | null | null | null | tests/test_get_filesize.py | zevaverbach/zev | 7330718f4eee28695fe57fb1107e506e6b0c9e4e | [
"MIT"
] | 1 | 2019-07-20T09:26:46.000Z | 2019-07-20T09:26:46.000Z | tests/test_get_filesize.py | zevaverbach/zev | 7330718f4eee28695fe57fb1107e506e6b0c9e4e | [
"MIT"
] | null | null | null | from pytest import fixture
from zev.get_filesize import get_filesize
| 19.5 | 44 | 0.814103 |
6e253d478e601785b1142f2b0dc902543e75cdbc | 179 | py | Python | part1/03.py | jbaltop/57_Challenges | fa66ac584fc02761803fbd5692b737a73bd57983 | [
"MIT"
] | 31 | 2017-10-08T15:57:07.000Z | 2021-06-16T11:55:05.000Z | part1/03.py | jbaltop/57_Challenges | fa66ac584fc02761803fbd5692b737a73bd57983 | [
"MIT"
] | 1 | 2021-04-30T20:39:01.000Z | 2021-04-30T20:39:01.000Z | part1/03.py | jbaltop/57_Challenges | fa66ac584fc02761803fbd5692b737a73bd57983 | [
"MIT"
] | 7 | 2017-10-16T17:13:36.000Z | 2019-07-03T16:24:01.000Z |
main()
| 16.272727 | 58 | 0.502793 |
6e265824cd5b4d3d09aa3a85134608484df9ae21 | 1,151 | py | Python | Integertask.py | Ainara12/Programing-Scripting-problems | 1017c1a8a3aeabc040886f9bdab35b252e7e08ea | [
"MIT"
] | null | null | null | Integertask.py | Ainara12/Programing-Scripting-problems | 1017c1a8a3aeabc040886f9bdab35b252e7e08ea | [
"MIT"
] | null | null | null | Integertask.py | Ainara12/Programing-Scripting-problems | 1017c1a8a3aeabc040886f9bdab35b252e7e08ea | [
"MIT"
] | null | null | null | #This program calculates the successive values of the following
# Collatz-style sequence: starting from a user-supplied positive integer,
# repeatedly halve even values and map odd values to 3*n + 1, printing each
# value until the sequence reaches 1.  Non-positive input is rejected.
pnumber = int(input("Enter a positive integer here:"))
while pnumber > 0:
    if pnumber == 1:  # reached 1: print it and stop
        print(pnumber)
        break
    if pnumber % 2 == 0:
        # Even: halve.  Integer division (//) keeps the sequence in ints;
        # the original used /, which yields floats under Python 3.
        print(pnumber)
        pnumber = pnumber // 2
    else:
        # Odd: multiply by 3 and add 1.
        print(pnumber)
        pnumber = pnumber * 3 + 1
# A non-positive entry gets a message instead.  (Fixed from the Python 2
# `print x, y` statement form, which is a SyntaxError under Python 3; the
# original's `while ...: break` ran at most once and is now a plain `if`.)
if pnumber < 0:
    print(pnumber, "is not a positive integer.")
print("Thank you so much for using my program")
6e2666a6e406e4ebd7fe6e6904bdb4696b8d2f47 | 404 | py | Python | has33.py | CombatPompano81/Python-Snippets-Galore | c2fb9c6ebef0477895749db9f2aa0f87132a72d6 | [
"Apache-2.0"
] | null | null | null | has33.py | CombatPompano81/Python-Snippets-Galore | c2fb9c6ebef0477895749db9f2aa0f87132a72d6 | [
"Apache-2.0"
] | null | null | null | has33.py | CombatPompano81/Python-Snippets-Galore | c2fb9c6ebef0477895749db9f2aa0f87132a72d6 | [
"Apache-2.0"
] | null | null | null | # main function
has33([1, 3, 3])
has33([3, 1, 3])
has33([3, 3, 3])
has33([1, 3, 1, 3])
| 22.444444 | 75 | 0.569307 |
6e26eeb7a1d51ccae528791cb9b9b4c924ad57bd | 914 | py | Python | proj/urls.py | vitali-rebkavets-itechart/students-lab | 574ad0249ee40b799a2e8faaced3661915bee756 | [
"MIT"
] | null | null | null | proj/urls.py | vitali-rebkavets-itechart/students-lab | 574ad0249ee40b799a2e8faaced3661915bee756 | [
"MIT"
] | 26 | 2019-05-21T13:24:59.000Z | 2019-06-13T10:24:29.000Z | proj/urls.py | vitali-r/students-lab | 574ad0249ee40b799a2e8faaced3661915bee756 | [
"MIT"
] | 2 | 2019-05-21T12:55:23.000Z | 2019-05-21T14:31:14.000Z | from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
from products.views import (products, index,
products_detail)
from rest_framework_jwt.views import refresh_jwt_token
from users.views import ObtainCustomJSONWebToken
apipatterns = [
path('', include('products.urls')),
]
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include((apipatterns, 'api'), namespace='api')),
path('', index, name='index'),
path('products/', products, name='products'),
path('products/<int:product_id>/', products_detail, name='products_detail'),
path('', include('users.urls'), name='users'),
path('sign-in/', ObtainCustomJSONWebToken.as_view()),
path('api/sign-in/refresh', refresh_jwt_token)
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 35.153846 | 80 | 0.705689 |
6e2726ca9cbe233a3e8bac00017eecef8153cd91 | 17,692 | py | Python | survos2/frontend/plugins/objects.py | DiamondLightSource/SuRVoS2 | 42bacfb6a5cc267f38ca1337e51a443eae1a9d2b | [
"MIT"
] | 4 | 2017-10-10T14:47:16.000Z | 2022-01-14T05:57:50.000Z | survos2/frontend/plugins/objects.py | DiamondLightSource/SuRVoS2 | 42bacfb6a5cc267f38ca1337e51a443eae1a9d2b | [
"MIT"
] | 1 | 2022-01-11T21:11:12.000Z | 2022-01-12T08:22:34.000Z | survos2/frontend/plugins/objects.py | DiamondLightSource/SuRVoS2 | 42bacfb6a5cc267f38ca1337e51a443eae1a9d2b | [
"MIT"
] | 2 | 2018-03-06T06:31:29.000Z | 2019-03-04T03:33:18.000Z | from survos2.config import Config
import numpy as np
from numpy.lib.function_base import flip
from qtpy import QtWidgets
from qtpy.QtWidgets import QPushButton, QRadioButton
from survos2.frontend.components.base import *
from survos2.frontend.components.entity import (
SmallVolWidget,
TableWidget,
setup_entity_table,
setup_bb_table,
)
from survos2.frontend.components.icon_buttons import IconButton
from survos2.frontend.control import Launcher
from survos2.frontend.plugins.base import *
from survos2.frontend.plugins.plugins_components import MultiSourceComboBox
from survos2.frontend.utils import FileWidget
from survos2.improc.utils import DatasetManager
from survos2.model import DataModel
from survos2.server.state import cfg
from survos2.frontend.plugins.features import FeatureComboBox
from survos2.frontend.plugins.annotations import LevelComboBox
from survos2.entity.patches import PatchWorkflow, organize_entities, make_patches
| 37.562633 | 111 | 0.603154 |
6e27e9a98e0663d5f4593b8e13414810400eac10 | 1,248 | py | Python | src/calc_orientation.py | ouyang-lab/CAPC | e0fcc698da833b9195315d6769bd076646323289 | [
"Apache-2.0"
] | 5 | 2020-08-24T16:18:45.000Z | 2021-07-07T16:54:32.000Z | src/calc_orientation.py | ouyang-lab/CAPC | e0fcc698da833b9195315d6769bd076646323289 | [
"Apache-2.0"
] | null | null | null | src/calc_orientation.py | ouyang-lab/CAPC | e0fcc698da833b9195315d6769bd076646323289 | [
"Apache-2.0"
] | 1 | 2020-12-09T04:15:59.000Z | 2020-12-09T04:15:59.000Z | import sys
import gzip
import numpy as np
if __name__ == "__main__":
    # NOTE: Python 2 script (print statements; `/` below is integer division).
    # Each input file is a gzipped TSV of "<size>\t<count>" lines.
    f_names = sys.argv[1:]
    max_value = 100000  # sizes >= this are lumped into the overflow bin
    bin_size = 50       # histogram bin width, same units as the size column
    threshold = 0.01    # allowed deviation from the ideal 0.25 proportion
    data = []
    total_bins = (max_value/bin_size)+1  # integer division (Python 2)
    for no, f_name in enumerate(f_names):
        #prefix = f_name.split("/")[-1].replace(".txt.gz", "")
        d = np.zeros(total_bins)
        with gzip.open(f_name, "rb") as f:
            for line in f:
                row = line.strip("\r\n").split("\t")
                size, count = (int(row[0]), int(row[1]))
                if size < max_value:
                    s = size/bin_size  # bin index via integer division
                    d[s] += count
                else:
                    d[max_value/bin_size] += count  # overflow bin
        # Reverse then cumulative-sum: d[i] becomes the total count of
        # entries with size >= that bin's lower bound.
        d = d[::-1].cumsum()
        data.append(d)
    data = np.array(data)
    # Walk from the largest size downward, shrinking current_size while every
    # input file still contributes ~25% (+/- threshold) of the counts.
    # The 0.25 target presumably assumes exactly four input files
    # (one per orientation) -- TODO confirm against the pipeline docs.
    current_size = max_value
    for no, d in enumerate(data.T):
        p = d/d.sum()
        if np.all(abs(p-0.25)<=threshold):
            current_size = (total_bins-no)*bin_size
        else:
            break
    print "Orientation Size (+/-%s): %s" % (threshold, current_size)
    # Dump the per-size proportions for every input file.
    for no, d in enumerate(data.T):
        p = d/d.sum()
        print "\t".join(map(str, [(total_bins-no)*bin_size]+p.tolist()))
| 23.54717 | 72 | 0.491186 |
6e28319339ecb10a654afec47c04531f1e4fc2e5 | 5,459 | py | Python | tests/benchmark/preprocess_img/preproc.py | mpascucci/AST-image-processing | 54111e874237f0c146760d514eea96131177878a | [
"ECL-2.0",
"Apache-2.0"
] | 6 | 2020-11-24T15:55:35.000Z | 2021-12-31T11:52:56.000Z | tests/benchmark/preprocess_img/preproc.py | mpascucci/AST-image-processing | 54111e874237f0c146760d514eea96131177878a | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-11-24T15:46:15.000Z | 2020-11-24T15:46:15.000Z | tests/benchmark/preprocess_img/preproc.py | mpascucci/AST-image-processing | 54111e874237f0c146760d514eea96131177878a | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2021-02-04T10:08:43.000Z | 2022-02-21T02:00:47.000Z | from tqdm import tqdm
import os
import glob
import pickle
import numpy as np
from imageio import imread, imwrite
import astimp
from multiprocessing import Pool, cpu_count
from functools import partial
def preprocess_one_image(path):
    """Run the astimp AST analysis on one image and bundle the results.

    Args:
        path: Path of the image file to analyse.

    Returns:
        dict holding the AST object, its preprocessing object (None when no
        pellets were detected), the detected circles and pellets, the label
        texts, the cropped image, the bare file name and the inhibition data.
    """
    image = np.array(imread(path))
    analysis = astimp.AST(image)
    # Pull the analysis products in the same order as before (astimp
    # attributes may be computed lazily on first access).
    crop_image = analysis.crop
    detected_circles = analysis.circles
    detected_pellets = analysis.pellets
    label_texts = analysis.labels_text
    # NOTE the preprocessing object is not created if no pellets were found.
    preprocessing = analysis.preproc if len(detected_circles) != 0 else None
    return {
        "ast": analysis,
        "preproc": preprocessing,
        "circles": detected_circles,
        "pellets": detected_pellets,
        "labels": label_texts,
        "crop": crop_image,
        "fname": os.path.basename(path),
        "inhibitions": analysis.inhibitions,
    }
def pickle_one_preproc(idx, output_path, image_paths, error_list, skip_existing=False, mute=True):
    """Preprocess ``image_paths[idx]`` and pickle the result into ``output_path``.

    Failures are never raised to the caller: they are recorded in
    ``error_list[idx]`` so a batch run can continue past bad images.

    Args:
        idx: Index into ``image_paths`` of the image to process.
        output_path: Directory that receives the ``<image name>.pickle`` file.
        image_paths: Paths of all images in the batch.
        error_list: Mutable list, one slot per image, collecting error/info
            messages ("" means no message for that image).
        skip_existing: If True, do nothing when the pickle file already exists.
        mute: If True, suppress progress log messages.

    Returns:
        None; results go to the pickle file and ``error_list``.
    """
    # Route log messages through tqdm (so they do not break the progress bar)
    # or drop them when muted. A def beats assigning a lambda (PEP 8 E731).
    if mute:
        def log_function(message):
            pass
    else:
        log_function = tqdm.write

    path = image_paths[idx]
    # Compute the name outside the try block so the except handler can always
    # reference it (the original could hit NameError there in principle).
    fname = os.path.basename(path)  # file name from path
    try:
        ofpath = os.path.join(
            output_path, f"{fname}.pickle")  # output file path
        if skip_existing and os.path.exists(ofpath):
            # Output already present: nothing to do.
            return None
        # WARNING for an unknown reason the pickle call must be inside this function
        pobj = preprocess_one_image(path)
        with open(ofpath, 'wb') as f:
            pickle.dump(pobj, f)
        if len(pobj['circles']) == 0:
            # No pellets detected: record as info, not as a hard failure.
            error_list[idx] = "INFO : {}, No pellets found".format(fname)
            log_function("No pellet found in {}".format(fname))
    except Exception as e:
        # ``map(str, ...)`` replaces the redundant ``lambda x: str(x)``.
        ex_text = ', '.join(map(str, e.args))
        error_list[idx] = "{}, {}".format(fname, ex_text)
        # Report how many images actually produced a message so far; the
        # original printed len(error_list), which is just the batch size.
        failed = sum(1 for err in error_list if err)
        log_function("Failed images: {} - {}".format(failed, ex_text))
    return None
def preprocess(img_paths, output_path, skip_existing=False, parallel=True):
    """preprocess images and pickle the preproc object.

    img_paths : a list of paths of the image files.
    output_path : directory that receives one ``<name>.pickle`` per image
        (created if missing).
    skip_existing : skip images whose pickle file already exists.
    parallel : use one worker process per CPU core instead of a serial loop.
    Returns the list of per-image error/info messages.

    NOTE(review): in the parallel branch the workers run in separate
    processes, so their writes to ``error_list`` land in per-process copies
    and never reach this ``errors`` list -- the filtered result is then
    always []. The serial branch also returns the unfiltered list (with ""
    entries), unlike the parallel branch. Confirm, and consider collecting
    worker return values instead of mutating a shared list.
    """
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    errors = [""]*len(img_paths)
    if parallel:
        jobs = cpu_count()
        print("Running in parallel on {} processes".format(jobs))
        # Bind every argument except the index so Pool.imap can drive it.
        f = partial(pickle_one_preproc,
                    image_paths=img_paths,
                    output_path=output_path,
                    error_list=errors,
                    skip_existing=skip_existing
                    )
        with Pool(jobs) as p:
            # list() drains the lazy imap iterator so tqdm shows progress.
            list(tqdm(p.imap(f,range(len(img_paths))), total=len(img_paths)))
        errors = [e for e in errors if e != ""]
    else:
        for idx in tqdm(range(len(img_paths)), desc="Preprocessing"):
            pickle_one_preproc(idx, output_path, img_paths, errors, skip_existing, mute=False)
    return errors
| 31.923977 | 98 | 0.596263 |
6e28b70b57732d2994e0b212e99122e11d61d96f | 1,024 | py | Python | src/main.py | Evelkos/PAM-and-CLARA | 26fbb8d2d4a7924ce1d0d504c4b23bac38238c69 | [
"MIT"
] | null | null | null | src/main.py | Evelkos/PAM-and-CLARA | 26fbb8d2d4a7924ce1d0d504c4b23bac38238c69 | [
"MIT"
] | null | null | null | src/main.py | Evelkos/PAM-and-CLARA | 26fbb8d2d4a7924ce1d0d504c4b23bac38238c69 | [
"MIT"
] | null | null | null | from clustering_algorithms import CLARA, PAM, get_initial_points
from data_loaders import load_data
from timer import Timer
from visualizers import plot_data
# FILENAME = "datasets/artificial/sizes3.arff"
FILENAME = "datasets/artificial/zelnik4.arff"
# FILENAME = "datasets/artificial/xclara.arff"
# FILENAME = "datasets/real-world/glass.arff"
if __name__ == "__main__":
    # Load the dataset selected by FILENAME and pick the starting medoids.
    data = load_data(FILENAME)
    # plot_data(data["df"], data["classes"], data["class_column"])
    points = get_initial_points(data["df"], data["coordinates_columns"])
    # result = run_clara(data, points)
    # NOTE(review): ``run_pam`` (and the commented ``run_clara``) is neither
    # defined nor imported in this file -- the imports only bring in the
    # CLARA and PAM classes -- so this line raises NameError at runtime.
    # Presumably it should invoke PAM from clustering_algorithms; confirm
    # against that module's API.
    result = run_pam(data, points)
    # Plot the clustered points, colouring by the computed "cluster" column.
    plot_data(
        result, data["classes"], "cluster", attributes_names=data["coordinates_columns"]
    )
| 30.117647 | 88 | 0.709961 |
6e2927924bc2223cbcdf3f80649b9ddc5b016ea6 | 1,143 | py | Python | module/test.py | yuxy000/PythonSyntax | efbfddbd62d88fa6768035d0155c9e8d17cb5670 | [
"MIT"
] | null | null | null | module/test.py | yuxy000/PythonSyntax | efbfddbd62d88fa6768035d0155c9e8d17cb5670 | [
"MIT"
] | null | null | null | module/test.py | yuxy000/PythonSyntax | efbfddbd62d88fa6768035d0155c9e8d17cb5670 | [
"MIT"
] | null | null | null | from module import support
from module import fibo
import sys
# Call a function from the imported support module.
support.print_func("Runoob")
# Exercise the fibo module: fib presumably prints the series itself while
# fib2 returns it as a list -- confirm in module/fibo.py.
fibo.fib(1000)
print(fibo.fib2(100))
# A module's __name__ attribute is its import name.
print(fibo.__name__)
# Bind the module function to a shorter local name and call it.
fib = fibo.fib
fib(10)
"""
fromimport
Pythonfrom
from modname import name1[, name2[, ... nameN]]
fibo fib
>>> from fibo import fib, fib2
>>> fib(500)
1 1 2 3 5 8 13 21 34 55 89 144 233 377
fibofibofib
Fromimport*
from modname import *
"""
"""
__name__
__name__
#!/usr/bin/python3
# Filename: using_name.py
if __name__ == '__main__':
print('')
else:
print('')
$ python using_name.py
$ python
>>> import using_name
>>>
__name__'__main__'
"""
"""
dir() :
"""
# dir(module) lists the names a module defines.
print(dir(fibo))
print(dir(sys))
# dir() with no argument lists the names in the current scope.
print(dir())
| 19.05 | 85 | 0.726159 |
6e2a9766e0a79f77304a55be682d4bc167bde209 | 4,459 | py | Python | src/utils.py | zimonitrome/AbstractionNet | a037b696ccac015936d60026cb1ac4ebafc68371 | [
"MIT"
] | null | null | null | src/utils.py | zimonitrome/AbstractionNet | a037b696ccac015936d60026cb1ac4ebafc68371 | [
"MIT"
] | null | null | null | src/utils.py | zimonitrome/AbstractionNet | a037b696ccac015936d60026cb1ac4ebafc68371 | [
"MIT"
] | null | null | null | import torch
from einops import rearrange
import svgwrite
###########################################
# Normalization / Standardization functions
###########################################
def normalize_functional(tensor: torch.Tensor, mean: list, std: list):
    """Standardize ``tensor`` along the channel dimension (dim -3).

    Each channel c is mapped to (tensor[c] - mean[c]) / std[c].
    [... C H W] -> [... C H W]
    """
    # Reshape the per-channel statistics so they broadcast over H and W.
    mean_col = torch.tensor(mean).view(-1, 1, 1).to(tensor.device)
    std_col = torch.tensor(std).view(-1, 1, 1).to(tensor.device)
    return tensor.sub(mean_col).div(std_col)
def unnormalize_functional(tensor: torch.Tensor, mean: list, std: list):
    """Invert channel-wise standardization (dim -3) and clip to [0, 1].

    Each channel c is mapped to clamp(tensor[c] * std[c] + mean[c], 0, 1).
    [... C H W] -> [... C H W]
    """
    # Reshape the per-channel statistics so they broadcast over H and W.
    mean_col = torch.tensor(mean).view(-1, 1, 1).to(tensor.device)
    std_col = torch.tensor(std).view(-1, 1, 1).to(tensor.device)
    restored = tensor * std_col + mean_col
    return restored.clamp(0, 1)
def unnormalize_to(x, x_min, x_max):
    """Map x from [0, 1] linearly onto [x_min, x_max].

    x = 0 maps to x_min and x = 1 maps to x_max.
    """
    span = x_max - x_min
    return x * span + x_min
############################
# Image convertion functions
############################
def rgba_to_rgb(rgba: torch.Tensor):
    """Drop the alpha channel by premultiplying it into the color channels.

    [... 4 H W] -> [... 3 H W]
    """
    color = rgba[..., :-1, :, :]
    alpha = rgba[..., -1:, :, :]
    return color * alpha
def rgb_to_rgba(rgb: torch.Tensor, fill: float = 1.0):
    """Append an alpha channel filled with ``fill`` (1.0 by default).

    [... 3 H W] -> [... 4 H W]
    """
    # Use one color channel as a shape/dtype/device template for the alpha.
    template = rgb[..., :1, :, :]
    alpha = torch.full_like(template, fill_value=fill)
    return torch.cat([rgb, alpha], dim=-3)
###########################################
# Alpha compositing/decompositing functions
###########################################
def alpha_composite(base, added, eps=1e-8):
    """Layer ``added`` on top of ``base`` (alpha "over" operator).

    The last channel of each tensor is treated as alpha; ``eps`` guards the
    division when the combined alpha is zero.
    [... C H W], [... C H W] -> [... C H W]
    """
    base_alpha = base[..., -1:, :, :]
    added_alpha = added[..., -1:, :, :]
    base_color = base[..., :-1, :, :]
    added_color = added[..., :-1, :, :]

    # https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending
    out_alpha = (1 - added_alpha) * base_alpha + added_alpha
    out_color = ((1 - added_alpha) * base_alpha * base_color
                 + added_alpha * added_color) / (out_alpha + eps)

    # Stitch the blended color and alpha back into one tensor.
    return torch.cat([out_color, out_alpha], dim=-3)
def alpha_composite_multiple(images_tensor):
    """Flatten a stack of N RGBA layers into a single image.

    Layers are composited bottom-up: index 0 first, each following layer on
    top of the accumulated result.
    [... N C H W] -> [... C H W]
    """
    layers = rearrange(images_tensor, "... N C H W -> N ... C H W")
    result = layers[0]
    for layer in layers[1:]:
        # TODO: Possibly need to add .copy() to prevent assignment error in autograd.
        result = alpha_composite(result, layer)
    return result
def get_visible_mask(shapes):
"""
Inputs a set of rendered images where C > 1 and the last channel is an alpha channel.
Assuming that images were to be compositioned first to last (N=0, 1, 2...),
returns a mask for each image that show what pixels of that image is visible in the final composition.
[... N C H W] -> [... N H W]
"""
shape_iterator = rearrange(shapes, "... N C H W -> N ... C H W").flip(0)
accumulated_alpha = torch.zeros_like(shape_iterator[0,..., 0, :, :]) # empty like first image, single channel
shape_maks = torch.zeros_like(shape_iterator[..., 0, :, :]) # empty image for each shape layer
for i, shape in enumerate(shape_iterator):
# a over b alpha compositioning
# alpha_0 = (1 - alpha_a) * alpha_b + alpha_a
# get b
# alpha_b = (alpha_0 - alpha_a) / (1 - alpha_a)
shape_alpha = shape[..., -1, :, :]
alpha_visible = shape_alpha - accumulated_alpha * shape_alpha
shape_maks[i] = alpha_visible
accumulated_alpha = (1 - shape_alpha) * accumulated_alpha + shape_alpha
return rearrange(shape_maks.flip(0), "N ... H W -> ... N H W").unsqueeze(-3) | 36.54918 | 113 | 0.589146 |
6e2c7487821c1b466bfeb152a868353bd01ba3f7 | 3,742 | py | Python | CellMQ.py | edjuaro/cell-migration-quantification | b6479cc8525a1ac8bdaf0abfc66dec57de0be21e | [
"MIT"
] | null | null | null | CellMQ.py | edjuaro/cell-migration-quantification | b6479cc8525a1ac8bdaf0abfc66dec57de0be21e | [
"MIT"
] | null | null | null | CellMQ.py | edjuaro/cell-migration-quantification | b6479cc8525a1ac8bdaf0abfc66dec57de0be21e | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from skimage import draw
from skimage import io
# Read the source image as 8-bit grayscale.
im_in = cv2.imread("analyses/MDA231_stopper_1_c3.tif", cv2.IMREAD_GRAYSCALE);
# Inverted binary threshold:
# pixel values above 20 become 0, values of 20 or below become 255
# (the original comments said 220, which did not match the code).
th, im_th = cv2.threshold(im_in, 20, 255, cv2.THRESH_BINARY_INV);
# Copy the thresholded image.
im_floodfill = im_th.copy()
# Mask used to flood filling.
# Notice the size needs to be 2 pixels larger than the image.
h, w = im_th.shape[:2]
mask = np.zeros((h+2, w+2), np.uint8)
# Floodfill from point (0, 0)
cv2.floodFill(im_floodfill, mask, (0,0), 255);
# Invert floodfilled image
im_floodfill_inv = cv2.bitwise_not(im_floodfill)
# Combine the two images to get the foreground (fills interior holes).
im_out = im_th | im_floodfill_inv
io.imsave(fname='temp_output.png', arr=im_out)
# im_out_inv = cv2.bitwise_not(im_out)
# Clean the mask edge: erode, then dilate the inverse, using a small
# circular structuring element.
# NOTE(review): skimage's draw.circle was removed in newer releases in
# favour of draw.disk -- confirm the pinned scikit-image version.
k_size = 2
k_half = k_size/2
kernel = np.ones((k_size,k_size),np.uint8)
coords = draw.circle(k_half, k_half, k_half, shape=im_th.shape)
kernel[coords] = 1
erosion = cv2.erode(im_out,kernel,iterations = 1)
dilation = cv2.dilate(cv2.bitwise_not(erosion),kernel,iterations = 1)
dilation = cv2.bitwise_not(dilation)
# io.imshow(dilation)
io.imsave(fname='mask.png', arr=dilation)
# Display images.
# io.imsave(fname='mask.png', arr=im_out)
# # mostly from http://nickc1.github.io/python,/matlab/2016/05/17/Standard-Deviation-(Filters)-in-Matlab-and-Python.html
# import cv2
# from skimage import draw
# from skimage import io
# filename = 'analyses/MDA231_stopper_1_c3.tif'
# plate = io.imread(filename,as_grey=True)
# image = plate
# #io.imshow(image)
# # io.imsave(fname='temp_output.png', arr=image)
# import numpy as np
# # img = cv2.imread('....') # Read in the image
# sobelx = cv2.Sobel(image,cv2.CV_64F,1,0) # Find x and y gradients
# sobely = cv2.Sobel(image,cv2.CV_64F,0,1)
# # Find magnitude and angle
# I2 = np.sqrt(sobelx**2.0 + sobely**2.0)
# # angle = np.arctan2(sobely, sobelx) * (180 / np.pi)
# # io.imshow(I2)
# # io.imsave(fname='temp_output.png', arr=I2)
# from scipy.ndimage.filters import uniform_filter
# import numpy as np
# def window_stdev(X, window_size):
# c1 = uniform_filter(X, window_size, mode='reflect')
# c2 = uniform_filter(X*X, window_size, mode='reflect')
# return np.sqrt(c2 - c1*c1)
# # x = np.arange(16).reshape(4,4).astype('float')
# kernel_size = 3
# I1 = window_stdev(I2,kernel_size)*np.sqrt(kernel_size**2/(kernel_size**2 - 1))
# # io.imshow(I1)
# # io.imsave(fname='temp_output.png', arr=I1)
# from scipy.signal import medfilt2d
# I1 = medfilt2d(I1, kernel_size=3)
# # io.imshow(I1)
# # io.imsave(fname='temp_output.png', arr=I1)
# import numpy as np
# from skimage.morphology import reconstruction
# from skimage.exposure import rescale_intensity
# # image = rescale_intensity(I1, in_range=(50, 200))
# image = I1
# seed = np.copy(image)
# seed[1:-1, 1:-1] = image.max()
# mask = image
# filled = reconstruction(seed, mask, method='erosion')
# io.imsave(fname='temp_output.png', arr=filled)
# # kernel = np.zeros((80,80),np.uint8)
# # coords = draw.circle(40, 40, 40, shape=image.shape)
# # kernel[coords] = 1
# # erosion = cv2.erode(I1,kernel,iterations = 1)
# # # io.imshow(erosion)
# # # # kernel = np.ones((40,40),np.uint8)
# # # # erosion = cv2.erode(I1,kernel,iterations = 1)
# # # # io.imshow(erosion)
# # # io.imsave(fname='temp_output.png', arr=erosion)
# # from skimage.morphology import reconstruction
# # fill = reconstruction(I1, erosion, method='erosion')
# # # io.imshow(fill)
# # # io.imsave(fname='temp_output.png', arr=fill)
# # dilation = cv2.dilate(fill,kernel,iterations = 1)
# # # io.imshow(dilation)
# # io.imsave(fname='temp_output.png', arr=dilation) | 27.925373 | 120 | 0.69829 |
6e2d9335521cea1ce24ba509b262882641d75542 | 1,344 | py | Python | test/unit/messages/bloxroute/test_txs_message.py | dolphinridercrypto/bxcommon | 8f70557c1dbff785a5dd3fcdf91176066e085c3a | [
"MIT"
] | 12 | 2019-11-06T17:39:10.000Z | 2022-03-01T11:26:19.000Z | test/unit/messages/bloxroute/test_txs_message.py | dolphinridercrypto/bxcommon | 8f70557c1dbff785a5dd3fcdf91176066e085c3a | [
"MIT"
] | 8 | 2019-11-06T21:31:11.000Z | 2021-06-02T00:46:50.000Z | test/unit/messages/bloxroute/test_txs_message.py | dolphinridercrypto/bxcommon | 8f70557c1dbff785a5dd3fcdf91176066e085c3a | [
"MIT"
] | 5 | 2019-11-14T18:08:11.000Z | 2022-02-08T09:36:22.000Z | from bxcommon.test_utils.abstract_test_case import AbstractTestCase
from bxcommon.messages.bloxroute.txs_message import TxsMessage
from bxcommon.models.transaction_info import TransactionInfo
from bxcommon.test_utils import helpers
from bxcommon.utils.object_hash import Sha256Hash
| 38.4 | 110 | 0.738095 |
6e2e387eef5e879a3d06801f9f8eb44b9b39bb68 | 712 | py | Python | CursoEmVideo/Aula16 - Tuplas.py | caique-santana/CursoEmVideo-Curso_Python3 | 86bb67bbbf348544e1135d8657672d4e33fa70e2 | [
"MIT"
] | 1 | 2020-04-15T00:49:02.000Z | 2020-04-15T00:49:02.000Z | CursoEmVideo/Aula16 - Tuplas.py | caique-santana/CursoEmVideo-Curso_Python3 | 86bb67bbbf348544e1135d8657672d4e33fa70e2 | [
"MIT"
] | null | null | null | CursoEmVideo/Aula16 - Tuplas.py | caique-santana/CursoEmVideo-Curso_Python3 | 86bb67bbbf348544e1135d8657672d4e33fa70e2 | [
"MIT"
] | null | null | null | lanche = ('Hambrguer', 'Suco', 'Pizza', 'Pudim', 'Batata Frita')
# Tuples are immutable.
# lanche[1] = 'Refrigerante' - this assignment would raise a TypeError.
print(len(lanche))
print(sorted(lanche))  # sorted() returns a new list; the tuple is untouched.
print(lanche)
print(lanche[-3:])  # Slice: the last three items.
# Three ways to iterate: values only, by index, and with enumerate.
for comida in lanche:
    print(f'Eu vou comer {comida}')
for cont in range(0, len(lanche)):
    print(f'Eu vou comer {lanche[cont]} na posio {cont}')
for pos, comida in enumerate(lanche):
    print(f'Eu Vou comer {comida} na posio {pos}')
print('Comi pra caramba!')
# Tuple concatenation creates a new tuple.
a = (2, 5, 4)
b = (5, 8, 1, 2)
c = b + a
print(c)
print(c.index(5, 1))  # First index of 5, searching from position 1.
print(f'o tamanho de "c" {len(c)}')
print(f'Tem {c.count(5)} nmeros 5')
pessoa = ('Gustavo', 39, 'M', 99.88)
del(pessoa)
# The name no longer exists after del, so this print raises NameError
# (the tutorial demonstrates that del removes the binding).
print(pessoa)
| 22.25 | 65 | 0.644663 |
6e2ec7ad4cbde5fb55995e9127da176c9b74eb60 | 167 | py | Python | app/config.py | akabbeke/sd44_server | 7755567c7b273a5ac23b2aacc52477dd4a11d290 | [
"MIT"
] | null | null | null | app/config.py | akabbeke/sd44_server | 7755567c7b273a5ac23b2aacc52477dd4a11d290 | [
"MIT"
] | null | null | null | app/config.py | akabbeke/sd44_server | 7755567c7b273a5ac23b2aacc52477dd4a11d290 | [
"MIT"
] | null | null | null | import yaml
import os
config_file = os.path.join(os.path.dirname(__file__), "config/config.yml")
with open(config_file, 'r') as stream:
CONFIG = yaml.load(stream) | 27.833333 | 74 | 0.736527 |
6e2f62475e9654f761ab72ca7f65f8bb7603adef | 921 | py | Python | python/projects/jenkins_config_xml_parser/main.py | zhaoace/codecraft | bf06267e86bd7386714911b0df4aa0ca0a91d882 | [
"Unlicense"
] | null | null | null | python/projects/jenkins_config_xml_parser/main.py | zhaoace/codecraft | bf06267e86bd7386714911b0df4aa0ca0a91d882 | [
"Unlicense"
] | null | null | null | python/projects/jenkins_config_xml_parser/main.py | zhaoace/codecraft | bf06267e86bd7386714911b0df4aa0ca0a91d882 | [
"Unlicense"
] | null | null | null | import xml.etree.ElementTree as ET
tree = ET.parse('/Users/zhaoli/workspace/splunk/playground/var/lib/jenkins/jobs/Splunk/jobs/develop/jobs/platform/jobs/cli/jobs/trigger_cli_linux/config.xml')
root = tree.getroot()
# SPs = root.findall("properties/hudson.model.ParametersDefinitionProperty/parameterDefinitions/[hudson.model.StringParameterDefinition]")
SPs = root.findall("properties/hudson.model.ParametersDefinitionProperty/parameterDefinitions/hudson.model.StringParameterDefinition/[name='branch']")
print "***"
print dir(SPs)
print "***"
for s in SPs:
print "-----"
# print s.tag, ":", s.text
ET.dump(s)
spd = ET.Element("hudson.model.StringParameterDefinition")
name = ET.SubElement(spd, 'name')
name.text="version"
description=ET.SubElement(spd, 'description')
description.text="The product version"
defaultValue=ET.SubElement(spd, 'defaultValue')
defaultValue.text=""
ET.dump(spd)
tree. | 27.909091 | 158 | 0.761129 |
6e2fe086028f0377c018ceee95df734b7ae1f811 | 986 | py | Python | BLAST/make_fasta.py | cdiaza/bootcamp | 2fda661a44930f70ac8ef15218cc99d099fc4019 | [
"MIT"
] | 1 | 2021-01-16T20:39:41.000Z | 2021-01-16T20:39:41.000Z | BLAST/make_fasta.py | cdiaza/bootcamp | 2fda661a44930f70ac8ef15218cc99d099fc4019 | [
"MIT"
] | null | null | null | BLAST/make_fasta.py | cdiaza/bootcamp | 2fda661a44930f70ac8ef15218cc99d099fc4019 | [
"MIT"
] | 1 | 2021-01-16T20:31:17.000Z | 2021-01-16T20:31:17.000Z | import random
def format_fasta(title, sequence):
    """
    This formats a fasta sequence

    Input:
        title - String - Title of the sequence
        sequence - String - Actual sequence

    Output:
        String - Fully formatted fasta sequence
    """
    fasta_width = 70  # Number of characters in one line
    # Ceiling division: the original ``1 + len // width`` produced a spurious
    # empty line whenever the length was an exact multiple of the width.
    # Keep at least one line so an empty sequence still renders the same
    # (blank) sequence line as before.
    n_lines = max(1, (len(sequence) + fasta_width - 1) // fasta_width)
    lines = "\n".join(
        sequence[i * fasta_width:(i + 1) * fasta_width] for i in range(n_lines)
    )
    return f"> {title}\n{lines}\n\n"
bases = "actg"  # Alphabet for the random nucleotide sequences

# Write ten random sequences of every length from 1 to 24 into one FASTA file.
with open("random_sequences.fa", "w") as fasta_file:
    for seq_length in range(1, 25):
        for attempt in range(10):
            header = f"length_{seq_length} run_{attempt}"
            seq = "".join(random.choices(bases, k=seq_length))
            fasta_file.write(format_fasta(header, seq))
| 29.878788 | 81 | 0.631846 |
6e3054f23fea6a6c7c56f18a768f57df2c3c07ac | 1,604 | py | Python | unittesting/utils/output_panel.py | guillermooo/UnitTesting | 04802c56d5ccea44043a241050d6fe331c6ff694 | [
"MIT"
] | null | null | null | unittesting/utils/output_panel.py | guillermooo/UnitTesting | 04802c56d5ccea44043a241050d6fe331c6ff694 | [
"MIT"
] | null | null | null | unittesting/utils/output_panel.py | guillermooo/UnitTesting | 04802c56d5ccea44043a241050d6fe331c6ff694 | [
"MIT"
] | null | null | null | import sublime
import os
| 32.08 | 79 | 0.639651 |
6e3246c7687554b238139dfec4bd2b58d1c2ba17 | 673 | py | Python | main.py | jon-choi/hillsbarber | 346e9cbe5de7c5bf8a9136e71981b058323784a1 | [
"Apache-2.0"
] | null | null | null | main.py | jon-choi/hillsbarber | 346e9cbe5de7c5bf8a9136e71981b058323784a1 | [
"Apache-2.0"
] | null | null | null | main.py | jon-choi/hillsbarber | 346e9cbe5de7c5bf8a9136e71981b058323784a1 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, render_template
app = Flask(__name__)
app.config['DEBUG'] = True
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
# return render_template('bootstrap_cover.html', name=name)
# @app.route('/rates')
# def helloRates(name='rates'):
# return render_template('template.html',name=name)
| 29.26087 | 76 | 0.708767 |
6e330bec332cbcb5e47190df3547281fe5168a28 | 903 | py | Python | tests/test_echo_server_contextvar.py | rednafi/think-async | 3642afc0d8661b10affd953ce3b239f3e6b3009b | [
"MIT"
] | 87 | 2021-04-14T09:51:30.000Z | 2022-03-24T10:38:41.000Z | tests/test_echo_server_contextvar.py | rednafi/think-async | 3642afc0d8661b10affd953ce3b239f3e6b3009b | [
"MIT"
] | 3 | 2021-06-27T18:06:11.000Z | 2022-03-24T19:56:38.000Z | tests/test_echo_server_contextvar.py | rednafi/think-async | 3642afc0d8661b10affd953ce3b239f3e6b3009b | [
"MIT"
] | 4 | 2021-05-12T01:36:14.000Z | 2022-01-28T04:06:12.000Z | from unittest.mock import Mock, patch
import pytest
import patterns.echo_server_contextvar as main
| 25.8 | 77 | 0.743079 |
6e3355f7d36e6d39cee7c23d5acd90666f7629a8 | 693 | py | Python | test.py | riquedev/SSLProxies24Feed | 93ab23a6794ae7f40002eb464a9c443afe44db86 | [
"MIT"
] | null | null | null | test.py | riquedev/SSLProxies24Feed | 93ab23a6794ae7f40002eb464a9c443afe44db86 | [
"MIT"
] | 1 | 2017-09-15T13:27:09.000Z | 2017-09-15T14:43:28.000Z | test.py | riquedev/SSLProxies24Feed | 93ab23a6794ae7f40002eb464a9c443afe44db86 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Autor: rique_dev (rique_dev@hotmail.com)
from SSLProxies24.Feed import Feed
from SSLProxies24.Check import CheckProxy
import time
import gc
# Fetch the proxy listing from the feed.
prx = Feed().PROXY_LIST
# Instantiate the checker.
chk = CheckProxy()
# Start validating the list.
chk.validatelist(prx)
# Enable the garbage collector, then give the checks time to run.
gc.enable()
time.sleep(30)
# Counters (printed labels are Portuguese: success/failures/total/remaining).
print('Sucesso: '+str(chk.getsucesscount()))
print('Falhas: '+str(chk.getfailcount()))
print('Total de Proxys: '+str(chk.getproxycount()))
print('Restam: '+str(chk.getproxycount()-(chk.getsucesscount()+chk.getfailcount())))
# Dump the proxy list itself.
print(chk.getproxylist())
del prx
del chk
print('Classes eliminadas.')
exit(0) | 19.25 | 84 | 0.730159 |
6e33da3d320ddccf5c2863568bc4b5fb0505e125 | 577 | py | Python | euler.py | user3719431/tna_lab1 | 183c34d927c39f502fea7d6a81f2945104d7b75b | [
"MIT"
] | null | null | null | euler.py | user3719431/tna_lab1 | 183c34d927c39f502fea7d6a81f2945104d7b75b | [
"MIT"
] | null | null | null | euler.py | user3719431/tna_lab1 | 183c34d927c39f502fea7d6a81f2945104d7b75b | [
"MIT"
] | null | null | null | import math as m
| 24.041667 | 60 | 0.363951 |
6e34180a8de5ed1a630ffd86a9a830130bbd1076 | 3,787 | py | Python | src/b2d/hud_b2d.py | VgTajdd/neuroevolver | 248c96b25ad936e15cfffc7a4223926db83ad540 | [
"MIT"
] | null | null | null | src/b2d/hud_b2d.py | VgTajdd/neuroevolver | 248c96b25ad936e15cfffc7a4223926db83ad540 | [
"MIT"
] | null | null | null | src/b2d/hud_b2d.py | VgTajdd/neuroevolver | 248c96b25ad936e15cfffc7a4223926db83ad540 | [
"MIT"
] | null | null | null | ## ========================================================================= ##
## Copyright (c) 2019 Agustin Durand Diaz. ##
## This code is licensed under the MIT license. ##
## hud_b2d.py ##
## ========================================================================= ##
from core.hud_base import HudBase
from enums import ScreenType, SimulationType
from core.utils import getPathWithoutExtension, existsFile, getImageSize
import settings
| 44.034884 | 108 | 0.520201 |
6e358277ee18f33ce73fddfacb850dc985cb0977 | 1,958 | py | Python | grblc/search/gcn/parser/combine.py | youngsm/adsgrb | a89b56b371888deb67788a9f5a91300b281784a6 | [
"MIT"
] | null | null | null | grblc/search/gcn/parser/combine.py | youngsm/adsgrb | a89b56b371888deb67788a9f5a91300b281784a6 | [
"MIT"
] | null | null | null | grblc/search/gcn/parser/combine.py | youngsm/adsgrb | a89b56b371888deb67788a9f5a91300b281784a6 | [
"MIT"
] | null | null | null | def get_final_txt(grb, tables, sentences, output_path):
"""
Combine the data from [grb]_final_sentences.txt and [grb]_final_tables.txt.
If a piece of data in tables and another piece in sentecnes are originially
from the same GCN. Put them in the same GCN in [grb]_final.txt.
"""
# Avoid modifying the data for the later use.
tables = tables.copy()
sentences = sentences.copy()
# Open up the file.
file = open(f"{output_path}{grb}/{grb}_final.txt", 'w')
# Loop through the sentences and for each sentence, check if there is any table
# that are originially from the same GCN.
for sentence in sentences:
# The number of the GCN.
num = sentence['number']
# The final string that we dumps into the text file.
result = "=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=\n\n"
result += f"GCN Number: {sentence['number']}\n\n"
result += f"SENTENCE DATA:\n\n{sentence['sentences']}\n\n"
# The variable to help check how many tables are from the same GCN.
table_with_the_same_number = 0
# Loop through the tables to see if there are any tables in the same GCN.
for idx, table in enumerate(tables):
# If we find any tables in the same GCN.
if table['number'] == num:
if table_with_the_same_number == 0:
result += "TABLE DATA:\n\n"
table_with_the_same_number += 1
result += '\n'.join(table['table']) + '\n\n'
tables.pop(idx)
file.write(result)
# Write the remaining tables to the text file.
for table in tables:
result = "=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=\n\n"
result += f"GCN Number: {table['number']}\n"
result += "TABLE DATA:\n\n" + '\n'.join(table['table']) + '\n\n'
file.write(result)
| 36.943396 | 88 | 0.550051 |
6e35f3a7bd64997a4e302cd1d8e7454d8298b774 | 972 | py | Python | hardware/headband.py | davidji/roundbot | 2ca34a83c9feb3331f1b818106f06b3182c4970e | [
"Apache-2.0"
] | null | null | null | hardware/headband.py | davidji/roundbot | 2ca34a83c9feb3331f1b818106f06b3182c4970e | [
"Apache-2.0"
] | null | null | null | hardware/headband.py | davidji/roundbot | 2ca34a83c9feb3331f1b818106f06b3182c4970e | [
"Apache-2.0"
] | null | null | null | from solid import *
from solid.utils import *
import util
from util import *
from math import pi
if __name__ == '__main__':
export_scad()
| 31.354839 | 123 | 0.588477 |
6e362218fdee0a3ed3f2a33dd6f1acddc1fd9111 | 106 | py | Python | native_shortuuid/apps.py | foundertherapy/django-nativeshortuuidfield | 47e5a5d5c0f4caedbadb88ed6ac279f513ae522a | [
"MIT"
] | 5 | 2020-09-30T00:21:05.000Z | 2022-01-10T08:56:47.000Z | native_shortuuid/apps.py | foundertherapy/django-nativeshortuuidfield | 47e5a5d5c0f4caedbadb88ed6ac279f513ae522a | [
"MIT"
] | 1 | 2020-03-11T15:39:44.000Z | 2020-03-11T15:39:44.000Z | native_shortuuid/apps.py | foundertherapy/django-nativeshortuuidfield | 47e5a5d5c0f4caedbadb88ed6ac279f513ae522a | [
"MIT"
] | 1 | 2021-03-03T12:49:52.000Z | 2021-03-03T12:49:52.000Z | from django.apps import AppConfig
| 17.666667 | 39 | 0.792453 |
6e364089d40bdc8f90fe2c5aa5081ef11b937f59 | 3,482 | py | Python | climlab/dynamics/meridional_advection_diffusion.py | nfeldl/climlab | 2cabb49e2c3f54c1795f24338ef5ee44e49fc7e7 | [
"BSD-3-Clause",
"MIT"
] | 160 | 2015-02-25T15:56:37.000Z | 2022-03-14T23:51:23.000Z | climlab/dynamics/meridional_advection_diffusion.py | nfeldl/climlab | 2cabb49e2c3f54c1795f24338ef5ee44e49fc7e7 | [
"BSD-3-Clause",
"MIT"
] | 137 | 2015-12-18T17:39:31.000Z | 2022-02-04T20:50:53.000Z | climlab/dynamics/meridional_advection_diffusion.py | nfeldl/climlab | 2cabb49e2c3f54c1795f24338ef5ee44e49fc7e7 | [
"BSD-3-Clause",
"MIT"
] | 54 | 2015-04-28T05:57:39.000Z | 2022-02-17T08:15:11.000Z | r"""General solver of the 1D meridional advection-diffusion equation on the sphere:
.. math::
\frac{\partial}{\partial t} \psi(\phi,t) &= -\frac{1}{a \cos\phi} \frac{\partial}{\partial \phi} \left[ \cos\phi ~ F(\phi,t) \right] \\
F &= U(\phi) \psi(\phi) -\frac{K(\phi)}{a} ~ \frac{\partial \psi}{\partial \phi}
for a state variable :math:`\psi(\phi,t)`, arbitrary diffusivity :math:`K(\phi)`
in units of :math:`x^2 ~ t^{-1}`, and advecting velocity :math:`U(\phi)`.
:math:`\phi` is latitude and :math:`a` is the Earth's radius (in meters).
:math:`K` and :math:`U` can be scalars,
or optionally vector *specified at grid cell boundaries*
(so their lengths must be exactly 1 greater than the length of :math:`\phi`).
:math:`K` and :math:`U` can be modified by the user at any time
(e.g., after each timestep, if they depend on other state variables).
A fully implicit timestep is used for computational efficiency. Thus the computed
tendency :math:`\frac{\partial \psi}{\partial t}` will depend on the timestep.
In addition to the tendency over the implicit timestep,
the solver also calculates several diagnostics from the updated state:
- ``diffusive_flux`` given by :math:`-\frac{K(\phi)}{a} ~ \frac{\partial \psi}{\partial \phi}` in units of :math:`[\psi]~[x]`/s
- ``advective_flux`` given by :math:`U(\phi) \psi(\phi)` (same units)
- ``total_flux``, the sum of advective, diffusive and prescribed fluxes
- ``flux_convergence`` (or instantanous scalar tendency) given by the right hand side of the first equation above, in units of :math:`[\psi]`/s
Non-uniform grid spacing is supported.
The state variable :math:`\psi` may be multi-dimensional, but the diffusion
will operate along the latitude dimension only.
"""
from __future__ import division
import numpy as np
from .advection_diffusion import AdvectionDiffusion, Diffusion
from climlab import constants as const
| 42.463415 | 143 | 0.661401 |
6e369cedee85dd513db727dff183f7bdbc8263b5 | 1,624 | py | Python | gnes/service/grpc.py | micro-pixel/gnes | 388d1ba718ec04eedaaff3ce34da43689c197ee7 | [
"Apache-2.0"
] | 1 | 2019-10-23T03:41:57.000Z | 2019-10-23T03:41:57.000Z | gnes/service/grpc.py | cmy9068/gnes | 44a54be4c80108ac65b2450b4af8deded6da3339 | [
"Apache-2.0"
] | null | null | null | gnes/service/grpc.py | cmy9068/gnes | 44a54be4c80108ac65b2450b4af8deded6da3339 | [
"Apache-2.0"
] | 1 | 2020-10-28T15:07:36.000Z | 2020-10-28T15:07:36.000Z | # Tencent is pleased to support the open source community by making GNES available.
#
# Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc
from .base import BaseService as BS, MessageHandler
from ..helper import PathImporter
from ..proto import gnes_pb2
| 36.088889 | 86 | 0.711823 |
6e37060290900c339e29bf4d74171d48cbea8c69 | 3,508 | py | Python | dhost/logs/models.py | dhost-project/dhost | ca6a4a76a737174b24165e20edeb1d1019a9424b | [
"MIT"
] | null | null | null | dhost/logs/models.py | dhost-project/dhost | ca6a4a76a737174b24165e20edeb1d1019a9424b | [
"MIT"
] | 67 | 2021-07-06T11:50:25.000Z | 2021-10-14T13:45:51.000Z | dhost/logs/models.py | dhost-project/dhost | ca6a4a76a737174b24165e20edeb1d1019a9424b | [
"MIT"
] | null | null | null | import uuid
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils import timezone
from django.utils.timesince import timesince
from django.utils.translation import gettext_lazy as _
from dhost.dapps.models import Dapp
| 35.434343 | 79 | 0.679019 |
6e397c403213c314186ad9c8dc4d66123671cfea | 620 | py | Python | Day14/main.py | dloibl/AOC2021 | 80672a7ee8ebc1a7970c155e4e15e0ed2351e085 | [
"MIT"
] | null | null | null | Day14/main.py | dloibl/AOC2021 | 80672a7ee8ebc1a7970c155e4e15e0ed2351e085 | [
"MIT"
] | null | null | null | Day14/main.py | dloibl/AOC2021 | 80672a7ee8ebc1a7970c155e4e15e0ed2351e085 | [
"MIT"
] | null | null | null | data = open("input.txt", "r").readlines()
polymer = data[0]
pair_insertion = {}
for line in data[2:]:
[token, replacement] = line.strip().split(" -> ")
pair_insertion[token] = replacement
result = [i for i in polymer.strip()]
for step in range(0, 10):
next = []
for i, si in enumerate(result):
if i < len(result)-1:
next.append(si)
next.append(pair_insertion[result[i]+result[i+1]])
else:
next.append(si)
result = next
count = [result.count(a) for a in set(pair_insertion.values())]
print("The answer of part 1 is", max(count) - min(count))
| 23.846154 | 63 | 0.596774 |
6e399f9876b8a0c8affd85f404dc546dcab1961f | 1,199 | py | Python | raster/migrations/0006_auto_20141016_0522.py | bpneumann/django-raster | 74daf9d396f2332a2cd83723b7330e6b10d73b1c | [
"BSD-3-Clause"
] | null | null | null | raster/migrations/0006_auto_20141016_0522.py | bpneumann/django-raster | 74daf9d396f2332a2cd83723b7330e6b10d73b1c | [
"BSD-3-Clause"
] | null | null | null | raster/migrations/0006_auto_20141016_0522.py | bpneumann/django-raster | 74daf9d396f2332a2cd83723b7330e6b10d73b1c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
| 31.552632 | 236 | 0.539616 |
6e3ac431c3e1e4eb2271fa87cec379de652a2355 | 588 | py | Python | tests/tests/test_analysis/test_utils.py | klavinslab/coral | 17f59591211562a59a051f474cd6cecba4829df9 | [
"MIT"
] | 34 | 2015-12-26T22:13:51.000Z | 2021-11-17T11:46:37.000Z | tests/tests/test_analysis/test_utils.py | klavinslab/coral | 17f59591211562a59a051f474cd6cecba4829df9 | [
"MIT"
] | 13 | 2015-09-11T23:27:51.000Z | 2018-06-25T20:44:28.000Z | tests/tests/test_analysis/test_utils.py | klavinslab/coral | 17f59591211562a59a051f474cd6cecba4829df9 | [
"MIT"
] | 14 | 2015-10-08T17:08:48.000Z | 2022-02-22T04:25:54.000Z | '''
Tests for utils submodule of the analysis module.
'''
from nose.tools import assert_equal, assert_raises
from coral import analysis, DNA, RNA, Peptide
| 29.4 | 71 | 0.748299 |
6e3b1af1bee45ddc7a412b33a2fead806c9ec302 | 1,765 | py | Python | djangorecipebook/templating.py | tkhyn/djangorecipebook | 2cbb3d46631630e2c7a3c511b504de2088aac115 | [
"MIT"
] | null | null | null | djangorecipebook/templating.py | tkhyn/djangorecipebook | 2cbb3d46631630e2c7a3c511b504de2088aac115 | [
"MIT"
] | null | null | null | djangorecipebook/templating.py | tkhyn/djangorecipebook | 2cbb3d46631630e2c7a3c511b504de2088aac115 | [
"MIT"
] | null | null | null | """
Carry out template-based replacements in project files
"""
import os
import sys
from string import Template
def replace_name(path, mapping):
"""
Handles replacement strings in the file or directory name
"""
# look for replacement strings in filename
f_split = list(os.path.split(path))
name = f_split[1]
if '${' in name:
new_name = Template(name).substitute(mapping)
new_path = os.path.join(f_split[0], new_name)
os.rename(path, new_path)
else:
new_path = path
return new_path
def replace_ctnt(f, mapping):
"""
Handles replacement strings in the file content
"""
if not os.path.isfile(f):
return
try:
# look for replacement strings in file
t_file = open(f, 'r+')
t = Template(t_file.read())
t_file.seek(0)
t_file.write(t.substitute(mapping))
t_file.truncate()
except Exception as e:
sys.stderr.write("""
ERROR: while running template engine on file %s
""" % f)
raise e
finally:
t_file.close()
def process(path, mapping):
"""
Performs all templating operations on the given path
"""
replace_ctnt(replace_name(path, mapping), mapping)
def process_tree(directory, mapping):
"""
Performs all templating operations on the directory and its children
"""
directory = replace_name(directory, mapping)
for dirpath, dirnames, filenames in os.walk(directory):
for f in filenames:
process(os.path.join(dirpath, f), mapping)
for d in dirnames:
dirnames.remove(d)
dirnames.append(replace_name(os.path.join(dirpath, d), mapping))
| 25.214286 | 77 | 0.607932 |
6e3c23f713b7a54ba361ed5b6913012fed253e5e | 1,747 | py | Python | toHash.py | ElTarget/- | fcf774386514a7f070be25d643be7bbf1a92af1e | [
"MIT"
] | 1 | 2022-02-22T02:39:52.000Z | 2022-02-22T02:39:52.000Z | toHash.py | ElTarget/- | fcf774386514a7f070be25d643be7bbf1a92af1e | [
"MIT"
] | 1 | 2022-03-08T04:46:17.000Z | 2022-03-08T04:46:17.000Z | toHash.py | ElTarget/get_malware_bazaar | fcf774386514a7f070be25d643be7bbf1a92af1e | [
"MIT"
] | null | null | null | import hashlib
import os
# MD5
# SHA256
# MD5
| 23.293333 | 46 | 0.567258 |
6e3d50e4fe09a809ba48df4ba35365fe114afae0 | 609 | py | Python | final/VolleyballClubHouse/backend/Python/fb_post_scraper.py | Sabalone87/wp1092 | 3da36f3f3ae7ebc175bf0b015838de2928b3b5b9 | [
"MIT"
] | null | null | null | final/VolleyballClubHouse/backend/Python/fb_post_scraper.py | Sabalone87/wp1092 | 3da36f3f3ae7ebc175bf0b015838de2928b3b5b9 | [
"MIT"
] | null | null | null | final/VolleyballClubHouse/backend/Python/fb_post_scraper.py | Sabalone87/wp1092 | 3da36f3f3ae7ebc175bf0b015838de2928b3b5b9 | [
"MIT"
] | null | null | null | import os
import sys
from dotenv import load_dotenv
from facebook_scraper import get_posts
load_dotenv()
print ("hi")
result = []
for post in get_posts(group=os.environ.get("FacebookGroupId"), pages=1,
credentials=(os.environ.get("FacebookUser"), os.environ.get("FacebookPassword"))):
result.append({
"post_id": post["post_id"],
"text": post["text"],
"user_id": post["user_id"],
"username": post["username"],
"time": post["time"]
})
print ({ "post_id": post['post_id'] })
# print (post)
print (result)
sys.stdout.flush() | 25.375 | 102 | 0.609195 |
6e3ec2b42c30f989802844d030b6a4725567d1ae | 442 | py | Python | config.py | benperove/oneliner.sh | 0c6eb25f2dd32cdd5cc275ef5849b5e12c76e9db | [
"Apache-2.0"
] | 4 | 2019-02-15T01:35:17.000Z | 2020-07-08T17:47:33.000Z | config.py | benperove/oneliner.sh | 0c6eb25f2dd32cdd5cc275ef5849b5e12c76e9db | [
"Apache-2.0"
] | 1 | 2019-05-24T21:00:37.000Z | 2019-05-24T21:00:37.000Z | config.py | benperove/oneliner.sh | 0c6eb25f2dd32cdd5cc275ef5849b5e12c76e9db | [
"Apache-2.0"
] | 1 | 2020-04-10T08:03:16.000Z | 2020-04-10T08:03:16.000Z | import os
#github login
SITE = 'https://api.github.com'
CALLBACK = 'https://oneliner.sh/oauth2'
AUTHORIZE_URL = 'https://github.com/login/oauth/authorize'
TOKEN_URL = 'https://github.com/login/oauth/access_token'
SCOPE = 'user'
#redis config
REDIS_HOST = os.environ['REDIS_HOST']
#REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
DATA_DIR = 'oneliners'
DEBUG = True
#app
SUBMISSION_PATH = 'incoming'
| 26 | 61 | 0.68552 |
6e3f3c737da2c1c4948a6562ab3459af248d21f6 | 214 | py | Python | npt/utils/__init__.py | chbrandt/gpt-neanias | aa7c2e88972f9af280b7f02ee11170df6c967b55 | [
"MIT"
] | 2 | 2020-09-28T08:22:54.000Z | 2020-09-28T13:17:25.000Z | npt/utils/__init__.py | chbrandt/gpt-neanias | aa7c2e88972f9af280b7f02ee11170df6c967b55 | [
"MIT"
] | null | null | null | npt/utils/__init__.py | chbrandt/gpt-neanias | aa7c2e88972f9af280b7f02ee11170df6c967b55 | [
"MIT"
] | null | null | null | import json
from npt import log
from . import tmpdir
def read_geojson(filename):
"""
Return JSON object from GeoJSON
"""
with open(filename, 'r') as fp:
js = json.load(fp)
return js
| 14.266667 | 35 | 0.621495 |
6e3fe2c168f62972f11479c2284c380956d44257 | 6,351 | py | Python | apps/user/tests/user/test_users_crud.py | magocod/django_chat | 9c7f82a3fdaa7a8f2f34062d8803b4f33f8c07b7 | [
"MIT"
] | 1 | 2019-10-01T01:39:37.000Z | 2019-10-01T01:39:37.000Z | apps/user/tests/user/test_users_crud.py | magocod/django_chat | 9c7f82a3fdaa7a8f2f34062d8803b4f33f8c07b7 | [
"MIT"
] | 18 | 2019-12-14T15:09:56.000Z | 2022-01-02T16:22:41.000Z | apps/user/tests/user/test_users_crud.py | magocod/django_chat | 9c7f82a3fdaa7a8f2f34062d8803b4f33f8c07b7 | [
"MIT"
] | 1 | 2020-02-10T18:00:16.000Z | 2020-02-10T18:00:16.000Z | """
Prueba creacion de usuarios
"""
# import json
from typing import Any, Dict
import pytest
from django.contrib.auth import get_user_model
from apps.user.serializers import UserHeavySerializer
# from django.contrib.auth.models import User
User = get_user_model()
pytestmark = [pytest.mark.django_db, pytest.mark.users_views]
| 26.135802 | 79 | 0.623366 |
6e4153ef83e21bf087ec6ed89dceeb002c6fc185 | 319 | py | Python | examples/pybullet/examples/signedDistanceField.py | frk2/bullet3 | 225d823e4dc3f952c6c39920c3f87390383e0602 | [
"Zlib"
] | 27 | 2018-05-21T14:28:10.000Z | 2021-12-31T03:12:35.000Z | examples/pybullet/examples/signedDistanceField.py | frk2/bullet3 | 225d823e4dc3f952c6c39920c3f87390383e0602 | [
"Zlib"
] | 1 | 2018-11-19T19:07:47.000Z | 2018-11-19T19:07:47.000Z | examples/pybullet/examples/signedDistanceField.py | frk2/bullet3 | 225d823e4dc3f952c6c39920c3f87390383e0602 | [
"Zlib"
] | 13 | 2019-11-08T12:48:44.000Z | 2022-01-04T04:13:33.000Z | import pybullet as p
import pybullet
import time
p.connect(p.GUI)
p.loadURDF("toys/concave_box.urdf")
p.setGravity(0,0,-10)
for i in range (10):
p.loadURDF("sphere_1cm.urdf",[i*0.02,0,0.5])
p.loadURDF("duck_vhacd.urdf")
timeStep = 1./240.
p.setTimeStep(timeStep)
while (1):
p.stepSimulation()
time.sleep(timeStep)
| 21.266667 | 45 | 0.727273 |
6e415d21c97c8bf5b7c0199061ba4f235f80c0f3 | 2,472 | py | Python | Old/TitleTable.py | StephanM87/Sofie-Herrmann-Praktikum | 3fa7e715061e35aade8eb93756c30ebf10971059 | [
"MIT"
] | null | null | null | Old/TitleTable.py | StephanM87/Sofie-Herrmann-Praktikum | 3fa7e715061e35aade8eb93756c30ebf10971059 | [
"MIT"
] | 2 | 2021-10-04T08:22:40.000Z | 2021-10-05T13:30:02.000Z | Old/TitleTable.py | StephanM87/Sofie-Herrmann-Praktikum | 3fa7e715061e35aade8eb93756c30ebf10971059 | [
"MIT"
] | null | null | null | from pylatex import Document, Tabular, Section, NoEscape, Command, MultiRow
from Old.BioCatHubDatenmodell import DataModel
first_name = "some firstname"
last_name = "some lastname"
e_mail = "some@adress.com"
institution = "some institution"
vessel_type = "some vessel"
volume = int(42)
vol_unit = "mol/l"
add_attributes = [{"Sektor": "Kruzifix"}, {"Bereich": "Eisheiligen"}]
temp = int(42)
temp_unit = "C"
ph_value = int(7)
buffer = "some buffer"
doc = PdfLibrary(DataModel)
doc.create_pdf()
| 34.333333 | 76 | 0.552589 |
6e41787cb64edb79c7312a9c056163a1f57400e3 | 535 | py | Python | Lab2/la2_4.py | ThomCruz/ImageAnalysisLab | 6a524696ecf4aab96336931d22ead8e8c9ec9e30 | [
"MIT"
] | null | null | null | Lab2/la2_4.py | ThomCruz/ImageAnalysisLab | 6a524696ecf4aab96336931d22ead8e8c9ec9e30 | [
"MIT"
] | null | null | null | Lab2/la2_4.py | ThomCruz/ImageAnalysisLab | 6a524696ecf4aab96336931d22ead8e8c9ec9e30 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import matplotlib.pyplot as plt
pic = cv2.imread('image2.png',0)
#pic = imageio.imread('img/parrot.jpg')
gray = lambda rgb : np.dot(rgb[... , :3] , [0.299 , 0.587, 0.114])
gray = gray(pic)
'''
log transform
-> s = c*log(1+r)
So, we calculate constant c to estimate s
-> c = (L-1)/log(1+|I_max|)
'''
max_ = np.max(gray)
plt.figure(figsize = (5,5))
plt.imshow(log_transform(), cmap = plt.get_cmap(name = 'gray'))
plt.axis('off');
| 20.576923 | 67 | 0.637383 |
6e41cc5519a39b51f1547eae6ffa40cae08fd9e3 | 493 | py | Python | rabbit_mq_examples/new_task.py | audip/rabbitmq | f151dea427afa2a08a76fcdccf6fb99e6a81380f | [
"Apache-2.0"
] | null | null | null | rabbit_mq_examples/new_task.py | audip/rabbitmq | f151dea427afa2a08a76fcdccf6fb99e6a81380f | [
"Apache-2.0"
] | null | null | null | rabbit_mq_examples/new_task.py | audip/rabbitmq | f151dea427afa2a08a76fcdccf6fb99e6a81380f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import pika
import sys
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='task_queue', durable=True)
message = ''.join(sys.argv[1:]) or 'Hello World!'
for i in range(30):
message = str(i)+' '+i*'.'
channel.basic_publish(exchange='', routing_key='task_queue',body=message,properties=pika.BasicProperties(delivery_mode=2,))
print " [x] Sent " + message
connection.close()
| 25.947368 | 127 | 0.730223 |
6e45ae2f0c35533b4360de6c8858cfc005287327 | 4,100 | py | Python | metafilter/model/__init__.py | exhuma/metafilter | dfbc01877a3020f7fe58b9fda3e14ed073684f25 | [
"BSD-3-Clause"
] | null | null | null | metafilter/model/__init__.py | exhuma/metafilter | dfbc01877a3020f7fe58b9fda3e14ed073684f25 | [
"BSD-3-Clause"
] | null | null | null | metafilter/model/__init__.py | exhuma/metafilter | dfbc01877a3020f7fe58b9fda3e14ed073684f25 | [
"BSD-3-Clause"
] | null | null | null | from ConfigParser import SafeConfigParser
from cStringIO import StringIO
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy import MetaData
from sqlalchemy.orm import sessionmaker
from os.path import sep
from hashlib import md5
from datetime import datetime, timedelta
import re
import logging
import functools
NON_LTREE = re.compile(r'[^a-zA-Z0-9/]')
LOG = logging.getLogger(__name__)
CONFIG = None
metadata = MetaData()
Session = sessionmaker()
def uri_depth(uri):
"determines the depth of a uri"
if not uri:
return 0
if uri.endswith(sep):
uri = uri[0:-1]
return len(uri.split(sep))
def file_md5(path):
"""
Return the MD5 hash of the file
"""
hash = md5()
fptr = open(path, "rb")
chunk = fptr.read(1024)
while chunk:
hash.update(chunk)
chunk = fptr.read(1024)
fptr.close()
return hash.hexdigest()
from metafilter.model.nodes import Node
from metafilter.model.queries import Query
from metafilter.model.tags import Tag
#
# Parse the config file
#
from os.path import join, exists, expanduser
from os import getcwd
paths = [
join(getcwd(), 'config.ini'),
join(expanduser("~"), '.metafilter', 'config.ini'),
join('/', 'etc', 'metafilter', 'config.ini'),
]
for path in paths:
if not exists(path):
continue
LOG.debug('Reading config from %s' % path)
CONFIG = loadconfig(path)
if not CONFIG:
LOG.error('Unable to open config file (search order: %s)' % (', '.join(paths)))
| 26.973684 | 83 | 0.621463 |
6e46d398600e4b5a657c138522f24f0eef1938e9 | 3,067 | py | Python | manager/base.py | monocleface/viewer | 8ab47a9e846bd2716fe0208c34f33565513fc3f6 | [
"Apache-2.0"
] | 6 | 2020-02-28T21:18:16.000Z | 2020-03-13T16:45:57.000Z | manager/base.py | monocleface/viewer | 8ab47a9e846bd2716fe0208c34f33565513fc3f6 | [
"Apache-2.0"
] | 6 | 2020-02-28T12:42:52.000Z | 2020-03-16T03:49:09.000Z | manager/base.py | monocleface/viewer | 8ab47a9e846bd2716fe0208c34f33565513fc3f6 | [
"Apache-2.0"
] | 6 | 2020-03-05T13:04:25.000Z | 2020-03-13T16:46:03.000Z | from pathlib import Path
from typing import Union
import yaml
| 29.209524 | 103 | 0.538637 |
6e486d2de9698c2208f5c29100b107e8de344209 | 307 | py | Python | 007 - Intro List Comprehension.py/016 - Maior.py | rodrigoviannini/meus_Primeiros_Codigos | 828dec1c4ce06889efd491145e631c30a45e858f | [
"MIT"
] | 2 | 2021-07-22T23:26:54.000Z | 2021-07-22T23:27:27.000Z | 007 - Intro List Comprehension.py/016 - Maior.py | rodrigoviannini/meus_Primeiros_Codigos | 828dec1c4ce06889efd491145e631c30a45e858f | [
"MIT"
] | null | null | null | 007 - Intro List Comprehension.py/016 - Maior.py | rodrigoviannini/meus_Primeiros_Codigos | 828dec1c4ce06889efd491145e631c30a45e858f | [
"MIT"
] | null | null | null | """
List Comprehension Aninhada
OBJ: Encontrar o maior ou os maiores nmeros de uma lista e imprimir outra lista
"""
listaGenerica = [1, 2, 3, 4, 1, 2, 3, 4, 10, 10, 10, 5, 3, -4]
listaMaior = [x for x in listaGenerica if not False in [True if x >= y else False for y in listaGenerica]]
print(listaMaior) | 30.7 | 106 | 0.693811 |
6e487df26dabde97ea3f1c6bd9a631bd068d4b7f | 357 | py | Python | thehardway/practice3.py | sunquan9301/pythonLearn | f10760a4e32c3ac267e39d835c08f45800d081b6 | [
"Apache-2.0"
] | null | null | null | thehardway/practice3.py | sunquan9301/pythonLearn | f10760a4e32c3ac267e39d835c08f45800d081b6 | [
"Apache-2.0"
] | null | null | null | thehardway/practice3.py | sunquan9301/pythonLearn | f10760a4e32c3ac267e39d835c08f45800d081b6 | [
"Apache-2.0"
] | null | null | null |
if __name__ == '__main__':
main()
| 17 | 37 | 0.535014 |
6e4b454f9d9a661e964992d4f53efcc35fd88de8 | 651 | py | Python | ipt/td1/3.3-nbracines.py | lucas8/MPSI | edefa2155071910d95633acf87b9f3a9d34f67d3 | [
"MIT"
] | null | null | null | ipt/td1/3.3-nbracines.py | lucas8/MPSI | edefa2155071910d95633acf87b9f3a9d34f67d3 | [
"MIT"
] | null | null | null | ipt/td1/3.3-nbracines.py | lucas8/MPSI | edefa2155071910d95633acf87b9f3a9d34f67d3 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
a = float(input("Entrez le coefficient dominant du trinome : "))
b = float(input("Entrez le coefficient d'ordre 1 du trinome : "))
c = float(input("Entrez la constante du trinome : "))
nbracines(a, b, c)
nbracines(0, 3, 1)
nbracines(1, 0.2, 0.01)
| 28.304348 | 140 | 0.537634 |