"""2D and 3D vector classes.
These are used to represent points in 2D and 3D, as well as directions for translations.
"""
from typing import Any, Iterable, Iterator, List, Optional, Sized, Tuple, Union # noqa
import numpy as np
from six.moves import zip
if False:
from .polygons import Polygon3D # noqa
class Vector2D(Sized, Iterable):
"""Two dimensional point."""
def __init__(self, *args):
# type: (*Any) -> None
self.args = list(args)
self.x = float(args[0])
self.y = float(args[1])
self.z = 0.0
def __iter__(self):
# type: () -> Iterator
return (i for i in self.args)
def __repr__(self):
# type: () -> str
class_name = type(self).__name__
return "{}({!r}, {!r})".format(class_name, *self.args)
def __eq__(self, other):
for a, b in zip(self, other):
if a != b:
return False
return True
def __sub__(self, other):
# type: (Any) -> Union[Vector2D, Vector3D]
return self.__class__(*[self[i] - other[i] for i in range(len(self))])
def __add__(self, other):
# type: (Any) -> Union[Vector2D, Vector3D]
return self.__class__(*[self[i] + other[i] for i in range(len(self))])
def __neg__(self):
# type: () -> Union[Vector2D, Vector3D]
return self.__class__(*inverse_vector(self))
def __len__(self):
# type: () -> int
return len(self.args)
def __getitem__(self, key):
# type: (Union[int, slice]) -> Union[Any, List[Any]]
return self.args[key]
def __setitem__(self, key, value):
self.args[key] = value
def __hash__(self):
return hash(self.x) ^ hash(self.y)
def dot(self, other):
# type: (Vector3D) -> np.float64
return np.dot(self, other)
def cross(self, other):
# type: (Union[Vector2D, Vector3D]) -> np.ndarray
return np.cross(self, other)
@property
def length(self):
# type: () -> float
"""The length of a vector."""
length = sum(x ** 2 for x in self.args) ** 0.5
return length
def closest(self, poly):
# type: (Polygon3D) -> Optional[Any]
"""Find the closest vector in a polygon.
:param poly: Polygon or Polygon3D
"""
min_d = float("inf")
closest_pt = None
for pt2 in poly:
direction = self - pt2
sq_d = sum(x ** 2 for x in direction)
if sq_d < min_d:
min_d = sq_d
closest_pt = pt2
return closest_pt
def normalize(self):
# type: () -> Union[Vector2D, Vector3D]
return self.set_length(1.0)
def set_length(self, new_length):
# type: (float) -> Union[Vector2D, Vector3D]
current_length = self.length
multiplier = new_length / current_length
self.args = [i * multiplier for i in self.args]
return self
def invert(self):
# type: () -> Union[Vector2D, Vector3D]
return -self
def as_array(self, dims=3):
# type: (Union[Vector2D, Vector3D], int) -> np.ndarray
"""Convert a point to a numpy array.
Converts a Vector3D to a numpy.array([x,y,z]) or a Vector2D to a numpy.array([x,y]).
Ensures all values are floats since some other types cause problems in pyclipper (notably where sympy.Zero is
used to represent 0.0).
:param dims: Number of dimensions {default : 3}.
:returns: Vector as a Numpy array.
"""
# handle Vector3D
if dims == 3:
return np.array([float(self.x), float(self.y), float(self.z)])
# handle Vector2D
elif dims == 2:
return np.array([float(self.x), float(self.y)])
else:
raise ValueError("%s-dimensional vectors are not supported." % dims)
def as_tuple(self, dims=3):
        # type: (int) -> Union[Tuple[float, float], Tuple[float, float, float]]
        """Convert a point to a tuple.
Convert a Vector3D to an (x,y,z) tuple or a Vector2D to an (x,y) tuple.
Ensures all values are floats since some other types cause problems in pyclipper (notably where sympy.Zero is
used to represent 0.0).
:param dims: Number of dimensions {default : 3}.
:returns: Vector as a tuple.
"""
# handle Vector3D
if dims == 3:
return float(self.x), float(self.y), float(self.z)
# handle Vector2D
elif dims == 2:
return float(self.x), float(self.y)
else:
raise ValueError("%s-dimensional vectors are not supported." % dims)
def relative_distance(self, v2):
# type: (Vector3D) -> float
"""A distance function for sorting vectors by distance.
This only provides relative distance, not actual distance since we only use it for sorting.
:param v2: Another vector.
:returns: Relative distance between two point vectors.
"""
direction = self - v2
return sum(x ** 2 for x in direction)
class Vector3D(Vector2D):
"""Three dimensional point."""
def __init__(
self,
x, # type: Union[float, np.float64]
y, # type: Union[float, np.float64]
z=0, # type: Union[float, np.float64]
):
# type: (...) -> None
super(Vector3D, self).__init__(x, y, z)
self.z = float(z)
self.args = [self.x, self.y, self.z]
def __repr__(self):
# type: () -> str
class_name = type(self).__name__
return "{}({!r}, {!r}, {!r})".format(class_name, *self.args)
def __hash__(self):
# type: () -> int
return hash(self.x) ^ hash(self.y) ^ hash(self.z)
def inverse_vector(v):
# type: (Union[Vector2D, Vector3D]) -> List[float]
"""Convert a vector to the same vector but in the opposite direction
:param v: The vector.
:returns: The vector reversed.
"""
return [-i for i in v]
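# A minimal usage sketch (illustrative only; the names below are examples and
# not part of the library's test suite): vectors support subtraction, length,
# normalization and distance-based sorting as described above.
if __name__ == "__main__":
    p1 = Vector3D(1.0, 2.0, 3.0)
    p2 = Vector3D(4.0, 6.0, 3.0)
    direction = p2 - p1  # Vector3D(3.0, 4.0, 0.0)
    print(direction.length)  # 5.0
    print(direction.normalize())  # same direction, length 1.0
    pts = [Vector3D(0, 0, 0), Vector3D(5, 5, 0), Vector3D(1, 1, 0)]
    # sort a list of points by their (relative) distance from p1
    print(sorted(pts, key=p1.relative_distance))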
| {
"repo_name": "jamiebull1/geomeppy",
"path": "geomeppy/geom/vectors.py",
"copies": "1",
"size": "6139",
"license": "mit",
"hash": -1382481578784702200,
"line_mean": 29.0931372549,
"line_max": 117,
"alpha_frac": 0.5541619156,
"autogenerated": false,
"ratio": 3.615429917550059,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46695918331500585,
"avg_score": null,
"num_lines": null
} |
## 2. Data cleaning ##
import pandas as pd
columns = ["mpg", "cylinders", "displacement", "horsepower", "weight", "acceleration", "model year", "origin", "car name"]
cars = pd.read_table("auto-mpg.data", delim_whitespace=True, names=columns)
filtered_cars = cars[cars['horsepower']!='?']
filtered_cars['horsepower'] = filtered_cars['horsepower'].astype('float')
## 3. Data Exploration ##
import matplotlib.pyplot as plt
filtered_cars.plot('horsepower', 'mpg', kind='scatter', c='red')
filtered_cars.plot('weight', 'mpg', kind='scatter', c='blue')
plt.show()
## 4. Fitting a model ##
import sklearn
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(filtered_cars[['horsepower']],filtered_cars['mpg'])
predictions = lr.predict(filtered_cars[['horsepower']])
print(predictions[:5])
print(filtered_cars['mpg'].head())
## 5. Plotting the predictions ##
plt.scatter(filtered_cars[['horsepower']],predictions,c = 'b')
plt.scatter(filtered_cars[['horsepower']],filtered_cars['mpg'],c = 'r')
plt.show()
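# Optional extra (an illustrative addition, not part of the original
# exercise): because the fit is a straight line, sorting by horsepower lets
# us draw the predicted line through the scatter of actual values.
hp_order = filtered_cars['horsepower'].values.argsort()
plt.scatter(filtered_cars['horsepower'], filtered_cars['mpg'], c='r')
plt.plot(filtered_cars['horsepower'].values[hp_order], predictions[hp_order],
         color='b')
plt.show()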
## 6. Error metrics ##
from sklearn.metrics import mean_squared_error
mse = mean_squared_error(filtered_cars['mpg'],predictions)
rmse = mse ** .5
| {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Machine learning Beginner/Challenge_ Machine Learning Basics-205.py",
"copies": "1",
"size": "1179",
"license": "mit",
"hash": 7889563487431483000,
"line_mean": 31.7777777778,
"line_max": 122,
"alpha_frac": 0.7048346056,
"autogenerated": false,
"ratio": 3.0703125,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9194683463490853,
"avg_score": 0.01609272842182942,
"num_lines": 36
} |
"""2D backpropagation algorithm"""
import numpy as np
import scipy.ndimage
from . import util
def backpropagate_2d(uSin, angles, res, nm, lD=0, coords=None,
weight_angles=True,
onlyreal=False, padding=True, padval=0,
count=None, max_count=None, verbose=0):
r"""2D backpropagation with the Fourier diffraction theorem
Two-dimensional diffraction tomography reconstruction
algorithm for scattering of a plane wave
:math:`u_0(\mathbf{r}) = u_0(x,z)`
by a dielectric object with refractive index
:math:`n(x,z)`.
This method implements the 2D backpropagation algorithm
:cite:`Mueller2015arxiv`.
.. math::
f(\mathbf{r}) =
-\frac{i k_\mathrm{m}}{2\pi}
\sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j} \!\!
\left \{
\text{FFT}^{-1}_{\mathrm{1D}}
\left \{
\left| k_\mathrm{Dx} \right|
\frac{\text{FFT}_{\mathrm{1D}} \left \{
u_{\mathrm{B},\phi_j}(x_\mathrm{D}) \right \}
}{u_0(l_\mathrm{D})}
\exp \! \left[i k_\mathrm{m}(M - 1) \cdot
(z_{\phi_j}-l_\mathrm{D}) \right]
\right \}
\right \}
with the forward :math:`\text{FFT}_{\mathrm{1D}}` and inverse
:math:`\text{FFT}^{-1}_{\mathrm{1D}}` 1D fast Fourier transform, the
rotational operator :math:`D_{-\phi_j}`, the angular distance between the
projections :math:`\Delta \phi_0`, the ramp filter in Fourier space
:math:`|k_\mathrm{Dx}|`, and the propagation distance
:math:`(z_{\phi_j}-l_\mathrm{D})`.
Parameters
----------
uSin: (A,N) ndarray
Two-dimensional sinogram of line recordings
:math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})`
divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
measured at the detector.
angles: (A,) ndarray
Angular positions :math:`\phi_j` of `uSin` in radians.
res: float
Vacuum wavelength of the light :math:`\lambda` in pixels.
nm: float
Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
lD: float
Distance from center of rotation to detector plane
:math:`l_\mathrm{D}` in pixels.
coords: None [(2,M) ndarray]
Computes only the output image at these coordinates. This
keyword is reserved for future versions and is not
implemented yet.
weight_angles: bool
If `True`, weights each backpropagated projection with a factor
proportional to the angular distance between the neighboring
projections.
.. math::
\Delta \phi_0 \longmapsto \Delta \phi_j =
\frac{\phi_{j+1} - \phi_{j-1}}{2}
.. versionadded:: 0.1.1
onlyreal: bool
If `True`, only the real part of the reconstructed image
will be returned. This saves computation time.
padding: bool
Pad the input data to the second next power of 2 before
Fourier transforming. This reduces artifacts and speeds up
the process for input image sizes that are not powers of 2.
padval: float
The value used for padding. This is important for the Rytov
approximation, where an approximate zero in the phase might
translate to 2πi due to the unwrapping algorithm. In that
case, this value should be a multiple of 2πi.
If `padval` is `None`, then the edge values are used for
padding (see documentation of :func:`numpy.pad`).
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
verbose: int
Increment to increase verbosity.
Returns
-------
f: ndarray of shape (N,N), complex if `onlyreal` is `False`
Reconstructed object function :math:`f(\mathbf{r})` as defined
by the Helmholtz equation.
:math:`f(x,z) =
k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
See Also
--------
odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
to refractive index :math:`n(\mathbf{r})`
radontea.backproject: backprojection based on the Fourier slice
theorem
Notes
-----
Do not use the parameter `lD` in combination with the Rytov
approximation - the propagation is not correctly described.
Instead, numerically refocus the sinogram prior to converting
it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
with a numerical focusing algorithm (available in the Python
package :py:mod:`nrefocus`).
"""
##
##
# TODO:
# - combine the 2nd filter and the rotation in the for loop
# to save memory. However, memory is not a big issue in 2D.
##
##
A = angles.shape[0]
if max_count is not None:
max_count.value += A + 2
# Check input data
    assert len(uSin.shape) == 2, "Input data `uSin` must have shape (A,N)!"
assert len(uSin) == A, "`len(angles)` must be equal to `len(uSin)`!"
if coords is not None:
raise NotImplementedError("Output coordinates cannot yet be set " +
+ "for the 2D backrpopagation algorithm.")
# Cut-Off frequency
# km [1/px]
km = (2 * np.pi * nm) / res
# Here, the notation defines
# a wave propagating to the right as:
#
# u0(x) = exp(ikx)
#
# However, in physics usually we use the other sign convention:
#
# u0(x) = exp(-ikx)
#
# In order to be consistent with programs like Meep or our
# scattering script for a dielectric cylinder, we want to use the
# latter sign convention.
# This is not a big problem. We only need to multiply the imaginary
# part of the scattered wave by -1.
# Perform weighting
if weight_angles:
weights = util.compute_angle_weights_1d(angles).reshape(-1, 1)
sinogram = uSin * weights
else:
sinogram = uSin
# Size of the input data
ln = sinogram.shape[1]
# We perform padding before performing the Fourier transform.
# This gets rid of artifacts due to false periodicity and also
    # speeds up Fourier transforms if the input image size is not
# a power of 2.
order = max(64., 2**np.ceil(np.log(ln * 2.1) / np.log(2)))
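    # e.g. for ln = 100 detector pixels, ln * 2.1 = 210 and the next power of
    # two is 256, so the padded length below becomes order = 256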
if padding:
pad = order - ln
else:
pad = 0
padl = int(np.ceil(pad / 2))
padr = int(pad - padl)
if padval is None:
sino = np.pad(sinogram, ((0, 0), (padl, padr)),
mode="edge")
if verbose > 0:
print("......Padding with edge values.")
else:
sino = np.pad(sinogram, ((0, 0), (padl, padr)),
mode="linear_ramp",
end_values=(padval,))
if verbose > 0:
print("......Verifying padding value: {}".format(padval))
# zero-padded length of sinogram.
lN = sino.shape[1]
# Ask for the filter. Do not include zero (first element).
#
# Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ]
# - double coverage factor 1/2 already included
# - unitary angular frequency to unitary ordinary frequency
# conversion performed in calculation of UB=FT(uB).
#
# f(r) = -i kₘ / ((2π)^(3/2) a₀) (prefactor)
# * iint dϕ₀ dkx (prefactor)
# * |kx| (prefactor)
# * exp(-i kₘ M lD ) (prefactor)
# * UBϕ₀(kx) (dependent on ϕ₀)
# * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r)
#
# (r and s₀ are vectors. In the last term we perform the dot-product)
#
# kₘM = sqrt( kₘ² - kx² )
# t⊥ = ( cos(ϕ₀), sin(ϕ₀) )
# s₀ = ( -sin(ϕ₀), cos(ϕ₀) )
#
# The filter can be split into two parts
#
# 1) part without dependence on the z-coordinate
#
# -i kₘ / ((2π)^(3/2) a₀)
# * iint dϕ₀ dkx
# * |kx|
# * exp(-i kₘ M lD )
#
# 2) part with dependence of the z-coordinate
#
# exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
#
# The filter (1) can be performed using the classical filter process
# as in the backprojection algorithm.
#
#
if count is not None:
count.value += 1
# Corresponding sample frequencies
fx = np.fft.fftfreq(lN) # 1D array
# kx is a 1D array.
kx = 2 * np.pi * fx
# Differentials for integral
dphi0 = 2 * np.pi / A
# We will later multiply with phi0.
# a, x
kx = kx.reshape(1, -1)
# Low-pass filter:
# less-than-or-equal would give us zero division error.
filter_klp = (kx**2 < km**2)
# Filter M so there are no nans from the root
M = 1. / km * np.sqrt((km**2 - kx**2) * filter_klp)
prefactor = -1j * km / (2 * np.pi)
prefactor *= dphi0
prefactor *= np.abs(kx) * filter_klp
# new in version 0.1.4:
# We multiply by the factor (M-1) instead of just (M)
# to take into account that we have a scattered
# wave that is normalized by u0.
prefactor *= np.exp(-1j * km * (M-1) * lD)
# Perform filtering of the sinogram
projection = np.fft.fft(sino, axis=-1) * prefactor
#
# filter (2) must be applied before rotation as well
# exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
#
# t⊥ = ( cos(ϕ₀), sin(ϕ₀) )
# s₀ = ( -sin(ϕ₀), cos(ϕ₀) )
#
# This filter is effectively an inverse Fourier transform
#
# exp(i kx xD) exp(i kₘ (M - 1) yD )
#
# xD = x cos(ϕ₀) + y sin(ϕ₀)
# yD = - x sin(ϕ₀) + y cos(ϕ₀)
# Everything is in pixels
center = ln / 2.0
x = np.arange(lN) - center + .5
# Meshgrid for output array
yv = x.reshape(-1, 1)
Mp = M.reshape(1, -1)
filter2 = np.exp(1j * yv * km * (Mp - 1)) # .reshape(1,lN,lN)
projection = projection.reshape(A, 1, lN) # * filter2
# Prepare complex output image
if onlyreal:
outarr = np.zeros((ln, ln))
else:
outarr = np.zeros((ln, ln), dtype=np.dtype(complex))
if count is not None:
count.value += 1
# Calculate backpropagations
for i in np.arange(A):
# Create an interpolation object of the projection.
# interpolation of the rotated fourier transformed projection
# this is already tiled onto the entire image.
sino_filtered = np.fft.ifft(projection[i] * filter2, axis=-1)
# Resize filtered sinogram back to original size
sino = sino_filtered[:ln, padl:padl + ln]
rotated_projr = scipy.ndimage.interpolation.rotate(
sino.real, -angles[i] * 180 / np.pi,
reshape=False, mode="constant", cval=0)
# Append results
outarr += rotated_projr
if not onlyreal:
outarr += 1j * scipy.ndimage.interpolation.rotate(
sino.imag, -angles[i] * 180 / np.pi,
reshape=False, mode="constant", cval=0)
if count is not None:
count.value += 1
return outarr
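# Minimal call sketch (an illustrative assumption, not a test shipped with
# this module): for a Rytov-normalized sinogram `sino` of shape (A, N)
# recorded at angles `angs` in radians, with wavelength `res` and medium
# index `nm` given in pixel units, a reconstruction would look roughly like
#
#     import numpy as np
#     import odtbrain
#     angs = np.linspace(0, 2 * np.pi, sino.shape[0], endpoint=False)
#     f = odtbrain.backpropagate_2d(uSin=sino, angles=angs, res=3.5, nm=1.333)
#     ri = odtbrain.odt_to_ri(f, res=3.5, nm=1.333)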
| {
"repo_name": "paulmueller/ODTbrain",
"path": "odtbrain/_alg2d_bpp.py",
"copies": "2",
"size": "11346",
"license": "bsd-3-clause",
"hash": -6058115727231118000,
"line_mean": 33.463190184,
"line_max": 77,
"alpha_frac": 0.5732977303,
"autogenerated": false,
"ratio": 3.28029197080292,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48535897011029194,
"avg_score": null,
"num_lines": null
} |
"""2D canvas style graphics functionality backed by Qt's QGraphicsView."""
from PyQt5 import QtCore, QtGui, QtWidgets
class Canvas(QtWidgets.QGraphicsView):
"""A 2D canvas interface implemented using a QGraphicsView.
This view essentially just holds a QGraphicsScene that grows to fit the
size of the view, keeping the aspect ratio square. The scene is displayed
with a gray (by default) border.
See Qt's documentation for more information about working with
QGraphicsView (https://doc.qt.io/Qt-5/qgraphicsview.html).
"""
scaler = 1
border_width = 0.01
default_border_color = '#444444'
default_bg_color = '#dddddd'
def __init__(self, draw_border=True, bg_color=None, border_color=None,
parent=None, invert_x=False, invert_y=False):
super(Canvas, self).__init__(parent=parent)
if bg_color is None:
bg_color = self.default_bg_color
self.bg_color = bg_color
if border_color is None:
border_color = self.default_border_color
self.border_color = border_color
self.invert_x = invert_x
self.invert_y = invert_y
self._init_scene()
if draw_border:
self._init_border()
def _init_scene(self):
scene = QtWidgets.QGraphicsScene()
# x, y, width, height
scene.setSceneRect(-self.scaler, -self.scaler,
self.scaler*2, self.scaler*2)
self.setScene(scene)
if self.invert_x:
self.scale(-1, 1)
# Qt is positive downward, so invert logic for y inversion
if not self.invert_y:
self.scale(1, -1)
self.setRenderHint(QtGui.QPainter.Antialiasing)
self.setBackgroundBrush(QtGui.QColor(self.bg_color))
def _init_border(self):
rect = self.scene().sceneRect()
pen = QtGui.QPen(QtGui.QColor(self.border_color), self.border_width)
lines = [
QtCore.QLineF(rect.topLeft(), rect.topRight()),
QtCore.QLineF(rect.topLeft(), rect.bottomLeft()),
QtCore.QLineF(rect.topRight(), rect.bottomRight()),
QtCore.QLineF(rect.bottomLeft(), rect.bottomRight())
]
for line in lines:
self.scene().addLine(line, pen)
def add_item(self, item):
"""Add an item to the canvas.
Parameters
----------
item : Item or QGraphicsItem
The item to add to the canvas. This can be either one of AxoPy's
built-in items (:class:`Circle`, :class:`Text`, etc.) or any
QGraphicsItem.
"""
if isinstance(item, Item):
self.scene().addItem(item.qitem)
else:
self.scene().addItem(item)
def resizeEvent(self, event):
# override resize event to keep the scene rect intact (everything
# scales with the window changing size, aspect ratio is preserved)
super().resizeEvent(event)
self.fitInView(self.sceneRect(), QtCore.Qt.KeepAspectRatio)
class Item(object):
"""Canvas item base class.
This is simply a wrapper around any kind of ``QGraphicsItem``, adding the
ability to set some properties of the underlying item with a more Pythonic
API. You can always access the ``QGraphicsItem`` with the ``qitem``
attribute. Once you know what kind of ``QGraphicsItem`` is being wrapped,
you can use the corresponding Qt documentation to make use of more complete
functionality.
Attributes
----------
qitem : QGraphicsItem
The QGraphicsItem being wrapped. You can use this attribute to access
methods and properties of the item not exposed by the wrapper class. If
you find yourself routinely using a method of the QGraphicsItem,
consider recommending it for addition to AxoPy.
"""
def __init__(self, qitem):
self.qitem = qitem
@property
def x(self):
"""X coordinate of the item in the canvas."""
return self.qitem.x()
@x.setter
def x(self, x):
self.qitem.setX(x)
@property
def y(self):
"""Y coordinate of the item in the canvas."""
return self.qitem.y()
@y.setter
def y(self, y):
self.qitem.setY(y)
@property
def pos(self):
"""Both X and Y coordinates of the item in the canvas."""
return self.x, self.y
@pos.setter
def pos(self, pos):
self.qitem.setPos(*pos)
@property
def visible(self):
"""Visibility of the item."""
return self.qitem.isVisible()
@visible.setter
def visible(self, visible):
self.qitem.setVisible(visible)
@property
def opacity(self):
"""Opacity of the item (between 0 and 1)."""
        return self.qitem.opacity()
@opacity.setter
def opacity(self, opacity):
self.qitem.setOpacity(opacity)
@property
def color(self):
"""Color of the item."""
return self.qitem.brush().color().getRgb()
@color.setter
def color(self, color):
self.qitem.setBrush(QtGui.QColor(color))
def show(self):
"""Set the item to visible."""
self.qitem.show()
def hide(self):
"""Set the item to invisible."""
self.qitem.hide()
def set(self, **kwargs):
"""Set any properties of the underlying QGraphicsItem."""
for prop, val in kwargs.items():
self._qmeth(prop)(val)
def get(self, prop, *args, **kwargs):
"""Get any property of the underlying QGraphicsItem."""
        return self._qmeth(prop)(*args, **kwargs)
def collides_with(self, item):
"""Determine if the item intersects with another item."""
return self.qitem.collidesWithItem(item.qitem)
def _qmeth(self, prop):
return getattr(self.qitem, _to_camel_case(prop))
def _to_camel_case(snake_str):
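    """Convert a snake_case name to camelCase, e.g. 'set_pos' -> 'setPos'."""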
components = snake_str.split('_')
return components[0] + ''.join(x.title() for x in components[1:])
class Circle(Item):
"""Circular item.
The coordinates of this item correspond to the center of the circle.
Parameters
----------
dia : float
Diameter of the circle with respect to the scene coordinate system.
color : str
Hex string to set the color of the circle. You can use the underlying
``qitem`` attribute to get the underlying QGraphicsEllipseItem to set
stroke color vs. fill color, etc. if needed.
"""
def __init__(self, diameter, color='#333333'):
qitem = QtWidgets.QGraphicsEllipseItem(-diameter/2, -diameter/2,
diameter, diameter)
qitem.setPen(QtGui.QPen(QtGui.QBrush(), 0))
super(Circle, self).__init__(qitem)
self.color = color
class Cross(Item):
"""Collection of two lines oriented as a "plus sign".
The coordinates of this item correspond to the center of the cross. This
item's ``qitem`` attribute is a ``QGraphicsItemGroup`` (a group of two
lines).
Parameters
----------
size : float
The size is the length of each line making up the cross.
linewidth : float
Thickness of each line making up the cross.
color : str
Color of the lines making up the cross.
"""
def __init__(self, size=0.05, linewidth=0.01, color='#333333'):
qitem = QtWidgets.QGraphicsItemGroup()
self._lh = Line(-size/2, 0, size/2, 0, width=linewidth, color=color)
self._lv = Line(0, -size/2, 0, size/2, width=linewidth, color=color)
qitem.addToGroup(self._lh.qitem)
qitem.addToGroup(self._lv.qitem)
super(Cross, self).__init__(qitem)
@property
def color(self):
"""Color of the lines in the cross."""
return self._lv.color
@color.setter
def color(self, color):
self._lh.color = color
self._lv.color = color
class Line(Item):
"""Line item."""
def __init__(self, x1, y1, x2, y2, width=0.01, color='#333333'):
self.width = width
qitem = QtWidgets.QGraphicsLineItem(x1, y1, x2, y2)
super(Line, self).__init__(qitem)
self.color = color
@property
def color(self):
return self.qitem.pen().color().getRgb()
@color.setter
def color(self, color):
self.qitem.setPen(QtGui.QPen(QtGui.QBrush(QtGui.QColor(color)),
self.width, cap=QtCore.Qt.FlatCap))
class Text(Item):
"""Text item."""
def __init__(self, text, color='#333333'):
qitem = QtWidgets.QGraphicsSimpleTextItem(text)
super(Text, self).__init__(qitem)
self.color = color
# invert because Canvas is inverted
        self.qitem.setTransform(QtGui.QTransform.fromScale(0.01, -0.01))
self._center()
def _center(self):
scene_bounds = self.qitem.sceneBoundingRect()
self.pos = -scene_bounds.width() / 2, scene_bounds.height() / 2
class Rectangle(Item):
"""Rectangular item.
    This is a filled rectangle that allows you to set the size, color, position,
etc. By default, the item's position is its *center*.
"""
def __init__(self, width, height, x=0, y=0, color='#333333',
penwidth=0.01):
self.penwidth = penwidth
qitem = QtWidgets.QGraphicsRectItem(x, y, width, height)
qitem.setTransformOriginPoint(width/2, height/2)
qitem.setTransform(QtGui.QTransform().translate(-width/2, -height/2))
super(Rectangle, self).__init__(qitem)
self.pos = x, y
self.color = color
@property
def color(self):
"""Color of the rectangle."""
return self.qitem.pen().color().getRgb()
@color.setter
def color(self, color):
"""Color of the rectangle."""
br = QtGui.QBrush(QtGui.QColor(color))
self.qitem.setBrush(br)
self.qitem.setPen(QtGui.QPen(br, self.penwidth,
cap=QtCore.Qt.FlatCap))
@property
def width(self):
return self.qitem.rect().width()
@width.setter
def width(self, width):
p = self.pos
rect = self.qitem.rect()
rect.setWidth(width)
self.qitem.setRect(rect)
self.pos = p
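# Minimal usage sketch (an illustrative assumption, not one of the package's
# documented examples): embed the Canvas in a bare Qt application and add a
# circular cursor item to it.
if __name__ == '__main__':
    import sys
    app = QtWidgets.QApplication(sys.argv)
    canvas = Canvas()
    cursor = Circle(0.1, color='#aa2233')
    canvas.add_item(cursor)
    cursor.pos = (0.5, 0.5)
    canvas.show()
    sys.exit(app.exec_())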
| {
"repo_name": "ucdrascal/hcibench",
"path": "axopy/gui/canvas.py",
"copies": "2",
"size": "10191",
"license": "mit",
"hash": -1388182486694122800,
"line_mean": 29.150887574,
"line_max": 79,
"alpha_frac": 0.6024923953,
"autogenerated": false,
"ratio": 3.8283245679939895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.543081696329399,
"avg_score": null,
"num_lines": null
} |
# 2D channel example
# ==================
#
# .. highlight:: python
#
# This example demonstrates a depth-averaged 2D simulation in a closed
# rectangular domain, where the flow is forced by an initial perturbation in the
# water elevation field.
#
# We begin by importing Thetis and creating a rectangular mesh with :py:func:`~.firedrake.utility_meshes.RectangleMesh`.
# The domain is 40 km long and 2 km wide.
# We generate 25 elements in the along-channel direction and 2 in the
# cross-channel direction::
from thetis import *
lx = 40e3
ly = 2e3
nx = 25
ny = 2
mesh2d = RectangleMesh(nx, ny, lx, ly)
# Next we define a bathymetry function in the 2D mesh, using continuous linear
# elements. In this example we set the bathymetry to constant 20 m depth::
P1_2d = FunctionSpace(mesh2d, 'CG', 1)
bathymetry_2d = Function(P1_2d, name='Bathymetry')
depth = 20.0
bathymetry_2d.assign(depth)
# .. note::
#
# See
# `Firedrake manual <http://firedrakeproject.org/variational-problems.html>`_
# for more information on mesh generation, functions and function spaces.
#
# We are now ready to create a 2D solver object, and set some options::
# total duration in seconds
t_end = 2 * 3600
# export interval in seconds
t_export = 100.0
solver_obj = solver2d.FlowSolver2d(mesh2d, bathymetry_2d)
options = solver_obj.options
options.simulation_export_time = t_export
options.simulation_end_time = t_end
# Here we simply define the total duration of the run, and the
# export interval. See :py:class:`~.ModelOptions` for more information about the
# available options.
#
# Next we define the used time integrator, and set the time step::
options.timestepper_type = 'CrankNicolson'
options.timestep = 50.0
# Because Crank-Nicolson is an unconditionally stable method, we can set
# the time step freely.
#
# We then define the initial condition for elevation. We begin by creating a
# function (in the same linear continuous function space)::
elev_init = Function(P1_2d, name='initial elevation')
# We then need to define an analytical expression for the x,y coordinates of the
# mesh. To this end, we use
# :py:class:`~.ufl.classes.SpatialCoordinate` and define a `UFL <http://fenics-ufl.readthedocs.io/en/latest/>`_ expression (see
# `Firedrake's interpolation manual <http://firedrakeproject.org/interpolation.html>`_
# for more information)::
xy = SpatialCoordinate(mesh2d)
gauss_width = 4000.
gauss_ampl = 2.0
gauss_expr = gauss_ampl * exp(-((xy[0]-lx/2)/gauss_width)**2)
# This defines a 2 m tall Gaussian hill in the x-direction in the middle of the
# domain. We can then interpolate this expression on the function::
elev_init.interpolate(gauss_expr)
# and set this function as an initial condition to the elevation field::
solver_obj.assign_initial_conditions(elev=elev_init)
# Model setup is now complete. We run the model by issuing::
solver_obj.iterate()
# While the model is running, Thetis prints some statistics on the command line:
#
# .. code-block:: none
#
# 0 0 T= 0.00 eta norm: 6251.2574 u norm: 0.0000 0.00
# 1 2 T= 100.00 eta norm: 5905.0262 u norm: 1398.1128 0.76
# 2 4 T= 200.00 eta norm: 5193.5227 u norm: 2377.8512 0.03
# 3 6 T= 300.00 eta norm: 4656.5334 u norm: 2856.5165 0.03
# ...
#
# The first column is the export index, the second one the number of executed
# time steps, followed by the simulation time. ``eta norm`` and ``u norm`` are
# the L2 norms of the elevation and depth averaged velocity fields, respectively.
# The last column stands for the (approximate) wall-clock time between exports.
#
# The simulation terminates once the end time is reached.
# See :doc:`outputs and visualization <../outputs_and_visu>` page on how to
# visualize the results.
#
# This tutorial can be downloaded as a Python script `here <demo_2d_channel.py>`__.
| {
"repo_name": "tkarna/cofs",
"path": "demos/demo_2d_channel.py",
"copies": "2",
"size": "3870",
"license": "mit",
"hash": 5646640883570143000,
"line_mean": 34.504587156,
"line_max": 127,
"alpha_frac": 0.719379845,
"autogenerated": false,
"ratio": 3.1695331695331697,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48889130145331694,
"avg_score": null,
"num_lines": null
} |
# 2D channel with time-dependent boundary conditions
# ==================================================
#
# .. highlight:: python
#
# Here we extend the :doc:`2D channel example <demo_2d_channel.py>` by adding constant and time
# dependent boundary conditions.
#
# We begin by defining the domain and solver as before::
from thetis import *
lx = 40e3
ly = 2e3
nx = 25
ny = 2
mesh2d = RectangleMesh(nx, ny, lx, ly)
P1_2d = FunctionSpace(mesh2d, 'CG', 1)
bathymetry_2d = Function(P1_2d, name='Bathymetry')
depth = 20.0
bathymetry_2d.assign(depth)
# total duration in seconds
t_end = 12 * 3600
# export interval in seconds
t_export = 300.0
solver_obj = solver2d.FlowSolver2d(mesh2d, bathymetry_2d)
options = solver_obj.options
options.simulation_export_time = t_export
options.simulation_end_time = t_end
options.timestepper_type = 'CrankNicolson'
options.timestep = 50.0
# We will force the model with a constant volume flux at the right boundary
# (x=40 km) and impose a tidal volume flux on the left boundary (x=0 km).
# Note that we have increased ``t_end`` and ``t_export`` to better illustrate
# tidal dynamics.
#
# Boundary conditions are defined for each external boundary using their ID.
# In this example we are using a
# :py:func:`~.firedrake.utility_meshes.RectangleMesh` which assigns IDs 1, 2, 3,
# and 4 for the four sides of the rectangle::
left_bnd_id = 1
right_bnd_id = 2
# At each boundary we need to define the external value of the prognostic
# variables, i.e. in this case the water elevation and velocity.
# The value should be either a Firedrake :py:class:`~.firedrake.constant.Constant` or
# :py:class:`~.firedrake.function.Function` (in case the boundary condition is not uniform in space).
#
# We store the boundary conditions in a dictionary::
swe_bnd = {}
in_flux = 1e3
swe_bnd[right_bnd_id] = {'elev': Constant(0.0),
'flux': Constant(-in_flux)}
# Above we set the water elevation to zero and prescribe a constant volume flux.
# The volume flux is defined as outward normal flux, i.e. a negative value stands
# for flow into the domain.
# Alternatively we could also prescribe the normal velocity (with key ``'un'``)
# or the 2D velocity vector (``'uv'``).
# For all supported boundary conditions, see module :py:mod:`~.shallowwater_eq`.
#
# In order to set time-dependent boundary conditions we first define a python
# function that evaluates the time dependent variable::
def timedep_flux(simulation_time):
"""Time-dependent flux function"""
tide_amp = -2e3
tide_t = 12 * 3600.
flux = tide_amp*sin(2 * pi * simulation_time / tide_t) + in_flux
return flux
# We then create a Constant object with the initial value,
# and assign it to the left boundary::
tide_flux_const = Constant(timedep_flux(0))
swe_bnd[left_bnd_id] = {'flux': tide_flux_const}
# Boundary conditions are now complete, and we assign them to the solver
# object::
solver_obj.bnd_functions['shallow_water'] = swe_bnd
# Note that if boundary conditions are not assigned for some boundaries
# (the lateral boundaries 3 and 4 in this case), Thetis assumes impermeable land
# conditions.
#
# The only missing piece is to add a mechanism that re-evaluates the boundary
# condition as the simulation progresses.
# For this purpose we use the optional ``update_forcings`` argument of the
# :py:meth:`~.FlowSolver2d.iterate` method.
# ``update_forcings`` is a python function that updates all time dependent
# :py:class:`~.firedrake.constant.Constant`\s or
# :py:class:`~.firedrake.function.Function`\s used to force the model.
# In this case we only need to update ``tide_flux_const``::
def update_forcings(t_new):
"""Callback function that updates all time dependent forcing fields"""
tide_flux_const.assign(timedep_flux(t_new))
# and finally pass this callback to the time iterator::
solver_obj.iterate(update_forcings=update_forcings)
#
# This tutorial can be downloaded as a Python script `here <demo_2d_channel_bnd.py>`__.
| {
"repo_name": "tkarna/cofs",
"path": "demos/demo_2d_channel_bnd.py",
"copies": "2",
"size": "3996",
"license": "mit",
"hash": -4719998112690868000,
"line_mean": 35,
"line_max": 101,
"alpha_frac": 0.7187187187,
"autogenerated": false,
"ratio": 3.256723716381418,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9860825422664152,
"avg_score": 0.022923402483453096,
"num_lines": 111
} |
# 2D Discrete Fourier Transform (DFT) and its inverse
# Warning: Computation is slow so only suitable for thumbnail size images!
# FB - 20150102
from PIL import Image
import cmath
pi2 = cmath.pi * 2.0
def DFT2D(image):
global M, N
(M, N) = image.size # (imgx, imgy)
dft2d_red = [[0.0 for k in range(M)] for l in range(N)]
dft2d_grn = [[0.0 for k in range(M)] for l in range(N)]
dft2d_blu = [[0.0 for k in range(M)] for l in range(N)]
pixels = image.load()
for k in range(M):
for l in range(N):
sum_red = 0.0
sum_grn = 0.0
sum_blu = 0.0
for m in range(M):
for n in range(N):
(red, grn, blu, alpha) = pixels[m, n]
e = cmath.exp(- 1j * pi2 * (float(k * m) / M + float(l * n) / N))
sum_red += red * e
sum_grn += grn * e
sum_blu += blu * e
dft2d_red[l][k] = sum_red / M / N
dft2d_grn[l][k] = sum_grn / M / N
dft2d_blu[l][k] = sum_blu / M / N
return (dft2d_red, dft2d_grn, dft2d_blu)
def IDFT2D(dft2d):
(dft2d_red, dft2d_grn, dft2d_blu) = dft2d
global M, N
image = Image.new("RGB", (M, N))
pixels = image.load()
for m in range(M):
for n in range(N):
sum_red = 0.0
sum_grn = 0.0
sum_blu = 0.0
for k in range(M):
for l in range(N):
e = cmath.exp(1j * pi2 * (float(k * m) / M + float(l * n) / N))
sum_red += dft2d_red[l][k] * e
sum_grn += dft2d_grn[l][k] * e
sum_blu += dft2d_blu[l][k] * e
red = int(sum_red.real + 0.5)
grn = int(sum_grn.real + 0.5)
blu = int(sum_blu.real + 0.5)
pixels[m, n] = (red, grn, blu)
return image
# TEST
# Recreate input image from 2D DFT results to compare to input image
image = IDFT2D(DFT2D(Image.open("input.png")))
image.save("output.png", "PNG")
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/578997_2D_Discrete_Fourier_Transform/recipe-578997.py",
"copies": "1",
"size": "2049",
"license": "mit",
"hash": -5047564682141014000,
"line_mean": 34.9473684211,
"line_max": 85,
"alpha_frac": 0.4782820888,
"autogenerated": false,
"ratio": 2.7356475300400533,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8670880961448435,
"avg_score": 0.008609731478323775,
"num_lines": 57
} |
# 2 DECIMAL POINT
#9-3-17
# Initialize resistor colors
BLACK = ['black', 0, 0, 0, 1, None]
BROWN = ['brown', 1, 1, 1, 10, "1%"]
RED = ['red', 2, 2, 2, 100, "2%"]
ORANGE = ['orange', 3, 3, 3, 1000, "3%"]
YELLOW = ['yellow', 4, 4, 4, 10000, "4%"]
GREEN = ['green', 5, 5, 5, 100000, "0.5%"]
BLUE = ['blue', 6, 6, 6, 1000000, "0.25%"]
PURPLE = ['purple', 7, 7, 7, 10000000, "0.1%"]
GREY = ['grey', 8, 8, 8, None, "0.05%"]
WHITE = ['white', 9, 9, 9, None, None]
GOLD = ['gold', None, None, None, 0.1, "5%"]
SILVER = ['silver', None, None, None, 0.01, "10%"]
RESISTORCOLORS = [BLACK, BROWN, RED, ORANGE, YELLOW, GREEN, BLUE, PURPLE, GREY, WHITE, GOLD, SILVER]
currentColorBandDict = {}
newResistorValueList = []
# Ask for user input of resistor value, number of bands, and tolerance value
userInputResistorValue = float(input("Enter the resistor value: "))
#userInputNumBands = int(input("Enter the number of bands: "))
userInputNumBands = 5
print ("Tolerance Values: 0.05% | 0.1% | 0.25% | 0.5% | 1% | 2% | 5% | 10% | 20%")
#userInputToleranceValue = str(input("Enter the tolerance value: "))
userInputToleranceValue = '5%'
# Add each digit in userInputResistorValue to string in a list
oldResistorValueList = list(str(userInputResistorValue))
# Add a '0' to fix input values less than 1 rounded to 1 decimal place
if userInputResistorValue < 10 and (len(oldResistorValueList) == 3 or len(oldResistorValueList) == 4):
oldResistorValueList.append('0')
# Create a new list with digits other than 0 and a decimal point
# Determine if userInputResistorValue is a decimal number
for item in oldResistorValueList:
if item == '.':
if newResistorValueList[0] == 0:
del newResistorValueList[0]
else:
newResistorValueList.append(int(item))
# Executes if 5 band color code is requested
if userInputNumBands == 5:
i = 0 # first band iterator
j = 0 # second band iterator
k = 0 # third band iterator
l = 0 # multiplier iterator
m = 0 # tolerance iterator
# Get firstBandNum and firstBandColor
while i <= 11:
if RESISTORCOLORS[i][1] == newResistorValueList[0]:
currentColorBandDict['firstBandColor'] = RESISTORCOLORS[i][0]
#if DECIMAL == True and userInputResistorValue < 1:
if userInputResistorValue < 1:
firstBandNum = oldResistorValueList[2]
else:
firstBandNum = oldResistorValueList[0]
i += 1
# Get secondBandNum and secondBandColor
while j <= 11:
if RESISTORCOLORS[j][2] == newResistorValueList[1]:
currentColorBandDict['secondBandColor'] = RESISTORCOLORS[j][0]
if userInputResistorValue < 1:
secondBandNum = oldResistorValueList[3]
elif userInputResistorValue >= 1 and userInputResistorValue < 10:
secondBandNum = oldResistorValueList[2]
else:
secondBandNum = oldResistorValueList[1]
j += 1
# Get thirdBandNum and thirdBandColor
while k <= 11:
if RESISTORCOLORS[k][3] == newResistorValueList[2]:
currentColorBandDict['thirdBandColor'] = RESISTORCOLORS[k][0]
if oldResistorValueList[2] == '.':
thirdBandNum = oldResistorValueList[3]
else:
thirdBandNum = oldResistorValueList[2]
k += 1
# Calculate multiplier value
firstSecondAndThirdBandNum = float(firstBandNum + secondBandNum + thirdBandNum)
multiplier = round((userInputResistorValue / firstSecondAndThirdBandNum), 3)
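    # e.g. for a 4700 ohm input the significant digits are 4, 7 and 0, so the
    # multiplier works out to 4700 / 470 = 10 (a brown fourth band)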
# Get fourthBandColor
while l <= 11:
if RESISTORCOLORS[l][4] == multiplier:
currentColorBandDict['fourthBandColor'] = RESISTORCOLORS[l][0]
l += 1
# Get fifthBandColor
while m <= 11:
if RESISTORCOLORS[m][5] == userInputToleranceValue:
currentColorBandDict['tolerance'] = RESISTORCOLORS[m][0]
m += 1
# Display current color band in the terminal
print (currentColorBandDict)
| {
"repo_name": "dgaiero/Resistor-Band-Picture-Creator",
"path": "Test Scripts/resistorAlgorithm5Band.py",
"copies": "1",
"size": "4016",
"license": "mit",
"hash": 279273345368002000,
"line_mean": 37.6153846154,
"line_max": 102,
"alpha_frac": 0.6389442231,
"autogenerated": false,
"ratio": 3.217948717948718,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4356892941048718,
"avg_score": null,
"num_lines": null
} |
## 2. Defining custom classes ##
print(header)
class Player():
# The special __init__ function is run whenever a class is instantiated.
# The init function can take arguments, but self is always the first one.
# Self is just a reference to the instance of the class. It is automatically
# passed in when you instantiate an instance of the class.
def __init__(self, data_row):
self.player_name = data_row[0]
self.position = data_row[1]
self.age = data_row[2]
self.team = data_row[3]
# Initialize a player using the first row of our dataset
first_player = Player(nba[0])
# Implement the Team class
class Team():
def __init__(self,name):
self.team_name = name
spurs = Team('San Antonio Spurs')
## 3. More interesting instance properties ##
class Player():
# The special __init__ function is run whenever a class is instantiated.
# The init function can take arguments, but self is always the first one.
# Self is just a reference to the instance of the class. It is automatically
# passed in when you instantiate an instance of the class.
def __init__(self, data_row):
self.player_name = data_row[0]
self.position = data_row[1]
self.age = int(data_row[2])
self.team = data_row[3]
# Initialize a player using the first row of our dataset
first_player = Player(nba[0])
class Team():
def __init__(self, team_name):
self.team_name = team_name
# Team roster initially empty
self.roster = []
# Find the players for the roster in the dataset
for row in nba:
if row[3] == team_name:
self.roster.append(Player(row))
spurs = Team('San Antonio Spurs')
## 4. Instance Methods ##
class Player():
# The special __init__ function is run whenever a class is instantiated.
# The init function can take arguments, but self is always the first one.
# Self is just a reference to the instance of the class. It is automatically
# passed in when you instantiate an instance of the class.
def __init__(self, data_row):
self.player_name = data_row[0]
self.position = data_row[1]
self.age = int(data_row[2])
self.team = data_row[3]
class Team():
def __init__(self, team_name):
self.team_name = team_name
# Team roster initially empty
self.roster = []
# Find the players for the roster in the dataset
for row in nba:
if row[3] == self.team_name:
self.roster.append(Player(row))
def num_players(self):
count = 0
for player in self.roster:
count += 1
return count
# Implement the average_age instance method
def average_age(self):
age = 0
for player in self.roster:
age += player.age
return age/self.num_players()
spurs = Team("San Antonio Spurs")
spurs_num_players = spurs.num_players()
spurs_avg_age = spurs.average_age()
## 5. Class Methods ##
import math
class Player():
# The special __init__ function is run whenever a class is instantiated.
# The init function can take arguments, but self is always the first one.
# Self is just a reference to the instance of the class. It is automatically
# passed in when you instantiate an instance of the class.
def __init__(self, data_row):
self.player_name = data_row[0]
self.position = data_row[1]
self.age = int(data_row[2])
self.team = data_row[3]
class Team():
def __init__(self, team_name):
self.team_name = team_name
self.roster = []
for row in nba:
if row[3] == self.team_name:
self.roster.append(Player(row))
def num_players(self):
count = 0
for player in self.roster:
count += 1
return count
def average_age(self):
return math.fsum([player.age for player in self.roster]) / self.num_players()
@classmethod
def older_team(self, team1, team2):
if team1.average_age()>team2.average_age():
return team1
else:
return team2
old_team = Team.older_team(Team("New York Knicks"), Team("Miami Heat"))
## 7. Overriding ##
class Player(object):
# The special __init__ function is run whenever a class is instantiated.
# The init function can take arguments, but self is always the first one.
# Self is just a reference to the instance of the class. It is automatically
# passed in when you instantiate an instance of the class.
def __init__(self, data_row):
self.player_name = data_row[0]
self.position = data_row[1]
self.age = int(data_row[2])
self.team = data_row[3]
def __lt__(self, other):
return self.age < other.age
# Implement the rest of the comparison operators here
def __gt__(self, other):
return self.age > other.age
def __le__(self, other):
return self.age <= other.age
def __ge__(self, other):
return self.age >= other.age
def __eq__(self, other):
return self.age == other.age
def __ne__(self, other):
return self.age != other.age
carmelo = Player(nba[17])
kobe = Player(nba[68])
result = carmelo != kobe
## 9. Oldest NBA Team ##
import math
class Team(object):
def __init__(self, team_name):
self.team_name = team_name
self.roster = []
for row in nba:
if row[3] == self.team_name:
self.roster.append(Player(row))
def num_players(self):
count = 0
for player in self.roster:
count += 1
return count
def average_age(self):
return math.fsum([player.age for player in self.roster]) / self.num_players()
def __lt__(self, other):
return self.average_age() < other.average_age()
def __gt__(self, other):
return self.average_age() > other.average_age()
def __le__(self, other):
return self.average_age() <= other.average_age()
def __ge__(self, other):
return self.average_age() >= other.average_age()
def __eq__(self, other):
return self.average_age() == other.average_age()
def __ne__(self, other):
return self.average_age() != other.average_age()
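# Aside (an illustrative addition, not part of the original exercise):
# functools.total_ordering can derive the remaining rich comparisons from
# just __eq__ and one ordering method, avoiding six hand-written dunders.
from functools import total_ordering
@total_ordering
class ComparableTeam(Team):
    def __eq__(self, other):
        return self.average_age() == other.average_age()
    def __lt__(self, other):
        return self.average_age() < other.average_age()
# max(), min() and sorted() work on ComparableTeam instances just as below.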
team_names = ["Boston Celtics", "Brooklyn Nets", "New York Knicks", "Philadelphia 76ers", "Toronto Raptors",
"Chicago Bulls", "Cleveland Cavaliers", "Detroit Pistons", "Indiana Pacers", "Milwaukee Bucks",
"Atlanta Hawks", "Charlotte Hornets", "Miami Heat", "Orlando Magic", "Washington Wizards",
"Dallas Mavericks", "Houston Rockets", "Memphis Grizzlies", "New Orleans Pelicans", "San Antonio Spurs",
"Denver Nuggets", "Minnesota Timberwolves", "Oklahoma City Thunder", "Portland Trail Blazers", "Utah Jazz",
"Golden State Warriors", "Los Angeles Clippers", "Los Angeles Lakers", "Phoenix Suns", "Sacramento Kings"]
# Alter this list comprehension
teams = [Team(name) for name in team_names]
oldest_team = max(teams)
youngest_team = min(teams)
sorted_teams = sorted(teams)
| {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Python Programming Advanced/Object-oriented programming-108.py",
"copies": "1",
"size": "7191",
"license": "mit",
"hash": -6317349503392777000,
"line_mean": 34.0829268293,
"line_max": 116,
"alpha_frac": 0.6136837714,
"autogenerated": false,
"ratio": 3.5146627565982405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.462834652799824,
"avg_score": null,
"num_lines": null
} |
# 2D example of viewing aggregates from SA using VTK
from pyamg.aggregation import standard_aggregation
from pyamg.vis import vis_coarse, vtk_writer
from pyamg.gallery import load_example
from pyamg import *
from scipy import *
# retrieve the problem
data = load_example('unit_square')
A = data['A'].tocsr()
V = data['vertices']
E2V = data['elements']
# perform smoothed aggregation
ml = smoothed_aggregation_solver(A,keep=True,max_coarse=10)
b = sin(pi*V[:,0])*sin(pi*V[:,1])
x = ml.solve(b)
# create the vtk file of aggregates
vis_coarse.vis_aggregate_groups(Verts=V, E2V=E2V,
Agg=ml.levels[0].AggOp, mesh_type='tri',
output='vtk', fname='output_aggs.vtu')
# create the vtk file for mesh and solution
vtk_writer.write_basic_mesh(Verts=V, E2V=E2V,
pdata = x,
mesh_type='tri',
fname='output_mesh.vtu')
# to use Paraview:
# start Paraview: Paraview --data=output_mesh.vtu
# apply
# under display in the object inspector:
# select wireframe representation
# select a better solid color
# open file: output_aggs.vtu
# under display in the object inspector:
# select surface with edges representation
# select a better solid color
# increase line width and point size to see these aggs (if present)
| {
"repo_name": "pombreda/pyamg",
"path": "Examples/WorkshopCopper11/task2.3.py",
"copies": "2",
"size": "1357",
"license": "bsd-3-clause",
"hash": 5867146414176847000,
"line_mean": 32.925,
"line_max": 77,
"alpha_frac": 0.6595431098,
"autogenerated": false,
"ratio": 3.3423645320197046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5001907641819705,
"avg_score": null,
"num_lines": null
} |
# 2D example of viewing aggregates from SA using VTK
from pyamg.aggregation import standard_aggregation
from pyamg.vis import vis_coarse, vtk_writer
from pyamg.gallery import load_example
# retrieve the problem
data = load_example('unit_square')
A = data['A'].tocsr()
V = data['vertices']
E2V = data['elements']
# perform smoothed aggregation
Agg = standard_aggregation(A)
# create the vtk file of aggregates
vis_coarse.vis_aggregate_groups(Verts=V, E2V=E2V, Agg=Agg, \
mesh_type='tri', output='vtk', \
fname='output_aggs.vtu')
# create the vtk file for a mesh
vtk_writer.write_basic_mesh(Verts=V, E2V=E2V, \
mesh_type='tri', \
fname='output_mesh.vtu')
# to use Paraview:
# start Paraview: Paraview --data=output_mesh.vtu
# apply
# under display in the object inspector:
# select wireframe representation
# select a better solid color
# open file: output_aggs.vtu
# under display in the object inspector:
# select surface with edges representation
# select a better solid color
# increase line width and point size to see these aggs (if present)
| {
"repo_name": "pombreda/pyamg",
"path": "Examples/VisualizingAggregation/demo1.py",
"copies": "1",
"size": "1224",
"license": "bsd-3-clause",
"hash": -5113691601155100000,
"line_mean": 33.9714285714,
"line_max": 77,
"alpha_frac": 0.6454248366,
"autogenerated": false,
"ratio": 3.5789473684210527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9702812376533836,
"avg_score": 0.004311965697443154,
"num_lines": 35
} |
# 2D Fluid Simulation using FHP LGCA (Lattice Gas Cellular Automata)
# Simulates fluid flow in a circular channel.
# Particles go out from right side and enter back from left.
# Reference:
# Lattice Gas Cellular Automata and Lattice Boltzmann Models by Wolf-Gladrow
# FB - 20140818
import math
import random
from PIL import Image
imgx = 512; imgy = 512 # image size
image = Image.new("RGB", (imgx, imgy))
pixels = image.load()
# simulation parameters:
tilesX = 32
tilesY = 32
n = 8 # coarse graining tile size is n by n
timeSteps = 300
nodesX = tilesX * n
nodesY = tilesY * n
nodes = [[[0 for x in range(nodesX)] for y in range(nodesY)] for z in range(6)]
obstacle = [[0 for x in range(nodesX)] for y in range(nodesY)]
# insert a square obstacle in the middle
for y in range(nodesY / 4):
for x in range(nodesX / 4):
obstacle[y + nodesY / 2 - nodesY / 8][x + nodesX / 2 - nodesX / 8] = 1
# fill-up with fluid flowing towards right
for y in range(1, nodesY - 1): # do not include top/bottom walls
for x in range(nodesX):
if obstacle[y][x] != 1:
nodes[0][y][x] = 1
for t in range(timeSteps): # run the simulation
# HANDLE COLLISIONS
# collisions at non-boundary nodes
for y in range(1, nodesY - 1): # do not include top/bottom walls
for x in range(nodesX):
if obstacle[y][x] != 1:
cell = [nodes[z][y][x] for z in range(6)]
numParticles = sum(cell)
# only 2 or 3 symmetric particle collisions implemented here
if numParticles == 3:
if cell[0] == cell[2] and cell[2] == cell[4]:
# invert the cell contents
for z in range(6):
nodes[z][y][x] = 1 - cell[z]
elif numParticles == 2:
# find the cell of one of the particles
p = cell.index(1)
# its diametric opposite must occupied as well
if p > 2:
pass
elif cell[p + 3] == 0:
pass
else:
# randomly rotate the particle pair clockwise or
# counterclockwise
if random.randint(0, 1) == 0: # counterclockwise
nodes[0][y][x] = cell[5]
nodes[1][y][x] = cell[0]
nodes[2][y][x] = cell[1]
nodes[3][y][x] = cell[2]
nodes[4][y][x] = cell[3]
nodes[5][y][x] = cell[4]
else: # clockwise
nodes[0][y][x] = cell[1]
nodes[1][y][x] = cell[2]
nodes[2][y][x] = cell[3]
nodes[3][y][x] = cell[4]
nodes[4][y][x] = cell[5]
nodes[5][y][x] = cell[0]
# collisions along top/bottom walls (no-slip)
for x in range(nodesX):
cell = [nodes[z][0][x] for z in range(6)]
nodes[0][0][x] = cell[3]
nodes[1][0][x] = cell[4]
nodes[2][0][x] = cell[5]
nodes[3][0][x] = cell[0]
nodes[4][0][x] = cell[1]
nodes[5][0][x] = cell[2]
cell = [nodes[z][nodesY - 1][x] for z in range(6)]
nodes[0][nodesY - 1][x] = cell[3]
nodes[1][nodesY - 1][x] = cell[4]
nodes[2][nodesY - 1][x] = cell[5]
nodes[3][nodesY - 1][x] = cell[0]
nodes[4][nodesY - 1][x] = cell[1]
nodes[5][nodesY - 1][x] = cell[2]
# collisions at obstacle points (no-slip)
for y in range(nodesY):
for x in range(nodesX):
if obstacle[y][x] == 1:
cell = [nodes[z][y][x] for z in range(6)]
nodes[0][y][x] = cell[3]
nodes[1][y][x] = cell[4]
nodes[2][y][x] = cell[5]
nodes[3][y][x] = cell[0]
nodes[4][y][x] = cell[1]
nodes[5][y][x] = cell[2]
# HANDLE MOVEMENTS
nodesNew = [[[0 for x in range(nodesX)] for y in range(nodesY)] for z in range(6)]
for y in range(nodesY):
for x in range(nodesX):
cell = [nodes[z][y][x] for z in range(6)]
# propagation in the 0-direction
neighbor_y = y
if x == nodesX - 1:
neighbor_x = 0
else:
neighbor_x = x + 1
nodesNew[0][neighbor_y][neighbor_x] = cell[0]
# propagation in the 1-direction
if y != nodesY - 1:
neighbor_y = y + 1
if y % 2 == 1:
if x == nodesX - 1:
neighbor_x = 1
else:
neighbor_x = x + 1
else:
neighbor_x = x
nodesNew[1][neighbor_y][neighbor_x] = cell[1]
# propagation in the 2-direction
if y != nodesY - 1:
neighbor_y = y + 1
if y % 2 == 0:
if x == 0:
neighbor_x = nodesX - 1
else:
neighbor_x = x - 1
else:
neighbor_x = x
nodesNew[2][neighbor_y][neighbor_x] = cell[2]
# propagation in the 3-direction
neighbor_y = y
if x == 0:
neighbor_x = nodesX - 1
else:
neighbor_x = x - 1
nodesNew[3][neighbor_y][neighbor_x] = cell[3]
# propagation in the 4-direction
if y != 0:
neighbor_y = y - 1
if y % 2 == 0:
if x == 0:
neighbor_x = nodesX - 1
else:
neighbor_x = x - 1
else:
neighbor_x = x
nodesNew[4][neighbor_y][neighbor_x] = cell[4]
# propagation in the 5-direction
if y != 0:
neighbor_y = y - 1
if y % 2 == 1:
if x == nodesX - 1:
neighbor_x = 0
else:
neighbor_x = x + 1
else:
neighbor_x = x
nodesNew[5][neighbor_y][neighbor_x] = cell[5]
nodes = nodesNew
print '%' + str(100 * t / timeSteps) # show progress
# Create an image from the final state
# Calculate average velocity vectors for tiles
aveVelocityVectorMag = [[0.0 for x in range(tilesX)] for y in range(tilesY)]
aveVelocityVectorAng = [[0.0 for x in range(tilesX)] for y in range(tilesY)]
pi2 = math.pi * 2.0
dx = [math.cos(i * pi2 / 6.0) for i in range(6)]
dy = [math.sin(i * pi2 / 6.0) for i in range(6)]
for ty in range(tilesY):
for tx in range(tilesX):
vx = 0.0
vy = 0.0
for cy in range(n):
for cx in range(n):
for z in range(6):
if nodes[z][ty * n + cy][tx * n + cx] == 1 \
and obstacle[ty * n + cy][tx * n + cx] == 0:
vx += dx[z]
vy += dy[z]
aveVelocityVectorMag[ty][tx] = math.hypot(vx, vy) / n ** 2.0
aveVelocityVectorAng[ty][tx] = (math.atan2(vy, vx) + pi2) % pi2
for ky in range(imgy):
iy = nodesY * ky / imgy
jy = tilesY * ky / imgy
for kx in range(imgx):
ix = nodesX * kx / imgx
jx = tilesX * kx / imgx
if obstacle[iy][ix] == 1: # paint the obstacle(s)
red = 0
grn = 0
blu = 255
else: # use vector magnitude and angle for coloring
aveVelVecMag = aveVelocityVectorMag[jy][jx]
aveVelVecAng = aveVelocityVectorAng[jy][jx]
red = int(aveVelVecMag * 255)
grn = int(aveVelVecAng / pi2 * 255)
blu = 0
pixels[kx, ky] = (red, grn, blu)
image.save("FHP_LGCA_2DFluidSim.png", "PNG")
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/578924_2D_Fluid_Simulatiusing_FHP/recipe-578924.py",
"copies": "1",
"size": "8116",
"license": "mit",
"hash": 808702404464751400,
"line_mean": 35.7239819005,
"line_max": 86,
"alpha_frac": 0.4499753573,
"autogenerated": false,
"ratio": 3.525629887054735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44756052443547345,
"avg_score": null,
"num_lines": null
} |
"""2D Fourier mapping"""
import numpy as np
import scipy.interpolate as intp
def fourier_map_2d(uSin, angles, res, nm, lD=0, semi_coverage=False,
coords=None, count=None, max_count=None, verbose=0):
r"""2D Fourier mapping with the Fourier diffraction theorem
Two-dimensional diffraction tomography reconstruction
algorithm for scattering of a plane wave
:math:`u_0(\mathbf{r}) = u_0(x,z)`
by a dielectric object with refractive index
:math:`n(x,z)`.
This function implements the solution by interpolation in
Fourier space.
Parameters
----------
uSin: (A,N) ndarray
Two-dimensional sinogram of line recordings
:math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})`
divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
measured at the detector.
angles: (A,) ndarray
Angular positions :math:`\phi_j` of `uSin` in radians.
res: float
Vacuum wavelength of the light :math:`\lambda` in pixels.
nm: float
Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
lD: float
Distance from center of rotation to detector plane
:math:`l_\mathrm{D}` in pixels.
semi_coverage: bool
If set to `True`, it is assumed that the sinogram does not
necessarily cover the full angular range from 0 to 2π, but an
equidistant coverage over 2π can be achieved by inferring point
(anti)symmetry of the (imaginary) real parts of the Fourier
transform of f. Valid for any set of angles {X} that result in
a 2π coverage with the union set {X}U{X+π}.
coords: None [(2,M) ndarray]
Computes only the output image at these coordinates. This
keyword is reserved for future versions and is not
implemented yet.
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
verbose: int
Increment to increase verbosity.
Returns
-------
f: ndarray of shape (N,N), complex if `onlyreal` is `False`
Reconstructed object function :math:`f(\mathbf{r})` as defined
by the Helmholtz equation.
:math:`f(x,z) =
k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
See Also
--------
backpropagate_2d: implementation by backpropagation
odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
to refractive index :math:`n(\mathbf{r})`
Notes
-----
Do not use the parameter `lD` in combination with the Rytov
approximation - the propagation is not correctly described.
Instead, numerically refocus the sinogram prior to converting
it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
with a numerical focusing algorithm (available in the Python
package :py:mod:`nrefocus`).
The interpolation in Fourier space (which is done with
:func:`scipy.interpolate.griddata`) may be unstable and lead to
artifacts if the data to interpolate contains sharp spikes. This
issue is not handled at all by this method (in fact, a test has
been removed in version 0.2.6 because ``griddata`` gave different
results on Windows and Linux).
"""
##
##
# TODO:
# - zero-padding as for backpropagate_2D - However this is not
# necessary as Fourier interpolation is not parallelizable with
# multiprocessing and thus unattractive. Could be interesting for
# specific environments without the Python GIL.
# - Deal with oversampled data. Maybe issue a warning.
##
##
A = angles.shape[0]
if max_count is not None:
max_count.value += 4
# Check input data
assert len(uSin.shape) == 2, "Input data `uSin` must have shape (A,N)!"
assert len(uSin) == A, "`len(angles)` must be equal to `len(uSin)`!"
if coords is not None:
raise NotImplementedError("Output coordinates cannot yet be set"
+ "for the 2D backrpopagation algorithm.")
# Cut-Off frequency
# km [1/px]
km = (2 * np.pi * nm) / res
# Fourier transform of all uB's
# In the script we used the unitary angular frequency (uaf) Fourier
# Transform. The discrete Fourier transform is equivalent to the
# unitary ordinary frequency (uof) Fourier transform.
#
# uof: f₁(ξ) = int f(x) exp(-2πi xξ)
#
# uaf: f₃(ω) = (2π)^(-n/2) int f(x) exp(-i ωx)
#
# f₁(ω/(2π)) = (2π)^(n/2) f₃(ω)
# ω = 2πξ
#
# Our Backpropagation Formula is with uaf convention of the Form
#
# F(k) = 1/sqrt(2π) U(kD)
#
# If we convert now to uof convention, we get
#
# F(k) = U(kD)
#
# This means that if we divide the Fourier transform of the input
# data by sqrt(2π) to convert f₃(ω) to f₁(ω/(2π)), the resulting
# value for F is off by a factor of 2π.
#
# Instead, we can just multiply *UB* by sqrt(2π) and calculate
# everything in uof.
# UB = np.fft.fft(np.fft.ifftshift(uSin, axes=-1))/np.sqrt(2*np.pi)
#
#
# Furthermore, we define
# a wave propagating to the right as:
#
# u0(x) = exp(ikx)
#
# However, in physics usually we use the other sign convention:
#
# u0(x) = exp(-ikx)
#
# In order to be consistent with programs like Meep or our
# scattering script for a dielectric cylinder, we want to use the
# latter sign convention.
# This is not a big problem. We only need to multiply the imaginary
# part of the scattered wave by -1.
UB = np.fft.fft(np.fft.ifftshift(uSin, axes=-1)) * np.sqrt(2 * np.pi)
# Corresponding sample frequencies
fx = np.fft.fftfreq(len(uSin[0])) # 1D array
# kx is a 1D array.
kx = 2 * np.pi * fx
if count is not None:
count.value += 1
# Undersampling/oversampling?
# Determine if the resolution of the image is too low by looking
# at the maximum value for kx. This is no comparison between
# Nyquist and Rayleigh frequency.
if np.max(kx**2) <= km**2:
    # Detector is not set up properly. Higher resolution
    # can be achieved.
    if verbose:
        print("......Measurement data is undersampled.")
elif verbose:
    print("......Measurement data is oversampled.")
# raise NotImplementedError("Oversampled data not yet supported."+
# " Please rescale xD-axis of the input data.")
# DEAL WITH OVERSAMPLED DATA?
# lenk = len(kx)
# kx = np.fft.ifftshift(np.linspace(-np.sqrt(km),
# np.sqrt(km),
# len(fx), endpoint=False))
#
# F(kD-kₘs₀) = - i kₘ sqrt(2/π) / a₀ * M exp(-i kₘ M lD) * UB(kD)
# kₘM = sqrt( kₘ² - kx² )
# s₀ = ( -sin(ϕ₀), cos(ϕ₀) )
#
# We create the 2D interpolation object F
# - We compute the real coordinates (krx,kry) = kD-kₘs₀
# - We set as grid points the right side of the equation
#
# The interpolated griddata may go up to sqrt(2)*kₘ for kx and ky.
kx = kx.reshape(1, -1)
# a0 should have same shape as kx and UB
# a0 = np.atleast_1d(a0)
# a0 = a0.reshape(1,-1)
filter_klp = (kx**2 < km**2)
M = 1. / km * np.sqrt(km**2 - kx**2)
# Fsin = -1j * km * np.sqrt(2/np.pi) / a0 * M * np.exp(-1j*km*M*lD)
# new in version 0.1.4:
# We multiply by the factor (M-1) instead of just (M)
# to take into account that we have a scattered
# wave that is normalized by u0.
Fsin = -1j * km * np.sqrt(2 / np.pi) * M * np.exp(-1j * km * (M-1) * lD)
# UB has same shape (len(angles), len(kx))
Fsin = Fsin * UB * filter_klp
ang = angles.reshape(-1, 1)
if semi_coverage:
Fsin = np.vstack((Fsin, np.conj(Fsin)))
ang = np.vstack((ang, ang + np.pi))
if count is not None:
count.value += 1
# Compute kxl and kyl (in rotated system ϕ₀)
kxl = kx
kyl = np.sqrt((km**2 - kx**2) * filter_klp) - km
# rotate kxl and kyl to where they belong
krx = np.cos(ang) * kxl + np.sin(ang) * kyl
kry = - np.sin(ang) * kxl + np.cos(ang) * kyl
Xf = krx.flatten()
Yf = kry.flatten()
Zf = Fsin.flatten()
# DEBUG: plot kry vs krx
# from matplotlib import pylab as plt
# plt.figure()
# for i in range(len(krx)):
# plt.plot(krx[i],kry[i],"x")
# plt.axes().set_aspect('equal')
# plt.show()
# interpolation on grid with same resolution as input data
kintp = np.fft.fftshift(kx.reshape(-1))
Fcomp = intp.griddata((Xf, Yf), Zf, (kintp[None, :], kintp[:, None]))
if count is not None:
count.value += 1
# removed nans
Fcomp[np.where(np.isnan(Fcomp))] = 0
# Filter data
kinx, kiny = np.meshgrid(np.fft.fftshift(kx), np.fft.fftshift(kx))
Fcomp[np.where((kinx**2 + kiny**2) > np.sqrt(2) * km)] = 0
# Fcomp[np.where(kinx**2+kiny**2<km)] = 0
# Fcomp is centered at K = 0 due to the way we chose kintp/coords
f = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(Fcomp)))
if count is not None:
count.value += 1
return f[::-1]
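# ----------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). A minimal, physically meaningless example that only exercises
# the call signature and output shape of `fourier_map_2d`; the
# wavelength/medium values below are arbitrary placeholders.
if __name__ == "__main__":
    demo_A, demo_N = 120, 64
    demo_angles = np.linspace(0, 2 * np.pi, demo_A, endpoint=False)
    demo_sino = np.ones((demo_A, demo_N), dtype=complex)  # placeholder u_B / u_0
    demo_f = fourier_map_2d(demo_sino, demo_angles, res=2.0, nm=1.333)
    print("reconstruction shape:", demo_f.shape)  # expected: (demo_N, demo_N)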
| {
"repo_name": "RI-imaging/ODTbrain",
"path": "odtbrain/_alg2d_fmp.py",
"copies": "2",
"size": "9369",
"license": "bsd-3-clause",
"hash": 4093693129342112000,
"line_mean": 34.6283524904,
"line_max": 76,
"alpha_frac": 0.6084525218,
"autogenerated": false,
"ratio": 3.2054463977938643,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4813898919593864,
"avg_score": null,
"num_lines": null
} |
# 2d grid posterior approximation to N(x|mu,sigma^2) N(mu) Cauchy(sigma)
# https://www.ritchievink.com/blog/2019/06/10/bayesian-inference-how-we-are-able-to-chase-the-posterior/
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
figdir = "../figures"
import os
def save_fig(fname):
if figdir: plt.savefig(os.path.join(figdir, fname))
data = np.array([195, 182])
# let's create a grid of our two parameters
mu = np.linspace(150, 250)
sigma = np.linspace(0, 15)[::-1]
mm, ss = np.meshgrid(mu, sigma) # just broadcasted parameters
# Likelihood
likelihood = stats.norm(mm, ss).pdf(data[0]) * stats.norm(mm, ss).pdf(data[1])
aspect = mm.max() / ss.max() / 3
extent = [mm.min(), mm.max(), ss.min(), ss.max()]
# extent = left right bottom top
plt.figure()
plt.imshow(likelihood, cmap='Reds', aspect=aspect, extent=extent)
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
plt.title('Likelihood')
save_fig('posteriorGridLik.pdf')
plt.show()
# Prior
prior = stats.norm(200, 15).pdf(mm) * stats.cauchy(0, 10).pdf(ss)
plt.figure()
plt.imshow(prior, cmap='Greens', aspect=aspect, extent=extent)
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
plt.title('Prior')
save_fig('posteriorGridPrior.pdf')
plt.show()
# Posterior - grid
unnormalized_posterior = prior * likelihood
posterior = unnormalized_posterior / np.nan_to_num(unnormalized_posterior).sum()
plt.figure()
plt.imshow(posterior, cmap='Blues', aspect=aspect, extent=extent)
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
plt.title('Posterior')
save_fig('posteriorGridPost.pdf')
plt.show()
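# Possible extension (added as an illustrative sketch; not in the original
# script): the same grid also gives the parameter marginals. With the
# meshgrid layout above, rows index sigma and columns index mu, so summing
# the normalized posterior over axis 0 marginalises out sigma (and axis 1
# marginalises out mu). nan_to_num guards against the NaNs produced at
# sigma = 0.
posterior_grid = np.nan_to_num(posterior)
marginal_mu = posterior_grid.sum(axis=0)
marginal_sigma = posterior_grid.sum(axis=1)
plt.figure()
plt.plot(mu, marginal_mu)
plt.xlabel(r'$\mu$')
plt.ylabel(r'$p(\mu \mid \mathcal{D})$')
plt.title('Grid-approximated marginal posterior')
save_fig('posteriorGridMarginalMu.pdf')
plt.show()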
| {
"repo_name": "probml/pyprobml",
"path": "scripts/posteriorGrid2d.py",
"copies": "1",
"size": "1566",
"license": "mit",
"hash": -2924953730165457400,
"line_mean": 26,
"line_max": 104,
"alpha_frac": 0.7056194125,
"autogenerated": false,
"ratio": 2.76678445229682,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39724038647968196,
"avg_score": null,
"num_lines": null
} |
"""2D histograms
"""
import pylab
import pandas as pd
import numpy as np
from .core import VizInput2D
__all__ = ["Hist2D"]
class Hist2D(VizInput2D):
"""2D histogram
.. plot::
:include-source:
:width: 80%
from numpy import random
from biokit.viz import hist2d
X = random.randn(10000)
Y = random.randn(10000)
h = hist2d.Hist2D(X,Y)
h.plot(bins=100, contour=True)
"""
def __init__(self, x, y=None, verbose=False):
""".. rubric:: constructor
:param x: an array for X values. See :class:`~biokit.viz.core.VizInput2D` for details.
:param y: an array for Y values. See :class:`~biokit.viz.core.VizInput2D` for details.
"""
super(Hist2D, self).__init__(x=x, y=y, verbose=verbose)
def plot(self, bins=100, cmap="hot_r", fontsize=10, Nlevels=4,
xlabel=None, ylabel=None, norm=None, range=None, normed=False,
colorbar=True, contour=True, grid=True, **kargs):
"""plots histogram of mean across replicates versus coefficient variation
:param int bins: binning for the 2D histogram (either a float or list
of 2 binning values).
:param cmap: a valid colormap (defaults to hot_r)
:param fontsize: fontsize for the labels
:param int Nlevels: must be more than 2
:param str xlabel: set the xlabel (overwrites content of the dataframe)
:param str ylabel: set the ylabel (overwrites content of the dataframe)
:param norm: set to 'log' to show the log10 of the values.
:param normed: normalise the data
:param range: as in pylab.hist2d: a 2x2 shape [[-3,3],[-4,4]]
:param contour: show some contours (default to True)
:param bool grid: Show underlying grid (defaults to True)
If the input is a dataframe, the xlabel and ylabel will be populated
with the column names of the dataframe.
"""
X = self.df[self.df.columns[0]].values
Y = self.df[self.df.columns[1]].values
if len(X) > 10000:
print("Computing 2D histogram. Please wait")
pylab.clf()
if norm == 'log':
from matplotlib import colors
res = pylab.hist2d(X, Y, bins=bins, normed=normed,
cmap=cmap, norm=colors.LogNorm())
else:
res = pylab.hist2d(X, Y, bins=bins, cmap=cmap,
normed=normed, range=range)
if colorbar is True:
pylab.colorbar()
if contour:
try:
bins1 = bins[0]
bins2 = bins[1]
except:
bins1 = bins
bins2 = bins
X, Y = pylab.meshgrid(res[1][0:bins1], res[2][0:bins2])
if contour:
if res[0].max().max() < 10 and norm == 'log':
pylab.contour(X, Y, res[0].transpose())
else:
levels = [round(x) for x in
pylab.logspace(0, pylab.log10(res[0].max().max()), Nlevels)]
pylab.contour(X, Y, res[0].transpose(), levels[2:])
#pylab.clabel(C, fontsize=fontsize, inline=1)
if ylabel is None:
ylabel = self.df.columns[1]
if xlabel is None:
xlabel = self.df.columns[0]
pylab.xlabel(xlabel, fontsize=fontsize)
pylab.ylabel(ylabel, fontsize=fontsize)
if grid is True:
pylab.grid(True)
return res
| {
"repo_name": "biokit/biokit",
"path": "biokit/viz/hist2d.py",
"copies": "1",
"size": "3513",
"license": "bsd-2-clause",
"hash": 4153284619454772700,
"line_mean": 30.9363636364,
"line_max": 94,
"alpha_frac": 0.559920296,
"autogenerated": false,
"ratio": 3.6978947368421053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47578150328421054,
"avg_score": null,
"num_lines": null
} |
"""2D plots of sound fields etc."""
import matplotlib as _mpl
import matplotlib.pyplot as _plt
from mpl_toolkits import axes_grid1 as _axes_grid1
import numpy as _np
from . import default as _default
from . import util as _util
def _register_cmap_clip(name, original_cmap, alpha):
"""Create a color map with "over" and "under" values."""
from matplotlib.colors import LinearSegmentedColormap
cdata = _plt.cm.datad[original_cmap]
if isinstance(cdata, dict):
cmap = LinearSegmentedColormap(name, cdata)
else:
cmap = LinearSegmentedColormap.from_list(name, cdata)
cmap.set_over([alpha * c + 1 - alpha for c in cmap(1.0)[:3]])
cmap.set_under([alpha * c + 1 - alpha for c in cmap(0.0)[:3]])
_plt.cm.register_cmap(cmap=cmap)
# The 'coolwarm' colormap is based on the paper
# "Diverging Color Maps for Scientific Visualization" by Kenneth Moreland
# http://www.sandia.gov/~kmorel/documents/ColorMaps/
_register_cmap_clip('coolwarm_clip', 'coolwarm', 0.7)
def _register_cmap_transparent(name, color):
"""Create a color map from a given color to transparent."""
from matplotlib.colors import colorConverter, LinearSegmentedColormap
red, green, blue = colorConverter.to_rgb(color)
cdict = {'red': ((0, red, red), (1, red, red)),
'green': ((0, green, green), (1, green, green)),
'blue': ((0, blue, blue), (1, blue, blue)),
'alpha': ((0, 0, 0), (1, 1, 1))}
cmap = LinearSegmentedColormap(name, cdict)
_plt.cm.register_cmap(cmap=cmap)
_register_cmap_transparent('blacktransparent', 'black')
def virtualsource(xs, ns=None, type='point', *, ax=None):
"""Draw position/orientation of virtual source."""
xs = _np.asarray(xs)
ns = _np.asarray(ns)
if ax is None:
ax = _plt.gca()
if type == 'point':
vps = _plt.Circle(xs, .05, edgecolor='k', facecolor='k')
ax.add_artist(vps)
for n in range(1, 3):
vps = _plt.Circle(xs, .05+n*0.05, edgecolor='k', fill=False)
ax.add_artist(vps)
elif type == 'plane':
ns = 0.2 * ns
ax.arrow(xs[0], xs[1], ns[0], ns[1], head_width=0.05,
head_length=0.1, fc='k', ec='k')
def reference(xref, *, size=0.1, ax=None):
"""Draw reference/normalization point."""
xref = _np.asarray(xref)
if ax is None:
ax = _plt.gca()
ax.plot((xref[0]-size, xref[0]+size), (xref[1]-size, xref[1]+size), 'k-')
ax.plot((xref[0]-size, xref[0]+size), (xref[1]+size, xref[1]-size), 'k-')
def secondary_sources(x0, n0, *, size=0.05, grid=None):
"""Simple visualization of secondary source locations.
Parameters
----------
x0 : (N, 3) array_like
Loudspeaker positions.
n0 : (N, 3) or (3,) array_like
Normal vector(s) of loudspeakers.
size : float, optional
Size of loudspeakers in metres.
grid : triple of array_like, optional
If specified, only loudspeakers within the *grid* are shown.
"""
x0 = _np.asarray(x0)
n0 = _np.asarray(n0)
ax = _plt.gca()
# plot only secondary sources inside simulated area
if grid is not None:
x0, n0 = _visible_secondarysources(x0, n0, grid)
# plot symbols
for x00 in x0:
ss = _plt.Circle(x00[0:2], size, edgecolor='k', facecolor='k')
ax.add_artist(ss)
def loudspeakers(x0, n0, a0=0.5, *, size=0.08, show_numbers=False, grid=None,
ax=None):
"""Draw loudspeaker symbols at given locations and angles.
Parameters
----------
x0 : (N, 3) array_like
Loudspeaker positions.
n0 : (N, 3) or (3,) array_like
Normal vector(s) of loudspeakers.
a0 : float or (N,) array_like, optional
Weighting factor(s) of loudspeakers.
size : float, optional
Size of loudspeakers in metres.
show_numbers : bool, optional
If ``True``, loudspeaker numbers are shown.
grid : triple of array_like, optional
If specified, only loudspeakers within the *grid* are shown.
ax : Axes object, optional
The loudspeakers are plotted into this `matplotlib.axes.Axes`
object or -- if not specified -- into the current axes.
"""
x0 = _util.asarray_of_rows(x0)
n0 = _util.asarray_of_rows(n0)
a0 = _util.asarray_1d(a0).reshape(-1, 1)
# plot only secondary sources inside simulated area
if grid is not None:
x0, n0 = _visible_secondarysources(x0, n0, grid)
# normalized coordinates of loudspeaker symbol (see IEC 60617-9)
codes, coordinates = zip(*(
(_mpl.path.Path.MOVETO, [-0.62, 0.21]),
(_mpl.path.Path.LINETO, [-0.31, 0.21]),
(_mpl.path.Path.LINETO, [0, 0.5]),
(_mpl.path.Path.LINETO, [0, -0.5]),
(_mpl.path.Path.LINETO, [-0.31, -0.21]),
(_mpl.path.Path.LINETO, [-0.62, -0.21]),
(_mpl.path.Path.CLOSEPOLY, [0, 0]),
(_mpl.path.Path.MOVETO, [-0.31, 0.21]),
(_mpl.path.Path.LINETO, [-0.31, -0.21]),
))
coordinates = _np.column_stack([coordinates, _np.zeros(len(coordinates))])
coordinates *= size
patches = []
for x00, n00 in _util.broadcast_zip(x0, n0):
# rotate and translate coordinates
R = _util.rotation_matrix([1, 0, 0], n00)
transformed_coordinates = _np.inner(coordinates, R) + x00
patches.append(_mpl.patches.PathPatch(_mpl.path.Path(
transformed_coordinates[:, :2], codes)))
# add collection of patches to current axis
p = _mpl.collections.PatchCollection(
patches, edgecolor='0', facecolor=_np.tile(1 - a0, 3))
if ax is None:
ax = _plt.gca()
ax.add_collection(p)
if show_numbers:
for idx, (x00, n00) in enumerate(_util.broadcast_zip(x0, n0)):
x, y = x00[:2] - 1.2 * size * n00[:2]
ax.text(x, y, idx + 1, horizontalalignment='center',
verticalalignment='center', clip_on=True)
def _visible_secondarysources(x0, n0, grid):
"""Determine secondary sources which lie within *grid*."""
x, y = _util.as_xyz_components(grid[:2])
idx = _np.where((x0[:, 0] > x.min()) & (x0[:, 0] < x.max()) &
(x0[:, 1] > y.min()) & (x0[:, 1] < y.max()))
idx = _np.squeeze(idx)
return x0[idx, :], n0[idx, :]
def amplitude(p, grid, *, xnorm=None, cmap='coolwarm_clip',
vmin=-2.0, vmax=2.0, xlabel=None, ylabel=None,
colorbar=True, colorbar_kwargs={}, ax=None, **kwargs):
"""Two-dimensional plot of sound field (real part).
Parameters
----------
p : array_like
Sound pressure values (or any other scalar quantity if you
like). If the values are complex, the imaginary part is
ignored.
Typically, *p* is two-dimensional with a shape of *(Ny, Nx)*,
*(Nz, Nx)* or *(Nz, Ny)*. This is the case if
`sfs.util.xyz_grid()` was used with a single number for *z*,
*y* or *x*, respectively.
However, *p* can also be three-dimensional with a shape of *(Ny,
Nx, 1)*, *(1, Nx, Nz)* or *(Ny, 1, Nz)*. This is the case if
:func:`numpy.meshgrid` was used with a scalar for *z*, *y* or
*x*, respectively (and of course with the default
``indexing='xy'``).
.. note:: If you want to plot a single slice of a pre-computed
"full" 3D sound field, make sure that the slice still
has three dimensions (including one singleton
dimension). This way, you can use the original *grid*
of the full volume without changes.
This works because the grid component corresponding to
the singleton dimension is simply ignored.
grid : triple or pair of numpy.ndarray
The grid that was used to calculate *p*, see
`sfs.util.xyz_grid()`. If *p* is two-dimensional, but
*grid* has 3 components, one of them must be scalar.
xnorm : array_like, optional
Coordinates of a point to which the sound field should be
normalized before plotting. If not specified, no normalization
is used. See `sfs.util.normalize()`.
Returns
-------
AxesImage
See :func:`matplotlib.pyplot.imshow`.
Other Parameters
----------------
xlabel, ylabel : str
Overwrite default x/y labels. Use ``xlabel=''`` and
``ylabel=''`` to remove x/y labels. The labels can be changed
afterwards with :func:`matplotlib.pyplot.xlabel` and
:func:`matplotlib.pyplot.ylabel`.
colorbar : bool, optional
If ``False``, no colorbar is created.
colorbar_kwargs : dict, optional
Further colorbar arguments, see `add_colorbar()`.
ax : Axes, optional
If given, the plot is created on *ax* instead of the current
axis (see :func:`matplotlib.pyplot.gca`).
cmap, vmin, vmax, **kwargs
All further parameters are forwarded to
:func:`matplotlib.pyplot.imshow`.
See Also
--------
sfs.plot2d.level
"""
p = _np.asarray(p)
grid = _util.as_xyz_components(grid)
# normalize sound field wrt xnorm
if xnorm is not None:
p = _util.normalize(p, grid, xnorm)
if p.ndim == 3:
if p.shape[2] == 1:
p = p[:, :, 0] # first axis: y; second axis: x
plotting_plane = 'xy'
elif p.shape[1] == 1:
p = p[:, 0, :].T # first axis: z; second axis: y
plotting_plane = 'yz'
elif p.shape[0] == 1:
p = p[0, :, :].T # first axis: z; second axis: x
plotting_plane = 'xz'
else:
raise ValueError("If p is 3D, one dimension must have length 1")
elif len(grid) == 3:
if grid[2].ndim == 0:
plotting_plane = 'xy'
elif grid[1].ndim == 0:
plotting_plane = 'xz'
elif grid[0].ndim == 0:
plotting_plane = 'yz'
else:
raise ValueError(
"If p is 2D and grid is 3D, one grid component must be scalar")
else:
# 2-dimensional case
plotting_plane = 'xy'
if plotting_plane == 'xy':
x, y = grid[[0, 1]]
elif plotting_plane == 'xz':
x, y = grid[[0, 2]]
elif plotting_plane == 'yz':
x, y = grid[[1, 2]]
dx = 0.5 * x.ptp() / p.shape[0]
dy = 0.5 * y.ptp() / p.shape[1]
if ax is None:
ax = _plt.gca()
# see https://github.com/matplotlib/matplotlib/issues/10567
if _mpl.__version__.startswith('2.1.'):
p = _np.clip(p, -1e15, 1e15) # clip to float64 range
im = ax.imshow(_np.real(p), cmap=cmap, origin='lower',
extent=[x.min()-dx, x.max()+dx, y.min()-dy, y.max()+dy],
vmax=vmax, vmin=vmin, **kwargs)
if xlabel is None:
xlabel = plotting_plane[0] + ' / m'
if ylabel is None:
ylabel = plotting_plane[1] + ' / m'
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if colorbar:
add_colorbar(im, **colorbar_kwargs)
return im
def level(p, grid, *, xnorm=None, power=False, cmap=None, vmax=3, vmin=-50,
**kwargs):
"""Two-dimensional plot of level (dB) of sound field.
Takes the same parameters as `sfs.plot2d.amplitude()`.
Other Parameters
----------------
power : bool, optional
See `sfs.util.db()`.
"""
# normalize before converting to dB!
if xnorm is not None:
p = _util.normalize(p, grid, xnorm)
L = _util.db(p, power=power)
return amplitude(L, grid=grid, xnorm=None, cmap=cmap,
vmax=vmax, vmin=vmin, **kwargs)
def particles(x, *, trim=None, ax=None, xlabel='x (m)', ylabel='y (m)',
edgecolors=None, marker='.', s=15, **kwargs):
"""Plot particle positions as scatter plot.
Parameters
----------
x : triple or pair of array_like
x, y and optionally z components of particle positions. The z
components are ignored.
If the values are complex, the imaginary parts are ignored.
Returns
-------
Scatter
See :func:`matplotlib.pyplot.scatter`.
Other Parameters
----------------
trim : array of float, optional
xmin, xmax, ymin, ymax limits for which the particles are plotted.
ax : Axes, optional
If given, the plot is created on *ax* instead of the current
axis (see :func:`matplotlib.pyplot.gca`).
xlabel, ylabel : str
Overwrite default x/y labels. Use ``xlabel=''`` and
``ylabel=''`` to remove x/y labels. The labels can be changed
afterwards with :func:`matplotlib.pyplot.xlabel` and
:func:`matplotlib.pyplot.ylabel`.
edgecolors, marker, s, **kwargs
All further parameters are forwarded to
:func:`matplotlib.pyplot.scatter`.
"""
XX, YY = [_np.real(c) for c in x[:2]]
if trim is not None:
xmin, xmax, ymin, ymax = trim
idx = _np.where((XX > xmin) & (XX < xmax) & (YY > ymin) & (YY < ymax))
XX = XX[idx]
YY = YY[idx]
if ax is None:
ax = _plt.gca()
if xlabel:
ax.set_xlabel(xlabel)
if ylabel:
ax.set_ylabel(ylabel)
return ax.scatter(XX, YY, edgecolors=edgecolors, marker=marker, s=s,
**kwargs)
def vectors(v, grid, *, cmap='blacktransparent', headlength=3,
headaxislength=2.5, ax=None, clim=None, **kwargs):
"""Plot a vector field in the xy plane.
Parameters
----------
v : triple or pair of array_like
x, y and optionally z components of vector field. The z
components are ignored.
If the values are complex, the imaginary parts are ignored.
grid : triple or pair of array_like
The grid that was used to calculate *v*, see
`sfs.util.xyz_grid()`. Any z components are ignored.
Returns
-------
Quiver
See :func:`matplotlib.pyplot.quiver`.
Other Parameters
----------------
ax : Axes, optional
If given, the plot is created on *ax* instead of the current
axis (see :func:`matplotlib.pyplot.gca`).
clim : pair of float, optional
Limits for the scaling of arrow colors.
See :func:`matplotlib.pyplot.quiver`.
cmap, headlength, headaxislength, **kwargs
All further parameters are forwarded to
:func:`matplotlib.pyplot.quiver`.
"""
v = _util.as_xyz_components(v[:2]).apply(_np.real)
X, Y = _util.as_xyz_components(grid[:2])
speed = _np.linalg.norm(v)
with _np.errstate(invalid='ignore'):
U, V = v.apply(_np.true_divide, speed)
if ax is None:
ax = _plt.gca()
if clim is None:
v_ref = 1 / (_default.rho0 * _default.c) # reference particle velocity
clim = 0, 2 * v_ref
return ax.quiver(X, Y, U, V, speed, cmap=cmap, pivot='mid', units='xy',
angles='xy', headlength=headlength,
headaxislength=headaxislength, clim=clim, **kwargs)
def add_colorbar(im, *, aspect=20, pad=0.5, **kwargs):
r"""Add a vertical color bar to a plot.
Parameters
----------
im : ScalarMappable
The output of `sfs.plot2d.amplitude()`, `sfs.plot2d.level()` or any
other `matplotlib.cm.ScalarMappable`.
aspect : float, optional
Aspect ratio of the colorbar. Strictly speaking, since the
colorbar is vertical, it's actually the inverse of the aspect
ratio.
pad : float, optional
Space between image plot and colorbar, as a fraction of the
width of the colorbar.
.. note:: The *pad* argument of
:meth:`matplotlib.figure.Figure.colorbar` has a
slightly different meaning ("fraction of original
axes")!
\**kwargs
All further arguments are forwarded to
:meth:`matplotlib.figure.Figure.colorbar`.
See Also
--------
matplotlib.pyplot.colorbar
"""
ax = im.axes
divider = _axes_grid1.make_axes_locatable(ax)
width = _axes_grid1.axes_size.AxesY(ax, aspect=1/aspect)
pad = _axes_grid1.axes_size.Fraction(pad, width)
current_ax = _plt.gca()
cax = divider.append_axes("right", size=width, pad=pad)
_plt.sca(current_ax)
return ax.figure.colorbar(im, cax=cax, orientation='vertical', **kwargs)
| {
"repo_name": "sfstoolbox/sfs-python",
"path": "sfs/plot2d.py",
"copies": "1",
"size": "16320",
"license": "mit",
"hash": -32249871491684236,
"line_mean": 33.8717948718,
"line_max": 79,
"alpha_frac": 0.5824754902,
"autogenerated": false,
"ratio": 3.4503171247357294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9530488929038294,
"avg_score": 0.00046073717948717953,
"num_lines": 468
} |
## 2. Drawing lines ##
import matplotlib.pyplot as plt
import numpy as np
x = [0, 1, 2, 3, 4, 5]
# Going by our formula, every y value at a position is the same as the x-value in the same position.
# We could write y = x, but let's write them all out to make this more clear.
y = [0, 1, 2, 3, 4, 5]
# As you can see, this is a straight line that passes through the points (0,0), (1,1), (2,2), and so on.
plt.plot(x, y)
plt.show()
# Let's try a slightly more ambitious line.
# What if we did y = x + 1?
# We'll make x an array now, so we can add 1 to every element more easily.
x = np.asarray([0, 1, 2, 3, 4, 5])
y = x + 1
# y is the same as x, but every element has 1 added to it.
print(y)
# This plot passes through (0,1), (1,2), and so on.
# It's the same line as before, but shifted up 1 on the y-axis.
plt.plot(x, y)
plt.show()
# By adding 1 to the line, we moved what's called the y-intercept -- where the line intersects with the y-axis.
# Moving the intercept can shift the whole line up (or down when we subtract).
plt.plot(x,x-1)
plt.show()
plt.plot(x,x+10)
plt.show()
## 3. Working with slope ##
import matplotlib.pyplot as plt
import numpy as np
x = np.asarray([0, 1, 2, 3, 4, 5])
# Let's set the slope of the line to 2.
y = 2 * x
# See how this line is "steeper" than before? The larger the slope is, the steeper the line becomes.
# On the flipside, fractional slopes will create a "shallower" line.
# Negative slopes will create a line where y values decrease as x values increase.
plt.plot(x, y)
plt.show()
plt.plot(x,4*x)
plt.show()
plt.plot(x,0.5*x)
plt.show()
plt.plot(x, (-2*x))
plt.show()
## 4. Starting out with linear regression ##
# The wine quality data is loaded into wine_quality
from numpy import cov
slope_density = cov(wine_quality['density'], wine_quality['quality'])[0, 1] / wine_quality['density'].var()
## 5. Finishing linear regression ##
from numpy import cov
# This function will take in two columns of data, and return the slope of the linear regression line.
def calc_slope(x, y):
return cov(x, y)[0, 1] / x.var()
intercept_density = wine_quality["quality"].mean() - (calc_slope(wine_quality["density"], wine_quality["quality"]) * wine_quality["density"].mean())
## 6. Making predictions ##
from numpy import cov
def calc_slope(x, y):
return cov(x, y)[0, 1] / x.var()
# Calculate the intercept given the x column, y column, and the slope
def calc_intercept(x, y, slope):
return y.mean() - (slope * x.mean())
def predicted(x):
return x * slope + intercept
slope = calc_slope(wine_quality["density"], wine_quality["quality"])
intercept = calc_intercept(wine_quality["density"], wine_quality["quality"], slope)
predicted_quality = wine_quality["density"].apply(predicted)
## 7. Finding error ##
from scipy.stats import linregress
# We've seen the r_value before -- we'll get to what p_value and stderr_slope are soon -- for now, don't worry about them.
slope, intercept, r_value, p_value, stderr_slope = linregress(wine_quality["density"], wine_quality["quality"])
# As you can see, these are the same values we calculated (except for slight rounding differences)
print(slope)
print(intercept)
def pred(x):
return x*slope + intercept
pred_d = wine_quality['density'].apply(pred)
rss = 0
for i in range(len(pred_d)):
rss += (wine_quality['quality'][i] - pred_d[i])**2
## 8. Standard error ##
from scipy.stats import linregress
import numpy as np
# We can do our linear regression
# Sadly, the stderr_slope isn't the standard error, but it is the standard error of the slope fitting only
# We'll need to calculate the standard error of the equation ourselves
slope, intercept, r_value, p_value, stderr_slope = linregress(wine_quality["density"], wine_quality["quality"])
predicted_y = np.asarray([slope * x + intercept for x in wine_quality["density"]])
residuals = (wine_quality["quality"] - predicted_y) ** 2
rss = sum(residuals)
stderr = (rss / (len(wine_quality["quality"]) - 2)) ** .5
def within_percentage(y, predicted_y, stderr, error_count):
within = stderr * error_count
differences = abs(predicted_y - y)
lower_differences = [d for d in differences if d <= within]
within_count = len(lower_differences)
return within_count / len(y)
within_one = within_percentage(wine_quality["quality"], predicted_y, stderr, 1)
within_two = within_percentage(wine_quality["quality"], predicted_y, stderr, 2)
within_three = within_percentage(wine_quality["quality"], predicted_y, stderr, 3) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Probability Statistics Beginner/Linear regression-15.py",
"copies": "1",
"size": "4449",
"license": "mit",
"hash": -6005366155877097000,
"line_mean": 30.7857142857,
"line_max": 148,
"alpha_frac": 0.6970105642,
"autogenerated": false,
"ratio": 3.119915848527349,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43169264127273493,
"avg_score": null,
"num_lines": null
} |
"""2D Refocusing of an HL60 cell
The data show a live HL60 cell imaged with quadriwave lateral shearing
interferometry (SID4Bio, Phasics S.A., France).
The diameter of the cell is about 20µm.
"""
import matplotlib.pylab as plt
import numpy as np
import unwrap
import nrefocus
from example_helper import load_cell
# load initial cell
cell1 = load_cell("HL60_field.zip")
# refocus to two different positions
cell2 = nrefocus.refocus(cell1, 15, 1, 1) # forward
cell3 = nrefocus.refocus(cell1, -15, 1, 1) # backward
# amplitude range
vmina = np.min(np.abs(cell1))
vmaxa = np.max(np.abs(cell1))
ampkw = {"cmap": plt.get_cmap("gray"),
"vmin": vmina,
"vmax": vmaxa}
# phase range
cell1p = unwrap.unwrap(np.angle(cell1))
cell2p = unwrap.unwrap(np.angle(cell2))
cell3p = unwrap.unwrap(np.angle(cell3))
vminp = np.min(cell1p)
vmaxp = np.max(cell1p)
phakw = {"cmap": plt.get_cmap("coolwarm"),
"vmin": vminp,
"vmax": vmaxp}
# plots
fig, axes = plt.subplots(2, 3, figsize=(8, 4.5))
axes = axes.flatten()
for ax in axes:
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
# titles
axes[0].set_title("focused backward")
axes[1].set_title("original image")
axes[2].set_title("focused forward")
# data
mapamp = axes[0].imshow(np.abs(cell3), **ampkw)
axes[1].imshow(np.abs(cell1), **ampkw)
axes[2].imshow(np.abs(cell2), **ampkw)
mappha = axes[3].imshow(cell3p, **phakw)
axes[4].imshow(cell1p, **phakw)
axes[5].imshow(cell2p, **phakw)
# colobars
cbkwargs = {"fraction": 0.045}
plt.colorbar(mapamp, ax=axes[0], label="amplitude [a.u.]", **cbkwargs)
plt.colorbar(mapamp, ax=axes[1], label="amplitude [a.u.]", **cbkwargs)
plt.colorbar(mapamp, ax=axes[2], label="amplitude [a.u.]", **cbkwargs)
plt.colorbar(mappha, ax=axes[3], label="phase [rad]", **cbkwargs)
plt.colorbar(mappha, ax=axes[4], label="phase [rad]", **cbkwargs)
plt.colorbar(mappha, ax=axes[5], label="phase [rad]", **cbkwargs)
plt.tight_layout()
plt.show()
| {
"repo_name": "RI-imaging/nrefocus",
"path": "examples/refocus_cell.py",
"copies": "2",
"size": "1999",
"license": "bsd-3-clause",
"hash": -6772581231595450000,
"line_mean": 27.9565217391,
"line_max": 70,
"alpha_frac": 0.6861861862,
"autogenerated": false,
"ratio": 2.6289473684210525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9315133554621053,
"avg_score": 0,
"num_lines": 69
} |
# 2D Semantic Segmentation
# License John Lambert
# Stanford University
from scipy.misc import imread, imresize
import numpy as np
#from '/Applications/MATLAB_R2016a.app/extern/engines/python/build/lib/matlab/engine'
import matlab.engine
import os.path
import time
import tensorflow as tf
import os, sys
import csv
import os.path
from sys import argv
from os.path import exists
import matplotlib.pyplot as plt
from datetime import datetime
import scipy.io
import h5py
import matplotlib
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def getImages( maxZSlice):
#maxZSlice = 39
dir_path = 'June_6_2011_415-5_Series008/'
images = np.zeros( (maxZSlice , 1024, 1024, 3) )
for idx in xrange( maxZSlice ):
zSliceNumber = str( idx )
if len( zSliceNumber ) == 1:
zSliceNumber = '0' + zSliceNumber
imageName = 'June_6_2011__415-5__Series008_z0%s_ch00.tif' % ( zSliceNumber )
if os.path.isfile( os.path.join(dir_path, imageName ) ):
imagePath = os.path.join( dir_path, imageName )
testIm = imread( imagePath )
#print testIm.shape
images[idx] = imread( imagePath )
images = images.astype( np.float32 )
return images
def getDendriticSpineData():
print 'Data will be retrieved:'
Z_SLICE_DEPTH = 39
IMAGE_SIZE = (1024,1024,3)
NUM_CLASSES = 2
#NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 25
#NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 25 # I DUNNO
#eng = matlab.engine.start_matlab()
#labelsObject = eng.GenerateDendriticSpineVoxels() # should specify folders here, etc.
# for property, value in vars(labelsObject).iteritems():
# print property #, ": ", value
#labelsPythonArray = getattr( labelsObject, '_data' )
#python_type = getattr( labelsObject, '_python_type' )
#sizeOfArray = getattr( labelsObject, '_size' )
#startOfArray = getattr( labelsObject, '_start' )
#labels = np.asarray( labelsPythonArray , dtype=np.float64 )
#labels = np.frombuffer( labelsPythonArray, dtype=np.float64 )
#print 'Buffered Labels shape', labels.shape
#labels = np.reshape( labels, (1024,1024, Z_SLICE_DEPTH )) # labels is of shape 1024 x 1024 x 47
f = h5py.File('/Users/johnlambert/Documents/Stanford_2015-2016/SpringQuarter_2015-2016/CS_231A/FinalProject/dendriticSpines.mat','r')
data = f.get('voxels') # Get a certain dataset
h5pyLabels = np.array(data)
print 'h5pylabels shape', h5pyLabels.shape
#rint 'Previous Labels shape', labels.shape
h5pyLabels = np.transpose( h5pyLabels , ( 0, 2 , 1) ) # or should it be ( 2 , 1, 0 ) ? IS X OR Y FIRST?
print 'h5pylabels shape', h5pyLabels.shape
#labelsToPlot = labels[:,:,8]
# labelsToPlot = h5pyLabels[:,:,8]
# plt.subplot(1, 1, 1)
# plt.imshow( labelsToPlot.astype('uint8') , cmap='Greys_r')
# plt.axis('off')
# plt.gcf().set_size_inches(10, 10)
# plt.title( 'Segmentation GT')
# plt.show()
labels = h5pyLabels
labels = np.reshape( labels, (Z_SLICE_DEPTH, 1024,1024, 1) ) # MAX_POOL needs batch, height, width, channels
print 'New Labels shape', labels.shape
images = getImages( Z_SLICE_DEPTH )
labels = 1 - labels
return images, labels
#_activation_summary(h_conv1)
# pool1
#bias = tf.nn.bias_add(conv, biases)
# norm1
#norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
# conv2
#biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
#_activation_summary(conv2)
# norm2
#norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
# pool2
# two dimensions are the patch size, the next is the number of input channels,
# and the last is the number of output channels.
def inference( images ):
print 'Running network forward, performing inference'
with tf.variable_scope('conv1') as scope:
weight_conv1 = weight_variable([7, 7, 3, 64]) # I use 32 Filters of size (3 x 3). Later try ( 5 x 5 )
conv1_output = tf.nn.conv2d( images, weight_conv1 , strides=[1, 1, 1, 1], padding='SAME')
bias_conv1 = bias_variable([64])
conv1_output_wBias = tf.nn.bias_add( conv1_output , bias_conv1 )
conv1_activation = tf.nn.relu( conv1_output_wBias )
pool1 = tf.nn.max_pool( conv1_activation , ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
with tf.variable_scope('conv2') as scope:
weight_conv2 = weight_variable([7, 7, 64, 2]) # kernel_size: 2 , stride: 2 #stddev=1e-4, wd=0.0)
conv2_output = tf.nn.conv2d( pool1 , weight_conv2, strides=[1, 1, 1, 1], padding='SAME')
bias_conv2 = bias_variable([2])
conv2_output_wBias = tf.nn.bias_add( conv2_output, bias_conv2 )
conv2_activation = tf.nn.relu( conv2_output_wBias )
pool2 = tf.nn.max_pool( conv2_activation , ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
return pool2
# try putting on fc layer at the end?
# or end with 2 filters as 2 class buckets?
# For sparse_softmax_cross_entropy_with_logits, labels must have the shape [batch_size]
# and the dtype int64. Each label is an int in range [0, num_classes).
def computeLoss(logits, labels, batch_size ):
batch_size = 256 * 256
print 'computing loss'
# NEED A 4-D TENSOR IN ORDER TO DO MAX-POOLING. (Z_SLICE_DEPTH, 1024,1024, 1)
# `logits` must have the shape `[batch_size, num_classes]` and dtype `float32` or `float64`.
logits = tf.reshape( logits, [batch_size, -1])
labels = tf.cast(labels, tf.float32)
firstPooledLabels = tf.nn.max_pool( labels, ksize=[1,4,4,1], strides=[1,4,4,1], padding='SAME') # equivalent to 1,4,4,1 ?
#pooledLabels2 = tf.nn.max_pool( pooledLabels1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
# Sparse means we write the index of the class ( not one-hot)
# Calculate the average cross entropy loss across the batch.
# `labels` must have the shape `[batch_size]` and dtype `int32` or `int64`
pooledLabels = tf.reshape( firstPooledLabels, [ batch_size ] ) # [ -1 ]
pooledLabels = tf.cast( pooledLabels , tf.int64)
# USE WEIGHTS
# tf.nn.weighted_cross_entropy_with_logits(logits, targets, pos_weight, name=None)
predictions = tf.nn.softmax( tf.cast( logits, 'float64') ) # logits is float32, float64. 2-D with shape [batch_size, num_classes].
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits( logits, pooledLabels, name='cross_entropy_per_pixel_example')
cross_entropy_sum = tf.reduce_sum(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_sum)
#cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
#tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss'), predictions, firstPooledLabels
# def computeLoss( logits, labels, batch_size ): # weighted version
# batch_size = 256 * 256
# print 'computing loss'
# #NEED A 4-D TENSOR IN ORDER TO DO MAX-POOLING. (Z_SLICE_DEPTH, 1024,1024, 1)
# #`logits` must have the shape `[batch_size, num_classes]` and dtype `float32` or `float64`.
# logits = tf.reshape( logits, [batch_size, -1])
# labels = tf.cast(labels, tf.float32)
# firstPooledLabels = tf.nn.max_pool( labels, ksize=[1,4,4,1], strides=[1,4,4,1], padding='SAME') # equivalent to 1,4,4,1 ?
# #pooledLabels2 = tf.nn.max_pool( pooledLabels1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
# #Sparse means we write the index of the class ( not one-hot)
# #Calculate the average cross entropy loss across the batch.
# #`labels` must have the shape `[batch_size]` and dtype `int32` or `int64`
# pooledLabels = tf.reshape( firstPooledLabels, [ batch_size ] ) # [ -1 ]
# pooledLabels = tf.cast( pooledLabels , tf.int64)
# #USE WEIGHTS
# predictions = tf.nn.softmax( tf.cast( logits, 'float64') ) # logits is float32, float64. 2-D with shape [batch_size, num_classes].
# num_labels = 2
# label_batch = tf.cast( pooledLabels , tf.int32 )
# sparse_labels = tf.reshape( label_batch, [-1, 1])
# derived_size = tf.shape(label_batch)[0]
# indices = tf.reshape(tf.range(0, derived_size, 1), [-1, 1])
# concated = tf.concat(1, [indices, sparse_labels])
# outshape = tf.pack([derived_size, num_labels])
# oneHotLabels = tf.sparse_to_dense( concated, outshape, 1.0, 0.0)
# cross_entropy = tf.nn.weighted_cross_entropy_with_logits( logits, oneHotLabels, 200, name='weighted_CE_per_pixel') # pos_weight = 2
# cross_entropy_sum = tf.reduce_sum(cross_entropy, name='cross_entropy')
# tf.add_to_collection('losses', cross_entropy_sum)
# # cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
# # tf.add_to_collection('losses', cross_entropy_mean)
# # The total loss is defined as the cross entropy loss plus all of the weight
# # decay terms (L2 loss).
# return tf.add_n(tf.get_collection('losses'), name='total_loss'), predictions, firstPooledLabels
def trainNetwork( total_loss, global_step ):
print 'Training Network'
#num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
lr = 0.000001
train_op = tf.train.GradientDescentOptimizer( lr ).minimize( total_loss )
return train_op
def sampleMinibatch( images, labels, batch_size=1 ):
full_data_size = images.shape[0]
mask = np.random.choice( full_data_size, batch_size )
mask = [9] # for now
print 'ImageSlice# ', mask
minibatchImages = images[ mask ]
minibatchLabels = labels[ mask ]
return (minibatchImages, minibatchLabels)
def test_ConvNet():
print 'Begin Test of ConvNet'
with tf.Graph().as_default():
train_dir = '/Users/johnlambert/Documents/Stanford_2015-2016/SpringQuarter_2015-2016/CS_231A/FinalProject'
batch_size = 1
max_epochs = 1000
#with tf.Graph().as_default():
global_step = tf.Variable( 0 , trainable = False )
images, labels = getDendriticSpineData()
# print images.shape
# print images.dtype
# print labels.shape
# print labels.dtype
inputX_placeholder = tf.placeholder( tf.float32, shape=[ batch_size, 1024, 1024, 3 ], name="imagesForSeg" )
gtY_placeholder = tf.placeholder( tf.float64, shape=[ batch_size, 1024, 1024, 1 ], name="labelsForSeg")
#minibatch = sampleMinibatch( images, labels, 1 )
#minibatchImages, minibatchLabels = minibatch # this is one image
logits = inference( inputX_placeholder )
loss, predictions, pooledLabels = computeLoss( logits, gtY_placeholder, batch_size )
train_op = trainNetwork( loss, global_step )
#correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
#accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver(tf.all_variables())
init = tf.initialize_all_variables()
with tf.Session() as session:
#session = tf.InteractiveSession()
session.run(init)
summary_writer = tf.train.SummaryWriter( train_dir, session.graph )
for step in xrange( 1, max_epochs, 1 ):
start_time = time.time()
minibatch = sampleMinibatch( images, labels, 1 )
minibatchImages, minibatchLabels = minibatch # this is one image
#rescale from -1 to 1
minibatchImages -= 127
minibatchImages /= 255.0
print 'minibatchImages.shape',minibatchImages.shape # ( should be 1 x 1024 x 1024 x 1)
print 'Sum of all of the labels at full size', np.sum( minibatchLabels )
#cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
feed = { inputX_placeholder: minibatchImages, gtY_placeholder: minibatchLabels }
loss_value , y_pred, logits_vals, pooledLabelsVals , _ = session.run( [ loss, predictions, logits, pooledLabels, train_op ], feed_dict=feed)
print 'Sum of all of the labels at 1/4 by 1/4 size', np.sum( pooledLabelsVals )
print 'y_pred.shape',y_pred.shape
# print 'logits_vals.shape', logits_vals.shape
# print
# summary_str = sess.run(summary_op)
# summary_writer.add_summary(summary_str, step)
if step%10 == 0: # or %100
imagesToPlot = np.reshape( minibatchImages , (1024,1024,3) )
#plt.subplot(1, 1, 1)
plt.imshow( imagesToPlot.astype('uint8') ) #.transpose(2,1,0) )
#plt.axis('off')
#plt.gcf().set_size_inches(10, 10)
plt.rcParams["axes.titlesize"] = 8
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
plt.gca().axis('off')
plt.axis('off')
plt.title( 'Segmentation Input Image')
plt.savefig('input_image%d.png' % (step) )
#plt.show()
print 'minibatchLabels.shape',minibatchLabels.shape
labelsToPlot = np.reshape( minibatchLabels , (1024,1024) )
#plt.subplot(1, 1, 1)
plt.imshow( labelsToPlot.astype('uint8') , cmap='Greys_r')
plt.axis('off')
plt.gcf().set_size_inches(10, 10)
plt.title( 'Segmentation GT')
plt.savefig('gt_label_Seg%d.png' % (step) )
#plt.show()
y_pred = np.argmax( y_pred, axis=1)
y_pred = np.reshape( y_pred, (256,256) )
#plt.subplot(1, 1, 1)
plt.imshow( y_pred.astype('uint8') , cmap='Greys_r')
plt.axis('off')
plt.gcf().set_size_inches(10, 10)
plt.title( 'Segmentation prediction')
plt.savefig('segmentation_prediction%d.png' % (step) )
pooledLabelsVals = np.reshape( pooledLabelsVals, (256,256) )
#plt.subplot(1, 1, 1)
plt.imshow( pooledLabelsVals.astype('uint8') , cmap='Greys_r')
plt.axis('off')
plt.gcf().set_size_inches(10, 10)
plt.title( 'gt downsampled')
plt.savefig('gt_Downsampled_max_pool_%d.png' % (step) )
#plt.show()
#if i%100 == 0:
# train_accuracy = accuracy.eval(feed_dict={
# x:batch[0], y_: batch[1], keep_prob: 1.0})
# print("step %d, training accuracy %g"%(i, train_accuracy))
# print("test accuracy %g"%accuracy.eval(feed_dict={
# x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
#if step % 10 == 0:
num_examples_per_step = 1 #FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f ' 'sec/batch)')
print (format_str % (datetime.now(), step, loss_value, examples_per_sec, sec_per_batch))
# if step % 100 == 0:
# summary_str = session.run(summary_op)
# summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == max_epochs: #FLAGS.max_steps:
checkpoint_path = os.path.join( train_dir, 'model_5005epochs.ckpt')
saver.save(session, checkpoint_path, global_step=step)
# cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
# train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# sess.run(tf.initialize_all_variables())
# for i in range(20000):
# batch = mnist.train.next_batch(50)
# if i%100 == 0:
# train_accuracy = accuracy.eval(feed_dict={
# x:batch[0], y_: batch[1], keep_prob: 1.0})
# print("step %d, training accuracy %g"%(i, train_accuracy))
# train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
# print("test accuracy %g"%accuracy.eval(feed_dict={
# x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
if __name__ == "__main__":
test_ConvNet() | {
"repo_name": "johnwlambert/DendriticSpineSegmentationAndDetection",
"path": "2DFullyConvSegmentationConvNet_v4WeightedSigmoidCE.py",
"copies": "1",
"size": "15564",
"license": "mit",
"hash": -1183719267047980300,
"line_mean": 39.0128534704,
"line_max": 145,
"alpha_frac": 0.6786815729,
"autogenerated": false,
"ratio": 2.8134490238611716,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39921305967611714,
"avg_score": null,
"num_lines": null
} |
"""2D slow integration"""
import numpy as np
def integrate_2d(uSin, angles, res, nm, lD=0, coords=None,
count=None, max_count=None, verbose=0):
r"""(slow) 2D reconstruction with the Fourier diffraction theorem
Two-dimensional diffraction tomography reconstruction
algorithm for scattering of a plane wave
:math:`u_0(\mathbf{r}) = u_0(x,z)`
by a dielectric object with refractive index
:math:`n(x,z)`.
This function implements the solution by summation in real
space, which is extremely slow.
Parameters
----------
uSin: (A,N) ndarray
Two-dimensional sinogram of line recordings
:math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})`
divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
measured at the detector.
angles: (A,) ndarray
Angular positions :math:`\phi_j` of `uSin` in radians.
res: float
Vacuum wavelength of the light :math:`\lambda` in pixels.
nm: float
Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
lD: float
Distance from center of rotation to detector plane
:math:`l_\mathrm{D}` in pixels.
coords: None or (2,M) ndarray]
Computes only the output image at these coordinates. This
keyword is reserved for future versions and is not
implemented yet.
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
verbose: int
Increment to increase verbosity.
Returns
-------
f: ndarray of shape (N,N), complex if `onlyreal` is `False`
Reconstructed object function :math:`f(\mathbf{r})` as defined
by the Helmholtz equation.
:math:`f(x,z) =
k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
See Also
--------
backpropagate_2d: implementation by backprojection
fourier_map_2d: implementation by Fourier interpolation
odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
to refractive index :math:`n(\mathbf{r})`
Notes
-----
This method is not meant for production use. The computation time
is very long and the reconstruction quality is bad. This function
is included in the package, because of its educational value,
exemplifying the backpropagation algorithm.
Do not use the parameter `lD` in combination with the Rytov
approximation - the propagation is not correctly described.
Instead, numerically refocus the sinogram prior to converting
it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
with a numerical focusing algorithm (available in the Python
package :py:mod:`nrefocus`).
"""
if coords is None:
lx = uSin.shape[1]
x = np.linspace(-lx/2, lx/2, lx, endpoint=False)
xv, yv = np.meshgrid(x, x)
coords = np.zeros((2, lx**2))
coords[0, :] = xv.flat
coords[1, :] = yv.flat
if max_count is not None:
max_count.value += coords.shape[1] + 1
# Cut-Off frequency
km = (2 * np.pi * nm) / res
# Fourier transform of all uB's
# In the script we used the unitary angular frequency (uaf) Fourier
# Transform. The discrete Fourier transform is equivalent to the
# unitary ordinary frequency (uof) Fourier transform.
#
# uof: f₁(ξ) = int f(x) exp(-2πi xξ)
#
# uaf: f₃(ω) = (2π)^(-n/2) int f(x) exp(-i ωx)
#
# f₁(ω/(2π)) = (2π)^(n/2) f₃(ω)
# ω = 2πξ
#
# We have a one-dimensional (n=1) Fourier transform and UB in the
# script is equivalent to f₃(ω). Because we are working with the
# uaf, we divide by sqrt(2π) after computing the fft with the uof.
#
# We calculate the fourier transform of uB further below. This is
# necessary for memory control.
# Corresponding sample frequencies
fx = np.fft.fftfreq(uSin[0].shape[0]) # 1D array
# kx is a 1D array.
kx = 2 * np.pi * fx
# Undersampling/oversampling?
# Determine if the resolution of the image is too low by looking
# at the maximum value for kx. This is no comparison between
# Nyquist and Rayleigh frequency.
if np.max(kx**2) <= 2 * km**2:
# Detector is not set up properly. Higher resolution
# can be achieved.
if verbose:
print("......Measurement data is undersampled.")
else:
if verbose:
print("......Measurement data is oversampled.")
raise NotImplementedError("Oversampled data not yet supported." +
" Please rescale input data")
# Differentials for integral
dphi0 = 2 * np.pi / len(angles)
dkx = kx[1] - kx[0]
# We will later multiply with phi0.
# Make sure we are using correct shapes
kx = kx.reshape(1, kx.shape[0])
# Low-pass filter:
# less-than-or-equal would give us zero division error.
filter_klp = (kx**2 < km**2)
# a0 will be multiplied with kx
# a0 = np.atleast_1d(a0)
# a0 = a0.reshape(1,-1)
# Create the integrand
# Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ]
# - double coverage factor 1/2 already included
# - unitary angular frequency to unitary ordinary frequency
# conversion performed in calculation of UB=FT(uB).
#
# f(r) = -i kₘ / ((2π)^(3/2) a₀) (prefactor)
# * iint dϕ₀ dkx (prefactor)
# * |kx| (prefactor)
# * exp(-i kₘ M lD ) (prefactor)
# * UBϕ₀(kx) (dependent on ϕ₀)
# * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r)
#
# (r and s₀ are vectors. In the last term we perform the dot-product)
#
# kₘM = sqrt( kₘ² - kx² )
# t⊥ = ( cos(ϕ₀), sin(ϕ₀) )
# s₀ = ( -sin(ϕ₀), cos(ϕ₀) )
#
#
# everything that is not dependent on phi0:
#
# Filter M so there are no nans from the root
M = 1. / km * np.sqrt((km**2 - kx**2) * filter_klp)
prefactor = -1j * km / ((2 * np.pi)**(3. / 2))
prefactor *= dphi0 * dkx
# Also filter the prefactor, so nothing outside the required
# low-pass contributes to the sum.
prefactor *= np.abs(kx) * filter_klp
# new in version 0.1.4:
# We multiply by the factor (M-1) instead of just (M)
# to take into account that we have a scattered
# wave that is normalized by u0.
prefactor *= np.exp(-1j * km * (M-1) * lD)
# Initiate function f
f = np.zeros(len(coords[0]), dtype=np.complex128)
lenf = len(f)
lenu0 = len(uSin[0]) # lenu0 = len(kx[0])
# Initiate vector r that corresponds to calculating a value of f.
r = np.zeros((2, 1, 1))
# Everything is normal.
# Get the angles ϕ₀.
phi0 = angles.reshape(-1, 1)
# Compute the Fourier transform of uB.
# This is true: np.fft.fft(UB)[0] == np.fft.fft(UB[0])
# because axis -1 is always used.
#
#
# Furthermore, the notation in our optical tomography script for
# a wave propagating to the right is:
#
# u0(x) = exp(ikx)
#
# However, in physics we usually use the other sign convention:
#
# u0(x) = exp(-ikx)
#
# In order to be consistent with programs like Meep or our scattering
# script for a dielectric cylinder, we want to use the latter sign
# convention.
# This is not a big problem. We only need to multiply the imaginary
# part of the scattered wave by -1.
UB = np.fft.fft(np.fft.ifftshift(uSin, axes=-1)) / np.sqrt(2 * np.pi)
UBi = UB.reshape(len(angles), lenu0)
if count is not None:
count.value += 1
for j in range(lenf):
# Get r (We compute f(r) in this for-loop)
r[0][:] = coords[0, j] # x
r[1][:] = coords[1, j] # y
# Integrand changes with r, so we have to create a new
# array:
integrand = prefactor * UBi
# We save memory by directly applying the following to
# the integrand:
#
# Vector along which we measured
# s0 = np.zeros((2, phi0.shape[0], kx.shape[0]))
# s0[0] = -np.sin(phi0)
# s0[1] = +np.cos(phi0)
# Vector perpendicular to s0
# t_perp_kx = np.zeros((2, phi0.shape[0], kx.shape[1]))
#
# t_perp_kx[0] = kx*np.cos(phi0)
# t_perp_kx[1] = kx*np.sin(phi0)
#
# term3 = np.exp(1j*np.sum(r*( t_perp_kx + (gamma-km)*s0 ), axis=0))
# integrand* = term3
#
# Reminder:
# f(r) = -i kₘ / ((2π)^(3/2) a₀) (prefactor)
# * iint dϕ₀ dkx (prefactor)
# * |kx| (prefactor)
# * exp(-i kₘ M lD ) (prefactor)
# * UB(kx) (dependent on ϕ₀)
# * exp( i (kx t⊥ + kₘ(M - 1) s₀) r ) (dependent on ϕ₀ and r)
#
# (r and s₀ are vectors. In the last term we perform the dot-product)
#
# kₘM = sqrt( kₘ² - kx² )
# t⊥ = ( cos(ϕ₀), sin(ϕ₀) )
# s₀ = ( -sin(ϕ₀), cos(ϕ₀) )
integrand *= np.exp(1j * (
r[0] * (kx * np.cos(phi0) - km * (M - 1) * np.sin(phi0)) +
r[1] * (kx * np.sin(phi0) + km * (M - 1) * np.cos(phi0))))
# Calculate the integral for the position r
# integrand.sort()
f[j] = np.sum(integrand)
# free memory
del integrand
if count is not None:
count.value += 1
return f.reshape(lx, lx)
| {
"repo_name": "paulmueller/ODTbrain",
"path": "odtbrain/_alg2d_int.py",
"copies": "2",
"size": "9866",
"license": "bsd-3-clause",
"hash": -4585641611240731000,
"line_mean": 34.9225092251,
"line_max": 77,
"alpha_frac": 0.5715459682,
"autogenerated": false,
"ratio": 3.140322580645161,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47118685488451617,
"avg_score": null,
"num_lines": null
} |
# 2D Vector
import math
class Vector(object):
@staticmethod
def add(v1, v2):
"""Adds two vectors and returns the product. """
return Vector(v1._x + v2._x, v1._y + v2._y)
@staticmethod
def sub(v1, v2):
"""Subtracts v2 from v1 and returns the product."""
return Vector(v1._x - v2._x, v1._y - v2._y)
@staticmethod
def project(v1, v2):
"""Projects one vector (v1) onto another (v2)"""
        # proj_v2(v1) = (v1 . v2 / |v2|^2) * v2
        return v2.clone().scale(v1.dot(v2) / v2.mag_squared())
def __init__(self, x=0.0, y=0.0):
"""Creates a new Vector instance."""
self._x, self._y = x, y
def set(self, x, y):
"""Sets the components of this vector."""
self._x, self._y = x, y
return self
def add(self, v):
"""Add a vector to this one."""
self._x += v._x
self._y += v._y
return self
def sub(self, v):
"""Subtracts a vector from this one."""
self._x -= v._x
self._y -= v._y
return self
def scale(self, f):
"""Scales this vector by a value."""
self._x *= f
self._y *= f
return self
def dot(self, v):
"""Computes the dot product between vectors."""
return self._x * v._x + self._y * v._y
def cross(self, v):
"""# Computes the cross product between vectors."""
return (self._x * v._y) - (self._y * v._x)
def mag(self):
"""Computes the magnitude (length)."""
return math.sqrt(self._x ** 2 + self._y ** 2)
def mag_squared(self):
"""Computes the squared magnitude (length)."""
return self._x ** 2 + self._y ** 2
def dist(self, v):
"""Computes the distance to another vector."""
dx = v._x - self._x
dy = v._y - self._y
return math.sqrt(dx * dx + dy * dy)
def dist_squared(self, v):
"""# Computes the squared distance to another vector."""
dx = v._x - self._x
dy = v._y - self._y
return dx * dx + dy * dy
def norm(self):
"""# Normalises the vector, making it a unit vector (of length 1)."""
m = math.sqrt(self._x ** 2 + self._y ** 2)
self._x /= m
self._y /= m
return self
def limit(self, limit):
"""# Limits the vector length to a given amount."""
m_sq = self._x ** 2 + self._y ** 2
if m_sq > limit**2:
m = math.sqrt(m_sq)
self._x /= m
self._y /= m
self._x *= limit
self._y *= limit
def copy(self, v):
"""# Copies components from another vector."""
self._x = v._x
self._y = v._y
return self
def clone(self):
"""# Clones this vector to a new identical one."""
return Vector(self._x, self._y)
def clear(self):
"""# Resets the vector to zero."""
self._x = 0.0
self._y = 0.0
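# Usage sketch (illustrative addition): exercises the instance API defined
# above; expected values follow from the implementation as written.
if __name__ == "__main__":
    a = Vector(3.0, 4.0)
    b = Vector(1.0, 2.0)
    print(a.mag())                 # 5.0
    print(a.dot(b))                # 11.0
    print(a.dist(b))               # sqrt(4 + 4), roughly 2.828
    print(a.clone().norm().mag())  # approximately 1.0
    c = a.clone().add(b)           # c is (4.0, 6.0); a itself is unchanged
    print(c.mag_squared())         # 52.0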
| {
"repo_name": "gregroper/Pycipia",
"path": "lwmath/Vector.py",
"copies": "1",
"size": "2920",
"license": "mit",
"hash": 3995369033671237000,
"line_mean": 26.2897196262,
"line_max": 77,
"alpha_frac": 0.4917808219,
"autogenerated": false,
"ratio": 3.5436893203883497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4535470142288349,
"avg_score": null,
"num_lines": null
} |
# 2-electron VMC code for 2dim quantum dot with importance sampling
# No Coulomb interaction
# Using gaussian rng for new positions and Metropolis- Hastings
# Energy minimization using standard gradient descent
# Common imports
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
from math import exp, sqrt
from random import random, seed, normalvariate
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import sys
from numba import jit
from scipy.optimize import minimize
import multiprocessing as mp
# Trial wave function for the 2-electron quantum dot in two dims
def WaveFunction(r,alpha):
r1 = r[0,0]**2 + r[0,1]**2
r2 = r[1,0]**2 + r[1,1]**2
return exp(-0.5*alpha*(r1+r2))
# Local energy for the 2-electron quantum dot in two dims, using analytical local energy
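# Derivation sketch for the analytical local energy below (standard result,
# natural units hbar = m = omega = 1; here r1, r2 denote the squared radii):
#   E_L = -(1/2)*(laplacian psi)/psi + (1/2)*(r1 + r2)
#   For psi = exp(-alpha*(r1 + r2)/2) in 2D:
#       (laplacian psi)/psi = alpha^2*(r1 + r2) - 4*alpha
#   =>  E_L = (1/2)*(1 - alpha^2)*(r1 + r2) + 2*alpha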
def LocalEnergy(r,alpha):
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
return 0.5*(1-alpha*alpha)*(r1 + r2) +2.0*alpha
# Derivative of the wave function ansatz as a function of the variational parameters
def DerivativeWFansatz(r,alpha):
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
WfDer = -0.5*(r1+r2)
return WfDer
# Setting up the quantum force for the two-electron quantum dot, recall that it is a vector
def QuantumForce(r,alpha):
qforce = np.zeros((NumberParticles,Dimension), np.double)
qforce[0,:] = -2*r[0,:]*alpha
qforce[1,:] = -2*r[1,:]*alpha
return qforce
# Computing the derivative of the energy and the energy
# jit decorator tells Numba to compile this function.
# The argument types will be inferred by Numba when function is called.
@jit
def EnergyMinimization(alpha):
NumberMCcycles= 1000
# Parameters in the Fokker-Planck simulation of the quantum force
D = 0.5
TimeStep = 0.05
# positions
PositionOld = np.zeros((NumberParticles,Dimension), np.double)
PositionNew = np.zeros((NumberParticles,Dimension), np.double)
# Quantum force
QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
# seed for rng generator
seed()
energy = 0.0
DeltaE = 0.0
EnergyDer = 0.0
DeltaPsi = 0.0
DerivativePsiE = 0.0
#Initial position
for i in range(NumberParticles):
for j in range(Dimension):
PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
wfold = WaveFunction(PositionOld,alpha)
QuantumForceOld = QuantumForce(PositionOld,alpha)
#Loop over MC MCcycles
for MCcycle in range(NumberMCcycles):
#Trial position moving one particle at the time
for i in range(NumberParticles):
for j in range(Dimension):
PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\
QuantumForceOld[i,j]*TimeStep*D
wfnew = WaveFunction(PositionNew,alpha)
QuantumForceNew = QuantumForce(PositionNew,alpha)
GreensFunction = 0.0
for j in range(Dimension):
GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\
(D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\
PositionNew[i,j]+PositionOld[i,j])
GreensFunction = 1.0#exp(GreensFunction)
ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
#Metropolis-Hastings test to see whether we accept the move
if random() <= ProbabilityRatio:
for j in range(Dimension):
PositionOld[i,j] = PositionNew[i,j]
QuantumForceOld[i,j] = QuantumForceNew[i,j]
wfold = wfnew
DeltaE = LocalEnergy(PositionOld,alpha)
DerPsi = DerivativeWFansatz(PositionOld,alpha)
DeltaPsi +=DerPsi
energy += DeltaE
DerivativePsiE += DerPsi*DeltaE
# We calculate mean values
energy /= NumberMCcycles
DerivativePsiE /= NumberMCcycles
DeltaPsi /= NumberMCcycles
EnergyDer = 2*(DerivativePsiE-DeltaPsi*energy)
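    # The line above is the standard VMC estimator for the energy gradient:
    #   dE/d(alpha) = 2 * ( <E_L * dln(psi)/d(alpha)> - <E_L> * <dln(psi)/d(alpha)> )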
return energy, EnergyDer
#Here starts the main program with variable declarations
NumberParticles = 2
Dimension = 2
# guess for variational parameters
x0 = 0.5
# Set up iteration using stochastic gradient method
Energy =0 ; EnergyDer = 0
pool = mp.Pool(processes=2)
Energy, EnergyDer = EnergyMinimization(x0)
# No adaptive search for a minimum
eta = 0.5
Niterations = 50
Energies = np.zeros(Niterations)
EnergyDerivatives = np.zeros(Niterations)
AlphaValues = np.zeros(Niterations)
Totiterations = np.zeros(Niterations)
for iter in range(Niterations):
gradients = EnergyDer
x0 -= eta*gradients
Energy, EnergyDer = EnergyMinimization(x0)
Energies[iter] = Energy
EnergyDerivatives[iter] = EnergyDer
AlphaValues[iter] = x0
Totiterations[iter] = iter
plt.subplot(2, 1, 1)
plt.plot(Totiterations, Energies, 'o-')
plt.title('Energy and energy derivatives')
plt.ylabel('Dimensionless energy')
plt.subplot(2, 1, 2)
plt.plot(Totiterations, EnergyDerivatives, '.-')
plt.xlabel(r'$\mathrm{Iterations}$', fontsize=15)
plt.ylabel('Energy derivative')
save_fig("QdotNonint")
plt.show()
#nice printout with Pandas
import pandas as pd
from pandas import DataFrame
data ={'Alpha':AlphaValues, 'Energy':Energies,'Derivative':EnergyDerivatives}
frame = pd.DataFrame(data)
print(frame)
| {
"repo_name": "CompPhysics/ComputationalPhysics2",
"path": "doc/src/MCsummary/src/mpqdot.py",
"copies": "1",
"size": "5905",
"license": "cc0-1.0",
"hash": -2267066061847252700,
"line_mean": 31.8055555556,
"line_max": 92,
"alpha_frac": 0.6758679086,
"autogenerated": false,
"ratio": 3.225013653741125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9302620591892358,
"avg_score": 0.019652194089753508,
"num_lines": 180
} |
# 2-electron VMC code for 2dim quantum dot with importance sampling
# Using gaussian rng for new positions and Metropolis- Hastings
# Added energy minimization
# Common imports
from math import exp, sqrt
from random import random, seed, normalvariate
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import sys
# Trial wave function for the 2-electron quantum dot in two dims
def WaveFunction(r,alpha,beta):
r1 = r[0,0]**2 + r[0,1]**2
r2 = r[1,0]**2 + r[1,1]**2
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = r12/(1+beta*r12)
return exp(-0.5*alpha*(r1+r2)+deno)
# Local energy for the 2-electron quantum dot in two dims, using analytical local energy
def LocalEnergy(r,alpha,beta):
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
deno2 = deno*deno
return 0.5*(1-alpha*alpha)*(r1 + r2) +2.0*alpha + 1.0/r12+deno2*(alpha*r12-deno2+2*beta*deno-1.0/r12)
# Derivative of the wave function ansatz as a function of the variational parameters
def DerivativeWFansatz(r,alpha,beta):
WfDer = np.zeros((2), np.double)
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
deno2 = deno*deno
WfDer[0] = -0.5*(r1+r2)
WfDer[1] = -r12*r12*deno2
return WfDer
# Setting up the quantum force for the two-electron quantum dot, recall that it is a vector
def QuantumForce(r,alpha,beta):
qforce = np.zeros((NumberParticles,Dimension), np.double)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
    # Drift (quantum) force F = 2*grad(ln psi): Gaussian part plus Jastrow part
    qforce[0,:] = -2*r[0,:]*alpha + 2*(r[0,:]-r[1,:])*deno*deno/r12
    qforce[1,:] = -2*r[1,:]*alpha + 2*(r[1,:]-r[0,:])*deno*deno/r12
return qforce
# Computing the derivative of the energy and the energy
def EnergyMinimization(alpha, beta):
NumberMCcycles= 10000
# Parameters in the Fokker-Planck simulation of the quantum force
D = 0.5
TimeStep = 0.05
# positions
PositionOld = np.zeros((NumberParticles,Dimension), np.double)
PositionNew = np.zeros((NumberParticles,Dimension), np.double)
# Quantum force
QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
# seed for rng generator
seed()
energy = 0.0
DeltaE = 0.0
EnergyDer = np.zeros((2), np.double)
DeltaPsi = np.zeros((2), np.double)
DerivativePsiE = np.zeros((2), np.double)
#Initial position
for i in range(NumberParticles):
for j in range(Dimension):
PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
wfold = WaveFunction(PositionOld,alpha,beta)
QuantumForceOld = QuantumForce(PositionOld,alpha, beta)
#Loop over MC MCcycles
for MCcycle in range(NumberMCcycles):
#Trial position moving one particle at the time
for i in range(NumberParticles):
for j in range(Dimension):
PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\
QuantumForceOld[i,j]*TimeStep*D
wfnew = WaveFunction(PositionNew,alpha,beta)
QuantumForceNew = QuantumForce(PositionNew,alpha, beta)
GreensFunction = 0.0
for j in range(Dimension):
GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\
(D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\
PositionNew[i,j]+PositionOld[i,j])
GreensFunction = exp(GreensFunction)
ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
#Metropolis-Hastings test to see whether we accept the move
if random() <= ProbabilityRatio:
for j in range(Dimension):
PositionOld[i,j] = PositionNew[i,j]
QuantumForceOld[i,j] = QuantumForceNew[i,j]
wfold = wfnew
DeltaE = LocalEnergy(PositionOld,alpha,beta)
DerPsi = DerivativeWFansatz(PositionOld,alpha,beta)
DeltaPsi += DerPsi
energy += DeltaE
DerivativePsiE += DerPsi*DeltaE
# We calculate mean values
energy /= NumberMCcycles
DerivativePsiE /= NumberMCcycles
DeltaPsi /= NumberMCcycles
EnergyDer = 2*(DerivativePsiE-DeltaPsi*energy)
return energy, EnergyDer
#Here starts the main program with variable declarations
NumberParticles = 2
Dimension = 2
# guess for variational parameters
alpha = 0.95
beta = 0.3
# Set up iteration using stochastic gradient method
Energy = 0
EDerivative = np.zeros((2), np.double)
# Learning rate eta, max iterations, need to change to adaptive learning rate
eta = 0.01
MaxIterations = 50
iter = 0
Energies = np.zeros(MaxIterations)
EnergyDerivatives1 = np.zeros(MaxIterations)
EnergyDerivatives2 = np.zeros(MaxIterations)
AlphaValues = np.zeros(MaxIterations)
BetaValues = np.zeros(MaxIterations)
while iter < MaxIterations:
Energy, EDerivative = EnergyMinimization(alpha,beta)
alphagradient = EDerivative[0]
betagradient = EDerivative[1]
alpha -= eta*alphagradient
beta -= eta*betagradient
Energies[iter] = Energy
EnergyDerivatives1[iter] = EDerivative[0]
EnergyDerivatives2[iter] = EDerivative[1]
AlphaValues[iter] = alpha
BetaValues[iter] = beta
iter += 1
#nice printout with Pandas
import pandas as pd
from pandas import DataFrame
pd.set_option('max_columns', 6)
data ={'Alpha':AlphaValues,'Beta':BetaValues,'Energy':Energies,'Alpha Derivative':EnergyDerivatives1,'Beta Derivative':EnergyDerivatives2}
frame = pd.DataFrame(data)
print(frame)
| {
"repo_name": "CompPhysics/ComputationalPhysics2",
"path": "doc/src/MCsummary/src/qdoteminim.py",
"copies": "2",
"size": "5916",
"license": "cc0-1.0",
"hash": 5398027345953398000,
"line_mean": 35.2944785276,
"line_max": 138,
"alpha_frac": 0.6512846518,
"autogenerated": false,
"ratio": 3.0076258261311644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46589104779311646,
"avg_score": null,
"num_lines": null
} |
# 2-electron VMC code for 2dim quantum dot with importance sampling
# Using gaussian rng for new positions and Metropolis- Hastings
# Added energy minimization
from math import exp, sqrt
from random import random, seed, normalvariate
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from scipy.optimize import minimize
import sys
import os
# Where to save data files
PROJECT_ROOT_DIR = "Results"
DATA_ID = "Results/EnergyMin"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
outfile = open(data_path("Energies.dat"),'w')
# Trial wave function for the 2-electron quantum dot in two dims
def WaveFunction(r,alpha,beta):
r1 = r[0,0]**2 + r[0,1]**2
r2 = r[1,0]**2 + r[1,1]**2
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = r12/(1+beta*r12)
return exp(-0.5*alpha*(r1+r2)+deno)
# Local energy for the 2-electron quantum dot in two dims, using analytical local energy
def LocalEnergy(r,alpha,beta):
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
deno2 = deno*deno
return 0.5*(1-alpha*alpha)*(r1 + r2) +2.0*alpha + 1.0/r12+deno2*(alpha*r12-deno2+2*beta*deno-1.0/r12)
# Derivative of the wave function ansatz as a function of the variational parameters
def DerivativeWFansatz(r,alpha,beta):
WfDer = np.zeros((2), np.double)
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
deno2 = deno*deno
WfDer[0] = -0.5*(r1+r2)
WfDer[1] = -r12*r12*deno2
return WfDer
# Setting up the quantum force for the two-electron quantum dot, recall that it is a vector
def QuantumForce(r,alpha,beta):
qforce = np.zeros((NumberParticles,Dimension), np.double)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
    # Drift (quantum) force F = 2*grad(ln psi): Gaussian part plus Jastrow part
    qforce[0,:] = -2*r[0,:]*alpha + 2*(r[0,:]-r[1,:])*deno*deno/r12
    qforce[1,:] = -2*r[1,:]*alpha + 2*(r[1,:]-r[0,:])*deno*deno/r12
return qforce
# Computing the derivative of the energy and the energy
def EnergyDerivative(x0):
# Parameters in the Fokker-Planck simulation of the quantum force
D = 0.5
TimeStep = 0.05
# positions
PositionOld = np.zeros((NumberParticles,Dimension), np.double)
PositionNew = np.zeros((NumberParticles,Dimension), np.double)
# Quantum force
QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
energy = 0.0
DeltaE = 0.0
alpha = x0[0]
beta = x0[1]
EnergyDer = 0.0
DeltaPsi = 0.0
DerivativePsiE = 0.0
#Initial position
for i in range(NumberParticles):
for j in range(Dimension):
PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
wfold = WaveFunction(PositionOld,alpha,beta)
QuantumForceOld = QuantumForce(PositionOld,alpha, beta)
#Loop over MC MCcycles
for MCcycle in range(NumberMCcycles):
#Trial position moving one particle at the time
for i in range(NumberParticles):
for j in range(Dimension):
PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\
QuantumForceOld[i,j]*TimeStep*D
wfnew = WaveFunction(PositionNew,alpha,beta)
QuantumForceNew = QuantumForce(PositionNew,alpha, beta)
GreensFunction = 0.0
for j in range(Dimension):
GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\
(D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\
PositionNew[i,j]+PositionOld[i,j])
GreensFunction = exp(GreensFunction)
ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
#Metropolis-Hastings test to see whether we accept the move
if random() <= ProbabilityRatio:
for j in range(Dimension):
PositionOld[i,j] = PositionNew[i,j]
QuantumForceOld[i,j] = QuantumForceNew[i,j]
wfold = wfnew
DeltaE = LocalEnergy(PositionOld,alpha,beta)
DerPsi = DerivativeWFansatz(PositionOld,alpha,beta)
DeltaPsi += DerPsi
energy += DeltaE
DerivativePsiE += DerPsi*DeltaE
# We calculate mean values
energy /= NumberMCcycles
DerivativePsiE /= NumberMCcycles
DeltaPsi /= NumberMCcycles
EnergyDer = 2*(DerivativePsiE-DeltaPsi*energy)
return EnergyDer
# Computing the expectation value of the local energy
def Energy(x0):
# Parameters in the Fokker-Planck simulation of the quantum force
D = 0.5
TimeStep = 0.05
# positions
PositionOld = np.zeros((NumberParticles,Dimension), np.double)
PositionNew = np.zeros((NumberParticles,Dimension), np.double)
# Quantum force
QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
energy = 0.0
DeltaE = 0.0
alpha = x0[0]
beta = x0[1]
#Initial position
for i in range(NumberParticles):
for j in range(Dimension):
PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
wfold = WaveFunction(PositionOld,alpha,beta)
QuantumForceOld = QuantumForce(PositionOld,alpha, beta)
#Loop over MC MCcycles
for MCcycle in range(NumberMCcycles):
#Trial position moving one particle at the time
for i in range(NumberParticles):
for j in range(Dimension):
PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\
QuantumForceOld[i,j]*TimeStep*D
wfnew = WaveFunction(PositionNew,alpha,beta)
QuantumForceNew = QuantumForce(PositionNew,alpha, beta)
GreensFunction = 0.0
for j in range(Dimension):
GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\
(D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\
PositionNew[i,j]+PositionOld[i,j])
GreensFunction = exp(GreensFunction)
ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
#Metropolis-Hastings test to see whether we accept the move
if random() <= ProbabilityRatio:
for j in range(Dimension):
PositionOld[i,j] = PositionNew[i,j]
QuantumForceOld[i,j] = QuantumForceNew[i,j]
wfold = wfnew
DeltaE = LocalEnergy(PositionOld,alpha,beta)
energy += DeltaE
if Printout:
outfile.write('%f\n' %(energy/(MCcycle+1.0)))
# We calculate mean values
energy /= NumberMCcycles
return energy
#Here starts the main program with variable declarations
NumberParticles = 2
Dimension = 2
# seed for rng generator
seed()
# Monte Carlo cycles for parameter optimization
Printout = False
NumberMCcycles= 10000
# guess for variational parameters
x0 = np.array([0.9,0.2])
# Using Broydens method to find optimal parameters
res = minimize(Energy, x0, method='BFGS', jac=EnergyDerivative, options={'gtol': 1e-4,'disp': True})
x0 = res.x
# Compute the energy again with the optimal parameters and an increased number of Monte Carlo cycles
NumberMCcycles= 2**19
Printout = True
FinalEnergy = Energy(x0)
EResult = np.array([FinalEnergy,FinalEnergy])
outfile.close()
#nice printout with Pandas
import pandas as pd
from pandas import DataFrame
data ={'Optimal Parameters':x0, 'Final Energy':EResult}
frame = pd.DataFrame(data)
print(frame)
| {
"repo_name": "CompPhysics/ComputationalPhysics2",
"path": "doc/Programs/BoltzmannMachines/VMC/python/qdotBroyden.py",
"copies": "2",
"size": "8051",
"license": "cc0-1.0",
"hash": -5736781454099594000,
"line_mean": 34.6238938053,
"line_max": 105,
"alpha_frac": 0.6337100981,
"autogenerated": false,
"ratio": 3.1266019417475728,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9637944236428164,
"avg_score": 0.024473560683881578,
"num_lines": 226
} |
# 2-electron VMC code for 2dim quantum dot with importance sampling
# Using gaussian rng for new positions and Metropolis- Hastings
# No energy minimization
from math import exp, sqrt
from random import random, seed, normalvariate
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import sys
from numba import jit
from scipy.optimize import minimize
# Trial wave function for the 2-electron quantum dot in two dims
def WaveFunction(r,alpha):
r1 = r[0,0]**2 + r[0,1]**2
r2 = r[1,0]**2 + r[1,1]**2
return exp(-0.5*alpha*(r1+r2))
# Local energy for the 2-electron quantum dot in two dims, using analytical local energy
def LocalEnergy(r,alpha):
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
return 0.5*(1-alpha*alpha)*(r1 + r2) +2.0*alpha
# Derivative of the wave function ansatz as a function of the variational parameters
def DerivativeWFansatz(r,alpha):
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
WfDer = -0.5*(r1+r2)
return WfDer
# Setting up the quantum force for the two-electron quantum dot, recall that it is a vector
def QuantumForce(r,alpha):
qforce = np.zeros((NumberParticles,Dimension), np.double)
qforce[0,:] = -2*r[0,:]*alpha
qforce[1,:] = -2*r[1,:]*alpha
return qforce
# Computing the derivative of the energy and the energy
# jit decorator tells Numba to compile this function.
# The argument types will be inferred by Numba when function is called.
@jit
def EnergyMinimization(alpha):
NumberMCcycles= 1000
# Parameters in the Fokker-Planck simulation of the quantum force
D = 0.5
TimeStep = 0.05
# positions
PositionOld = np.zeros((NumberParticles,Dimension), np.double)
PositionNew = np.zeros((NumberParticles,Dimension), np.double)
# Quantum force
QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
# seed for rng generator
seed()
energy = 0.0
DeltaE = 0.0
EnergyDer = 0.0
DeltaPsi = 0.0
DerivativePsiE = 0.0
#Initial position
for i in range(NumberParticles):
for j in range(Dimension):
PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
wfold = WaveFunction(PositionOld,alpha)
QuantumForceOld = QuantumForce(PositionOld,alpha)
#Loop over MC MCcycles
for MCcycle in range(NumberMCcycles):
#Trial position moving one particle at the time
for i in range(NumberParticles):
for j in range(Dimension):
PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\
QuantumForceOld[i,j]*TimeStep*D
wfnew = WaveFunction(PositionNew,alpha)
QuantumForceNew = QuantumForce(PositionNew,alpha)
GreensFunction = 0.0
for j in range(Dimension):
GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\
(D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\
PositionNew[i,j]+PositionOld[i,j])
GreensFunction = 1.0#exp(GreensFunction)
ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
#Metropolis-Hastings test to see whether we accept the move
if random() <= ProbabilityRatio:
for j in range(Dimension):
PositionOld[i,j] = PositionNew[i,j]
QuantumForceOld[i,j] = QuantumForceNew[i,j]
wfold = wfnew
DeltaE = LocalEnergy(PositionOld,alpha)
DerPsi = DerivativeWFansatz(PositionOld,alpha)
DeltaPsi +=DerPsi
energy += DeltaE
DerivativePsiE += DerPsi*DeltaE
# We calculate mean, variance and error (no blocking applied)
energy /= NumberMCcycles
DerivativePsiE /= NumberMCcycles
DeltaPsi /= NumberMCcycles
EnergyDer = 2*(DerivativePsiE-DeltaPsi*energy)
print(energy, DerivativePsiE, DeltaPsi*energy)
return energy, EnergyDer
#Here starts the main program with variable declarations
NumberParticles = 2
Dimension = 2
# guess for variational parameters
x0 = 0.9
# Set up iteration using stochastic gradient method
Energy =0 ; EnergyDer = 0
Energy, EnergyDer = EnergyMinimization(x0)
eta = 0.5
Niterations = 100
for iter in range(Niterations):
gradients = EnergyDer
x0 -= eta*gradients
Energy, EnergyDer = EnergyMinimization(x0)
print(Energy, EnergyDer)
print(x0)
| {
"repo_name": "CompPhysics/ComputationalPhysics2",
"path": "doc/Programs/ConjugateGradient/python/qdotnint.py",
"copies": "1",
"size": "4670",
"license": "cc0-1.0",
"hash": 8638217723136608000,
"line_mean": 34.3787878788,
"line_max": 92,
"alpha_frac": 0.6582441113,
"autogenerated": false,
"ratio": 3.3120567375886525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4470300848888652,
"avg_score": null,
"num_lines": null
} |
# 2-electron VMC for quantum dot system in two dimensions
# Brute force Metropolis, no importance sampling and no energy minimization
from math import exp, sqrt
from random import random, seed
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import sys
from numba import jit
#Read name of output file from command line
if len(sys.argv) == 2:
outfilename = sys.argv[1]
else:
    print('\nError: Name of output file must be given as command line argument.\n')
    sys.exit(1)
outfile = open(outfilename,'w')
# Trial wave function for the 2-electron quantum dot in two dims
def WaveFunction(r,alpha,beta):
r1 = r[0,0]**2 + r[0,1]**2
r2 = r[1,0]**2 + r[1,1]**2
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = r12/(1+beta*r12)
return exp(-0.5*alpha*(r1+r2)+deno)
# Local energy for the 2-electron quantum dot in two dims, using analytical local energy
def LocalEnergy(r,alpha,beta):
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
deno2 = deno*deno
return 0.5*(1-alpha*alpha)*(r1 + r2) +2.0*alpha + 1.0/r12+deno2*(alpha*r12-deno2+2*beta*deno-1.0/r12)
# The Monte Carlo sampling with the Metropolis algo
# The jit decorator tells Numba to compile this function.
# The argument types will be inferred by Numba when the function is called.
@jit
def MonteCarloSampling():
NumberMCcycles= 100000
StepSize = 1.0
# positions
PositionOld = np.zeros((NumberParticles,Dimension), np.double)
PositionNew = np.zeros((NumberParticles,Dimension), np.double)
# seed for rng generator
seed()
# start variational parameter
alpha = 0.9
for ia in range(MaxVariations):
alpha += .025
AlphaValues[ia] = alpha
beta = 0.2
for jb in range(MaxVariations):
beta += .01
BetaValues[jb] = beta
energy = energy2 = 0.0
DeltaE = 0.0
#Initial position
for i in range(NumberParticles):
for j in range(Dimension):
PositionOld[i,j] = StepSize * (random() - .5)
wfold = WaveFunction(PositionOld,alpha,beta)
#Loop over MC MCcycles
for MCcycle in range(NumberMCcycles):
#Trial position moving one particle at the time
for i in range(NumberParticles):
for j in range(Dimension):
PositionNew[i,j] = PositionOld[i,j] + StepSize * (random() - .5)
wfnew = WaveFunction(PositionNew,alpha,beta)
#Metropolis test to see whether we accept the move
if random() < wfnew**2 / wfold**2:
for j in range(Dimension):
PositionOld[i,j] = PositionNew[i,j]
wfold = wfnew
DeltaE = LocalEnergy(PositionOld,alpha,beta)
energy += DeltaE
energy2 += DeltaE**2
#We calculate mean, variance and error ...
energy /= NumberMCcycles
energy2 /= NumberMCcycles
variance = energy2 - energy**2
error = sqrt(variance/NumberMCcycles)
Energies[ia,jb] = energy
outfile.write('%f %f %f %f %f\n' %(alpha,beta,energy,variance,error))
return Energies, AlphaValues, BetaValues
#Here starts the main program with variable declarations
NumberParticles = 2
Dimension = 2
MaxVariations = 10
Energies = np.zeros((MaxVariations,MaxVariations))
AlphaValues = np.zeros(MaxVariations)
BetaValues = np.zeros(MaxVariations)
(Energies, AlphaValues, BetaValues) = MonteCarloSampling()
outfile.close()
# Prepare for plots
fig = plt.figure()
ax = fig.gca(projection='3d')
# Plot the surface.
X, Y = np.meshgrid(AlphaValues, BetaValues)
surf = ax.plot_surface(X, Y, Energies,cmap=cm.coolwarm,linewidth=0, antialiased=False)
# Customize the z axis.
zmin = np.matrix(Energies).min()
zmax = np.matrix(Energies).max()
ax.set_zlim(zmin, zmax)
ax.set_xlabel(r'$\alpha$')
ax.set_ylabel(r'$\beta$')
ax.set_zlabel(r'$\langle E \rangle$')
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
| {
"repo_name": "CompPhysics/ComputationalPhysics2",
"path": "doc/Programs/ConjugateGradient/python/qdotmetropolis.py",
"copies": "1",
"size": "4473",
"license": "cc0-1.0",
"hash": 7610836137299340000,
"line_mean": 35.3658536585,
"line_max": 105,
"alpha_frac": 0.6295551084,
"autogenerated": false,
"ratio": 3.2578295702840494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4387384678684049,
"avg_score": null,
"num_lines": null
} |
## 2. Enumerate ##
ships = ["Andrea Doria", "Titanic", "Lusitania"]
cars = ["Ford Edsel", "Ford Pinto", "Yugo"]
for i,item in enumerate(ships):
print(item)
print(cars[i])
## 3. Adding Columns ##
things = [["apple", "monkey"], ["orange", "dog"], ["banana", "cat"]]
trees = ["cedar", "maple", "fig"]
for i, item in enumerate(things):
item.append(trees[i])
## 4. List Comprehensions ##
apple_prices = [100, 101, 102, 105]
apple_prices_doubled = [(item*2) for item in apple_prices]
apple_prices_lowered = [(item-100) for item in apple_prices]
## 5. Counting Female Names ##
name_counts = {}
for item in legislators:
if item[3] == 'F' and item[7] > 1940:
name = item[1]
if name in name_counts:
name_counts[name] +=1
else:
name_counts[name]=1
## 7. Comparing with None ##
values = [None, 10, 20, 30, None, 50]
checks = []
for item in values:
status = (item != None and item > 30)
checks.append(status)
## 8. Highest Female Name Count ##
max_value = None
for item in name_counts.keys():
count = name_counts[item]
if (max_value == None or count > max_value):
max_value = count
## 9. The Items Method ##
plant_types = {"orchid": "flower", "cedar": "tree", "maple": "tree"}
for item,val in plant_types.items():
print(item)
print(val)
## 10. Finding the Most Common Female Names ##
top_female_names = []
for item in name_counts.keys():
if name_counts[item]==2:
top_female_names.append(item)
## 11. Finding the Most Common Male Names ##
top_male_names = []
male_name_counts ={}
highest_male_count = None
for item in legislators:
if item[3] == 'M' and item[7] > 1940:
name = item[1]
if name in male_name_counts:
male_name_counts[name] +=1
else:
male_name_counts[name]=1
for item in male_name_counts.keys():
count = male_name_counts[item]
if (highest_male_count == None or count > highest_male_count):
highest_male_count = count
for item in male_name_counts.keys():
if male_name_counts[item]==highest_male_count:
top_male_names.append(item) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Python Programming Intermediate/List Comprehensions-16.py",
"copies": "1",
"size": "2151",
"license": "mit",
"hash": 5531344968793942000,
"line_mean": 24.9277108434,
"line_max": 68,
"alpha_frac": 0.6052998605,
"autogenerated": false,
"ratio": 2.9465753424657533,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.900594904781693,
"avg_score": 0.009185231029764497,
"num_lines": 83
} |
## 2. Extract Line Numbers ##
raw_hamlet = sc.textFile("hamlet.txt")
split_hamlet = raw_hamlet.map(lambda line: line.split('\t'))
split_hamlet.take(5)
def format_id(x):
id = x[0].split('@')[1]
results = list()
results.append(id)
if len(x) > 1:
for y in x[1:]:
results.append(y)
return results
hamlet_with_ids = split_hamlet.map(lambda line: format_id(line))
hamlet_with_ids.take(10)
## 3. Remove Blank Values ##
hamlet_with_ids.take(5)
real_text = hamlet_with_ids.filter(lambda line: len(line) > 1)
hamlet_text_only = real_text.map(lambda line: [l for l in line if l != ''])
hamlet_text_only.take(10)
## 4. Remove Pipe Characters ##
hamlet_text_only.take(10)
def fix_pipe(line):
results = list()
for l in line:
if l == "|":
pass
elif "|" in l:
fmtd = l.replace("|", "")
results.append(fmtd)
else:
results.append(l)
return results
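# Example behaviour of fix_pipe on a made-up line (illustrative only):
#   fix_pipe(['hamlet@0', '|', 'HAMLET|'])  ->  ['hamlet@0', 'HAMLET']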
clean_hamlet = hamlet_text_only.map(lambda line: fix_pipe(line)) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "pySpark/Challenge_ Transforming Hamlet into a Data Set-124.py",
"copies": "1",
"size": "1025",
"license": "mit",
"hash": -3681280010474772500,
"line_mean": 24.65,
"line_max": 75,
"alpha_frac": 0.5980487805,
"autogenerated": false,
"ratio": 3.014705882352941,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.906717156482826,
"avg_score": 0.009116619604936302,
"num_lines": 40
} |
## 2. Finding correlations ##
correlations = combined.corr()
correlations = correlations["sat_score"]
print(correlations)
## 3. Plotting enrollment ##
import matplotlib.pyplot as plt
combined.plot.scatter(x='total_enrollment', y='sat_score')
plt.show()
## 4. Exploring schools with low SAT scores and enrollment ##
low_enrollment = combined[(combined['total_enrollment']<1000) & (combined['sat_score'] < 1000)]
print(low_enrollment['School Name'])
## 5. Plotting language learning percentage ##
combined.plot.scatter(x='ell_percent',y='sat_score')
plt.show()
## 6. Mapping the schools ##
from mpl_toolkits.basemap import Basemap
m = Basemap(
projection='merc',
llcrnrlat=40.496044,
urcrnrlat=40.915256,
llcrnrlon=-74.255735,
urcrnrlon=-73.700272,
resolution='i'
)
m.drawmapboundary(fill_color='#85A6D9')
m.drawcoastlines(color='#6D5F47', linewidth=.4)
m.drawrivers(color='#6D5F47', linewidth=.4)
longitudes = combined["lon"].tolist()
latitudes = combined["lat"].tolist()
m.scatter(longitudes, latitudes, s=20, zorder=2, latlon=True)
plt.show()
## 7. Plotting out statistics ##
from mpl_toolkits.basemap import Basemap
m = Basemap(
projection='merc',
llcrnrlat=40.496044,
urcrnrlat=40.915256,
llcrnrlon=-74.255735,
urcrnrlon=-73.700272,
resolution='i'
)
m.drawmapboundary(fill_color='#85A6D9')
m.drawcoastlines(color='#6D5F47', linewidth=.4)
m.drawrivers(color='#6D5F47', linewidth=.4)
longitudes = combined["lon"].tolist()
latitudes = combined["lat"].tolist()
m.scatter(longitudes, latitudes, s=20, zorder=2, latlon=True,cmap = "summer",c=combined["ell_percent"])
plt.show()
## 8. Calculating district level statistics ##
import numpy
districts = combined.groupby('school_dist').agg(numpy.mean)
districts.reset_index(inplace = True)
print(districts.head(2))
## 9. Plotting ell_percent by district ##
from mpl_toolkits.basemap import Basemap
m = Basemap(
projection='merc',
llcrnrlat=40.496044,
urcrnrlat=40.915256,
llcrnrlon=-74.255735,
urcrnrlon=-73.700272,
resolution='i'
)
m.drawmapboundary(fill_color='#85A6D9')
m.drawcoastlines(color='#6D5F47', linewidth=.4)
m.drawrivers(color='#6D5F47', linewidth=.4)
longitudes = combined["lon"].tolist()
latitudes = combined["lat"].tolist()
m.scatter(longitudes, latitudes, s=50, zorder=2, latlon=True,cmap = "summer",c=combined["ell_percent"])
plt.show() | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Data Exploration/Data Cleaning Walkthrough_ Analyzing And Visualizing The Data-210.py",
"copies": "1",
"size": "2405",
"license": "mit",
"hash": 6963367545178510000,
"line_mean": 24.5957446809,
"line_max": 103,
"alpha_frac": 0.7097713098,
"autogenerated": false,
"ratio": 2.754868270332188,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.3964639580132188,
"avg_score": null,
"num_lines": null
} |
# 2. fn/names_text
# Parameters: text (required), engine (optional)
import sys, os, unittest, json, codecs
sys.path.append('./')
sys.path.append('../')
import webapp
service = webapp.get_service(5004, 'fn/names_text')
the_sample = None
def get_sample():
global the_sample
if the_sample == None:
with codecs.open(webapp.find_resource('text-sample'), 'r', 'latin-1') as infile:
the_sample = infile.read()
return the_sample
class TestFnNamesText(webapp.WebappTestCase):
@classmethod
def get_service(self):
return service
def test_no_parameter(self):
"""No parameters. Should yield some kind of error."""
request = service.get_request('GET', {})
x = self.start_request_tests(request)
self.assertTrue(x.status_code >= 400)
self.assertTrue(u'text' in x.json()[u'message'],
'error message from service is not informative: %s' % x.json()[u'message'])
def test_large_input(self):
"""Test large input.
40000 characters fails; 30000 succeeds.
N.b. the input is copied to the output (u'input_text' result field).
That seems like a bad idea.
TBD: design issue."""
request = service.get_request('GET', {u'text': get_sample()[0:30000]})
x = self.start_request_tests(request)
self.assert_success(x)
self.assertTrue(len(x.json()[u'scientificNames']) > 10)
self.assertTrue(u'Papilio' in x.json()[u'scientificNames'])
def test_engines(self):
"""It looks like engines 6, 7, and 8 are all the same.
3 and 4 are the same as well.
Maybe inadequate error checking? In any case, there is *no* documentation
of the engine parameter (in the old documentation). TBD: issue."""
for engine in range(0, 8):
request = service.get_request('GET', {u'engine': engine, u'text': get_sample()[0:30000]})
x = self.start_request_tests(request)
# TBD: if range > ?? then x.status_code should be >= 400
self.assert_success(x)
self.assertTrue(len(x.json()[u'scientificNames']) > 10)
self.assertTrue(u'Scolopendrella' in x.json()[u'scientificNames'])
# Insert here: edge case tests
# Insert here: inputs out of range, leading to error or long delay
# Insert here: error-generating conditions
# (See ../README.md)
def test_example_4(self):
x = self.start_request_tests(example_4)
self.assert_success(x)
# Insert: whether result is what it should be according to docs
def test_example_5(self):
x = self.start_request_tests(example_5)
self.assert_success(x)
# Insert: whether result is what it should be according to docs
null=None; false=False; true=True
example_5 = service.get_request('GET', {u'text': u'Formica polyctena is a species of European red wood ant in the genus Formica. The pavement ant, Tetramorium caespitum is an ant native to Europe. Pseudomyrmex is a genus of stinging, wasp-like ants. Adetomyrma venatrix is an endangered species of ants endemic to Madagascar. Carebara diversa is a species of ants in the subfamily Formicinae. It is found in many Asian countries.'})
example_4 = service.get_request('GET', {u'engine': u'2', u'text': u'The lemon dove (Columba larvata) is a species of bird in the pigeon family Columbidae found in montane forests of sub-Saharan Africa.'})
if __name__ == '__main__':
webapp.main()
| {
"repo_name": "jar398/tryphy",
"path": "tests/test_fn_names_text.py",
"copies": "1",
"size": "3503",
"license": "bsd-2-clause",
"hash": -2101260319275050500,
"line_mean": 41.2048192771,
"line_max": 432,
"alpha_frac": 0.6488723951,
"autogenerated": false,
"ratio": 3.4444444444444446,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9566159868268607,
"avg_score": 0.005431394255167686,
"num_lines": 83
} |
# 2 Gold Stars
# One way search engines rank pages
# is to count the number of times a
# searcher clicks on a returned link.
# This indicates that the person doing
# the query thought this was a useful
# link for the query, so it should be
# higher in the rankings next time.
# (In Unit 6, we will look at a different
# way of ranking pages that does not depend
# on user clicks.)
# Modify the index such that for each url in a
# list for a keyword, there is also a number
# that counts the number of times a user
# clicks on that link for this keyword.
# The result of lookup(index,keyword) should
# now be a list of url entries, where each url
# entry is a list of a url and a number
# indicating the number of times that url
# was clicked for this query keyword.
# You should define a new procedure to simulate
# user clicks for a given link:
# record_user_click(index,word,url)
# that modifies the entry in the index for
# the input word by increasing the count associated
# with the url by 1.
# You also will have to modify add_to_index
# in order to correctly create the new data
# structure, and to prevent the repetition of
# entries as in homework 4-5.
index = [['chicken',[['url1',3],['url2',2],['url3',3]]]]
#print index[0][1][0][1]
def record_user_click(index,keyword,url):
for entry in index:
if entry[0] == keyword:
for combo in entry[1]:
if combo[0] == url:
combo[1] = combo[1] + 1
def add_to_index(index, keyword, url):
for entry in index:
if entry[0] == keyword:
for combo in entry[1]:
if url == combo[0]:
return
entry[1].append([url,0])
return
# not found, add new keyword to index
index.append([keyword, [[url,0]]])
def get_page(url):
try:
import urllib
return urllib.urlopen(url).read()
except:
return "failed"
def union(a, b):
for e in b:
if e not in a:
a.append(e)
def get_next_target(page):
start_link = page.find('<a href=')
if start_link == -1:
return None, 0
start_quote = page.find('"', start_link)
end_quote = page.find('"', start_quote + 1)
url = page[start_quote + 1:end_quote]
return url, end_quote
def get_all_links(page):
links = []
while True:
url, endpos = get_next_target(page)
if url:
links.append(url)
page = page[endpos:]
else:
break
return links
def crawl_web(seed):
tocrawl = [seed]
crawled = []
index = []
while tocrawl:
page = tocrawl.pop()
if page not in crawled:
content = get_page(page)
add_page_to_index(index, page, content)
union(tocrawl, get_all_links(content))
crawled.append(page)
return index
def add_page_to_index(index, url, content):
words = content.split()
for word in words:
add_to_index(index, word, url)
def lookup(index, keyword):
for entry in index:
if entry[0] == keyword:
return entry[1]
return None
index = crawl_web('http://www.udacity.com/cs101x/index.html')
#Here is an example showing a sequence of interactions:
index = crawl_web('http://www.udacity.com/cs101x/index.html')
print lookup(index, 'good')
#>>> [['http://www.udacity.com/cs101x/index.html', 0],
#>>> ['http://www.udacity.com/cs101x/crawling.html', 0]]
record_user_click(index, 'good', 'http://www.udacity.com/cs101x/crawling.html')
print lookup(index, 'good')
#>>> [['http://www.udacity.com/cs101x/index.html', 0],
#>>> ['http://www.udacity.com/cs101x/crawling.html', 1]] | {
"repo_name": "W0mpRat/IntroToComputerScience",
"path": "Lesson4/CountingClicks.py",
"copies": "1",
"size": "3668",
"license": "unlicense",
"hash": 5053235345235291000,
"line_mean": 27.6640625,
"line_max": 79,
"alpha_frac": 0.6199563795,
"autogenerated": false,
"ratio": 3.343664539653601,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4463620919153601,
"avg_score": null,
"num_lines": null
} |
## 2. Implementing an Algorithm ##
# When the algorithm finds Kobe in the data set, store his position in Kobe_position
kobe_position = ""
# Find Kobe in the data set
for item in nba:
if item[0]== 'Kobe Bryant':
kobe_position = item[1]
## 4. Linear Search with Modular Code ##
# player_age returns the age of a player in our NBA data set
def player_age(name):
for row in nba:
if row[0] == name:
return row[2]
return -1
allen_age = player_age('Ray Allen')
durant_age = player_age('Kevin Durant')
shaq_age = player_age('Shaquille O\'Neal')
## 7. Exercise: Recognizing Constant Time Algorithms ##
# Implementation A: Convert degrees Celcius to degrees Fahrenheit
def celcius_to_fahrenheit(degrees):
step_1 = degrees * 1.8
step_2 = step_1 + 32
return step_2
# Implementation B: Reverse a list
def reverse(ls):
length = len(ls)
new_list = []
for i in range(length):
        new_list.append(ls[length - 1 - i])
return new_list
# Implementation C: Print a blastoff message after a countdown
def blastoff(message):
count = 10
for i in range(count):
print(count - i)
print(message)
not_constant = "B"
## 10. Some Other Algorithms ##
# Find the length of a list
def length(ls):
count = 0
for elem in ls:
count = count + 1
length_time_complexity = "linear"
# Check whether a list is empty -- Implementation 1
def is_empty_1(ls):
if length(ls) == 0:
return True
else:
return False
is_empty_1_complexity = "linear"
# Check whether a list is empty -- Implementation 2
def is_empty_2(ls):
for element in ls:
return False
return True
is_empty_2_complexity = "constant" | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Data Structures & Algorithms/Algorithms-92.py",
"copies": "1",
"size": "1702",
"license": "mit",
"hash": -4694796253325890000,
"line_mean": 23.3285714286,
"line_max": 84,
"alpha_frac": 0.6498237368,
"autogenerated": false,
"ratio": 3.199248120300752,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43490718571007514,
"avg_score": null,
"num_lines": null
} |
""" 2-input XOR example using Izhikevich's spiking neuron model. """
from __future__ import print_function
import multiprocessing
import os
from matplotlib import patches
from matplotlib import pylab as plt
import visualize
import neat
# Network inputs and expected outputs.
xor_inputs = ((0, 0), (0, 1), (1, 0), (1, 1))
xor_outputs = (0, 1, 1, 0)
# Maximum amount of simulated time (in milliseconds) to wait for the network to produce an output.
max_time_msec = 20.0
def compute_output(t0, t1):
"""Compute the network's output based on the "time to first spike" of the two output neurons."""
if t0 is None or t1 is None:
# If neither of the output neurons fired within the allotted time,
# give a response which produces a large error.
return -1.0
else:
# If the output neurons fire within 1.0 milliseconds of each other,
# the output is 1, and if they fire more than 11 milliseconds apart,
# the output is 0, with linear interpolation between 1 and 11 milliseconds.
response = 1.1 - 0.1 * abs(t0 - t1)
return max(0.0, min(1.0, response))
def simulate(genome, config):
# Create a network of "fast spiking" Izhikevich neurons.
net = neat.iznn.IZNN.create(genome, config)
dt = net.get_time_step_msec()
sum_square_error = 0.0
simulated = []
for idata, odata in zip(xor_inputs, xor_outputs):
neuron_data = {}
for i, n in net.neurons.items():
neuron_data[i] = []
# Reset the network, apply the XOR inputs, and run for the maximum allowed time.
net.reset()
net.set_inputs(idata)
t0 = None
t1 = None
v0 = None
v1 = None
num_steps = int(max_time_msec / dt)
net.set_inputs(idata)
for j in range(num_steps):
t = dt * j
output = net.advance(dt)
# Capture the time and neuron membrane potential for later use if desired.
for i, n in net.neurons.items():
neuron_data[i].append((t, n.current, n.v, n.u, n.fired))
# Remember time and value of the first output spikes from each neuron.
if t0 is None and output[0] > 0:
t0, I0, v0, u0, f0 = neuron_data[net.outputs[0]][-2]
if t1 is None and output[1] > 0:
t1, I1, v1, u1, f0 = neuron_data[net.outputs[1]][-2]
response = compute_output(t0, t1)
sum_square_error += (response - odata) ** 2
#print(genome)
#visualize.plot_spikes(neuron_data[net.outputs[0]], False)
#visualize.plot_spikes(neuron_data[net.outputs[1]], True)
simulated.append((idata, odata, t0, t1, v0, v1, neuron_data))
return sum_square_error, simulated
def eval_genome(genome, config):
sum_square_error, simulated = simulate(genome, config)
return 10.0 - sum_square_error
def eval_genomes(genomes, config):
for genome_id, genome in genomes:
genome.fitness = eval_genome(genome, config)
def run(config_path):
# Load the config file, which is assumed to live in
# the same directory as this script.
config = neat.Config(neat.iznn.IZGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_path)
# For this network, we use two output neurons and use the difference between
# the "time to first spike" to determine the network response. There are
# probably a great many different choices one could make for an output encoding,
# and this choice may not be the best for tackling a real problem.
config.output_nodes = 2
pop = neat.population.Population(config)
# Add a stdout reporter to show progress in the terminal.
pop.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
pop.add_reporter(stats)
pe = neat.ParallelEvaluator(multiprocessing.cpu_count(), eval_genome)
winner = pop.run(pe.evaluate, 3000)
# Display the winning genome.
print('\nBest genome:\n{!s}'.format(winner))
node_names = {-1:'A', -2: 'B'}
visualize.draw_net(config, winner, True, node_names=node_names)
visualize.plot_stats(stats, ylog=False, view=True)
visualize.plot_species(stats, view=True)
# Show output of the most fit genome against training data, and create
# a plot of the traces out to the max time for each set of inputs.
print('\nBest network output:')
plt.figure(figsize=(12, 12))
sum_square_error, simulated = simulate(winner, config)
for r, (inputData, outputData, t0, t1, v0, v1, neuron_data) in enumerate(simulated):
response = compute_output(t0, t1)
print("{0!r} expected {1:.3f} got {2:.3f}".format(inputData, outputData, response))
axes = plt.subplot(4, 1, r + 1)
plt.title("Traces for XOR input {{{0:.1f}, {1:.1f}}}".format(*inputData), fontsize=12)
for i, s in neuron_data.items():
if i in [0, 1]:
t, I, v, u, fired = zip(*s)
plt.plot(t, v, "-", label="neuron {0:d}".format(i))
# Circle the first peak of each output.
circle0 = patches.Ellipse((t0, v0), 1.0, 10.0, color='r', fill=False)
circle1 = patches.Ellipse((t1, v1), 1.0, 10.0, color='r', fill=False)
axes.add_artist(circle0)
axes.add_artist(circle1)
plt.ylabel("Potential (mv)", fontsize=10)
plt.ylim(-100, 50)
plt.tick_params(labelsize=8)
plt.grid()
plt.xlabel("Time (in ms)", fontsize=10)
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
plt.savefig("traces.png", dpi=90)
plt.show()
if __name__ == '__main__':
local_dir = os.path.dirname(__file__)
run(os.path.join(local_dir, 'config-spiking'))
| {
"repo_name": "CodeReclaimers/neat-python",
"path": "examples/xor/evolve-spiking.py",
"copies": "1",
"size": "5782",
"license": "bsd-3-clause",
"hash": -1212478639902887200,
"line_mean": 35.3647798742,
"line_max": 100,
"alpha_frac": 0.6240055344,
"autogenerated": false,
"ratio": 3.349942062572422,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9466854705101662,
"avg_score": 0.0014185783741521379,
"num_lines": 159
} |
""" 2-input XOR example using Izhikevich's spiking neuron model. """
from __future__ import print_function
import os
from matplotlib import pylab as plt
from matplotlib import patches
import neat
import visualize
# Network inputs and expected outputs.
xor_inputs = ((0, 0), (0, 1), (1, 0), (1, 1))
xor_outputs = (0, 1, 1, 0)
# Maximum amount of simulated time (in milliseconds) to wait for the network to produce an output.
max_time_msec = 20.0
def compute_output(t0, t1):
'''Compute the network's output based on the "time to first spike" of the two output neurons.'''
if t0 is None or t1 is None:
# If neither of the output neurons fired within the allotted time,
# give a response which produces a large error.
return -1.0
else:
# If the output neurons fire within 1.0 milliseconds of each other,
# the output is 1, and if they fire more than 11 milliseconds apart,
# the output is 0, with linear interpolation between 1 and 11 milliseconds.
response = 1.1 - 0.1 * abs(t0 - t1)
return max(0.0, min(1.0, response))
def simulate(genome, config):
# Create a network of "fast spiking" Izhikevich neurons.
net = neat.iznn.IZNN.create(genome, config)
dt = net.get_time_step_msec()
sum_square_error = 0.0
simulated = []
for idata, odata in zip(xor_inputs, xor_outputs):
neuron_data = {}
for i, n in net.neurons.items():
neuron_data[i] = []
# Reset the network, apply the XOR inputs, and run for the maximum allowed time.
net.reset()
net.set_inputs(idata)
t0 = None
t1 = None
v0 = None
v1 = None
num_steps = int(max_time_msec / dt)
net.set_inputs(idata)
for j in range(num_steps):
t = dt * j
output = net.advance(dt)
# Capture the time and neuron membrane potential for later use if desired.
for i, n in net.neurons.items():
neuron_data[i].append((t, n.current, n.v, n.u, n.fired))
# Remember time and value of the first output spikes from each neuron.
if t0 is None and output[0] > 0:
t0, I0, v0, u0, f0 = neuron_data[net.outputs[0]][-2]
if t1 is None and output[1] > 0:
t1, I1, v1, u1, f0 = neuron_data[net.outputs[1]][-2]
response = compute_output(t0, t1)
sum_square_error += (response - odata) ** 2
#print(genome)
#visualize.plot_spikes(neuron_data[net.outputs[0]], False)
#visualize.plot_spikes(neuron_data[net.outputs[1]], True)
simulated.append((idata, odata, t0, t1, v0, v1, neuron_data))
return sum_square_error, simulated
def eval_genome(genome, config):
sum_square_error, simulated = simulate(genome, config)
return 10.0 - sum_square_error
def eval_genomes(genomes, config):
for genome_id, genome in genomes:
genome.fitness = eval_genome(genome, config)
def run(config_path):
# Load the config file, which is assumed to live in
# the same directory as this script.
config = neat.Config(neat.iznn.IZGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_path)
# For this network, we use two output neurons and use the difference between
# the "time to first spike" to determine the network response. There are
# probably a great many different choices one could make for an output encoding,
# and this choice may not be the best for tackling a real problem.
config.output_nodes = 2
pop = neat.population.Population(config)
# Add a stdout reporter to show progress in the terminal.
pop.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
pop.add_reporter(stats)
if 0:
winner = pop.run(eval_genomes, 3000)
else:
pe = neat.ParallelEvaluator(6, eval_genome)
winner = pop.run(pe.evaluate, 3000)
# Display the winning genome.
print('\nBest genome:\n{!s}'.format(winner))
node_names = {-1:'A', -2: 'B'}
visualize.draw_net(config, winner, True, node_names=node_names)
visualize.plot_stats(stats, ylog=False, view=True)
visualize.plot_species(stats, view=True)
# Show output of the most fit genome against training data, and create
# a plot of the traces out to the max time for each set of inputs.
print('\nBest network output:')
plt.figure(figsize=(12, 12))
sum_square_error, simulated = simulate(winner, config)
for r, (inputData, outputData, t0, t1, v0, v1, neuron_data) in enumerate(simulated):
response = compute_output(t0, t1)
print("{0!r} expected {1:.3f} got {2:.3f}".format(inputData, outputData, response))
axes = plt.subplot(4, 1, r + 1)
plt.title("Traces for XOR input {{{0:.1f}, {1:.1f}}}".format(*inputData), fontsize=12)
for i, s in neuron_data.items():
if i in [0, 1]:
t, I, v, u, fired = zip(*s)
plt.plot(t, v, "-", label="neuron {0:d}".format(i))
# Circle the first peak of each output.
circle0 = patches.Ellipse((t0, v0), 1.0, 10.0, color='r', fill=False)
circle1 = patches.Ellipse((t1, v1), 1.0, 10.0, color='r', fill=False)
axes.add_artist(circle0)
axes.add_artist(circle1)
plt.ylabel("Potential (mv)", fontsize=10)
plt.ylim(-100, 50)
plt.tick_params(labelsize=8)
plt.grid()
plt.xlabel("Time (in ms)", fontsize=10)
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
plt.savefig("traces.png", dpi=90)
plt.show()
if __name__ == '__main__':
local_dir = os.path.dirname(__file__)
run(os.path.join(local_dir, 'config-spiking'))
| {
"repo_name": "drallensmith/neat-python",
"path": "examples/xor/evolve-spiking.py",
"copies": "1",
"size": "5804",
"license": "bsd-3-clause",
"hash": -8673809357334075000,
"line_mean": 35.5031446541,
"line_max": 100,
"alpha_frac": 0.6200895934,
"autogenerated": false,
"ratio": 3.3375503162737203,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.445763990967372,
"avg_score": null,
"num_lines": null
} |
## 2. Introduction to the Data ##
import csv
nfl_suspensions = list(csv.reader(open('nfl_suspensions_data.csv','r')))[1:]
#nfl_suspensions = [item[1:] for item in nfl_suspensions]
years = {}
for item in nfl_suspensions:
if item[5] in years:
years[item[5]] +=1
else:
years[item[5]] = 1
print(years)
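# A more compact equivalent (sketch, not part of the original exercise):
# collections.Counter builds the same per-year tally in one pass.
from collections import Counter
years_alt = Counter(item[5] for item in nfl_suspensions)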
## 3. Unique Values ##
unique_teams,unique_games= [],[]
for item in nfl_suspensions:
unique_teams.append(item[1])
unique_games.append(item[2])
unique_teams = set(unique_teams)
unique_games = set(unique_games)
print(unique_teams)
print(unique_games)
## 4. Suspension Class ##
class Suspension():
def __init__(self,data):
self.name = data[0]
self.team = data[1]
self.games = data[2]
self.year = data[5]
print(nfl_suspensions[2])
third_suspension = Suspension(nfl_suspensions[2])
## 5. Tweaking the Suspension Class ##
class Suspension():
def __init__(self,row):
self.name = row[0]
self.team = row[1]
self.games = row[2]
try:
self.year = int(row[5])
except Exception:
self.year = 0
def get_year(self):
return(self.year)
missing_year = Suspension(nfl_suspensions[22])
twenty_third_year = missing_year.get_year() | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Python Programming Intermediate/Challenge_ Modules, Classes, Error Handling, and List Comprehensions-186.py",
"copies": "1",
"size": "1273",
"license": "mit",
"hash": 2018127548106726100,
"line_mean": 23.9803921569,
"line_max": 76,
"alpha_frac": 0.6135113904,
"autogenerated": false,
"ratio": 2.873589164785553,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3987100555185553,
"avg_score": null,
"num_lines": null
} |
## 2. Introduction to the Data ##
import pandas as pd
all_ages = pd.read_csv('all-ages.csv')
recent_grads = pd.read_csv('recent-grads.csv')
print(all_ages.head())
print(recent_grads.head())
## 3. Summarizing Major Categories ##
# Unique values in Major_category column.
print(all_ages['Major_category'].unique())
aa_cat_counts = dict()
rg_cat_counts = dict()
def cat_summary(data,category):
subset = data[data['Major_category']==category]
total = subset['Total'].sum()
return(total)
for cat in all_ages['Major_category'].unique():
aa_cat_counts[cat] = cat_summary(all_ages,cat)
for cat in recent_grads['Major_category'].unique():
rg_cat_counts[cat] = cat_summary(recent_grads,cat)
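# Equivalent sketch using pandas groupby (not part of the original exercise):
# summing 'Total' per Major_category gives the same dictionary in one call,
# and the same pattern works for recent_grads.
aa_cat_counts_alt = all_ages.groupby('Major_category')['Total'].sum().to_dict()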
## 4. Low-Wage Job Rates ##
low_wage_percent = 0.0
low_wage_percent = recent_grads['Low_wage_jobs'].sum()/recent_grads['Total'].sum()
## 5. Comparing Data Sets ##
# All majors, common to both DataFrames
majors = recent_grads['Major'].unique()
rg_lower_count = 0
for item in majors:
grad_subset = recent_grads[recent_grads['Major']==item]
all_subset = all_ages[all_ages['Major']==item]
if(grad_subset['Unemployment_rate'].values[0] < all_subset['Unemployment_rate'].values[0]):
rg_lower_count +=1
print(rg_lower_count) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Data Analysis with Pandas Intermediate/Challenge_ Summarizing Data-112.py",
"copies": "1",
"size": "1259",
"license": "mit",
"hash": 3571526071365128000,
"line_mean": 29.7317073171,
"line_max": 95,
"alpha_frac": 0.6783161239,
"autogenerated": false,
"ratio": 2.9693396226415096,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.414765574654151,
"avg_score": null,
"num_lines": null
} |
## 2. Introduction to the data ##
import pandas as pd
import matplotlib.pyplot as plt
admissions = pd.read_csv('admissions.csv')
plt.scatter(admissions['gpa'],admissions['admit'])
plt.show()
## 4. Logit function ##
import numpy as np
# Logit Function
def logit(x):
# np.exp(x) raises e to the power x, i.e. e^x. e ~= 2.71828
return np.exp(x) / (1 + np.exp(x))
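# Quick sanity check (sketch, not part of the original exercise): the logistic
# function is 0.5 at x = 0 and approaches 0 and 1 in the tails.
print(logit(0.0)) # 0.5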
# Generate 50 real values, evenly spaced, between -6 and 6.
x = np.linspace(-6,6,50, dtype=float)
# Transform each number in x using the logit function.
y = logit(x)
# Plot the resulting data.
plt.plot(x, y)
plt.ylabel("Probability")
plt.show()
## 5. Training a logistic regression model ##
from sklearn.linear_model import LinearRegression, LogisticRegression
linear_model = LinearRegression()
linear_model.fit(admissions[["gpa"]], admissions["admit"])
logistic_model = LogisticRegression()
logistic_model.fit(admissions[['gpa']],admissions['admit'])
## 6. Plotting probabilities ##
logistic_model = LogisticRegression()
logistic_model.fit(admissions[["gpa"]], admissions["admit"])
pred_probs = logistic_model.predict_proba(admissions[['gpa']])
# column 1 of predict_proba holds the probability of the positive class (admit = 1)
plt.scatter(admissions[['gpa']],pred_probs[:,1])
## 7. Predict labels ##
logistic_model = LogisticRegression()
logistic_model.fit(admissions[["gpa"]], admissions["admit"])
fitted_labels = logistic_model.predict(admissions[["gpa"]])
plt.scatter(admissions["gpa"], fitted_labels)
print(fitted_labels[:10]) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Machine learning Beginner/Logistic regression-56.py",
"copies": "1",
"size": "1444",
"license": "mit",
"hash": 3952839229870808000,
"line_mean": 27.9,
"line_max": 71,
"alpha_frac": 0.7174515235,
"autogenerated": false,
"ratio": 3.159737417943107,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4377188941443107,
"avg_score": null,
"num_lines": null
} |
## 2. Introduction To The Data ##
import pandas as pd
import matplotlib.pyplot as plt
women_degrees = pd.read_csv('percent-bachelors-degrees-women-usa.csv')
plt.plot(women_degrees['Year'],women_degrees['Biology'])
plt.show()
## 3. Visualizing The Gender Gap ##
plt.plot(women_degrees['Year'],women_degrees['Biology'], c = 'blue', label = 'Women')
plt.plot(women_degrees['Year'],100 - women_degrees['Biology'], c = 'green', label = 'Men')
plt.title('Percentage of Biology Degrees Awarded By Gender')
plt.legend(loc = 'upper right')
plt.show()
## 5. Hiding Tick Marks ##
plt.plot(women_degrees['Year'],women_degrees['Biology'], c = 'blue', label = 'Women')
plt.plot(women_degrees['Year'],100 - women_degrees['Biology'], c = 'green', label = 'Men')
plt.title('Percentage of Biology Degrees Awarded By Gender')
plt.tick_params(bottom = 'off', top = 'off', left = 'off', right = 'off')
plt.legend(loc = 'upper right')
plt.show()
## 6. Hiding Spines ##
fig, ax = plt.subplots()
ax.plot(women_degrees['Year'], women_degrees['Biology'], label='Women')
ax.plot(women_degrees['Year'], 100-women_degrees['Biology'], label='Men')
ax.tick_params(bottom="off", top="off", left="off", right="off")
# Add your code here
for key,spine in ax.spines.items():
spine.set_visible(False)
ax.legend(loc='upper right')
ax.set_title('Percentage of Biology Degrees Awarded By Gender')
plt.show()
## 7. Comparing Gender Gap Across Degree Categories ##
major_cats = ['Biology', 'Computer Science', 'Engineering', 'Math and Statistics']
fig = plt.figure(figsize=(12, 12))
for sp in range(0,4):
ax = fig.add_subplot(2,2,sp+1)
ax.plot(women_degrees['Year'], women_degrees[major_cats[sp]], c='blue', label='Women')
ax.plot(women_degrees['Year'], 100-women_degrees[major_cats[sp]], c='green', label='Men')
# Add your code here.
ax.set_xlim(1968,2011)
ax.set_ylim(0,100)
ax.set_title(major_cats[sp])
ax.tick_params(bottom="off", top="off", left="off", right="off")
for key,spine in ax.spines.items():
spine.set_visible(False)
# Calling pyplot.legend() here will add the legend to the last subplot that was created.
plt.legend(loc='upper right')
plt.show() | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Storytelling Data Visualization/Improving Plot Aesthetics-220.py",
"copies": "1",
"size": "2180",
"license": "mit",
"hash": 4422019673359032000,
"line_mean": 35.9661016949,
"line_max": 93,
"alpha_frac": 0.6811926606,
"autogenerated": false,
"ratio": 2.725,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8816468628660274,
"avg_score": 0.017944806387945284,
"num_lines": 59
} |
## 2. Introduction to the data ##
import pandas as pd
reviews = pd.read_csv('fandango_scores.csv')
cols = ['FILM', 'RT_user_norm', 'Metacritic_user_nom', 'IMDB_norm', 'Fandango_Ratingvalue', 'Fandango_Stars']
norm_reviews = reviews[cols]
print(norm_reviews[:1])
## 4. Creating Bars ##
import matplotlib.pyplot as plt
from numpy import arange
num_cols = ['RT_user_norm', 'Metacritic_user_nom', 'IMDB_norm', 'Fandango_Ratingvalue', 'Fandango_Stars']
# .ix has been removed from pandas; .loc does the same label-based lookup here.
bar_heights = norm_reviews.loc[0, num_cols].values
bar_positions = arange(5) + 0.75
fig, ax = plt.subplots()
ax.bar(bar_positions, bar_heights, 0.5)
plt.show()
## 5. Aligning Axis Ticks And Labels ##
num_cols = ['RT_user_norm', 'Metacritic_user_nom', 'IMDB_norm', 'Fandango_Ratingvalue', 'Fandango_Stars']
bar_heights = norm_reviews.loc[0, num_cols].values
bar_positions = arange(5) + 0.75
tick_positions = range(1,6)
fig, ax = plt.subplots()
ax.bar(bar_positions, bar_heights, 0.5)
ax.set_xticks(tick_positions)
ax.set_xticklabels(num_cols, rotation=90)
ax.set_xlabel('Rating Source')
ax.set_ylabel('Average Rating')
ax.set_title('Average User Rating For Avengers: Age of Ultron (2015)')
plt.show()
## 6. Horizontal Bar Plot ##
import matplotlib.pyplot as plt
from numpy import arange
num_cols = ['RT_user_norm', 'Metacritic_user_nom', 'IMDB_norm', 'Fandango_Ratingvalue', 'Fandango_Stars']
bar_widths = norm_reviews.loc[0, num_cols].values
bar_positions = arange(5) + 0.75
tick_positions = range(1,6)
fig, ax = plt.subplots()
ax.barh(bar_positions,bar_widths,0.5)
ax.set_yticks(tick_positions)
ax.set_yticklabels(num_cols)
plt.xlabel('Average Rating')
plt.ylabel('Rating Source')
plt.title('Average User Rating for Avengers: Age of Ultron (2015)')
plt.show()
## 7. Scatter plot ##
fig, ax = plt.subplots()
ax.scatter(norm_reviews['Fandango_Ratingvalue'], norm_reviews['RT_user_norm'])
ax.set_xlabel('Fandango')
ax.set_ylabel('Rotten Tomatoes')
plt.show()
## 8. Switching axes ##
fig = plt.figure(figsize=(5,10))
ax1 = fig.add_subplot(2,1,1)
ax2 = fig.add_subplot(2,1,2)
ax1.scatter(norm_reviews['Fandango_Ratingvalue'], norm_reviews['RT_user_norm'])
ax1.set_xlabel('Fandango')
ax1.set_ylabel('Rotten Tomatoes')
ax2.scatter(norm_reviews['RT_user_norm'], norm_reviews['Fandango_Ratingvalue'])
ax2.set_xlabel('Rotten Tomatoes')
ax2.set_ylabel('Fandango')
plt.show()
## 9. Benchmarking correlation ##
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(5,10))
ax1 = fig.add_subplot(3,1,1)
ax2 = fig.add_subplot(3,1,2)
ax3 = fig.add_subplot(3,1,3)
ax1.scatter(norm_reviews['Fandango_Ratingvalue'], norm_reviews['RT_user_norm'])
ax1.set_xlabel('Fandango')
ax1.set_ylabel('Rotten Tomatoes')
ax1.set_xlim(0,5)
ax1.set_ylim(0,5)
ax2.scatter(norm_reviews['Fandango_Ratingvalue'], norm_reviews['Metacritic_user_nom'])
ax2.set_xlabel('Fandango')
ax2.set_ylabel('Metacritic')
ax2.set_xlim(0,5)
ax2.set_ylim(0,5)
ax3.scatter(norm_reviews['Fandango_Ratingvalue'], norm_reviews['IMDB_norm'])
ax3.set_xlabel('Fandango')
ax3.set_ylabel('IMDB')
ax3.set_xlim(0,5)
ax3.set_ylim(0,5)
plt.show() | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Exploratory Data Visualization/Bar Plots And Scatter Plots-217.py",
"copies": "1",
"size": "3043",
"license": "mit",
"hash": 1026446975846338700,
"line_mean": 29.44,
"line_max": 109,
"alpha_frac": 0.7183700296,
"autogenerated": false,
"ratio": 2.5295095594347465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37478795890347466,
"avg_score": null,
"num_lines": null
} |
## 2. Lists of lists ##
import csv
world_alcohol = list(csv.reader(open('world_alcohol.csv','r')))
years= [int(item[0]) for item in world_alcohol[1:]]
total = sum(years)
avg_year = total/len(years)
## 4. Using NumPy ##
import numpy
world_alcohol = numpy.genfromtxt("world_alcohol.csv", delimiter=",")
print(type(world_alcohol))
## 5. Creating arrays ##
vector = numpy.array([10,20,30])
matrix = numpy.array([[5, 10, 15], [20, 25, 30], [35, 40, 45]])
## 6. Array shape ##
vector = numpy.array([10, 20, 30])
matrix = numpy.array([[5, 10, 15], [20, 25, 30], [35, 40, 45]])
vector_shape = vector.shape
matrix_shape = matrix.shape
## 7. Data types ##
world_alcohol_dtype = world_alcohol.dtype
## 9. Reading in the data properly ##
world_alcohol = numpy.genfromtxt('world_alcohol.csv',skip_header = 1,dtype = 'U75',delimiter = ',')
print(world_alcohol)
## 10. Indexing arrays ##
uruguay_other_1986 = world_alcohol[1,4]
third_country = world_alcohol[2,2]
## 11. Slicing arrays ##
countries = world_alcohol[:,2]
alcohol_consumption= world_alcohol[:,4]
## 12. Slicing one dimension ##
first_two_columns = world_alcohol[:,:2]
first_ten_years= world_alcohol[:10,0]
first_ten_rows= world_alcohol[:10,:]
## 13. Slicing arrays ##
first_twenty_regions = world_alcohol[:20,1:3] | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Data Analysis with Pandas Intermediate/Getting started with NumPy-6.py",
"copies": "1",
"size": "1281",
"license": "mit",
"hash": 8424053905647226000,
"line_mean": 22.7407407407,
"line_max": 99,
"alpha_frac": 0.669008587,
"autogenerated": false,
"ratio": 2.6036585365853657,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3772667123585366,
"avg_score": null,
"num_lines": null
} |
## 2. Looking at the data ##
import pandas as pd
submissions = pd.read_csv("sel_hn_stories.csv")
submissions.columns = ["submission_time", "upvotes", "url", "headline"]
submissions = submissions.dropna()
## 3. Tokenization ##
tokenized_headlines = []
for value in submissions["headline"]:
tokenized_headlines.append(value.split(" "))
## 4. Preprocessing ##
punctuation = [",", ":", ";", ".", "'", '"', "’", "?", "/", "-", "+", "&", "(", ")"]
clean_tokenized = []
for item in tokenized_headlines:
tokens = []
for token in item:
token = token.lower()
for punc in punctuation:
token = token.replace(punc, "")
tokens.append(token)
clean_tokenized.append(tokens)
## 5. Assembling a matrix ##
import numpy as np
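# The loop below keeps only tokens that occur more than once in the corpus:
# single_tokens records a token's first appearance, and a later appearance
# promotes it to unique_tokens (the columns of the count matrix).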
unique_tokens = []
single_tokens = []
for tokens in clean_tokenized:
for token in tokens:
if token not in single_tokens:
single_tokens.append(token)
elif token not in unique_tokens:
unique_tokens.append(token)
counts = pd.DataFrame(0, index = np.arange(len(clean_tokenized)), columns = unique_tokens)
## 6. Counting tokens ##
# clean_tokenized and counts have been loaded in.
for i, item in enumerate(clean_tokenized):
for token in item:
if token in unique_tokens:
counts.loc[i, token] += 1  # .loc avoids pandas chained-assignment issues with .iloc[i][token]
## 7. Removing extraneous columns ##
# clean_tokenized and counts have been loaded in
word_counts = counts.sum(axis=0)
counts = counts.loc[:,(word_counts >= 5) & (word_counts <= 100)]
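# For comparison (sketch, not used below): scikit-learn's CountVectorizer builds a
# similar bag-of-words matrix directly from the raw headlines; min_df/max_df play
# roughly the same role as the manual 5-to-100 occurrence filter above.
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(min_df=5, max_df=100)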
## 8. Splitting the data ##
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(counts, submissions["upvotes"], test_size=0.2, random_state=1)
## 9. Making predictions ##
from sklearn.linear_model import LinearRegression
clf = LinearRegression()
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
## 10. Calculating error ##
mse = sum((y_test - predictions) ** 2) / len(predictions) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Natural Language Processing/Introduction to natural language processing-158.py",
"copies": "1",
"size": "1972",
"license": "mit",
"hash": -7699341085713866000,
"line_mean": 25.6351351351,
"line_max": 114,
"alpha_frac": 0.6487309645,
"autogenerated": false,
"ratio": 3.3389830508474576,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9451798326615274,
"avg_score": 0.0071831377464367605,
"num_lines": 74
} |
## 2. Looking at the data ##
# We can use the pandas library in python to read in the csv file.
# This creates a pandas dataframe and assigns it to the titanic variable.
import pandas
titanic = pandas.read_csv("titanic_train.csv")
# Print the first 5 rows of the dataframe.
print(titanic.head(5))
print(titanic.describe())
## 3. Missing data ##
# The titanic variable is available here.
titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median())
## 5. Converting the Sex column ##
# Find all the unique genders -- the column appears to contain only male and female.
print(titanic["Sex"].unique())
# Replace all the occurrences of male with the number 0.
titanic.loc[titanic["Sex"] == "male", "Sex"] = 0
titanic.loc[titanic["Sex"] == "female", "Sex"] = 1
## 6. Converting the Embarked column ##
# Find all the unique values for "Embarked".
print(titanic["Embarked"].unique())
titanic["Embarked"] = titanic["Embarked"].fillna('S')
titanic.loc[titanic["Embarked"] == "S", "Embarked"] = 0
titanic.loc[titanic["Embarked"] == "C", "Embarked"] = 1
titanic.loc[titanic["Embarked"] == "Q", "Embarked"] = 2
## 9. Making predictions ##
# Import the linear regression class
from sklearn.linear_model import LinearRegression
# Sklearn also has a helper that makes it easy to do cross validation
from sklearn.cross_validation import KFold
# The columns we'll use to predict the target
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
# Initialize our algorithm class
alg = LinearRegression()
# Generate cross validation folds for the titanic dataset. It returns the row indices corresponding to train and test.
# We set random_state to ensure we get the same splits every time we run this.
kf = KFold(titanic.shape[0], n_folds=3, random_state=1)
predictions = []
for train, test in kf:
# The predictors we're using to train the algorithm. Note how we only take the rows in the train folds.
train_predictors = (titanic[predictors].iloc[train,:])
# The target we're using to train the algorithm.
train_target = titanic["Survived"].iloc[train]
# Training the algorithm using the predictors and target.
alg.fit(train_predictors, train_target)
# We can now make predictions on the test fold
test_predictions = alg.predict(titanic[predictors].iloc[test,:])
predictions.append(test_predictions)
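# Note (not part of the original lesson): newer scikit-learn exposes
# sklearn.model_selection.cross_val_predict, which produces equivalent
# out-of-fold predictions in a single call.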
## 10. Evaluating error ##
import numpy as np
# The predictions are in three separate numpy arrays. Concatenate them into one.
# We concatenate them on axis 0, as they only have one axis.
predictions = np.concatenate(predictions, axis=0)
# Map predictions to outcomes (only possible outcomes are 1 and 0)
predictions[predictions > .5] = 1
predictions[predictions <=.5] = 0
# Accuracy is the fraction of predictions that match the known outcomes.
accuracy = sum(predictions == titanic["Survived"]) / len(predictions)
## 11. Logistic regression ##
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression
# Initialize our algorithm
alg = LogisticRegression(random_state=1)
# Compute the accuracy score for all the cross validation folds. (much simpler than what we did before!)
scores = cross_validation.cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=3)
# Take the mean of the scores (because we have one for each fold)
print(scores.mean())
## 12. Processing the test set ##
titanic_test = pandas.read_csv("titanic_test.csv")
titanic_test["Age"] = titanic_test["Age"].fillna(titanic["Age"].median())
titanic_test["Fare"] = titanic_test["Fare"].fillna(titanic_test["Fare"].median())
titanic_test.loc[titanic_test["Sex"] == "male", "Sex"] = 0
titanic_test.loc[titanic_test["Sex"] == "female", "Sex"] = 1
titanic_test["Embarked"] = titanic_test["Embarked"].fillna("S")
titanic_test.loc[titanic_test["Embarked"] == "S", "Embarked"] = 0
titanic_test.loc[titanic_test["Embarked"] == "C", "Embarked"] = 1
titanic_test.loc[titanic_test["Embarked"] == "Q", "Embarked"] = 2
## 13. Generating a submission file ##
# Initialize the algorithm class
alg = LogisticRegression(random_state=1)
# Train the algorithm using all the training data
alg.fit(titanic[predictors], titanic["Survived"])
# Make predictions using the test set.
predictions = alg.predict(titanic_test[predictors])
# Create a new dataframe with only the columns Kaggle wants from the dataset.
submission = pandas.DataFrame({
"PassengerId": titanic_test["PassengerId"],
"Survived": predictions
}) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Kaggle Competitions/Getting started with Kaggle-73.py",
"copies": "1",
"size": "4368",
"license": "mit",
"hash": -5394495463459755000,
"line_mean": 37.3245614035,
"line_max": 118,
"alpha_frac": 0.7184065934,
"autogenerated": false,
"ratio": 3.111111111111111,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4329517704511111,
"avg_score": null,
"num_lines": null
} |
## 2. Mutability ##
class Counter():
def __init__(self):
self.count = 0
def increment(self):
self.count += 1
def get_count(self):
return self.count
def count_up_100000(counter):
for i in range(100000):
counter.increment()
class Counter():
def __init__(self):
self.count = 0
def increment(self):
self.count += 1
def get_count(self):
return self.count
def count_up_100000(counter):
for i in range(100000):
counter.increment()
counter = Counter()
initial_count = counter.get_count()
count_up_100000(counter)
final_count = counter.get_count()
## 3. Multithreading ##
import threading
counter = Counter()
count_thread = threading.Thread(target=count_up_100000, args=[counter])
count_thread.start()
count_thread.join()
counter = Counter()
count_thread = threading.Thread(target=count_up_100000, args=[counter])
count_thread.start()
count_thread.join()
after_join = counter.get_count()
print(after_join)
## 4. Determinism ##
import threading
def conduct_trial():
counter = Counter()
count_thread = threading.Thread(target=count_up_100000, args=[counter])
count_thread.start()
# Take measurement here
count_thread.join()
def conduct_trial():
counter = Counter()
count_thread = threading.Thread(target=count_up_100000, args=[counter])
count_thread.start()
intermediate_value = counter.get_count()
count_thread.join()
return intermediate_value
trial1 = conduct_trial()
print(trial1)
trial2 = conduct_trial()
print(trial2)
trial3 = conduct_trial()
print(trial3)
## 5. Enforcing determinism ##
import threading
def count_up_100000(counter, lock):
for i in range(10000):
for i in range(10):
counter.increment()
def conduct_trial():
counter = Counter()
lock = threading.Lock()
count_thread = threading.Thread(target=count_up_100000, args=[counter, lock])
count_thread.start()
intermediate_value = counter.get_count()
count_thread.join()
return intermediate_value
trial1 = conduct_trial()
print(trial1)
trial2 = conduct_trial()
print(trial2)
trial3 = conduct_trial()
print(trial3)
def count_up_100000(counter, lock):
for i in range(10000):
lock.acquire()
for i in range(10):
counter.increment()
lock.release()
def conduct_trial():
counter = Counter()
lock = threading.Lock()
count_thread = threading.Thread(target=count_up_100000, args=[counter, lock])
count_thread.start()
lock.acquire()
intermediate_value = counter.get_count()
lock.release()
count_thread.join()
return intermediate_value
trial1 = conduct_trial()
print(trial1)
trial2 = conduct_trial()
print(trial2)
trial3 = conduct_trial()
print(trial3)
## 6. Counting twice ##
def count_up_100000(counter):
for i in range(100000):
counter.increment()
counter = Counter()
def count_up_100000(counter):
for i in range(100000):
counter.increment()
counter = Counter()
count_up_100000(counter)
count_up_100000(counter)
final_count = counter.get_count()
print(final_count)
## 7. Splitting our count into two threads. ##
import threading
def count_up_100000(counter):
for i in range(100000):
counter.increment()
def count_up_100000(counter):
for i in range(100000):
counter.increment()
def conduct_trial():
counter = Counter()
count_thread1 = threading.Thread(target=count_up_100000, args=[counter])
count_thread2 = threading.Thread(target=count_up_100000, args=[counter])
count_thread1.start()
count_thread2.start()
# Join the threads here
count_thread1.join()
count_thread2.join()
final_count = counter.get_count()
return final_count
trial1 = conduct_trial()
print(trial1)
trial2 = conduct_trial()
print(trial2)
trial3 = conduct_trial()
print(trial3)
## 8. Atomicity ##
import threading
class Counter():
def __init__(self):
self.count = 0
self.lock = threading.Lock()
def increment(self):
self.lock.acquire()
old_count = self.count
self.count = old_count + 1
self.lock.release()
def get_count(self):
return self.count
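# Equivalent sketch (not part of the original exercise): increment() could use the
# lock as a context manager --
# with self.lock:
# self.count += 1
# -- which acquires and releases the lock automatically, even if the body raises.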
def count_up_100000(counter):
for i in range(100000):
counter.increment()
def conduct_trial():
counter = Counter()
count_thread1 = threading.Thread(target=count_up_100000, args=[counter])
count_thread2 = threading.Thread(target=count_up_100000, args=[counter])
count_thread1.start()
count_thread2.start()
count_thread1.join()
count_thread2.join()
final_count = counter.get_count()
return final_count
trial1 = conduct_trial()
print(trial1)
trial2 = conduct_trial()
print(trial2)
trial3 = conduct_trial()
print(trial3) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Python Programming Advanced/Parallel Processing-171.py",
"copies": "1",
"size": "4804",
"license": "mit",
"hash": 894002071476223900,
"line_mean": 21.6650943396,
"line_max": 81,
"alpha_frac": 0.664029975,
"autogenerated": false,
"ratio": 3.443727598566308,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4607757573566308,
"avg_score": null,
"num_lines": null
} |
# 2
# 5 3
# 1 2 4 5 6
# 5 3
# 1 2 4 5 7
n = 5
K = 3
dp = [[0 for i in xrange(2**n)] for k in xrange(K+1)]
a = [1,2,4,5,6]
# a = [1,2,4,5,7]
x = int(sum(a) / K)
dp[0][0] = 1
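# Sketch of the idea (added comment): dp[k][bitmask] == 1 means the elements
# selected in `bitmask` fill k complete groups of sum x plus one partial group.
# Elements are added one at a time; a group is closed whenever the running
# partial sum reaches exactly x, and dp[K][all elements selected] == 1 means the
# array can be split into K equal-sum groups.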
for k in xrange(K):
for bitmask in xrange(2**n):
if not dp[k][bitmask]:
continue
s = 0
for i in xrange(n):
if (bitmask & (1 << i)):
s += a[i]
s -= (k * x)
for i in xrange(n):
if (bitmask & (1 << i)):
continue
new_mask = bitmask | (1 << i)
if (s + a[i]) == x:
dp[k + 1][new_mask] = 1
elif (s + a[i]) < x:
dp[k][new_mask] = 1
if dp[K][2**n - 1] == 1:
print "yes"
else:
print "no"
# Try running this locally.
def send_simple_message():
import requests
return requests.post(
"https://api.mailgun.net/v2/samples.mailgun.org/messages",
auth=("api", "key-3ax6xnjp29jd6fds4gc373sgvjxteol0"),
data={"from": "sanskar.com",
"to": ["parin2092@gmail.com"],
"subject": "Hello",
"text": "Testing some Mailgun awesomeness!"})
print send_simple_message() | {
"repo_name": "parinck/cook",
"path": "codechef/sanskar.py",
"copies": "2",
"size": "1181",
"license": "mit",
"hash": -914744041901318400,
"line_mean": 21.3018867925,
"line_max": 66,
"alpha_frac": 0.4580863675,
"autogenerated": false,
"ratio": 2.8119047619047617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.924291216364655,
"avg_score": 0.005415793151642208,
"num_lines": 53
} |
"""2nd database upgrade
Revision ID: d440fe2187fa
Revises: None
Create Date: 2016-06-05 21:55:18.797000
"""
# revision identifiers, used by Alembic.
revision = 'd440fe2187fa'
down_revision = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('activated', sa.Boolean(), nullable=True))
op.add_column('users', sa.Column('password_hash', sa.String(length=128), nullable=True))
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=False)
op.drop_index('email', table_name='users')
op.drop_column('users', 'password')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('password', mysql.VARCHAR(collation=u'utf8_unicode_ci', length=64), nullable=True))
op.create_index('email', 'users', ['email'], unique=True)
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_column('users', 'password_hash')
op.drop_column('users', 'activated')
### end Alembic commands ###
| {
"repo_name": "zlasd/flaskr_exercise",
"path": "migrations/versions/d440fe2187fa_2nd_database_upgrade.py",
"copies": "1",
"size": "1331",
"license": "mit",
"hash": 5903061786855625000,
"line_mean": 35.9722222222,
"line_max": 120,
"alpha_frac": 0.6784372652,
"autogenerated": false,
"ratio": 3.2945544554455446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4472991720645545,
"avg_score": null,
"num_lines": null
} |
# 2nd-order accurate finite-volume implementation of linear advection with
# piecewise linear slope reconstruction
#
# We are solving a_t + u a_x = 0
#
# M. Zingale (2013-03-24)
import numpy
import pylab
import math
class ccFVgrid:
def __init__(self, nx, ng, xmin=0.0, xmax=1.0):
self.xmin = xmin
self.xmax = xmax
self.ng = ng
self.nx = nx
# python is zero-based. Make easy integers to know where the
# real data lives
self.ilo = ng
self.ihi = ng+nx-1
# physical coords -- cell-centered, left and right edges
self.dx = (xmax - xmin)/(nx)
self.x = xmin + (numpy.arange(nx+2*ng)-ng+0.5)*self.dx
self.xl = xmin + (numpy.arange(nx+2*ng)-ng)*self.dx
self.xr = xmin + (numpy.arange(nx+2*ng)-ng+1.0)*self.dx
# storage for the solution
self.a = numpy.zeros((nx+2*ng), dtype=numpy.float64)
def period(self, u):
""" return the period for advection with velocity u """
return (self.xmax - self.xmin)/u
def scratchArray(self):
""" return a scratch array dimensioned for our grid """
return numpy.zeros((self.nx+2*self.ng), dtype=numpy.float64)
def fillBCs(self):
""" fill all single ghostcell with periodic boundary conditions """
# left boundary
n = 0
while (n < self.ng):
self.a[self.ilo-1-n] = self.a[self.ihi-n]
n += 1
# right boundary
n = 0
while (n < self.ng):
self.a[self.ihi+1+n] = self.a[self.ilo+n]
n += 1
def initCond(self, type="tophat"):
if type == "tophat":
self.a[numpy.logical_and(self.x >= 0.333, self.x <= 0.666)] = 1.0
elif type == "sine":
self.a[:] = numpy.sin(2.0*math.pi*self.x/(self.xmax-self.xmin))
elif type == "gaussian":
self.a[:] = 1.0 + numpy.exp(-60.0*(self.x - 0.5)**2)
self.ainit = self.a.copy()
def norm(self, e):
""" return the norm of quantity e which lives on the grid """
if not len(e) == (2*self.ng + self.nx):
return None
return numpy.sqrt(self.dx*numpy.sum(e[self.ilo:self.ihi+1]**2))
#-----------------------------------------------------------------------------
# advection-specific routines
def timestep(g, C, u):
return C*g.dx/u
def states(g, dt, u):
""" compute the left and right interface states """
# compute the piecewise linear slopes
slope = g.scratchArray()
i = g.ilo-1
while (i <= g.ihi+1):
slope[i] = 0.5*(g.a[i+1] - g.a[i-1])/g.dx
i += 1
# loop over all the interfaces. Here, i refers to the left
# interface of the zone. Note that there is one more interface
# than there are zones
al = g.scratchArray()
ar = g.scratchArray()
i = g.ilo
while (i <= g.ihi+1):
# left state on the current interface comes from zone i-1
al[i] = g.a[i-1] + 0.5*g.dx*(1.0 - u*dt/g.dx)*slope[i-1]
# right state on the current interface comes from zone i
ar[i] = g.a[i] - 0.5*g.dx*(1.0 + u*dt/g.dx)*slope[i]
i += 1
return al, ar
def riemann(u, al, ar):
""" Riemann problem for advection -- this is simply upwinding,
but we return the flux """
if u > 0.0:
return u*al
else:
return u*ar
def update(g, dt, flux):
""" conservative update """
anew = g.scratchArray()
anew[g.ilo:g.ihi+1] = g.a[g.ilo:g.ihi+1] + \
dt/g.dx * (flux[g.ilo:g.ihi+1] - flux[g.ilo+1:g.ihi+2])
return anew
def evolve(nx, C, u, numPeriods, ICname):
ng = 2
# create the grid
g = ccFVgrid(nx, ng)
t = 0.0
tmax = numPeriods*g.period(u)
# initialize the data
g.initCond(ICname)
# main evolution loop
while (t < tmax):
# fill the boundary conditions
g.fillBCs()
# get the timestep
dt = timestep(g, C, u)
if (t + dt > tmax):
dt = tmax - t
# get the interface states
al, ar = states(g, dt, u)
# solve the Riemann problem at all interfaces
flux = riemann(u, al, ar)
# do the conservative update
anew = update(g, dt, flux)
g.a[:] = anew[:]
t += dt
return g
#-----------------------------------------------------------------------------
u = 1.0
nx = 64
C = 0.8
g = evolve(nx, C, u, 5, "tophat")
pylab.plot(g.x[g.ilo:g.ihi+1], g.a[g.ilo:g.ihi+1], color="r")
pylab.plot(g.x[g.ilo:g.ihi+1], g.ainit[g.ilo:g.ihi+1], ls=":", color="0.5")
pylab.savefig("fv-advect.png")
#-----------------------------------------------------------------------------
# convergence test
problem = "gaussian"
N = [32, 64, 128, 256]
u = 1.0
C = 0.8
err = []
for nx in N:
g = evolve(nx, C, u, 5, problem)
# compute the error
err.append(g.norm(g.a - g.ainit))
print g.dx, nx, err[-1]
pylab.clf()
N = numpy.array(N, dtype=numpy.float64)
err = numpy.array(err)
pylab.scatter(N, err, color="r")
pylab.plot(N, err[len(N)-1]*(N[len(N)-1]/N)**2, color="k")
ax = pylab.gca()
ax.set_xscale('log')
ax.set_yscale('log')
pylab.savefig("plm-converge.png")
| {
"repo_name": "bt3gl/Numerical-Methods-for-Physics",
"path": "others/advection/fv_advection.py",
"copies": "1",
"size": "5211",
"license": "apache-2.0",
"hash": -6059247351424842000,
"line_mean": 21.364806867,
"line_max": 78,
"alpha_frac": 0.5248512761,
"autogenerated": false,
"ratio": 3.0051903114186853,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4030041587518685,
"avg_score": null,
"num_lines": null
} |
# 2nd-order accurate finite-volume implementation of the inviscid Burger's equation
# with piecewise linear slope reconstruction
#
# We are solving u_t + u u_x = 0 with outflow boundary conditions
#
# M. Zingale (2013-03-26)
import numpy
import pylab
import math
import sys
class ccFVgrid:
def __init__(self, nx, ng, xmin=0.0, xmax=1.0):
self.xmin = xmin
self.xmax = xmax
self.ng = ng
self.nx = nx
# python is zero-based. Make easy integers to know where the
# real data lives
self.ilo = ng
self.ihi = ng+nx-1
# physical coords -- cell-centered, left and right edges
self.dx = (xmax - xmin)/(nx)
self.x = xmin + (numpy.arange(nx+2*ng)-ng+0.5)*self.dx
self.xl = xmin + (numpy.arange(nx+2*ng)-ng)*self.dx
self.xr = xmin + (numpy.arange(nx+2*ng)-ng+1.0)*self.dx
# storage for the solution
self.u = numpy.zeros((nx+2*ng), dtype=numpy.float64)
def period(self, u):
""" return the period for advection with velocity u """
return (self.xmax - self.xmin)/u
def scratchArray(self):
""" return a scratch array dimensioned for our grid """
return numpy.zeros((self.nx+2*self.ng), dtype=numpy.float64)
def fillBCs(self):
""" fill all ghostcells with outflow """
# left boundary
self.u[0:self.ilo] = self.u[self.ilo]
# right boundary
self.u[self.ihi+1:] = self.u[self.ihi]
def initCond(self, type="tophat"):
if type == "tophat":
self.u[numpy.logical_and(self.x >= 0.333, self.x <= 0.666)] = 1.0
elif type == "sine":
self.u[:] = 1.0
index = numpy.logical_and(self.x >= 0.333, self.x <= 0.666)
self.u[index] += 0.5*numpy.sin(2.0*math.pi*(self.x[index]-0.333)/0.333)
elif type == "rarefaction":
self.u[:] = 1.0
self.u[self.x > 0.5] = 2.0
self.uinit = self.u.copy()
def norm(self, e):
""" return the norm of quantity e which lives on the grid """
if not len(e) == (2*self.ng + self.nx):
return None
return numpy.sqrt(self.dx*numpy.sum(e[self.ilo:self.ihi+1]**2))
#-----------------------------------------------------------------------------
# advection-specific routines
def timestep(g, C):
return C*g.dx/max(abs(g.u[g.ilo:g.ihi+1]))
def states(g, dt, slopeType):
""" compute the left and right interface states """
# compute the piecewise linear slopes
slope = g.scratchArray()
if slopeType == "godunov":
# piecewise constant = 0 slopes
slope[:] = 0.0
elif slopeType == "centered":
# unlimited centered difference slopes
i = g.ilo-1
while (i <= g.ihi+1):
slope[i] = 0.5*(g.u[i+1] - g.u[i-1])/g.dx
i += 1
elif slopeType == "minmod":
# minmod limited slope
i = g.ilo-1
while (i <= g.ihi+1):
slope[i] = minmod( (g.u[i] - g.u[i-1])/g.dx,
(g.u[i+1] - g.u[i])/g.dx )
i += 1
elif slopeType == "MC":
# MC limiter
i = g.ilo-1
while (i <= g.ihi+1):
slope[i] = minmod(minmod( 2.0*(g.u[i] - g.u[i-1])/g.dx,
2.0*(g.u[i+1] - g.u[i])/g.dx ),
0.5*(g.u[i+1] - g.u[i-1])/g.dx)
i += 1
elif slopeType == "superbee":
# superbee limiter
i = g.ilo-1
while (i <= g.ihi+1):
A = minmod( (g.u[i+1] - g.u[i])/g.dx,
2.0*(g.u[i] - g.u[i-1])/g.dx )
B = minmod( (g.u[i] - g.u[i-1])/g.dx,
2.0*(g.u[i+1] - g.u[i])/g.dx )
slope[i] = maxmod(A, B)
i += 1
# loop over all the interfaces. Here, i refers to the left
# interface of the zone. Note that there is one more interface
# than there are zones
ul = g.scratchArray()
ur = g.scratchArray()
i = g.ilo
while (i <= g.ihi+1):
# left state on the current interface comes from zone i-1
ul[i] = g.u[i-1] + 0.5*g.dx*(1.0 - g.u[i-1]*dt/g.dx)*slope[i-1]
# right state on the current interface comes from zone i
ur[i] = g.u[i] - 0.5*g.dx*(1.0 + g.u[i]*dt/g.dx)*slope[i]
i += 1
return ul, ur
def riemann(g, ul, ur):
""" Riemann problem for advection -- this is simply upwinding,
but we return the flux """
f = g.scratchArray()
i = g.ilo
while (i <= g.ihi+1):
if ul[i] > ur[i]:
# shock
S = 0.5*(ul[i] + ur[i])
if (S > 0):
us = ul[i]
elif (S < 0):
us = ur[i]
else:
us = 0.0  # stationary shock (S == 0): take the sonic point, zero flux
else:
# rarefaction
if ul[i] >= 0.0:
us = ul[i]
elif ur[i] <= 0.0:
us = ur[i]
else:
us = 0.0  # transonic rarefaction: the sonic point u = 0 gives zero flux
f[i] = 0.5*us*us
i += 1
return f
def update(g, dt, flux):
""" conservative update """
anew = g.scratchArray()
anew[g.ilo:g.ihi+1] = g.u[g.ilo:g.ihi+1] + \
dt/g.dx * (flux[g.ilo:g.ihi+1] - flux[g.ilo+1:g.ihi+2])
return anew
def evolve(nx, C, u, numPeriods, ICname, slopeType="centered"):
ng = 2
# create the grid
g = ccFVgrid(nx, ng)
t = 0.0
tmax = numPeriods*g.period(u)
# initialize the data
g.initCond(ICname)
# main evolution loop
while (t < tmax):
# fill the boundary conditions
g.fillBCs()
# get the timestep
dt = timestep(g, C)
if (t + dt > tmax):
dt = tmax - t
# get the interface states
ul, ur = states(g, dt, slopeType)
# solve the Riemann problem at all interfaces
flux = riemann(g, ul, ur)
# do the conservative update
unew = update(g, dt, flux)
g.u[:] = unew[:]
t += dt
return g
def minmod(a, b):
if (abs(a) < abs(b) and a*b > 0.0):
return a
elif (abs(b) < abs(a) and a*b > 0.0):
return b
else:
return 0.0
def maxmod(a, b):
if (abs(a) > abs(b) and a*b > 0.0):
return a
elif (abs(b) > abs(a) and a*b > 0.0):
return b
else:
return 0.0
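# Quick illustration (sketch, not part of the original script): minmod returns the
# smaller-magnitude slope when both arguments share a sign and 0 otherwise, which
# is what keeps the reconstruction non-oscillatory near extrema.
print(minmod(1.0, 2.0)) # 1.0
print(minmod(-1.0, 2.0)) # 0.0
print(maxmod(1.0, 2.0)) # 2.0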
#-----------------------------------------------------------------------------
# sine
u = 1.0
nx = 128
C = 0.8
pylab.clf()
for i in range(0,10):
tend = (i+1)*0.02
g = evolve(nx, C, u, tend, "sine", slopeType="MC")
c = 1.0 - (0.1 + i*0.1)
pylab.plot(g.x[g.ilo:g.ihi+1], g.u[g.ilo:g.ihi+1], color=`c`)
pylab.plot(g.x[g.ilo:g.ihi+1], g.uinit[g.ilo:g.ihi+1], ls=":", color="0.5")
pylab.savefig("fv-burger-sine.png")
#-----------------------------------------------------------------------------
# rarefaction
u = 1.0
nx = 128
C = 0.8
pylab.clf()
for i in range(0,10):
tend = (i+1)*0.02
g = evolve(nx, C, u, tend, "rarefaction", slopeType="MC")
c = 1.0 - (0.1 + i*0.1)
pylab.plot(g.x[g.ilo:g.ihi+1], g.u[g.ilo:g.ihi+1], color=`c`)
pylab.plot(g.x[g.ilo:g.ihi+1], g.uinit[g.ilo:g.ihi+1], ls=":", color="0.5")
pylab.savefig("fv-burger-rarefaction.png")
| {
"repo_name": "bt3gl/Numerical-Methods-for-Physics",
"path": "others/advection/fv_burgers.py",
"copies": "1",
"size": "7293",
"license": "apache-2.0",
"hash": -1460346586849891600,
"line_mean": 21.8620689655,
"line_max": 83,
"alpha_frac": 0.4768956534,
"autogenerated": false,
"ratio": 2.911377245508982,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.884358155008459,
"avg_score": 0.008938269764878319,
"num_lines": 319
} |
# 2nd-order accurate finite-volume implementation of the inviscid Burger's
# equation with piecewise linear slope reconstruction
#
# We are solving u_t + u u_x = 0 with outflow boundary conditions
#
# M. Zingale (2013-03-26)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import sys
mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['mathtext.rm'] = 'serif'
mpl.rcParams['font.size'] = 12
mpl.rcParams['legend.fontsize'] = 'large'
mpl.rcParams['figure.titlesize'] = 'medium'
class Grid1d(object):
def __init__(self, nx, ng, xmin=0.0, xmax=1.0, bc="outflow"):
self.nx = nx
self.ng = ng
self.xmin = xmin
self.xmax = xmax
self.bc=bc
# python is zero-based. Make easy integers to know where the
# real data lives
self.ilo = ng
self.ihi = ng+nx-1
# physical coords -- cell-centered, left and right edges
self.dx = (xmax - xmin)/(nx)
self.x = xmin + (np.arange(nx+2*ng)-ng+0.5)*self.dx
# storage for the solution
self.u = np.zeros((nx+2*ng), dtype=np.float64)
def scratch_array(self):
""" return a scratch array dimensioned for our grid """
return np.zeros((self.nx+2*self.ng), dtype=np.float64)
def fill_BCs(self):
""" fill all ghostcells as periodic """
if self.bc == "periodic":
# left boundary
self.u[0:self.ilo] = self.u[self.ihi-self.ng+1:self.ihi+1]
# right boundary
self.u[self.ihi+1:] = self.u[self.ilo:self.ilo+self.ng]
elif self.bc == "outflow":
# left boundary
self.u[0:self.ilo] = self.u[self.ilo]
# right boundary
self.u[self.ihi+1:] = self.u[self.ihi]
else:
sys.exit("invalid BC")
def norm(self, e):
""" return the norm of quantity e which lives on the grid """
if len(e) != 2*self.ng + self.nx:
return None
return np.sqrt(self.dx*np.sum(e[self.ilo:self.ihi+1]**2))
class Simulation(object):
def __init__(self, grid):
self.grid = grid
self.t = 0.0
def init_cond(self, type="tophat"):
if type == "tophat":
self.grid.u[np.logical_and(self.grid.x >= 0.333,
self.grid.x <= 0.666)] = 1.0
elif type == "sine":
self.grid.u[:] = 1.0
index = np.logical_and(self.grid.x >= 0.333,
self.grid.x <= 0.666)
self.grid.u[index] += \
0.5*np.sin(2.0*np.pi*(self.grid.x[index]-0.333)/0.333)
elif type == "rarefaction":
self.grid.u[:] = 1.0
self.grid.u[self.grid.x > 0.5] = 2.0
def timestep(self, C):
return C*self.grid.dx/max(abs(self.grid.u[self.grid.ilo:
self.grid.ihi+1]))
def states(self, dt):
""" compute the left and right interface states """
g = self.grid
# compute the piecewise linear slopes -- 2nd order MC limiter
# we pick a range of cells that includes 1 ghost cell on either
# side
ib = g.ilo-1
ie = g.ihi+1
u = g.u
# this is the MC limiter from van Leer (1977), as given in
# LeVeque (2002). Note that this is slightly different than
# the expression from Colella (1990)
dc = g.scratch_array()
dl = g.scratch_array()
dr = g.scratch_array()
dc[ib:ie+1] = 0.5*(u[ib+1:ie+2] - u[ib-1:ie ])
dl[ib:ie+1] = u[ib+1:ie+2] - u[ib :ie+1]
dr[ib:ie+1] = u[ib :ie+1] - u[ib-1:ie ]
# these where's do a minmod()
d1 = 2.0*np.where(np.fabs(dl) < np.fabs(dr), dl, dr)
d2 = np.where(np.fabs(dc) < np.fabs(d1), dc, d1)
ldeltau = np.where(dl*dr > 0.0, d2, 0.0)
# now the interface states. Note that there is one more interface
# than there are zones
ul = g.scratch_array()
ur = g.scratch_array()
# are these indices right?
#
# --+-----------------+------------------+
# ^ i ^ ^ i+1
# ur(i) ul(i+1) ur(i+1)
#
ur[ib:ie+2] = u[ib:ie+2] - \
0.5*(1.0 + u[ib:ie+2]*dt/self.grid.dx)*ldeltau[ib:ie+2]
ul[ib+1:ie+2] = u[ib:ie+1] + \
0.5*(1.0 - u[ib:ie+1]*dt/self.grid.dx)*ldeltau[ib:ie+1]
return ul, ur
def riemann(self, ul, ur):
"""
Riemann problem for Burgers' equation.
"""
S = 0.5*(ul + ur)
ushock = np.where(S > 0.0, ul, ur)
ushock = np.where(S == 0.0, 0.0, ushock)
# rarefaction solution
urare = np.where(ur <= 0.0, ur, 0.0)
urare = np.where(ul >= 0.0, ul, urare)
us = np.where(ul > ur, ushock, urare)
return 0.5*us*us
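# Note on the solver above (added comment): for ul > ur the exact solution is a
# shock moving at S = (ul + ur)/2 (Rankine-Hugoniot for f(u) = u^2/2), so the
# interface state is the value upwind of S; for ul <= ur it is a rarefaction and
# the interface state is ul, ur, or the sonic point u = 0, depending on signs.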
def update(self, dt, flux):
""" conservative update """
g = self.grid
unew = g.scratch_array()
unew[g.ilo:g.ihi+1] = g.u[g.ilo:g.ihi+1] + \
dt/g.dx * (flux[g.ilo:g.ihi+1] - flux[g.ilo+1:g.ihi+2])
return unew
def evolve(self, C, tmax):
self.t = 0.0
g = self.grid
# main evolution loop
while (self.t < tmax):
# fill the boundary conditions
g.fill_BCs()
# get the timestep
dt = self.timestep(C)
if (self.t + dt > tmax):
dt = tmax - self.t
# get the interface states
ul, ur = self.states(dt)
# solve the Riemann problem at all interfaces
flux = self.riemann(ul, ur)
# do the conservative update
unew = self.update(dt, flux)
self.grid.u[:] = unew[:]
self.t += dt
if __name__ == "__main__":
#-----------------------------------------------------------------------------
# sine
xmin = 0.0
xmax = 1.0
nx = 256
ng = 2
g = Grid1d(nx, ng, bc="periodic")
# maximum evolution time based on period for unit velocity
tmax = (xmax - xmin)/1.0
C = 0.8
plt.clf()
s = Simulation(g)
for i in range(0, 10):
tend = (i+1)*0.02*tmax
s.init_cond("sine")
uinit = s.grid.u.copy()
s.evolve(C, tend)
c = 1.0 - (0.1 + i*0.1)
g = s.grid
plt.plot(g.x[g.ilo:g.ihi+1], g.u[g.ilo:g.ihi+1], color=str(c))
g = s.grid
plt.plot(g.x[g.ilo:g.ihi+1], uinit[g.ilo:g.ihi+1], ls=":", color="0.9", zorder=-1)
plt.xlabel("$x$")
plt.ylabel("$u$")
plt.savefig("fv-burger-sine.pdf")
#-----------------------------------------------------------------------------
# rarefaction
xmin = 0.0
xmax = 1.0
nx = 256
ng = 2
g = Grid1d(nx, ng, bc="outflow")
# maximum evolution time based on period for unit velocity
tmax = (xmax - xmin)/1.0
C = 0.8
plt.clf()
s = Simulation(g)
for i in range(0, 10):
tend = (i+1)*0.02*tmax
s.init_cond("rarefaction")
uinit = s.grid.u.copy()
s.evolve(C, tend)
c = 1.0 - (0.1 + i*0.1)
plt.plot(g.x[g.ilo:g.ihi+1], g.u[g.ilo:g.ihi+1], color=str(c))
plt.plot(g.x[g.ilo:g.ihi+1], uinit[g.ilo:g.ihi+1], ls=":", color="0.9", zorder=-1)
plt.xlabel("$x$")
plt.ylabel("$u$")
plt.savefig("fv-burger-rarefaction.pdf")
| {
"repo_name": "zingale/hydro_examples",
"path": "burgers/burgers.py",
"copies": "1",
"size": "7466",
"license": "bsd-3-clause",
"hash": -4261778056769246000,
"line_mean": 23.803986711,
"line_max": 86,
"alpha_frac": 0.4902223413,
"autogenerated": false,
"ratio": 3.022672064777328,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4012894406077328,
"avg_score": null,
"num_lines": null
} |
# 2nd-order accurate finite-volume implementation of the inviscid Burger's
# equation with piecewise linear slope reconstruction
#
# We are solving u_t + u u_x = 0 with outflow boundary conditions
#
# M. Zingale (2013-03-26)
import numpy
import pylab
import math
import sys
class Grid1d:
def __init__(self, nx, ng, xmin=0.0, xmax=1.0):
self.nx = nx
self.ng = ng
self.xmin = xmin
self.xmax = xmax
# python is zero-based. Make easy integers to know where the
# real data lives
self.ilo = ng
self.ihi = ng+nx-1
# physical coords -- cell-centered, left and right edges
self.dx = (xmax - xmin)/(nx)
self.x = xmin + (numpy.arange(nx+2*ng)-ng+0.5)*self.dx
# storage for the solution
self.u = numpy.zeros((nx+2*ng), dtype=numpy.float64)
def scratch_array(self):
""" return a scratch array dimensioned for our grid """
return numpy.zeros((self.nx+2*self.ng), dtype=numpy.float64)
def fill_BCs(self):
""" fill all ghostcells with outflow """
# left boundary
self.u[0:self.ilo] = self.u[self.ilo]
# right boundary
self.u[self.ihi+1:] = self.u[self.ihi]
class Simulation:
def __init__(self, grid):
self.grid = grid
self.t = 0.0
def init_cond(self, type="tophat"):
if type == "tophat":
self.grid.u[numpy.logical_and(self.grid.x >= 0.333,
self.grid.x <= 0.666)] = 1.0
elif type == "sine":
self.grid.u[:] = 1.0
index = numpy.logical_and(self.grid.x >= 0.333,
self.grid.x <= 0.666)
self.grid.u[index] += \
0.5*numpy.sin(2.0*math.pi*(self.grid.x[index]-0.333)/0.333)
elif type == "rarefaction":
self.grid.u[:] = 1.0
self.grid.u[self.grid.x > 0.5] = 2.0
def timestep(self, C):
return C*self.grid.dx/max(abs(self.grid.u[self.grid.ilo:
self.grid.ihi+1]))
def states(self, dt):
""" compute the left and right interface states """
# compute the piecewise linear slopes -- 2nd order MC limiter
# we pick a range of cells that includes 1 ghost cell on either
# side
ib = self.grid.ilo-1
ie = self.grid.ihi+1
u = self.grid.u
# this is the MC limiter from van Leer (1977), as given in
# LeVeque (2002). Note that this is slightly different than
# the expression from Colella (1990)
dc = self.grid.scratch_array()
dl = self.grid.scratch_array()
dr = self.grid.scratch_array()
dc[ib:ie+1] = 0.5*(u[ib+1:ie+2] - u[ib-1:ie ])
dl[ib:ie+1] = u[ib+1:ie+2] - u[ib :ie+1]
dr[ib:ie+1] = u[ib :ie+1] - u[ib-1:ie ]
# these where's do a minmod()
d1 = 2.0*numpy.where(numpy.fabs(dl) < numpy.fabs(dr), dl, dr)
d2 = numpy.where(numpy.fabs(dc) < numpy.fabs(d1), dc, d1)
ldeltau = numpy.where(dl*dr > 0.0, d2, 0.0)
# now the interface states. Note that there is one more interface
# than there are zones
ul = self.grid.scratch_array()  # use the instance grid, not the module-level g
ur = self.grid.scratch_array()
# are these indices right?
#
# --+-----------------+------------------+
# ^ i ^ ^ i+1
# ur(i) ul(i+1) ur(i+1)
#
ur[ib:ie+1] = u[ib:ie+1] - \
0.5*(1.0 + u[ib:ie+1]*dt/self.grid.dx)*ldeltau[ib:ie+1]
ul[ib+1:ie+2] = u[ib:ie+1] + \
0.5*(1.0 - u[ib:ie+1]*dt/self.grid.dx)*ldeltau[ib:ie+1]
return ul, ur
def riemann(self, ul, ur):
"""
Riemann problem for Burgers' equation.
"""
S = 0.5*(ul + ur)
ushock = numpy.where(S > 0.0, ul, ur)
ushock = numpy.where(S == 0.0, 0.0, ushock)
# rarefaction solution
urare = numpy.where(ur <= 0.0, ur, 0.0)
urare = numpy.where(ul >= 0.0, ul, urare)
us = numpy.where(ul > ur, ushock, urare)
return 0.5*us*us
def update(self, dt, flux):
""" conservative update """
g = self.grid
unew = g.scratch_array()
unew[g.ilo:g.ihi+1] = g.u[g.ilo:g.ihi+1] + \
dt/g.dx * (flux[g.ilo:g.ihi+1] - flux[g.ilo+1:g.ihi+2])
return unew
def evolve(self, C, tmax):
self.t = 0.0
# main evolution loop
while (self.t < tmax):
# fill the boundary conditions
self.grid.fill_BCs()
# get the timestep
dt = self.timestep(C)
if (self.t + dt > tmax):
dt = tmax - self.t
# get the interface states
ul, ur = self.states(dt)
# solve the Riemann problem at all interfaces
flux = self.riemann(ul, ur)
# do the conservative update
unew = self.update(dt, flux)
self.grid.u[:] = unew[:]
self.t += dt
#-----------------------------------------------------------------------------
# sine
xmin = 0.0
xmax = 1.0
nx = 256
ng = 2
g = Grid1d(nx, ng)
# maximum evolution time based on period for unit velocity
tmax = (xmax - xmin)/1.0
C = 0.8
pylab.clf()
s = Simulation(g)
for i in range(0,10):
tend = (i+1)*0.02*tmax
s.init_cond("sine")
uinit = s.grid.u.copy()
s.evolve(C, tend)
c = 1.0 - (0.1 + i*0.1)
g = s.grid
pylab.plot(g.x[g.ilo:g.ihi+1], g.u[g.ilo:g.ihi+1], color=`c`)
g = s.grid
pylab.plot(g.x[g.ilo:g.ihi+1], uinit[g.ilo:g.ihi+1], ls=":", color="0.5")
pylab.xlabel("$x$")
pylab.ylabel("$u$")
pylab.savefig("fv-burger-sine.png")
pylab.savefig("fv-burger-sine.eps")
#-----------------------------------------------------------------------------
# rarefaction
xmin = 0.0
xmax = 1.0
nx = 256
ng = 2
g = Grid1d(nx, ng)
# maximum evolution time based on period for unit velocity
tmax = (xmax - xmin)/1.0
C = 0.8
pylab.clf()
s = Simulation(g)
for i in range(0,10):
tend = (i+1)*0.02*tmax
s.init_cond("rarefaction")
uinit = s.grid.u.copy()
s.evolve(C, tend)
c = 1.0 - (0.1 + i*0.1)
pylab.plot(g.x[g.ilo:g.ihi+1], g.u[g.ilo:g.ihi+1], color=`c`)
pylab.plot(g.x[g.ilo:g.ihi+1], uinit[g.ilo:g.ihi+1], ls=":", color="0.5")
pylab.xlabel("$x$")
pylab.ylabel("$u$")
pylab.savefig("fv-burger-rarefaction.png")
pylab.savefig("fv-burger-rarefaction.eps")
| {
"repo_name": "JeffDestroyerOfWorlds/hydro_examples",
"path": "burgers/burgers.py",
"copies": "1",
"size": "6543",
"license": "bsd-3-clause",
"hash": 6555722672691561000,
"line_mean": 23.1439114391,
"line_max": 79,
"alpha_frac": 0.5052728106,
"autogenerated": false,
"ratio": 2.940674157303371,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8891245681384714,
"avg_score": 0.010940257303731183,
"num_lines": 271
} |
import numpy as np
from sympy import symbols, sin, cos, lambdify
from shenfun import *
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
# pylint: disable=multiple-statements
from mpltools import annotation
pa = {'fill': False, 'edgecolor': 'black'}
ta = {'fontsize': 10}
pex = lambda *args: print(*args) + exit(0)
x, y, t = symbols("x, y, t", real=True)
# Define the initial solution
uex = (sin(np.pi*x)**2)*sin(2*np.pi*y)*sin(t)
uey = -sin(2*np.pi*x)*(sin(np.pi*y)**2)*sin(t)
pe = cos(np.pi*x)*cos(np.pi*y)*sin(t)
fex = -uex.diff(x, 2) - uex.diff(y, 2) + pe.diff(x, 1) + uex.diff(t, 1) \
+ uex*uex.diff(x, 1) + uey*uex.diff(y, 1)
fey = -uey.diff(x, 2) - uey.diff(y, 2) + pe.diff(y, 1) + uey.diff(t, 1) \
+ uex*uey.diff(x, 1) + uey*uey.diff(y, 1)
he = uex.diff(x, 1) + uey.diff(y, 1)
uexf, ueyf, pef, fexf, feyf = map(lambda v: lambdify((x, y, t), v),
(uex, uey, pe, fex, fey))
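# Sanity check (sketch, not part of the original script): the manufactured velocity
# field is divergence-free, so `he` should evaluate to ~0 at any sample point.
hef = lambdify((x, y, t), he)
print(abs(hef(0.3, 0.7, 1.0)) < 1e-12) # expected: True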
def main(n):
# number of modes in x and y direction
N = (32, 32)
# basis function for velocity components in x and y directions: P_{N}
D0X = FunctionSpace(N[0], 'Legendre', quad='GL', dtype='d', bc=(0, 0))
D0Y = FunctionSpace(N[1], 'Legendre', quad='GL', dtype='d', bc=(0, 0))
# basis function for pressure: P_{N-2}
PX = FunctionSpace(N[0], 'Legendre', quad='GL')
PY = FunctionSpace(N[1], 'Legendre', quad='GL')
PX.slice = lambda: slice(0, N[0]-2)
PY.slice = lambda: slice(0, N[1]-2)
# define a multi-dimensional tensor product basis
Vs = TensorProductSpace(comm, (D0X, D0Y))
Ps = TensorProductSpace(comm, (PX, PY), modify_spaces_inplace=True)
# Create vector space for velocity
Ws = VectorSpace([Vs, Vs])
Cs = TensorSpace([Ws, Ws]) # cauchy stress tensor
# Create test and trial spaces for velocity and pressure
u = TrialFunction(Ws); v = TestFunction(Ws)
p = TrialFunction(Ps); q = TestFunction(Ps)
X = Vs.local_mesh(True)
# Define the initial solution on quadrature points at t=0
U = Array(Ws, buffer=(uex.subs(t, 0), uey.subs(t, 0)))
P = Array(Ps); P.fill(0)
F = Array(Ws, buffer=(fex.subs(t, 0), fey.subs(t, 0)))
U0 = U.copy()
# Define the coefficient vector
U_hat = Function(Ws); U_hat = Ws.forward(U, U_hat)
P_hat = Function(Ps); P_hat = Ps.forward(P, P_hat)
F_hat = Function(Ws); F_hat = Ws.forward(F, F_hat)
# Initial time, time step, final time
ti, dt, tf = 0., 5e-3/n, 5e-2
nsteps = int(np.ceil((tf - ti)/dt))  # plain int: np.int was removed in newer NumPy
dt = (tf - ti)/nsteps
X = Ws.local_mesh(True)
# Define the implicit operator for BDF-2
Lb1 = BlockMatrix(inner(v, u*(1.5/dt)) + inner(grad(v), grad(u)))
Lb2 = BlockMatrix(inner(-grad(q), grad(p)))
# Define the implicit operator for Euler
Le1 = BlockMatrix(inner(v, u*(1./dt)) + inner(grad(v), grad(u)))
Le2 = BlockMatrix(inner(-grad(q), grad(p)))
# Define the implicit operator for updating
Lu1 = BlockMatrix([inner(v, u)])
Lu2 = BlockMatrix([inner(q, p)])
# temporary storage
rhsU, rhsP = Function(Ws), Function(Ps)
U0_hat = Function(Ws); U0_hat = Ws.forward(U, U0_hat)
Ut_hat = Function(Ws); Ut_hat = Ws.forward(U, Ut_hat)
P0_hat = Function(Ps); P0_hat = Ps.forward(P, P0_hat)
Phi_hat = Function(Ps); Phi_hat = Ps.forward(P, Phi_hat)
# Create work arrays for nonlinear part
UiUj = Array(Cs)
UiUj_hat = Function(Cs)
# integrate in time
time = ti
# storage
rhsU, rhsP = rhsU, rhsP
u_hat, p_hat = U_hat, P_hat
u0_hat, p0_hat = U0_hat, P0_hat
ut_hat, phi_hat = Ut_hat, Phi_hat
# Euler time-step
# evaluate the forcing function
F[0] = fexf(X[0], X[1], time+dt)
F[1] = feyf(X[0], X[1], time+dt)
# Solve (9.102)
rhsU.fill(0)
rhsU += -inner(v, grad(p_hat))
rhsU += inner(v, F)
rhsU += inner(v, u_hat/dt)
U = Ws.backward(U_hat, U)
UiUj = outer(U, U, UiUj)
UiUj_hat = UiUj.forward(UiUj_hat)
rhsU += -inner(v, div(UiUj_hat))
ut_hat = Le1.solve(rhsU, u=ut_hat)
# Solve (9.107)
rhsP.fill(0)
rhsP += (1/dt)*inner(q, div(ut_hat))
phi_hat = Le2.solve(rhsP, u=phi_hat, constraints=((0, 0, 0),))
# Update for next time step
u0_hat[:] = u_hat; p0_hat[:] = p_hat
# Update (9.107)
rhsU.fill(0)
rhsU += inner(v, ut_hat) - inner(v, dt*grad(phi_hat))
u_hat = Lu1.solve(rhsU, u=u_hat)
# Update (9.105)
rhsP.fill(0)
rhsP += inner(q, phi_hat) + inner(q, p_hat) - inner(q, div(ut_hat))
p_hat = Lu2.solve(rhsP, u=p_hat, constraints=((0, 0, 0),))
time += dt
# BDF time step
for step in range(2, nsteps+1):
# evaluate the forcing function
F[0] = fexf(X[0], X[1], time+dt)
F[1] = feyf(X[0], X[1], time+dt)
# Solve (9.102)
rhsU.fill(0)
rhsU += -inner(v, grad(p_hat))
rhsU += inner(v, F)
rhsU += inner(v, u_hat*2/dt) - inner(v, u0_hat*0.5/dt)
U = Ws.backward(U_hat, U)
UiUj = outer(U, U, UiUj)
UiUj_hat = UiUj.forward(UiUj_hat)
rhsU += -2*inner(v, div(UiUj_hat))
U0 = Ws.backward(U0_hat, U0)
UiUj = outer(U0, U0, UiUj)
UiUj_hat = UiUj.forward(UiUj_hat)
rhsU += inner(v, div(UiUj_hat))
ut_hat = Lb1.solve(rhsU, u=ut_hat)
# Solve (9.107)
rhsP.fill(0)
rhsP += 1.5/dt*inner(q, div(ut_hat))
phi_hat = Lb2.solve(rhsP, u=phi_hat, constraints=((0, 0, 0),))
# update for next time step
u0_hat[:] = u_hat; p0_hat[:] = p_hat
# Update (9.107, 9.105)
rhsU.fill(0)
rhsU += inner(v, ut_hat) - inner(v, ((2.*dt/3))*grad(phi_hat))
u_hat = Lu1.solve(rhsU, u=u_hat)
rhsP.fill(0)
rhsP += inner(q, phi_hat) + inner(q, p_hat) - inner(q, div(ut_hat))
p_hat = Lu2.solve(rhsP, u=p_hat, constraints=((0, 0, 0),))
# increment time
time += dt
# Transform the solution to physical space
UP = [*U_hat.backward(U), P_hat.backward(P)]
# compute error
Ue = Array(Ws, buffer=(uex.subs(t, tf), uey.subs(t, tf)))
Pe = Array(Ps, buffer=(pe.subs(t, tf)))
UPe = [*Ue, Pe]
l2_error = list(map(np.linalg.norm, [u-ue for u, ue in zip(UP, UPe)]))
return l2_error
if __name__ == "__main__":
N = 2**np.arange(0, 4)
E = np.zeros((3, len(N)))
for (j, n) in enumerate(N):
E[:, j] = main(n)
fig = plt.figure(figsize=(5.69, 4.27))
ax = plt.gca()
marks = ('or', '-g', '-ob')
vars = (r'$u_x$', r'$u_y$', r'$p$')
for i in range(3):
plt.loglog(N, E[i, :], marks[i], label=vars[i])
slope, intercept = np.polyfit(np.log(N[-2:]), np.log(E[i, -2:]), 1)
if i != 1:
annotation.slope_marker((N[-2], E[i, -2]), ("{0:.2f}".format(slope), 1),
ax=ax, poly_kwargs=pa, text_kwargs=ta)
plt.text(N[0], 2e-5, r"$\Delta t=5 \times 10^{-3},\; N=32^2$")
plt.text(N[0], 1e-5, r"Final Time = $5 \times 10^{-2}$")
plt.title(r"Navier-Stokes: $2^{nd}$-order Rotational Pressure-Correction")
plt.legend(); plt.autoscale()
plt.ylabel(r'$|Error|_{L^2}$')
plt.xticks(N)
ax.get_xaxis().set_minor_formatter(NullFormatter())
fmt = lambda v: r"$\Delta t/{0}$".format(v) if v!=1 else r"$\Delta t$"
plt.gca().set_xticklabels(list(map(fmt, N)))
#plt.savefig("navier-stokes.pdf", orientation='portrait')
plt.show()
| {
"repo_name": "spectralDNS/shenfun",
"path": "demo/NavierStokesPC.py",
"copies": "1",
"size": "7513",
"license": "bsd-2-clause",
"hash": 6916785474719700000,
"line_mean": 32.0969162996,
"line_max": 84,
"alpha_frac": 0.5659523493,
"autogenerated": false,
"ratio": 2.5313342318059298,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35972865811059296,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from sympy import symbols, sin, cos, lambdify
from shenfun import *
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter, ScalarFormatter
from mpltools import annotation
pa = {'fill': False, 'edgecolor': 'black'}
ta = {'fontsize': 10}
pex = lambda *args: print(*args) + exit(0)
x, y, t = symbols("x, y, t", real=True)
# Define the initial solution
uex = (sin(np.pi*x)**2)*sin(2*np.pi*y)*sin(t)
uey = -sin(2*np.pi*x)*(sin(np.pi*y)**2)*sin(t)
pe = cos(np.pi*x)*cos(np.pi*y)*sin(t)
fex = -uex.diff(x, 2) - uex.diff(y, 2) + pe.diff(x, 1) + uex.diff(t, 1)
fey = -uey.diff(x, 2) - uey.diff(y, 2) + pe.diff(y, 1) + uey.diff(t, 1)
he = uex.diff(x, 1) + uey.diff(y, 1)
uexf, ueyf, pef, fexf, feyf = map(lambda v: lambdify((x, y, t), v),
(uex, uey, pe, fex, fey))
def main(n):
# number of modes in x and y direction
N = (32, 32)
# basis function for velocity components in x and y directions: P_{N}
D0X = FunctionSpace(N[0], 'Legendre', quad='GL', dtype='d', bc=(0, 0))
D0Y = FunctionSpace(N[1], 'Legendre', quad='GL', dtype='d', bc=(0, 0))
# basis function for pressure: P_{N-2}
PX = FunctionSpace(N[0], 'Legendre', quad='GL')
PY = FunctionSpace(N[1], 'Legendre', quad='GL')
PX.slice = lambda: slice(0, N[0]-2)
PY.slice = lambda: slice(0, N[1]-2)
# define a multi-dimensional tensor product basis
Vs = TensorProductSpace(comm, (D0X, D0Y))
Ps = TensorProductSpace(comm, (PX, PY), modify_spaces_inplace=True)
# Create vector space for velocity
Ws = VectorSpace([Vs, Vs])
# Create test and trial spaces for velocity and pressure
u = TrialFunction(Ws); v = TestFunction(Ws)
p = TrialFunction(Ps); q = TestFunction(Ps)
X = Vs.local_mesh(True)
# Define the initial solution on quadrature points at t=0
U = Array(Ws, buffer=(uex.subs(t, 0), uey.subs(t, 0)))
P = Array(Ps); P.fill(0)
F = Array(Ws, buffer=(fex.subs(t, 0), fey.subs(t, 0)))
# Define the coefficient vector
U_hat = Function(Ws); U_hat = Ws.forward(U, U_hat)
P_hat = Function(Ps); P_hat = Ps.forward(P, P_hat)
F_hat = Function(Ws); F_hat = Ws.forward(F, F_hat)
# Initial time, time step, final time
ti, dt, tf = 0., 5e-3/n, 5e-2
    nsteps = int(np.ceil((tf - ti)/dt))
dt = (tf - ti)/nsteps
X = Ws.local_mesh(True)
# Define the implicit operator for BDF-2
Lb1 = BlockMatrix(inner(v, u*(1.5/dt)) + inner(grad(v), grad(u)))
Lb2 = BlockMatrix(inner(-grad(q), grad(p)))
# Define the implicit operator for Euler
Le1 = BlockMatrix(inner(v, u*(1./dt)) + inner(grad(v), grad(u)))
Le2 = BlockMatrix(inner(-grad(q), grad(p)))
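    # The pressure Poisson operators Le2/Lb2 only determine p up to a constant,
    # so the solves below pass constraints=((0, 0, 0),) to fix one pressure
    # coefficient.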
# Define the implicit operator for updating
Lu1 = BlockMatrix([inner(v, u)])
Lu2 = BlockMatrix([inner(q, p)])
# temporary storage
rhsU, rhsP = Function(Ws), Function(Ps)
U0_hat = Function(Ws); U0_hat = Ws.forward(U, U0_hat)
Ut_hat = Function(Ws); Ut_hat = Ws.forward(U, Ut_hat)
P0_hat = Function(Ps); P0_hat = Ps.forward(P, P0_hat)
Phi_hat = Function(Ps); Phi_hat = Ps.forward(P, Phi_hat)
# integrate in time
time = ti
# storage
rhsU, rhsP = rhsU, rhsP
u_hat, p_hat = U_hat, P_hat
u0_hat, p0_hat = U0_hat, P0_hat
ut_hat, phi_hat = Ut_hat, Phi_hat
# Euler time-step
# evaluate the forcing function
F[0] = fexf(X[0], X[1], time+dt)
F[1] = feyf(X[0], X[1], time+dt)
# Solve (9.102)
rhsU.fill(0)
rhsU += -inner(v, grad(p_hat))
rhsU += inner(v, F)
rhsU += inner(v, u_hat/dt)
ut_hat = Le1.solve(rhsU, u=ut_hat)
# Solve (9.107)
rhsP.fill(0)
rhsP += (1/dt)*inner(q, div(ut_hat))
phi_hat = Le2.solve(rhsP, u=phi_hat, constraints=((0, 0, 0),))
# Update for next time step
u0_hat[:] = u_hat; p0_hat[:] = p_hat
# Update (9.107)
rhsU.fill(0)
rhsU += inner(v, ut_hat) - inner(v, dt*grad(phi_hat))
u_hat = Lu1.solve(rhsU, u=u_hat)
# Update (9.105)
rhsP.fill(0)
rhsP += inner(q, phi_hat) + inner(q, p_hat) - inner(q, div(ut_hat))
p_hat = Lu2.solve(rhsP, u=p_hat, constraints=((0, 0, 0),))
time += dt
# BDF time step
for step in range(2, nsteps+1):
# evaluate the forcing function
F[0] = fexf(X[0], X[1], time+dt)
F[1] = feyf(X[0], X[1], time+dt)
# Solve (9.102)
rhsU.fill(0)
rhsU += -inner(v, grad(p_hat))
rhsU += inner(v, F)
rhsU += inner(v, u_hat*2/dt) - inner(v, u0_hat*0.5/dt)
ut_hat = Lb1.solve(rhsU, u=ut_hat)
# Solve (9.107)
rhsP.fill(0)
rhsP += 1.5/dt*inner(q, div(ut_hat))
phi_hat = Lb2.solve(rhsP, u=phi_hat, constraints=((0, 0, 0),))
# update for next time step
u0_hat[:] = u_hat; p0_hat[:] = p_hat
# Update (9.107, 9.105)
rhsU.fill(0)
rhsU += inner(v, ut_hat) - inner(v, ((2.*dt/3))*grad(phi_hat))
u_hat = Lu1.solve(rhsU, u=u_hat)
rhsP.fill(0)
rhsP += inner(q, phi_hat) + inner(q, p_hat) - inner(q, div(ut_hat))
p_hat = Lu2.solve(rhsP, u=p_hat, constraints=((0, 0, 0),))
# increment time
time += dt
# Transform the solution to physical space
UP = [*U_hat.backward(U), P_hat.backward(P)]
# compute error
Ue = Array(Ws, buffer=(uex.subs(t, tf), uey.subs(t, tf)))
Pe = Array(Ps, buffer=(pe.subs(t, tf)))
UPe = [*Ue, Pe]
l2_error = list(map(np.linalg.norm, [u-ue for u, ue in zip(UP, UPe)]))
return l2_error
if __name__ == "__main__":
N = 2**np.arange(0, 4)
E = np.zeros((3, len(N)))
for (j, n) in enumerate(N):
E[:, j] = main(n)
fig = plt.figure(figsize=(5.69, 4.27))
ax = plt.gca()
marks = ('or', '-g', '-ob')
vars = (r'$u_x$', r'$u_y$', r'$p$')
for i in range(3):
plt.loglog(N, E[i, :], marks[i], label=vars[i])
slope, intercept = np.polyfit(np.log(N[-2:]), np.log(E[i,-2:]), 1)
if i != 1:
annotation.slope_marker((N[-2], E[i, -2]), ("{0:.2f}".format(slope), 1),
ax=ax, poly_kwargs=pa, text_kwargs=ta)
plt.text(N[0], 2e-5, r"$\Delta t=5 \times 10^{-3},\; N=32^2$")
plt.text(N[0], 1e-5, r"Final Time = $5 \times 10^{-2}$")
plt.title(r"Stokes: $2^{nd}$-order Rotational Pressure-Correction")
plt.legend()
plt.autoscale()
plt.ylabel(r'$|Error|_{L^2}$')
plt.xticks(N)
ax.get_xaxis().set_minor_formatter(NullFormatter())
fmt = lambda v: r"$\Delta t/{0}$".format(v) if v != 1 else r"$\Delta t$"
plt.gca().set_xticklabels(list(map(fmt, N)))
#plt.savefig("stokes.pdf", orientation='portrait')
plt.show()
| {
"repo_name": "spectralDNS/shenfun",
"path": "demo/StokesPC.py",
"copies": "1",
"size": "6793",
"license": "bsd-2-clause",
"hash": -5098144695905062000,
"line_mean": 31.6586538462,
"line_max": 84,
"alpha_frac": 0.5644045341,
"autogenerated": false,
"ratio": 2.57701062215478,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36414151562547803,
"avg_score": null,
"num_lines": null
} |
"""2nd pass at adding IRONMAN 3 digit identifiers
Revision ID: 4ea2b79957f3
Revises: d561999e9b42
Create Date: 2019-04-30 12:17:03.382443
"""
import re
from alembic import op
from sqlalchemy.orm.session import Session
from portal.models.identifier import Identifier
from portal.models.organization import OrganizationIdentifier
from portal.models.user import UserIdentifier
from portal.system_uri import TRUENTH_EXTERNAL_STUDY_SYSTEM
# revision identifiers, used by Alembic.
revision = '4ea2b79957f3'
down_revision = 'd561999e9b42'
org_pattern = re.compile(r'^146-(\d\d)$')
study_pattern = re.compile(r'^170-(\d\d)-(\d\d\d)$')
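# e.g. the org identifier '146-01' gains a three digit sibling '146-001', and the
# study identifier '170-01-123' gains '170-001-123'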
def upgrade():
session = Session(bind=op.get_bind(), expire_on_commit=False)
# All IRONMAN orgs need a 3 digit version
IRONMAN_system = 'http://pcctc.org/'
ironman_org_ids = [(id.id, id._value) for id in Identifier.query.filter(
Identifier.system == IRONMAN_system).with_entities(
Identifier.id, Identifier._value)]
existing_values = [id[1] for id in ironman_org_ids]
replacements = {}
for io_id, io_value in ironman_org_ids:
found = org_pattern.match(io_value)
if found:
# avoid probs if run again - don't add if already present
needed = '146-0{}'.format(found.group(1))
replacements[found.group(1)] = '0{}'.format(found.group(1))
if needed not in existing_values:
needed_i = Identifier(
use='secondary', system=IRONMAN_system, _value=needed)
else:
needed_i = Identifier.query.filter(
Identifier.system == IRONMAN_system).filter(
Identifier._value == needed).one()
# add a 3 digit identifier and link with same org
oi = OrganizationIdentifier.query.filter(
OrganizationIdentifier.identifier_id == io_id).one()
needed_oi = OrganizationIdentifier.query.filter(
OrganizationIdentifier.organization_id ==
oi.organization_id).filter(
OrganizationIdentifier.identifier == needed_i).first()
if not needed_oi:
needed_i = session.merge(needed_i)
needed_oi = OrganizationIdentifier(
organization_id=oi.organization_id,
identifier=needed_i)
session.add(needed_oi)
# All IRONMAN users with a 2 digit ID referencing one of the replaced
# values needs a 3 digit version
ironman_study_ids = Identifier.query.filter(
Identifier.system == TRUENTH_EXTERNAL_STUDY_SYSTEM).filter(
Identifier._value.like('170-%')).with_entities(
Identifier.id, Identifier._value)
for iid, ival in ironman_study_ids:
found = study_pattern.match(ival)
if found:
org_segment = found.group(1)
patient_segment = found.group(2)
# only add if also one of the new org ids
if org_segment not in replacements:
continue
needed = '170-{}-{}'.format(
replacements[org_segment], patient_segment)
# add a 3 digit identifier and link with same user(s),
# if not already present
uis = UserIdentifier.query.filter(
UserIdentifier.identifier_id == iid)
needed_i = Identifier.query.filter(
Identifier.system == TRUENTH_EXTERNAL_STUDY_SYSTEM).filter(
Identifier._value == needed).first()
if not needed_i:
needed_i = Identifier(
use='secondary', system=TRUENTH_EXTERNAL_STUDY_SYSTEM,
_value=needed)
for ui in uis:
needed_ui = UserIdentifier.query.filter(
UserIdentifier.user_id == ui.user_id).filter(
UserIdentifier.identifier == needed_i).first()
if not needed_ui:
needed_ui = UserIdentifier(
user_id=ui.user_id, identifier=needed_i)
session.add(needed_ui)
session.commit()
def downgrade():
pass
| {
"repo_name": "uwcirg/true_nth_usa_portal",
"path": "portal/migrations/versions/4ea2b79957f3_.py",
"copies": "1",
"size": "4167",
"license": "bsd-3-clause",
"hash": -1318155700847623400,
"line_mean": 36.5405405405,
"line_max": 76,
"alpha_frac": 0.5987520998,
"autogenerated": false,
"ratio": 4.026086956521739,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5124839056321739,
"avg_score": null,
"num_lines": null
} |
# 2nd step of the process - constructing the index
import os, sys, math;
import pickle, glob, re;
from operator import itemgetter;
from os.path import join;
# read the tf and idf objects
# tff contains the tf dictionaries for each file as index
tfpck=open("tfpickle.pkl","rb");
tff=pickle.load(tfpck);
tfpck.close();
# idfs contains the idf dictionary for each unique word in the corpus
idfpck=open("idfpickle.pkl","rb");
idfs=pickle.load(idfpck);
idfpck.close();
tfidf={};
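# tfidf[i][j] = term frequency of word j in document i multiplied by the idf of word j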
for i in tff:
r={};
for j in tff[i]:
r[j]=tff[i][j]*idfs[j];
tfidf[i]=r;
def clnSentence(x):
# keep only characters 32 to 126 after splitting
x2=[i for i in x if ord(i)>=32 and ord(i)<=126];
return "".join(x2).strip();
def getSentences(x): # split the story into constituent sentences
r=[];
f=open(x,"r");
fc=f.read();
fc1=fc.split("\n");
for i in fc1: r+=[clnSentence(i)];
return r;
x=glob.glob("*.txt");
x=[i for i in x if len(i)==36];
# we save stories to summarize with filenames composed of their MD5 hash
# makes it easy to locate documents to be processed, and explains "36" in check above.
g=open("summaries.txt","w");
dcts={};
for i in x:
g.write("original story follows: \n");
p=getSentences(i);
for j in p: g.write(j);
g.write("\n\n summary follows: \n");
svals=[];
# for each sentence,
# break into words,
# then compute total tfidf for each sentence...
# divide each sentence tfidf by the number of words in the sentence
# create a dictionary with the sentence as key and average tfidf as value.
# sort sentences on value, pick top 30% of sentences in summary
for j in p:
wrds=j.split(" ");
        line_tfidf=sum([tfidf[i][k] for k in wrds if k in tfidf[i]]);
avg_line_tfidf=line_tfidf/float(len(wrds));
svals+=[(j,avg_line_tfidf)];
svals=sorted(svals, key=itemgetter(1), reverse=True); # descending sort
svals=[j[0] for j in svals[:len(p)/3]][:-1];
g.write(p[0]+" "); # always include first sentence for context
for j in p[1:]:
if j in svals: g.write(j+" ");
g.write("\n\n\n");
g.close();
| {
"repo_name": "gnokem/blog-code",
"path": "tfidf_summarizer.py",
"copies": "1",
"size": "2109",
"license": "mit",
"hash": -8556239723070361000,
"line_mean": 26.12,
"line_max": 86,
"alpha_frac": 0.6448553817,
"autogenerated": false,
"ratio": 2.81951871657754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.396437409827754,
"avg_score": null,
"num_lines": null
} |
"""2nd update that adds an index on the user_id column
Revision ID: 3414dfab0e91
Revises: 52feb4cd3e65
Create Date: 2015-10-09 12:16:31.095629
"""
# revision identifiers, used by Alembic.
revision = '3414dfab0e91'
down_revision = '52feb4cd3e65'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('scores', 'created_at',
existing_type=postgresql.TIMESTAMP(),
nullable=False)
op.drop_index('ix_scores_user_id', table_name='scores')
op.create_index(op.f('ix_scores_user_id'), 'scores', ['user_id'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_scores_user_id'), table_name='scores')
op.create_index('ix_scores_user_id', 'scores', ['user_id'], unique=True)
op.alter_column('scores', 'created_at',
existing_type=postgresql.TIMESTAMP(),
nullable=True)
### end Alembic commands ###
| {
"repo_name": "Rdbaker/GameCenter",
"path": "migrations/versions/3414dfab0e91_.py",
"copies": "2",
"size": "1110",
"license": "mit",
"hash": 4268377765724918300,
"line_mean": 31.6470588235,
"line_max": 83,
"alpha_frac": 0.6621621622,
"autogenerated": false,
"ratio": 3.3944954128440368,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5056657575044037,
"avg_score": null,
"num_lines": null
} |
# 2.
# Using the splitting of a Stieltjes matrix corresponding to its incomplete factorization by the ILU(k) method,
# implement a stationary iterative process and study its rate of convergence
#
# p. 65 - Basic stationary iterative processes
# p. 75 - Accelerating the convergence of stationary iterative processes
#
# http://mathworld.wolfram.com/StationaryIterativeMethod.html
# Stationary iterative methods are methods for solving a linear system of equations Ax=b
#
import numpy as np
ITERATION_LIMIT = 1000
# initialize the matrix
A = np.array([[10., -1., 2., 0.],
[-1., 11., -1., 3.],
[2., -1., 10., -1.],
[0., 3., -1., 8.]])
# initialize the RHS vector
b = np.array([6., 25., -11., 15.])
def jacobi_method(A: np.ndarray, b: np.ndarray):
x = np.zeros_like(b)
for it_count in range(ITERATION_LIMIT):
# print("Current solution:", x)
x_new = np.zeros_like(x)
for i in range(A.shape[0]):
s1 = np.dot(A[i, :i], x[:i])
s2 = np.dot(A[i, i + 1:], x[i + 1:])
x_new[i] = (b[i] - s1 - s2) / A[i, i]
if np.allclose(x, x_new, atol=1e-8):
break
x = x_new
return x
def gauss_seidel(A: np.ndarray, b: np.ndarray):
x = np.zeros_like(b)
for it_count in range(ITERATION_LIMIT):
# print("Current solution:", x)
x_new = np.zeros_like(x)
for i in range(A.shape[0]):
s1 = np.dot(A[i, :i], x_new[:i])
s2 = np.dot(A[i, i + 1:], x[i + 1:])
x_new[i] = (b[i] - s1 - s2) / A[i, i]
if np.allclose(x, x_new, rtol=1e-8):
break
x = x_new
return x
def sor_method(A: np.ndarray, b: np.ndarray, w=1.0):
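    # w is the relaxation factor: w = 1 reduces SOR to Gauss-Seidel, and for a
    # symmetric positive definite A the iteration converges for 0 < w < 2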
x = np.zeros_like(b)
for it_count in range(ITERATION_LIMIT):
# print("Current solution:", x)
x_new = np.zeros_like(x)
for i in range(A.shape[0]):
s1 = np.dot(A[i, :i], x_new[:i])
s2 = np.dot(A[i, i + 1:], x[i + 1:])
x_new[i] = (1.0 - w)*x[i] + w * (b[i] - s1 - s2) / A[i, i]
if np.allclose(x, x_new, rtol=1e-8):
break
x = x_new
return x
def ssor_method(A: np.ndarray, b: np.ndarray, w=1.0):
    x = np.zeros_like(b)
    for it_count in range(ITERATION_LIMIT):
        # print("Current solution:", x)
        # compute the vector x^{k+1/2} from x^{k} with a forward SOR sweep
        x_half = np.copy(x)
        for i in range(A.shape[0]):
            s1 = np.dot(A[i, :i], x_half[:i])
            s2 = np.dot(A[i, i + 1:], x[i + 1:])
            x_half[i] = (1.0 - w)*x[i] + w * (b[i] - s1 - s2) / A[i, i]
        # compute the vector x^{k+1} from x^{k+1/2} with a backward SOR sweep
        x_new = np.copy(x_half)
        for i in reversed(range(A.shape[0])):
            s1 = np.dot(A[i, :i], x_new[:i])
            s2 = np.dot(A[i, i + 1:], x_new[i + 1:])
            x_new[i] = (1.0 - w)*x_half[i] + w * (b[i] - s1 - s2) / A[i, i]
        if np.allclose(x, x_new, rtol=1e-8):
            break
        x = x_new
    return x
x = sor_method(A, b)
print("Final solution:")
print(x)
error = np.dot(A, x) - b
print("Error:")
print(error) | {
"repo_name": "maxmalysh/congenial-octo-adventure",
"path": "mod2/task2.py",
"copies": "1",
"size": "3649",
"license": "unlicense",
"hash": 5055167547906019000,
"line_mean": 26.9237288136,
"line_max": 97,
"alpha_frac": 0.5154826958,
"autogenerated": false,
"ratio": 2.2122229684351913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8216070432187274,
"avg_score": 0.0023270464095835044,
"num_lines": 118
} |
#2 okay lets do this
import sys
sys.path.append('../.')
import pandas as pd
import numpy as np
import itertools as it
import path_planner as plan
pathName = '../../data-se3-path-planner/cherylData/'
pathName1 = '../../data-se3-path-planner/yearData/batch2019/'
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
inclinations = ['55', '60', '65', '70', '75', '80', '85', '90']
inclinations = inclinations[::-1] # reverses the inclinations
filesList =[[pathName+month+inclination+'.txt' for month in months ] for inclination in inclinations]
# print( filesList)
# filesList = ['55deg.txt', '60deg.txt','65deg.txt', '70deg.txt', '75deg.txt','80deg.txt','85deg.txt']
# filesList = [pathName+files for files in filesList]
# filesList = ['80deg.txt']
# anglesList = [55, 60, 65, 70, 75, 80, 85]
cma = []
def createCma(files):
# print("FILES", files)
df = pd.read_csv(files)
# print(df.tail())
Xgse = df['DefaultSC.gse.X']
Ygse = df['DefaultSC.gse.Y']
Zgse = df['DefaultSC.gse.Z']
# i am DEFINITELY going to end up with a dimension problem here
t = df['DefaultSC.A1ModJulian']
# print(t.tail())
t = t.tolist()
t = [t_index + 29999.5 for t_index in t]
# print("hopefully something close to the real time", t)
# print("type of t", type(t.tolist()))
# refer to tsyganenko for these coordinate systems
# the output here is in radians
# angle = np.arctan2(Xgse,Zgse)
# theta = np.arctan2(Ygse,Xgse)
angle = np.arctan2(Zgse,Xgse)
theta = np.arctan2(Xgse,Ygse)
print("type angle,", type(angle))
# but sometimes i print it in degrees
# print("angle is" , angle * 180 / np.pi)
# print("theta is", theta)
# make it into an array for iteration (probably a faster way)
# print(angle)
# print(len(angle))
count = 0
region = []
# for x,x1 in zip(angle, angle[1:]):
# eventually this has to be modified so that the unh professors script
# will alter the dipole angle
# These numbers are from the tsyganenko script that I wrote a while back
# if x<0.2151 or x> 0.2849:
# if 0.2151<=x1<=0.2849:
# count+=1
# angle = angle[:20]
# lets get this boundary crossing thing right
# Okay I think I did it
lowBound,highBound,lowLateralBound,highLateralBound = plan.GridGraph().getGoalRegion(t)
# print("type of llb", lowLateralBound)
# print("type of lb", lowBound)
# print("type of hb", highBound)
# print("type of llb", lowLateralBound)
# print("type of hlb", highLateralBound)
# lowBound = np.asarray(lowBound).tolist()
# highBound = np.asarray(highBound).tolist()
# lowLateralBound = np.asarray(lowLateralBound).tolist()
# highLateralBound = np.asarray(highLateralBound).tolist()
# print("lowbound hadhasdf", len(lowBound))
# print("lowbound", lowBound)
# print("highBound", highBound)
# print("lowLateralBound", lowLateralBound)
# print("highLateralbound", highLateralBound)
# lowBound = 0.2151/2
# highBound = 0.2849/2
# lateralBound = 5.0/2
# implement the tsyganenko function and dipole tilt for dynamic changing
# of the cusp location
# for x,y,lb,ub,llb,hlb in zip(angle, theta, lowBound, highBound, lowLateralBound, highLateralBound):
for x,y,lb,hb,llb,hlb in zip(angle,theta,lowBound,highBound,lowLateralBound,highLateralBound):
# the biggest thing is a modification of these thresholds
# if lowBound<=x<=highBound and lowLateralBound<=y<=highLateralBound:
if lb<=x<=hb and llb<=y<=hlb:
# if lowLateralBound<=y<=highLateralBound:
region.append(1)
else:
region.append(0)
for x,x1 in zip(region, region[1:]):
if x==0 and x1 == 1:
count+=1
else:
pass
# print("x", angle)
# print("x1", angle[1:])
# print("count",count)
# the main problem is with the dimensions of the cma variable
# so how do i get cma to have the same dimensions as filesList?
cma.append([count])
# print("cma", cma)
# print("region",region)
# print("region", region[:100])
return count
cma2 =[]
# the fact that you can call a function in a list comprehension is the number one reason
# why i'm going to stick with python
cma2 =[[createCma(pathName1+month+inclination+'.txt') for month in months ] for inclination in inclinations]
# cma2 = [[createCma(pathName1+'test4.txt')]]
print("cma2", cma2)
if __name__ == "__main__":
a = 3
from pylab import *
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.7),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.0),
(1.0, 0.5, 1.0))}
my_cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap',cdict,256)
pcolor(cma2,cmap=my_cmap)
colorbar()
plt.title('Cusp Crossings')
plt.xlabel('Start Month')
plt.ylabel('55+5y deg inclination')
plt.show()
| {
"repo_name": "fsbr/se3-path-planner",
"path": "modularPlanner/cuspAnalyze.py",
"copies": "2",
"size": "5214",
"license": "mit",
"hash": 6572577738920635000,
"line_mean": 34.9586206897,
"line_max": 110,
"alpha_frac": 0.6058688147,
"autogenerated": false,
"ratio": 3.0598591549295775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4665727969629577,
"avg_score": null,
"num_lines": null
} |
## 2. Opening Files ##
a = open("test.txt", "r")
print(a)
f = open("crime_rates.csv", "r")
## 3. Reading In Files ##
f = open("crime_rates.csv", "r")
data = f.read()
## 4. Splitting ##
# We can split a string into a list.
sample = "john,plastic,joe"
split_list = sample.split(",")
print(split_list)
# Here's another example.
string_two = "How much wood\ncan a woodchuck chuck\nif a woodchuck\ncould chuck wood?"
split_string_two = string_two.split('\n')
print(split_string_two)
# Code from previous cells
f = open('crime_rates.csv', 'r')
data = f.read()
rows = data.split('\n')
## 6. Practice - Loops ##
ten_rows = rows[0:10]
for i in ten_rows: print(i)
## 7. List of Lists ##
three_rows = ["Albuquerque,749", "Anaheim,371", "Anchorage,828"]
final_list = []
for row in three_rows:
split_list = row.split(',')
final_list.append(split_list)
print(final_list)
print(final_list[0])
print(final_list[1])
print(final_list[2])
## 8. Practice - Splitting Elements in a List ##
f = open('crime_rates.csv', 'r')
data = f.read()
rows = data.split('\n')
print(rows[0:5])
final_data = []
for item in rows:
i = item.split(',')
final_data.append(i)
print(final_data)
## 9. Accessing Elements in a List of Lists: The Manual Way ##
print(five_elements)
cities_list = []
for element in five_elements:
    cities_list.append(element[0])
## 10. Looping Through a List of Lists ##
crime_rates = []
cities_list = []
for row in five_elements:
# row is a list variable, not a string.
crime_rate = row[1]
# crime_rate is a string, the crime rate of the city.
crime_rates.append(crime_rate)
for row in final_data:
cities_list.append(row[0])
## 11. Challenge ##
f = open('crime_rates.csv', 'r')
data = f.read()
rows = data.split('\n')
print(rows[0:5])
int_crime_rates = []
for row in rows:
row = row.split(',')
int_crime_rates.append(int(row[1])) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Python Programming Beginner/Files and Loops-2.py",
"copies": "1",
"size": "1881",
"license": "mit",
"hash": -4257560896612518000,
"line_mean": 20.8837209302,
"line_max": 86,
"alpha_frac": 0.6422115896,
"autogenerated": false,
"ratio": 2.6418539325842696,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37840655221842695,
"avg_score": null,
"num_lines": null
} |
''' 2-orbits_computed.py
=========================
AIM: Verify which orbits were actually computed by the pipeline
INPUT: files: - all flux_*.dat files in <orbit_id>_flux/
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_misc/ : one file 'orbits.dat' containing the list
CMD: python 2-orbits_computed.py
ISSUES: <none known>
REQUIRES:- standard python libraries, specific libraries in resources/
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/ --> figures
* <orbit_id>_misc/ --> storages of data
* all_figures/ --> comparison figures
REMARKS: <none>
'''
###########################################################################
### INCLUDES
import numpy as np
import os.path
from resources.routines import *
from resources.TimeStepping import *
###########################################################################
### PARAMETERS
# Orbit id same as in 0-*.py
orbit_id = '6am_700_5_conf4e'
# First orbit in data set
orbit_ini = 1
# Last orbit to look for
orbit_end = minute2orbit(1440*365+1,orbit_id)
# File name for the output data file
data_file = 'orbits.dat'
###########################################################################
### INITIALISATION
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
predictor = int((orbit_end - orbit_ini) / 10. * 4.)
orbits = np.zeros(predictor*2).reshape(predictor, 2)
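# preallocate rows for roughly 40% of the orbit range; extra rows are appended
# with np.vstack in the loop below if more computed orbits are found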
kk = 0
orbit_old = 0
###########################################################################
### Look for computed orbits
for orbit in range(orbit_ini, orbit_end+1):
# lookup the start and end time for the orbit
t_ini, t_end, a_ini, a_end = orbit2times(orbit,orbit_id)
# check that minute 0 and 60 exists (they should!)
fname = '%sflux_%d.dat' % (folder_flux, a_ini)
if not os.path.isfile(fname): continue
fname = '%sflux_%d.dat' % (folder_flux, a_ini+60)
if not os.path.isfile(fname): continue
if kk > 0:
step = orbit-orbit_old
else:
step = 1
try:
orbits[kk,0] = orbit
orbits[kk,1] = step
except IndexError:
orbits = np.vstack([orbits, [orbit, step]])
orbit_old = orbit
print 'Orbit %4d was computed for orbit ID %s, step %d' % (orbit, orbit_id, step)
kk += 1
# Remove extra line in the array
orbits = orbits[orbits[:,1]>0]
np.savetxt(folder_misc+data_file, orbits, fmt='%d')
| {
"repo_name": "kuntzer/SALSA-public",
"path": "2_orbits_computed.py",
"copies": "1",
"size": "2376",
"license": "bsd-3-clause",
"hash": 3813659845101292000,
"line_mean": 25.1098901099,
"line_max": 82,
"alpha_frac": 0.5913299663,
"autogenerated": false,
"ratio": 3.147019867549669,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42383498338496683,
"avg_score": null,
"num_lines": null
} |
## 2. Organizing our code ##
# Define the Trial class here
class Trial(object):
def __init__(self, datarow):
self.efficiency = float(datarow[0])
self.individual = int(datarow[1])
self.chopstick_length = int(datarow[2])
first_trial = Trial(chopsticks[0])
## 3. The Chopstick class ##
class Trial(object):
def __init__(self, datarow):
self.efficiency = float(datarow[0])
self.individual = int(datarow[1])
self.chopstick_length = int(datarow[2])
first_trial = Trial(chopsticks[0])
# Define the Chopstick class here
class Chopstick(object):
def __init__(self,length):
self.length = length
mini_chopstick = Chopstick(100)
## 4. Storing the trials ##
class Trial(object):
def __init__(self, datarow):
self.efficiency = float(datarow[0])
self.individual = int(datarow[1])
self.chopstick_length = int(datarow[2])
class Chopstick(object):
def __init__(self, length):
self.length = length
# Start our trial list empty
self.trials = []
# Now, fill our list with relevant trials
for item in chopsticks:
if int(item[2]) == self.length:
self.trials.append(Trial(item))
medium_chopstick = Chopstick(240)
## 5. Average Efficiency ##
import math
class Trial(object):
def __init__(self, datarow):
self.efficiency = float(datarow[0])
self.individual = int(datarow[1])
self.chopstick_length = int(datarow[2])
class Chopstick(object):
def __init__(self, length):
self.length = length
self.trials = []
for row in chopsticks:
if int(row[2]) == self.length:
self.trials.append(Trial(row))
def num_trials(self):
return len(self.trials)
def avg_efficiency(self):
return math.fsum([row.efficiency for row in self.trials]) / self.num_trials()
avg_eff_210 = Chopstick(210).avg_efficiency()
## 8. Bad Data ##
class Trial(object):
def __init__(self, datarow):
try:
self.efficiency = float(datarow[0])
self.individual = int(datarow[1])
self.chopstick_length = int(datarow[2])
except ValueError:
self.efficiency = -1.0
self.individual = -1
self.chopstick_length = -1
bad_trial = Trial(chopsticks[-1])
## 9. Bad Data - Part 2 ##
class Trial(object):
def __init__(self, datarow):
try:
self.efficiency = float(datarow[0])
self.individual = int(datarow[1])
self.chopstick_length = int(datarow[2])
except:
self.efficiency = -1
self.individual = -1
self.chopstick_length = -1
class Chopstick(object):
def __init__(self, length):
self.length = length
self.trials = []
for row in chopsticks:
if int(row[2]) == self.length:
trial = Trial(row)
# Verify that the data is good
if trial.efficiency != -1 and trial.individual != -1 and trial.chopstick_length != -1:
# Add the trial to trials if it is good
self.trials.append(trial)
def num_trials(self):
return len(self.trials)
def avg_efficiency(self):
efficiency_sum = 0
for trial in self.trials:
efficiency_sum += trial.efficiency
return efficiency_sum / self.num_trials()
bad_chopstick = Chopstick(400)
## 10. Division By Zero ##
class Trial(object):
def __init__(self, datarow):
try:
self.efficiency = float(datarow[0])
self.individual = int(datarow[1])
self.chopstick_length = int(datarow[2])
except:
self.efficiency = -1
self.individual = -1
self.chopstick_length = -1
class Chopstick(object):
def __init__(self, length):
self.length = length
self.trials = []
for row in chopsticks:
if int(row[2]) == self.length:
trial = Trial(row)
if trial.individual >= 0:
self.trials.append(trial)
def num_trials(self):
return len(self.trials)
def avg_efficiency(self):
efficiency_sum = 0
for trial in self.trials:
efficiency_sum += trial.efficiency
try:
return efficiency_sum / self.num_trials()
except ZeroDivisionError:
return -1.0
bad_average = Chopstick(100).avg_efficiency()
## 11. Most Efficient Chopsticks ##
class Trial(object):
def __init__(self, datarow):
try:
self.efficiency = float(datarow[0])
self.individual = int(datarow[1])
self.chopstick_length = int(datarow[2])
except:
self.efficiency = -1
self.individual = -1
self.chopstick_length = -1
class Chopstick(object):
def __init__(self, length):
self.length = length
self.trials = []
for row in chopsticks:
if int(row[2]) == self.length:
trial = Trial(row)
if trial.individual >= 0:
self.trials.append(trial)
def num_trials(self):
return len(self.trials)
def avg_efficiency(self):
efficiency_sum = 0
for trial in self.trials:
efficiency_sum += trial.efficiency
try:
return efficiency_sum / self.num_trials()
except ZeroDivisionError:
return -1.0
chopstick_lengths = [180, 195, 210, 225, 240, 255, 270, 285, 300, 315, 330]
chopstick_list = [Chopstick(length) for length in chopstick_lengths]
## 12. Most Efficient Chopsticks - Part 2 ##
class Trial(object):
def __init__(self, datarow):
try:
self.efficiency = float(datarow[0])
self.individual = int(datarow[1])
self.chopstick_length = int(datarow[2])
except:
self.efficiency = -1
self.individual = -1
self.chopstick_length = -1
class Chopstick(object):
def __init__(self, length):
self.length = length
self.trials = []
for row in chopsticks:
if int(row[2]) == self.length:
trial = Trial(row)
if trial.individual >= 0:
self.trials.append(trial)
def num_trials(self):
return len(self.trials)
def avg_efficiency(self):
efficiency_sum = 0
for trial in self.trials:
efficiency_sum += trial.efficiency
try:
return efficiency_sum / self.num_trials()
except ZeroDivisionError:
return -1.0
def __lt__(self, other):
return self.avg_efficiency() < other.avg_efficiency()
def __gt__(self, other):
return self.avg_efficiency() > other.avg_efficiency()
def __le__(self, other):
return self.avg_efficiency() <= other.avg_efficiency()
def __ge__(self, other):
return self.avg_efficiency() >= other.avg_efficiency()
def __eq__(self, other):
return self.avg_efficiency() == other.avg_efficiency()
def __ne__(self, other):
return self.avg_efficiency() != other.avg_efficiency()
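# With the rich comparisons defined above, max() on a list of Chopstick objects
# returns the chopstick with the highest average efficiency.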
chopstick_lengths = [180, 195, 210, 225, 240, 255, 270, 285, 300, 315, 330]
chopstick_list = [Chopstick(length) for length in chopstick_lengths]
most_efficient = max(chopstick_list) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Python Programming Advanced/Exception handling-110.py",
"copies": "1",
"size": "7474",
"license": "mit",
"hash": -7034552558635030000,
"line_mean": 30.0165975104,
"line_max": 102,
"alpha_frac": 0.5663633931,
"autogenerated": false,
"ratio": 3.555661274976213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4622024668076213,
"avg_score": null,
"num_lines": null
} |
## 2. Our dataset ##
import pandas
# Set index_col to False to avoid pandas thinking that the first column is row indexes (it's age).
income = pandas.read_csv("income.csv", index_col=False)
print(income.head(5))
## 3. Converting categorical variables ##
# Convert a single column from text categories into numbers.
col = pandas.Categorical(income["workclass"])
income["workclass"] = col.codes
print(income["workclass"].head(5))
cols = ['education', 'marital_status', 'occupation', 'relationship', 'race', 'sex', 'native_country','high_income']
for item in cols:
    cat = pandas.Categorical(income[item])
income[item]=cat.codes
## 5. Performing a split ##
# Enter your code here.
private_incomes = income[income['workclass']==4]
public_incomes = income[income['workclass']!=4]
## 8. Entropy ##
import math
# We'll do the same calculation we did above, but in Python.
# Passing 2 as the second parameter to math.log will take a base 2 log.
entropy = -(2/5 * math.log(2/5, 2) + 3/5 * math.log(3/5, 2))
print(entropy)
countts = income['high_income'].value_counts()
income_entropy = -(countts[0]/len(income['high_income']) * math.log(countts[0]/len(income['high_income']),2) + countts[1]/len(income['high_income']) * math.log(countts[1]/len(income['high_income']),2))
print(income_entropy)
## 9. Information gain ##
import numpy
def calc_entropy(column):
"""
Calculate entropy given a pandas Series, list, or numpy array.
"""
# Compute the counts of each unique value in the column.
counts = numpy.bincount(column)
# Divide by the total column length to get a probability.
probabilities = counts / len(column)
# Initialize the entropy to 0.
entropy = 0
# Loop through the probabilities, and add each one to the total entropy.
for prob in probabilities:
if prob > 0:
entropy += prob * math.log(prob, 2)
return -entropy
# Verify our function matches our answer from earlier.
entropy = calc_entropy([1,1,0,0,1])
print(entropy)
information_gain = entropy - ((.8 * calc_entropy([1,1,0,0])) + (.2 * calc_entropy([1])))
print(information_gain)
income_entropy = calc_entropy(income["high_income"])
median_age = income["age"].median()
left_split = income[income["age"] <= median_age]
right_split = income[income["age"] > median_age]
age_information_gain = income_entropy - ((left_split.shape[0] / income.shape[0]) * calc_entropy(left_split["high_income"]) + ((right_split.shape[0] / income.shape[0]) * calc_entropy(right_split["high_income"])))
## 10. Finding the best split ##
def calc_information_gain(data, split_name, target_name):
"""
Calculate information gain given a dataset, column to split on, and target.
"""
# Calculate original entropy.
original_entropy = calc_entropy(data[target_name])
# Find the median of the column we're splitting.
column = data[split_name]
median = column.median()
# Make two subsets of the data based on the median.
left_split = data[column <= median]
right_split = data[column > median]
# Loop through the splits, and calculate the subset entropy.
to_subtract = 0
for subset in [left_split, right_split]:
prob = (subset.shape[0] / data.shape[0])
to_subtract += prob * calc_entropy(subset[target_name])
# Return information gain.
return original_entropy - to_subtract
# Verify that our answer is the same as in the last screen.
print(calc_information_gain(income, "age", "high_income"))
columns = ["age", "workclass", "education_num", "marital_status", "occupation", "relationship", "race", "sex", "hours_per_week", "native_country"]
information_gains = []
for item in columns:
info = calc_information_gain(income,item,'high_income')
information_gains.append(info)
highest_gain_index = information_gains.index(max(information_gains))
highest_gain = columns[highest_gain_index] | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Decision Trees/Introduction to Decision Trees-137.py",
"copies": "1",
"size": "3940",
"license": "mit",
"hash": -6466989242877481000,
"line_mean": 34.8272727273,
"line_max": 211,
"alpha_frac": 0.6824873096,
"autogenerated": false,
"ratio": 3.4290687554395127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9507817331666448,
"avg_score": 0.020747746674612735,
"num_lines": 110
} |
"""2.Phase"""
from sympy import *
init_printing()
z, x1, x2, x3, x4, x5, x6, x7 = symbols('z, x1, x2, x3, x4, x5, x6, x7')
B = [x1, x2, x4, x6, x7]
N = [x3, x5]
rows = [Eq(x4, 6 + 3 * x5 - 1 * x3),
Eq(x1, 2 - x5 + 1 * x3),
Eq(x2, 8 + 2 * x5 - 1 * x3),
Eq(x6, 22 - 5 * x5 + 1 * x3),
Eq(x7, 10 + 1 * x5 - 1 * x3)]
ziel = Eq(z, 86 + 5 * x5 + 3 * x3)
# -------------------------------------------------------------------------------
for i in range(10):
    # find the entering variable
    # chosen as in the lecture, i.e. the variable with the largest positive coefficient
eintretende = None
max_eintretende = -oo
for var, coeff in ziel.rhs.as_coefficients_dict().items():
# 1 is the first coeff i.e. the value of the ziel function
if var != 1 and coeff > 0 and coeff > max_eintretende:
max_eintretende = coeff
eintretende = var
    # if there are no positive coefficients left, the current solution is optimal
if eintretende == None:
break
    # find the leaving variable (ratio test)
verlassende = None
min_wert = +oo
min_row = None
for row in rows:
if row.has(eintretende):
new_row = row
for nbv in N:
if nbv != eintretende:
new_row = new_row.subs(nbv, 0)
wert = solve(new_row.rhs >= 0).as_set().right
if wert < min_wert:
min_wert = wert
min_row = row
verlassende = row.lhs
    # solve the pivot row for the entering variable and update the remaining rows
new_formel = Eq(eintretende, solve(min_row, eintretende)[0])
new_rows = [new_formel]
for row in rows:
if row.lhs != verlassende:
new_rows.append(Eq(row.lhs, row.rhs.subs(eintretende, new_formel.rhs)))
rows = new_rows
    # update the objective row (ziel)
ziel = Eq(z, ziel.rhs.subs(eintretende, new_formel.rhs))
pprint(latex(ziel))
# update B, N
B.remove(verlassende); B.append(eintretende)
N.remove(eintretende); N.append(verlassende)
| {
"repo_name": "mazenbesher/simplex",
"path": "sympy_version/specific/blatt4_aufgabe2_iii.py",
"copies": "1",
"size": "2017",
"license": "mit",
"hash": -6708402545066076000,
"line_mean": 30.0307692308,
"line_max": 85,
"alpha_frac": 0.5205751116,
"autogenerated": false,
"ratio": 2.7706043956043955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3791179507204396,
"avg_score": null,
"num_lines": null
} |
## 2. Point Guards ##
# Enter code here.
point_guards = nba[nba["pos"] == "PG"]
## 3. Points Per Game ##
point_guards['ppg'] = point_guards['pts'] / point_guards['g']
# Sanity check, make sure ppg = pts/g
point_guards[['pts', 'g', 'ppg']].head(5)
## 4. Assist Turnover Ratio ##
point_guards = point_guards[point_guards['tov'] != 0]
point_guards["atr"] = point_guards["ast"] / point_guards["tov"]
## 5. Visualizing the Point Guards ##
plt.scatter(point_guards['ppg'], point_guards['atr'], c='y')
plt.title("Point Guards")
plt.xlabel('Points Per Game', fontsize=13)
plt.ylabel('Assist Turnover Ratio', fontsize=13)
plt.show()
## 7. The Algorithm ##
num_clusters = 5
# Use numpy's random function to generate a list, length: num_clusters, of indices
random_initial_points = np.random.choice(point_guards.index, size=num_clusters)
# Use the random indices to create the centroids
centroids = point_guards.loc[random_initial_points]
## 8. Visualize Centroids ##
plt.scatter(point_guards['ppg'], point_guards['atr'], c='yellow')
plt.scatter(centroids['ppg'], centroids['atr'], c='red')
plt.title("Centroids")
plt.xlabel('Points Per Game', fontsize=13)
plt.ylabel('Assist Turnover Ratio', fontsize=13)
plt.show()
## 9. Setup (continued) ##
def centroids_to_dict(centroids):
dictionary = dict()
# iterating counter we use to generate a cluster_id
counter = 0
# iterate a pandas data frame row-wise using .iterrows()
for index, row in centroids.iterrows():
coordinates = [row['ppg'], row['atr']]
dictionary[counter] = coordinates
counter += 1
return dictionary
centroids_dict = centroids_to_dict(centroids)
## 10. Step 1 (Euclidean Distance) ##
import math
def calculate_distance(centroid, player_values):
root_distance = 0
for x in range(0, len(centroid)):
difference = centroid[x] - player_values[x]
squared_difference = difference**2
root_distance += squared_difference
euclid_distance = math.sqrt(root_distance)
return euclid_distance
q = [5, 2]
p = [3,1]
# Sqrt(5) = ~2.24
print(calculate_distance(q, p))
## 11. Step 1 (Continued) ##
# Add the function, `assign_to_cluster`
# This creates the column, `cluster`, by applying assign_to_cluster row-by-row
# Uncomment when ready
def assign_to_cluster(row):
largest = []
for key in centroids_dict.keys():
largest.append(calculate_distance(centroids_dict[key], [row["ppg"],row["atr"]]))
return largest.index(min(largest))
point_guards['cluster'] = point_guards.apply(lambda row: assign_to_cluster(row), axis=1)
## 12. Visualizing Clusters ##
# Visualizing clusters
def visualize_clusters(df, num_clusters):
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
for n in range(num_clusters):
clustered_df = df[df['cluster'] == n]
plt.scatter(clustered_df['ppg'], clustered_df['atr'], c=colors[n-1])
plt.xlabel('Points Per Game', fontsize=13)
plt.ylabel('Assist Turnover Ratio', fontsize=13)
plt.show()
visualize_clusters(point_guards, 5)
## 13. Step 2 ##
def recalculate_centroids(df):
new_centroids_dict = dict()
# 0..1...2...3...4
for cluster_id in range(0, num_clusters):
temp_df = df[df["cluster"] == cluster_id]
new_centroids_dict[cluster_id] = [np.mean(temp_df["ppg"]),np.mean(temp_df["atr"])]
return new_centroids_dict
centroids_dict = recalculate_centroids(point_guards)
## 14. Repeat Step 1 ##
point_guards['cluster'] = point_guards.apply(lambda row: assign_to_cluster(row), axis=1)
visualize_clusters(point_guards, num_clusters)
## 15. Repeat Step 2 and Step 1 ##
centroids_dict = recalculate_centroids(point_guards)
point_guards['cluster'] = point_guards.apply(lambda row: assign_to_cluster(row), axis=1)
visualize_clusters(point_guards, num_clusters)
## 16. Challenges of K-Means ##
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=num_clusters)
kmeans.fit(point_guards[['ppg', 'atr']])
point_guards['cluster'] = kmeans.labels_
visualize_clusters(point_guards, num_clusters) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Machine learning Intermediate/K-means clustering-95.py",
"copies": "1",
"size": "4063",
"license": "mit",
"hash": 4968914129924903000,
"line_mean": 28.2374100719,
"line_max": 90,
"alpha_frac": 0.6729017967,
"autogenerated": false,
"ratio": 3.0051775147928996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9128740028389579,
"avg_score": 0.009867856620664112,
"num_lines": 139
} |
## 2. Probability of renting bikes ##
import pandas
bikes = pandas.read_csv("bike_rental_day.csv")
# Find the number of days the bikes rented exceeded the threshold.
days_over_threshold = bikes[bikes["cnt"] > 2000].shape[0]
# Find the total number of days we have data for.
total_days = bikes.shape[0]
# Get the probability that more than 2000 bikes were rented for any given day.
probability_over_2000 = days_over_threshold / total_days
print(probability_over_2000)
probability_over_4000 =bikes[bikes["cnt"] > 4000].shape[0] / total_days
## 4. Calculating probabilities ##
# Enter your code here.
coin_1_prob = 0.5*3*.5*.5
## 6. Calculating the number of combinations ##
sunny_1_combinations = None
# There are 5 combinations in which one day can be sunny.
# SNNNN
# NSNNN
# NNSNN
# NNNSN
# NNNNS
sunny_1_combinations = 5
## 8. Finding the number of combinations ##
import math
def find_outcome_combinations(N, k):
# Calculate the numerator of our formula.
numerator = math.factorial(N)
# Calculate the denominator.
denominator = math.factorial(k) * math.factorial(N - k)
# Divide them to get the final value.
return numerator / denominator
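# Sanity check: find_outcome_combinations(5, 1) == 5.0, matching the five
# single-sunny-day orderings enumerated earlier.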
combinations_7 = find_outcome_combinations(10, 7)
combinations_8 = find_outcome_combinations(10, 8)
combinations_9 = find_outcome_combinations(10, 9)
## 10. Calculating the probability of one combination ##
prob_combination_3 = None
prob_combination_3 = (0.7 ** 3) * (0.3 ** 2)
## 12. Function to calculate the probability of a single combination ##
p = .6
q = .4
def find_combination_probability(N, k, p, q):
# Take p to the power k, and get the first term.
term_1 = p ** k
# Take q to the power N-k, and get the second term.
term_2 = q ** (N-k)
# Multiply the terms out.
return term_1 * term_2
prob_8 = find_outcome_combinations(10, 8) * find_combination_probability(10, 8, p, q)
prob_9 = find_outcome_combinations(10, 9) * find_combination_probability(10, 9, p, q)
prob_10 = find_outcome_combinations(10, 10) * find_combination_probability(10, 10, p, q) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Probability Statistics Intermediate/Calculating probabilities-134.py",
"copies": "1",
"size": "2066",
"license": "mit",
"hash": 2333814900466093600,
"line_mean": 28.9565217391,
"line_max": 88,
"alpha_frac": 0.701839303,
"autogenerated": false,
"ratio": 3.069836552748886,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9241802224338951,
"avg_score": 0.005974726281986772,
"num_lines": 69
} |
2#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Administrator
#
# Created: 08/10/2011
# Copyright: (c) Administrator 2011
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
class Gol02:
def __init__(self):
self.numberOfNeighbors = 0
self.isAlive = False
self.board = []
pass
def evolve(self):
return
def setAlive(self):
self.isAlive = True
def isLiving(self):
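        # Conway's rules: a cell is alive next generation if it has exactly three
        # neighbours, or if it is currently alive and has exactly two.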
result = ( self.numberOfNeighbors == 3 ) or \
(self.isAlive and self.numberOfNeighbors == 2)
return result
def addNeigbors(self, numberOfNeighbors):
self.numberOfNeighbors = numberOfNeighbors
return
    def appendNeigbors(self, neighbor):
self.board.append(neighbor)
self.numberOfNeighbors +=1
if __name__ == '__main__':
pass | {
"repo_name": "hemmerling/codingdojo",
"path": "src/game_of_life/python_coderetreat_berlin_2014-09/python_legacycrberlin02/gol02.py",
"copies": "1",
"size": "1049",
"license": "apache-2.0",
"hash": 3356558385341113300,
"line_mean": 24.275,
"line_max": 81,
"alpha_frac": 0.4671115348,
"autogenerated": false,
"ratio": 4.444915254237288,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5412026789037289,
"avg_score": null,
"num_lines": null
} |
## 2. Sets ##
#legislators = list(csv.reader(open('legislators.csv','r')))
gender = []
for item in legislators:
gender.append(item[3])
gender = set(gender)
print(gender)
## 3. Exploring the Dataset ##
party = []
for item in legislators:
party.append(item[6])
party = set(party)
print(party)
print(legislators)
## 4. Missing Values ##
for item in legislators:
if item[3]=='':
item[3]= 'M'
## 5. Parsing Birth Years ##
birth_years = []
for item in legislators:
parts = item[2].split("-")
birth_years.append(parts[0])
## 6. Try/except Blocks ##
try:
float("hello")
except Exception:
print("Error converting to float..")
## 7. Exception Instances ##
try:
int('')
except Exception as exc:
print(type(exc))
print(str(exc))
## 8. The Pass Keyword ##
converted_years = []
for item in birth_years:
try:
item = int(item)
except Exception:
pass
converted_years.append(item)
## 9. Convert Birth Years to Integers ##
for item in legislators:
try:
birth_year = int(item[2].split('-')[0])
except Exception:
birth_year = 0
item.append(birth_year)
## 10. Fill in Years Without a Value ##
last_value = 1
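# Forward fill: any row whose parsed birth year is 0 (a failed conversion)
# inherits the most recently seen value.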
for item in legislators:
if item[7]==0:
item[7]=last_value
last_value = item[7] | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Python Programming Intermediate/Error Handling-7.py",
"copies": "1",
"size": "1303",
"license": "mit",
"hash": 4636334197409308000,
"line_mean": 16.8630136986,
"line_max": 60,
"alpha_frac": 0.6162701458,
"autogenerated": false,
"ratio": 3.07311320754717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9149345311419306,
"avg_score": 0.008007608385573033,
"num_lines": 73
} |
## 2. Systems of equations as matrices ##
import numpy as np
# Set the dtype to float to do float math with the numbers.
matrix = np.asarray([
[2, 1, 25],
[3, 2, 40]
], dtype=np.float32)
matrix[0] = matrix[0] * 2
matrix[0] = matrix[0] - matrix[1]
matrix[1] = matrix[1] - (matrix[0] * 3)
matrix[1] /= 2
print(matrix)
## 4. Solving more complex equations ##
import numpy as np
matrix = np.asarray([
[1, 2, 0, 7],
[0, 3, 3, 11],
[1, 2, 2, 11]
], dtype=np.float32)
matrix[2] = matrix[2] - matrix[0]
matrix[2] = matrix[2] / 2
matrix[1] = matrix[1] / 3
matrix[1] = matrix[1] - matrix[2]
matrix[0] = matrix[0] - 2 * matrix[1]
## 5. Echelon form ##
matrix = np.asarray([
[0, 0, 0, 7],
[0, 0, 1, 11],
[1, 2, 2, 11],
[0, 5, 5, 1]
], dtype=np.float32)
# Swap the first and the third rows - first swap
matrix[[0,2]] = matrix[[2,0]]
matrix[[1,3]] = matrix[[3,1]]
matrix[[2,3]] = matrix[[3,2]]
## 6. Reduced row echelon form ##
A = np.asarray([
[0, 2, 1, 5],
[1, 2, 1, 8],
[3, 0, 1, 10],
], dtype=np.float32)
# First, we'll swap the second row with the first to get a non-zero coefficient in the first column
A[[0,1]] = A[[1,0]]
# The leading coefficient is already 1, so there's no need to divide
# Now, we need to make sure that our 1 coefficient is the only coefficient in its column
# We have to subtract three times the first row from the third row
A[2] -= 3 * A[0]
# Now, we move to row 2
# We divide by 2 to get a one as the leading coefficient
A[1] /= 2
# We subtract 2 times the second row from the first to get rid of
# the second column coefficient in the first row
A[0] -= 2 * A[1]
# And we'll add 6 times the second row to the third to eliminate the leading coefficient there
A[2] += 6 * A[1]
# Now, we can move to the third row where the leading coefficient is already 1
# We just need to subtract half of the third from the second
A[1] -= 0.5 * A[2]
# We're finished, and our system is solved!
print(A)
## 7. Inconsistency ##
A = np.asarray([
[10, 5, 20, 60],
[3, 1, 0, 11],
[8, 2, 2, 30],
[0, 4, 5, 13]
], dtype=np.float32)
B = np.asarray([
[5, -1, 3, 14],
[0, 1, 2, 8],
[0, -2, 5, 1],
[0, 0, 6, 6]
], dtype=np.float32)
A_consistent,B_consistent = True,False
## 8. Infinite solutions ##
A = np.asarray([
[2, 4, 8, 20],
[4, 8, 16, 40],
[20, 5, 5, 10]
], dtype=np.float32)
A_infinite = True
B = np.asarray([
[1, 1, 1, 4],
[3, -2, 5, 8],
[8, -4, 5, 10]
], dtype=np.float32)
B_infinite = False | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Linear Algebra/Solving systems of equations with matrices-55.py",
"copies": "1",
"size": "2582",
"license": "mit",
"hash": 6122938107126201000,
"line_mean": 22.0625,
"line_max": 99,
"alpha_frac": 0.5766847405,
"autogenerated": false,
"ratio": 2.6618556701030927,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.3738540410603093,
"avg_score": null,
"num_lines": null
} |
# 2. telnetlib
#
# a. Write a script that connects using telnet to the pynet-rtr1 router.
# Execute the 'show ip int brief' command on the router and return the output.
#
# Try to do this on your own (i.e. do not copy what I did previously). You should be able to do this by using the following items:
#
# telnetlib.Telnet(ip_addr, TELNET_PORT, TELNET_TIMEOUT)
# remote_conn.read_until(<string_pattern>, TELNET_TIMEOUT)
# remote_conn.read_very_eager()
# remote_conn.write(<command> + '\n')
# remote_conn.close()
import telnetlib
import socket
import time
ip_addr = "50.76.53.27"
username = 'pyclass'
password = '88newclass'
TELNET_PORT = 23
TELNET_TIMEOUT = 5
try:
remote_conn = telnetlib.Telnet(ip_addr, TELNET_PORT, TELNET_TIMEOUT)
except socket.timeout:
print "Unreachable Host"
remote_conn.read_until("sername:", TELNET_TIMEOUT)
remote_conn.write(username + "\n")
remote_conn.read_until("assword:", TELNET_TIMEOUT)
remote_conn.write(password + "\n")
remote_conn.read_very_eager()
remote_conn.write("show ip int brief" + "\n")
time.sleep(1)
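# The short pause gives the router time to return the full command output
# before read_very_eager() drains whatever is currently in the buffer.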
output = remote_conn.read_very_eager()
print output
remote_conn.close()
| {
"repo_name": "linkdebian/pynet_course",
"path": "class2/exercise2.py",
"copies": "1",
"size": "1143",
"license": "apache-2.0",
"hash": -7075083346293237000,
"line_mean": 24.4,
"line_max": 130,
"alpha_frac": 0.719160105,
"autogenerated": false,
"ratio": 2.901015228426396,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.41201753334263963,
"avg_score": null,
"num_lines": null
} |
## 2. The Basics of Binary ##
# Let's say b is a binary number. In python, we have to store binary numbers as strings.
# If we try to enter it directly as b = 10, Python will assume it's a base 10 integer.
b = "10"
# Now, we can convert b from a string to a binary number with the int function. We'll need to set the optional second argument, base, to 2 (binary is base two).
print(int(b, 2))
base_10_100 = int("100", 2)
## 3. Binary Addition ##
# a is in base 10 -- because we have 10 possible digits, the highest value we can represent with one digit is 9.
a = 9
# When we want to represent a value one higher, we need to add another digit.
a += 1
# a now has two digits -- we incremented the invisible leading digit, which was 0 and is now 1, and set the last digit back to zero.
print(a)
# When we add 1 to 19, we increment the leading 1 by 1, and then set the last digit to 0, giving us 20.
a = 19
a += 1
# When we add 1 to 99, the last digit wraps around to 0 and carries into the first digit; that digit also wraps to 0 and carries into the invisible leading digit, giving us 100.
a = 99
a += 1
# Binary addition works the exact same way, except the highest value any single digit can represent is 1.
b = "1"
# We'll add binary values using a binary_add function that was made just for this exercise.
# It's not extremely important to know how it works right this second.
def binary_add(a, b):
return bin(int(a, 2) + int(b, 2))[2:]
c = binary_add(b, "1")
# We now see that c equals "10", which is exactly what happens in base 10 when we reach the highest possible digit.
print(c)
# c now equals "11"
c = binary_add(c, "1")
print(c)
# c now equals "100"
c = binary_add(c, "1")
print(c)
c = binary_add(c,"10")
print(c)
## 4. Converting Binary Values to Other Bases ##
def binary_add(a, b):
return bin(int(a, 2) + int(b, 2))[2:]
# Start both at 0
a = 0
b = "0"
# Loop 10 times
for i in range(0, 10):
# Add 1 to each
a += 1
b = binary_add(b, "1")
# Check if they are equal
print(int(b, 2) == a)
# The cool thing here is that a and b are always equal if we add the same amount to both.
# This is because base 2 and base 10 are just ways to write numbers.
# Counting 100 apples in base 2 or base 10 will always give us an equivalent result - we just have to convert between them.
# We can represent any number in binary; we just need to use more digits than we would in base 10.
base_10_1001 = int("1001", 2)
## 5. Converting Characters to Binary ##
# We can use the ord() function to get the integer for an ASCII character.
ord('a')
# Then, we use the bin() function to convert to binary.
# The bin function adds "0b" to the beginning of a string to indicate that it contains binary values.
bin(ord('a'))
# ÿ is the "last" ASCII character; it has the highest integer value of any ASCII character.
# This is because 255 is the highest value we can represent with eight binary digits.
ord('ÿ')
# As you can see, we get eight 1's, which shows that this is the highest possible eight-digit value.
bin(ord('ÿ'))
# Why is this? Because a single binary digit is called a bit, and computers store values in sequences of eight bits (i.e., a byte).
# You might be more familiar with kilobytes or megabytes. A kilobyte is 1000 bytes, and a megabyte is 1000 kilobytes.
# There are 256 different extended ASCII symbols, because a single extended ASCII character takes up at most one byte of storage.
binary_w = bin(ord("w"))
binary_bracket = bin(ord("}"))
## 6. Introduction to Unicode ##
# We can initialize Unicode code points (the value for this code point is \u27F6, but you see it as a character here because the Dataquest system is automatically converting it).
code_point = "⟶"
# This particular code point maps to a right arrow character.
print(code_point)
# We can get the base 10 integer value of the code point with the ord function.
print(ord(code_point))
# As you can see, this takes up a lot more than 1 byte.
print(bin(ord(code_point)))
code_point = "မ"
binary_1019 = bin(ord(code_point))
## 7. Strings with Unicode ##
s1 = "café"
# The \u prefix means "the next four digits are a Unicode code point"
# It doesn't change the value at all (the last character in the string below is \u00e9)
s2 = "café"
# These strings are the same, because code points are equal to their corresponding Unicode characters.
# \u00e9 and é are equivalent.
print(s1 == s2)
s3 = "hello မ"
## 8. The Bytes Data Type ##
# We can make a string with some Unicode values
superman = "Clark Kent␦"
print(superman)
# This tells Python to encode the string superman as Unicode using the UTF-8 encoding system
# We end up with a sequence of bytes instead of a string
superman_bytes = "Clark Kent␦".encode("utf-8")
batman = "Bruce Wayne␦"
batman_bytes = batman.encode("utf-8")
## 10. Hexadecimal Conversions ##
# F is the highest single digit in hexadecimal (base 16)
# Its value is 15 in base 10
print(int("F", 16))
# A in base 16 has the value 10 in base 10
print(int("A", 16))
# Just like the earlier binary_add function, this adds two hexadecimal numbers
def hexadecimal_add(a, b):
return hex(int(a, 16) + int(b, 16))[2:]
# When we add 1 to 9 in hexadecimal, it becomes "a"
value = "9"
value = hexadecimal_add(value, "1")
print(value)
hex_ea = hexadecimal_add("2", "ea")
hex_ef = hexadecimal_add("e", "f")
## 11. Hex to Binary ##
# One byte (eight bits) in hexadecimal (the value of the byte below is \xe2)
hex_byte = "â"
# Print the base 10 integer value for the hexadecimal byte
print(ord(hex_byte))
# This gives the exact same value. Remember that \x is just a prefix, and doesn't affect the value.
print(int("e2", 16))
# Convert the base 10 integer to binary
print(bin(ord("â")))
binary_aa = bin(ord("ª"))
binary_ab = bin(ord("\xab"))
## 12. Bytes and Strings ##
hulk_bytes = "Bruce Banner␦".encode("utf-8")
# We can't mix strings and bytes
# For instance, if we try to replace the Unicode ␦ character as a string, it won't work, because that value has been encoded to bytes
try:
hulk_bytes.replace("Banner", "")
except Exception:
print("TypeError with replacement")
# We can create objects of the bytes data type by putting a b in front of the quotation marks in a string
hulk_bytes = b"Bruce Banner"
# Now, instead of mixing strings and bytes, we can use the replace method with bytes objects instead
hulk_bytes.replace(b"Banner", b"")
thor_bytes = b"Thor"
## 13. Decode Bytes to Strings ##
# Make a bytes object with aquaman's secret identity
aquaman_bytes = b"Who knows?"
# Now, we can use the decode method, along with the encoding system (UTF-8) to turn it into a string
aquaman = aquaman_bytes.decode("utf-8")
# We can print the value and type to verify that it's a string
print(aquaman)
print(type(aquaman))
morgan_freeman_bytes = b"Morgan Freeman"
morgan_freeman = morgan_freeman_bytes.decode("utf-8")
## 14. Read in File Data ##
# We can read our data in using csvreader
import csv
# When we open a file, we can specify the system used to encode it (in this case, UTF-8).
f = open("sentences_cia.csv", 'r', encoding="utf-8")
csvreader = csv.reader(f)
sentences_cia = list(csvreader)
# The data consists of two columns
# The first column contains the year, and the second contains a sentence from a CIA report written in that year
# Print the first column of the second row
print(sentences_cia[1][0])
# Print the second column of the second row
print(sentences_cia[1][1])
sentences_ten = sentences_cia[9][1]
## 15. Convert to a dataframe ##
import csv
# Let's read in the legislators data from a few missions ago
f = open("legislators.csv", 'r', encoding="utf-8")
csvreader = csv.reader(f)
legislators = list(csvreader)
# Now, we can import pandas and use the DataFrame class to convert the list of lists to a dataframe.
import pandas as pd
legislators_df = pd.DataFrame(legislators)
# As you can see, the first row contains the headers, which we don't want (because they're not actually data)
print(legislators_df.iloc[0,:])
# To remove the headers, we'll subset the df and pass them in separately
# This code removes the headers from legislators, and instead passes them into the columns argument
# The columns argument specifies column names
legislators_df = pd.DataFrame(legislators[1:], columns=legislators[0])
# We now have the right data in the first row, as well as the proper headers
print(legislators_df.iloc[0,:])
# The sentences_cia data from the last screen is available.
sentences_cia_df = pd.DataFrame(sentences_cia[1:], columns=sentences_cia[0])
## 16. Clean up Sentences ##
# The integer codes for all the characters we want to keep
good_characters = [48, 49, 50, 51, 52, 53, 54, 55, 56, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 32]
sentence_15 = sentences_cia["statement"][14]
# Iterate over the characters in the sentence, and only take those whose integer representations are in good_characters
# This will construct a list of single characters
cleaned_sentence_15_list = [s for s in sentence_15 if ord(s) in good_characters]
# Join the list together, separated by "" (no space), which creates a string again
cleaned_sentence_15 = "".join(cleaned_sentence_15_list)
def clean_statement(row):
statement = row["statement"]
clean_statement_list = [s for s in statement if ord(s) in good_characters]
return "".join(clean_statement_list)
sentences_cia["cleaned_statement"] = sentences_cia.apply(clean_statement, axis=1)
## 17. Tokenize Statements ##
# We can use the .join() method on strings to join lists together.
# The string we use the method on will become the separator -- the character(s) between each string when they are joined.
combined_statements = " ".join(sentences_cia["cleaned_statement"])
statement_tokens = combined_statements.split(" ")
## 18. Filter the Tokens ##
# statement_tokens has been loaded in.
filtered_tokens = [s for s in statement_tokens if len(s) > 4]
## 19. Count the Tokens ##
from collections import Counter
fruits = ["apple", "apple", "banana", "orange", "pear", "orange", "apple", "grape"]
fruit_count = Counter(fruits)
# Our code has counted each of the items in the list, and given them dictionary keys
print(fruit_count)
# filtered_tokens has been loaded in
filtered_token_counts = Counter(filtered_tokens)
## 20. Most Common Tokens ##
from collections import Counter
fruits = ["apple", "apple", "banana", "orange", "pear", "orange", "apple", "grape"]
fruit_count = Counter(fruits)
# We can use the most_common method of a Counter class to get the most common items
# We pass in a number, which is the number of items we want to get
print(fruit_count.most_common(2))
print(fruit_count.most_common(3))
# filtered_token_counts has been loaded in
common_tokens = filtered_token_counts.most_common(3)
## 21. Finding the Most Common Tokens by Year ##
# sentences_cia has been loaded in.
# It already has the cleaned_statement column.
from collections import Counter
def find_most_common_by_year(year, sentences_cia):
data = sentences_cia[sentences_cia["year"] == year]
combined_statement = " ".join(data["cleaned_statement"])
statement_split = combined_statement.split(" ")
counter = Counter([s for s in statement_split if len(s) > 4])
return counter.most_common(2)
common_2000 = find_most_common_by_year("2000", sentences_cia)
common_2002 = find_most_common_by_year("2002", sentences_cia)
common_2013 = find_most_common_by_year("2013", sentences_cia) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Data Structures & Algorithms/Memory and Unicode-13.py",
"copies": "1",
"size": "11671",
"license": "mit",
"hash": 6067693329024267000,
"line_mean": 35.1708074534,
"line_max": 289,
"alpha_frac": 0.7148377125,
"autogenerated": false,
"ratio": 3.2898305084745765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45046682209745764,
"avg_score": null,
"num_lines": null
} |
## 2. The dataset ##
import pandas as pd
votes = pd.read_csv('114_congress.csv')
## 3. Exploring the data ##
print(votes['party'].value_counts())
print(votes.mean())
## 4. Distance between Senators ##
from sklearn.metrics.pairwise import euclidean_distances
print(euclidean_distances(votes.iloc[0,3:].reshape(1, -1), votes.iloc[1,3:].reshape(1, -1)))
distance = euclidean_distances(votes.iloc[0,3:].reshape(1, -1), votes.iloc[2,3:].reshape(1, -1))
## 5. Initial clustering ##
import pandas as pd
from sklearn.cluster import KMeans
kmeans_model = KMeans(n_clusters=2, random_state=1)
senator_distances = kmeans_model.fit_transform(votes.iloc[:,3:])
## 6. Exploring the clusters ##
labels = kmeans_model.labels_
print(pd.crosstab(labels, votes["party"]))
## 7. Exploring Senators in the wrong cluster ##
democratic_outliers = votes[(labels == 1) & (votes["party"] != "D")]
print(democratic_outliers)
## 8. Plotting out the clusters ##
import matplotlib.pyplot as plt
plt.scatter(senator_distances[:,0], senator_distances[:,1], c = labels)
plt.show()
## 9. Finding the most extreme ##
extremism = senator_distances ** 3
votes["extremism"] = extremism.sum(axis=1)
votes.sort_values(by= "extremism" , ascending = False, inplace = True)
votes.head(10) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Machine learning Beginner/Clustering basics-59.py",
"copies": "1",
"size": "1230",
"license": "mit",
"hash": 3761524056747814400,
"line_mean": 25.7608695652,
"line_max": 96,
"alpha_frac": 0.7016260163,
"autogenerated": false,
"ratio": 2.8738317757009346,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.40754577920009344,
"avg_score": null,
"num_lines": null
} |
## 2. The mean as the center ##
# Make a list of values
values = [2, 4, 5, -1, 0, 10, 8, 9]
# Compute the mean of the values
values_mean = sum(values) / len(values)
# Find the difference between each of the values and the mean by subtracting the mean from each value.
differences = [i - values_mean for i in values]
# This equals 0. Try changing the values around and verifying that it equals 0 if you want.
print(sum(differences))
# We can use the median function from numpy to find the median
# The median is the "middle" value in a set of values -- if you sort the values in order, it's the one in the center (or the average of the two in the center if there are an even number of items in the set)
# You'll see that the differences from the median don't always add to 0. You might want to play around with this and think about why that is.
from numpy import median
values_median = median(values)
median_difference_sum = 0
for i in range(len(values)):
val = values[i] - values_median
median_difference_sum += val
## 3. Finding variance ##
import matplotlib.pyplot as plt
import pandas as pd
# The nba data is loaded into the nba_stats variable.
# Find the mean value of the column
pf_mean = nba_stats["pf"].mean()
# Initialize variance at zero
variance = 0
# Loop through each item in the "pf" column
for p in nba_stats["pf"]:
# Calculate the difference between the mean and the value
difference = p - pf_mean
# Square the difference -- this ensures that the result isn't negative
# If we didn't square the difference, the total variance would be zero
# ** in python means "raise whatever comes before this to the power of whatever number is after this"
square_difference = difference ** 2
# Add the difference to the total
variance += square_difference
# Average the total to find the final variance.
variance = variance / len(nba_stats["pf"])
pt_mean = nba_stats['pts'].mean()
va = 0
for i in nba_stats['pts']:
va += (i - pt_mean)**2
point_variance = va / len(nba_stats['pts'])
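# A hedged cross-check (not part of the original exercise): numpy's var with
# ddof=0 computes the same population variance as the manual loop above.
# nba_stats is assumed to be the preloaded dataframe from this exercise.
import numpy as np
print(np.isclose(point_variance, np.var(nba_stats['pts'], ddof=0)))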
## 4. Order of operations ##
# You might be wondering why multiplication and division are on the same level.
# It doesn't matter whether we do the multiplication first, or the division first -- the answer here will always be the same.
# In this case, we need to think of division as multiplication by a fraction -- otherwise, we'll be dividing more than we want to.
# Create a formula
a = 5 * 5 / 2
# Multiply by 1/2 instead of dividing by 2 -- the result is the same (2/2 == 2 * 1/2)
a_subbed = 5 * 5 * 1/2
a_mul_first = 25 * 1/2
a_div_first = 5 * 2.5
print(a_mul_first == a_div_first)
# The same thing is true for subtraction and addition
# In this case, we need to convert subtraction into adding a negative number -- if we don't we'll end up subtracting more than we expect
b = 10 - 8 + 5
# Add -8 instead of subtracting 8
b_subbed = 10 + -8 + 5
b_sub_first = 2 + 5
b_add_first = 10 + -3
print(b_sub_first == b_add_first)
c = 10 / 2 + 5
d = 3 - 1 / 2 * 2
c = 10 / 2 * 5
d = 3 - 1 / 2 - 2
## 5. Using parentheses ##
a = 50 * 50 - 10 / 5
a_paren = 50 * (50 - 10) / 5
# If we put multiple operations inside parentheses, the order of operations is used inside to determine the order.
a_paren = 50 * (50 - 10 / 5)
b = 10 * (10 + 100)
c = (8 - 6) * 100
## 6. Fractional powers ##
a = 5 ** 2
# Raise to the fourth power
b = 10 ** 4
# Take the square root ( 3 * 3 == 9, so the answer is 3)
c = 9 ** (1/2)
# Take the cube root (4 * 4 * 4 == 64, so 4 is the cube root)
d = 64 ** (1/3)
e = 11**5
f = 10000 ** (1/4)
## 7. Calculating standard deviation ##
# The nba stats are loaded into the nba_stats variable.
def std_dev(data):
d_mean = data.mean()
v = 0
for i in data:
v += (i - d_mean)**2
v = v/len(data)
return(v**(1/2))
mp_dev = std_dev(nba_stats['mp'])
ast_dev = std_dev(nba_stats['ast'])
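# A hedged aside (not part of the original exercise): pandas' Series.std()
# defaults to the sample standard deviation (ddof=1), so it differs slightly
# from the population version computed by std_dev above; passing ddof=0 makes
# the two agree.
print(nba_stats['mp'].std(ddof=0), mp_dev)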
## 8. Find standard deviation distance ##
import matplotlib.pyplot as plt
plt.hist(nba_stats["pf"])
mean = nba_stats["pf"].mean()
plt.axvline(mean, color="r")
# We can calculate standard deviation by using the std() method on a pandas series.
std_dev = nba_stats["pf"].std()
# Plot a line one standard deviation below the mean
plt.axvline(mean - std_dev, color="g")
# Plot a line one standard deviation above the mean
plt.axvline(mean + std_dev, color="g")
# We can see how many of the data points fall within 1 standard deviation of the mean
# The more that falls into this range, the less spread out the data is
plt.show()
# We can calculate how many standard deviations a data point is from the mean by doing some subtraction and division
# First, we find the total distance by subtracting the mean
total_distance = nba_stats["pf"][0] - mean
# Then we divide by standard deviation to find how many standard deviations away the point is.
standard_deviation_distance = total_distance / std_dev
point_10 = nba_stats["pf"][9]
point_100 = nba_stats["pf"][99]
point_10_std = (point_10 - mean) / std_dev
point_100_std = (point_100 - mean) / std_dev
## 9. Working with the normal distribution ##
import numpy as np
import matplotlib.pyplot as plt
# The norm module has a pdf function (pdf stands for probability density function)
from scipy.stats import norm
# The arange function generates a numpy vector
# The vector below will start at -1, and go up to, but not including 1
# It will proceed in "steps" of .01. So the first element will be -1, the second -.99, the third -.98, all the way up to .99.
points = np.arange(-1, 1, 0.01)
# The norm.pdf function takes the points vector and turns it into a vector of probability densities
# Each element in the vector corresponds to the normal density at that point (values toward the ends are smaller, with the peak in the center)
# The distribution will be centered on 0, and will have a standard deviation of .3
probabilities = norm.pdf(points, 0, .3)
# Plot the points values on the x axis and the corresponding probabilities on the y axis
# See the bell curve?
plt.plot(points, probabilities)
plt.show()
pt = np.arange(-1,1,0.1)
prob = norm.pdf(pt,0,2)
plt.plot(pt,prob)
plt.show()
## 10. Normal distribution deviation ##
# Housefly wing lengths in millimeters
wing_lengths = [36, 37, 38, 38, 39, 39, 40, 40, 40, 40, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 43, 43, 43, 43, 43, 43, 43, 43, 44, 44, 44, 44, 44, 44, 44, 44, 44, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 47, 47, 47, 47, 47, 47, 47, 47, 47, 48, 48, 48, 48, 48, 48, 48, 48, 49, 49, 49, 49, 49, 49, 49, 50, 50, 50, 50, 50, 50, 51, 51, 51, 51, 52, 52, 53, 53, 54, 55]
mean = sum(wing_lengths)/len(wing_lengths)
variances = [(i - mean)**2 for i in wing_lengths]
variance = sum(variances)/len(variances)
std = variance ** (1/2)
# Express each wing length as its distance from the mean in standard deviations
deviations = [(i - mean) / std for i in wing_lengths]
def within_percentage(deviations, count):
    within = [i for i in deviations if -count <= i <= count]
    return len(within) / len(deviations)
within_one_percentage = within_percentage(deviations, 1)
within_two_percentage = within_percentage(deviations, 2)
within_three_percentage = within_percentage(deviations, 3)
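# A hedged aside (not part of the original exercise): for data that is roughly
# normal, these fractions should land near the 68-95-99.7 rule
# (about 0.68, 0.95, and 0.997 respectively).
print(within_one_percentage, within_two_percentage, within_three_percentage)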
## 11. Plotting correlations ##
import matplotlib.pyplot as plt
# This is plotting field goals attempted (number of shots someone takes in a season) vs point scored in a season
# Field goals attempted is on the x-axis, and points is on the y-axis
# As you can tell, they are very strongly correlated -- the plot is close to a straight line.
# The plot also slopes upward, which means that as field goal attempts go up, so do points.
# That means that the plot is positively correlated.
plt.scatter(nba_stats["fga"], nba_stats["pts"])
plt.show()
# If we make points negative (so the people who scored the most points now score the least, because 3000 becomes -3000), we can change the direction of the correlation
# Field goal attempts are negatively correlated with our new "negative" points column -- the more field goals you attempt, the lower (more negative) your value in that column.
# We can see this because the correlation line slopes downward.
plt.scatter(nba_stats["fga"], -nba_stats["pts"])
plt.show()
# Now, we can plot total rebounds (number of times someone got the ball back for their team after someone shot) vs total assists (number of times someone helped another person score)
# These are uncorrelated, so you don't see the same nice line as you see with the plot above.
plt.scatter(nba_stats["trb"], nba_stats["ast"])
plt.show()
plt.scatter(nba_stats["fta"], nba_stats["pts"])
plt.show()
plt.scatter(nba_stats["stl"], nba_stats["pf"])
plt.show()
## 12. Measuring correlation ##
from scipy.stats.stats import pearsonr
# The pearsonr function will find the correlation between two columns of data.
# It returns the r value and the p value. We'll learn more about p values later on.
r, p_value = pearsonr(nba_stats["fga"], nba_stats["pts"])
# As we can see, this is a very high positive r value -- close to 1
print(r)
# These two columns are much less correlated
r, p_value = pearsonr(nba_stats["trb"], nba_stats["ast"])
# We get a much lower, but still positive, r value
print(r)
r_fta_pts, p_value = pearsonr(nba_stats["fta"], nba_stats["pts"])
r_stl_pf, p_value = pearsonr(nba_stats["stl"], nba_stats["pf"])
## 13. Calculate covariance ##
# The nba_stats variable has been loaded.
def covariance(x, y):
x_mean = sum(x) / len(x)
y_mean = sum(y) / len(y)
x_diffs = [i - x_mean for i in x]
y_diffs = [i - y_mean for i in y]
codeviates = [x_diffs[i] * y_diffs[i] for i in range(len(x))]
return sum(codeviates) / len(codeviates)
cov_stl_pf = covariance(nba_stats["stl"], nba_stats["pf"])
cov_fta_pts = covariance(nba_stats["fta"], nba_stats["pts"])
## 14. Calculate correlation ##
from numpy import cov
# The nba_stats variable has already been loaded.
r_fta_blk = cov(nba_stats["fta"], nba_stats["blk"])[0,1] / ((nba_stats["fta"].var() * nba_stats["blk"].var())** (1/2))
r_ast_stl = cov(nba_stats["ast"], nba_stats["stl"])[0,1] / ((nba_stats["ast"].var() * nba_stats["stl"].var())** (1/2)) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Probability Statistics Beginner/Standard deviation and correlation-14.py",
"copies": "1",
"size": "10049",
"license": "mit",
"hash": 8537288113958745000,
"line_mean": 38.880952381,
"line_max": 415,
"alpha_frac": 0.6880286596,
"autogenerated": false,
"ratio": 3.1285803237858034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43166089833858035,
"avg_score": null,
"num_lines": null
} |
# 2. Delete operation
class Animal:
name = "动物"
age = 10
# a1 = Animal()
# print(Animal.name)
# # 动物
# del a1.__class__.name
# print(Animal.__dict__)
# {'__module__': '__main__',
# 'age': 10, '__dict__': <attribute '__dict__' of 'Animal' objects>,
# '__weakref__': <attribute '__weakref__' of 'Animal' objects>,
# '__doc__': None}
# Modify the class __dict__
dictA = Animal.__dict__
print(dictA)
dictA['name'] =" xxx"
# TypeError: 'mappingproxy' object does not support item assignment
# print(Animal.name)
# # 1. Define a class
# class Person:
# name = "人类"
# # Get the __dict__
# print(Person.__dict__)
# # {'__module__': '__main__',
# # 'name': '人类',
# # '__dict__': <attribute '__dict__' of 'Person' objects>,
# # '__weakref__': <attribute '__weakref__' of 'Person' objects>,
# # '__doc__': None}
# print(Person) # <class '__main__.Person'>
# print(Person.__class__) # <class 'type'>
# print(Person is Person.__class__) # False
# print(type(Person.__class__))# <class 'type'>
# print(type(Person)) # <class 'type'>
# print(Person.__class__ is type(Person)) # True
# print(Person.__class__ is type(Person.__class__)) # True
# print(Person.__class__ is Person.__class__.__class__) # True
# # Similar to a prototype chain
# p1 = Person()
# p2 = Person()
# p1.name = "xx"
#
# Person.name = "age"
#
# print(p1.name)
# print(p2.name)
# print(Person.name)
#
# # Set an attribute that does not exist
# Person.age = 123
# print(p2.age)
#
# print(type(p1))
#
#
# # Query operation
#
# dictA = Person.__dict__
# print(dictA)
# Person.sex = "boy"
# print(dictA)
# class Nxx(Person):
# typex = 999
#
#
# p2.__class__ = Nxx
# print(p2.typex) | {
"repo_name": "z727354123/pyCharmTest",
"path": "2018-01/01_Jan/14/02-ClassAttribute.py",
"copies": "1",
"size": "1623",
"license": "apache-2.0",
"hash": 5399735191346132000,
"line_mean": 20.2328767123,
"line_max": 68,
"alpha_frac": 0.5661717237,
"autogenerated": false,
"ratio": 2.4983870967741937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3564558820474194,
"avg_score": null,
"num_lines": null
} |
## 2. Using decision trees with scikit-learn ##
from sklearn.tree import DecisionTreeClassifier
# A list of columns to train with.
# All columns have been converted to numeric.
columns = ["age", "workclass", "education_num", "marital_status", "occupation", "relationship", "race", "sex", "hours_per_week", "native_country"]
# Instantiate the classifier.
# Set random_state to 1 to keep results consistent.
clf = DecisionTreeClassifier(random_state=1)
clf.fit(income[columns],income['high_income'])
# The variable income is loaded, and contains all the income data.
## 3. Splitting the data into train and test sets ##
import numpy
import math
# Set a random seed so the shuffle is the same every time.
numpy.random.seed(1)
# Shuffle the rows. This first permutes the index randomly using numpy.random.permutation.
# Then, it reindexes the dataframe with this.
# The net effect is to put the rows into random order.
income = income.reindex(numpy.random.permutation(income.index))
train_max_row = math.floor(income.shape[0] * .8)
train = income.iloc[:train_max_row,:]
test = income.iloc[train_max_row:,:]
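# A hedged aside (not part of the original exercise): scikit-learn can perform
# an equivalent shuffled 80/20 split; the import path below assumes a version
# that provides sklearn.model_selection. New names are used so the exercise's
# train/test sets are left untouched.
from sklearn.model_selection import train_test_split
train_alt, test_alt = train_test_split(income, test_size=0.2, random_state=1)
print(train_alt.shape, test_alt.shape)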
## 4. Evaluating error ##
from sklearn.metrics import roc_auc_score
clf = DecisionTreeClassifier(random_state=1)
clf.fit(train[columns], train["high_income"])
predictions = clf.predict(test[columns])
error = roc_auc_score(test['high_income'],predictions)
print(error)
## 5. Compute error on the training set ##
predictions = clf.predict(train[columns])
print(roc_auc_score(train['high_income'],predictions))
## 7. Building a shallower tree ##
# Decision trees model from the last screen.
clf = DecisionTreeClassifier(min_samples_split=13, random_state=1)
clf.fit(train[columns], train["high_income"])
predictions = clf.predict(test[columns])
test_auc = roc_auc_score(test["high_income"], predictions)
train_predictions = clf.predict(train[columns])
train_auc = roc_auc_score(train["high_income"], train_predictions)
print(test_auc)
print(train_auc)
## 8. More parameter tweaking ##
# First decision trees model we trained and tested.
clf = DecisionTreeClassifier(random_state=1,max_depth = 7, min_samples_split = 13)
clf.fit(train[columns], train["high_income"])
predictions = clf.predict(test[columns])
test_auc = roc_auc_score(test["high_income"], predictions)
train_predictions = clf.predict(train[columns])
train_auc = roc_auc_score(train["high_income"], train_predictions)
print(test_auc)
print(train_auc)
## 9. Tweaking the depth ##
# First decision trees model we trained and tested.
clf = DecisionTreeClassifier(random_state=1,max_depth = 2, min_samples_split = 100)
clf.fit(train[columns], train["high_income"])
predictions = clf.predict(test[columns])
test_auc = roc_auc_score(test["high_income"], predictions)
train_predictions = clf.predict(train[columns])
train_auc = roc_auc_score(train["high_income"], train_predictions)
print(test_auc)
print(train_auc)
## 12. Exploring decision tree variance ##
numpy.random.seed(1)
# Generate a column of random integers from 0 to 3 (randint's upper bound is exclusive).
income["noise"] = numpy.random.randint(4, size=income.shape[0])
# Adjust columns to include the noise column.
columns = ["noise", "age", "workclass", "education_num", "marital_status", "occupation", "relationship", "race", "sex", "hours_per_week", "native_country"]
# Make new train and test sets.
train_max_row = math.floor(income.shape[0] * .8)
train = income.iloc[:train_max_row]
test = income.iloc[train_max_row:]
# Initialize the classifier.
clf = DecisionTreeClassifier(random_state=1)
clf.fit(train[columns],train['high_income'])
preds = clf.predict(train[columns])
train_auc = roc_auc_score(train['high_income'],preds)
predictions = clf.predict(test[columns])
test_auc = roc_auc_score(test['high_income'],predictions)
| {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Decision Trees/Applying decision trees-143.py",
"copies": "1",
"size": "3748",
"license": "mit",
"hash": -529862598104107200,
"line_mean": 32.7657657658,
"line_max": 155,
"alpha_frac": 0.7395944504,
"autogenerated": false,
"ratio": 3.231034482758621,
"config_test": true,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.4470628933158621,
"avg_score": null,
"num_lines": null
} |
## 2. Web Page Structure ##
import requests
# Write your code here.
response = requests.get("http://dataquestio.github.io/web-scraping-pages/simple.html")
content = response.content
## 3. Retrieving Elements from a Page ##
from bs4 import BeautifulSoup
# Initialize the parser, and pass in the content we grabbed earlier.
parser = BeautifulSoup(content, 'html.parser')
# Get the body tag from the document.
# Since we passed in the top level of the document to the parser, we need to pick a branch off of the root.
# With BeautifulSoup, we can access branches by using tag types as attributes.
body = parser.body
# Get the p tag from the body.
p = body.p
# Print the text inside the p tag.
# Text is a property that gets the inside text of a tag.
print(p.text)
title_text = parser.head.title.text
## 4. Using Find All ##
parser = BeautifulSoup(content, 'html.parser')
# Get a list of all occurrences of the body tag in the element.
body = parser.find_all("body")
# Get the paragraph tag.
p = body[0].find_all("p")
# Get the text.
print(p[0].text)
title_text = parser.find_all('title')[0].text
## 5. Element IDs ##
# Get the page content and set up a new parser.
response = requests.get("http://dataquestio.github.io/web-scraping-pages/simple_ids.html")
content = response.content
parser = BeautifulSoup(content, 'html.parser')
# Pass in the ID attribute to only get the element with that specific ID.
first_paragraph = parser.find_all("p", id="first")[0]
print(first_paragraph.text)
second_paragraph_text = parser.find_all("p", id="second")[0].text
## 6. Element Classes ##
# Get the website that contains classes.
response = requests.get("http://dataquestio.github.io/web-scraping-pages/simple_classes.html")
content = response.content
parser = BeautifulSoup(content, 'html.parser')
# Get the first inner paragraph.
# Find all the paragraph tags with the class inner-text.
# Then, take the first element in that list.
first_inner_paragraph = parser.find_all("p", class_="inner-text")[0]
print(first_inner_paragraph.text)
second_inner_paragraph_text = parser.find_all("p",class_="inner-text")[1].text
first_outer_paragraph_text = parser.find_all("p", class_="outer-text")[0].text
## 8. Using CSS Selectors ##
# Get the website that contains classes and IDs.
response = requests.get("http://dataquestio.github.io/web-scraping-pages/ids_and_classes.html")
content = response.content
parser = BeautifulSoup(content, 'html.parser')
# Select all of the elements that have the first-item class.
first_items = parser.select(".first-item")
# Print the text of the first paragraph (the first element with the first-item class).
print(first_items[0].text)
first_outer_text = parser.select(".outer-text")[0].text
second_text = parser.select("#second")[0].text
## 10. Using Nested CSS Selectors ##
# Get the Superbowl box score data.
response = requests.get("http://dataquestio.github.io/web-scraping-pages/2014_super_bowl.html")
content = response.content
parser = BeautifulSoup(content, 'html.parser')
# Find the number of turnovers the Seahawks committed.
turnovers = parser.select("#turnovers")[0]
seahawks_turnovers = turnovers.select("td")[1]
seahawks_turnovers_count = seahawks_turnovers.text
print(seahawks_turnovers_count)
patriots_total_plays_count = parser.select("#total-plays")[0].select("td")[2].text
seahawks_total_yards_count = parser.select("#total-yards")[0].select("td")[1].text | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Apis and Scraping/Web Scraping-119.py",
"copies": "1",
"size": "3394",
"license": "mit",
"hash": 3204754187570022000,
"line_mean": 34.3645833333,
"line_max": 107,
"alpha_frac": 0.7386564526,
"autogenerated": false,
"ratio": 3.2416427889207258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44802992415207255,
"avg_score": null,
"num_lines": null
} |
# 30.04.2009
#!
#! Linear Elasticity
#! =================
#$ \centerline{Example input file, \today}
#! This file models a cylinder that is fixed at one end while the
#! second end has a specified displacement of 0.01 in the x direction
#! (this boundary condition is named Displaced). There is also a specified
#! displacement of 0.005 in the z direction for points in
#! the region labeled SomewhereTop. This boundary condition is named
#! PerturbedSurface. The region SomewhereTop is specified as those nodes for
#! which
#! (z > 0.017) & (x > 0.03) & (x < 0.07).
#! The output is the displacement for each node, saved by default to
#! simple_out.vtk. The material is linear elastic and its properties are
#! specified as Lame parameters (see
#! http://en.wikipedia.org/wiki/Lam%C3%A9_parameters)
#!
#! Mesh
#! ----
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/3d/cylinder.mesh'
#! Regions
#! -------
#! Whole domain 'Omega', left and right ends.
regions = {
'Omega' : ('all', {}),
'Left' : ('nodes in (x < 0.001)', {}),
'Right' : ('nodes in (x > 0.099)', {}),
'SomewhereTop' : ('nodes in (z > 0.017) & (x > 0.03) & (x < 0.07)', {}),
}
#! Materials
#! ---------
#! The linear elastic material model is used. Properties are
#! specified as Lame parameters.
materials = {
'solid' : ({'lam' : 1e1, 'mu' : 1e0},),
}
#! Fields
#! ------
#! A field is used to define the approximation on a (sub)domain
#! A displacement field (three DOFs/node) will be computed on a region
#! called 'Omega' using P1 (four-node tetrahedral) finite elements.
fields = {
'displacement': ('real', 'vector', 'Omega', 1),
}
#! Integrals
#! ---------
#! Define the integral type Volume/Surface and quadrature rule
#! (here: dim=3, order=1).
integrals = {
'i1' : ('v', 'gauss_o1_d3'),
}
#! Variables
#! ---------
#! One field is used for the unknown variable (generating discrete degrees
#! of freedom) and the second field for the corresponding test variable of
#! the weak formulation.
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
}
#! Boundary Conditions
#! -------------------
#! The left end of the cylinder is fixed (all DOFs are zero) and
#! the 'right' end has non-zero displacements only in the x direction.
ebcs = {
'Fixed' : ('Left', {'u.all' : 0.0}),
'Displaced' : ('Right', {'u.0' : 0.01, 'u.[1,2]' : 0.0}),
'PerturbedSurface' : ('SomewhereTop', {'u.2' : 0.005}),
}
#! Equations
#! ---------
#! The weak formulation of the linear elastic problem.
equations = {
'balance_of_forces' :
"""dw_lin_elastic_iso.i1.Omega( solid.lam, solid.mu, v, u ) = 0""",
}
#! Solvers
#! -------
#! Define linear and nonlinear solver.
#! Even linear problems are solved by a nonlinear solver (KISS rule) - only one
#! iteration is needed and the final residual is obtained for free.
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton',
{ 'i_max' : 1,
'eps_a' : 1e-10,
'eps_r' : 1.0,
'macheps' : 1e-16,
# Linear system error < (eps_a * lin_red).
'lin_red' : 1e-2,
'ls_red' : 0.1,
'ls_red_warp' : 0.001,
'ls_on' : 1.1,
'ls_min' : 1e-5,
'check' : 0,
'delta' : 1e-6,
'is_plot' : False,
# 'nonlinear' or 'linear' (ignore i_max)
'problem' : 'nonlinear'}),
}
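#! Usage
#! -----
#! A hedged note (not part of the original example): problem description files
#! like this one are typically solved from the SfePy source tree with the
#! simple.py script, e.g. ``./simple.py examples/linear_elasticity/linear_elastic.py``;
#! the exact entry point may differ between SfePy versions.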
| {
"repo_name": "olivierverdier/sfepy",
"path": "examples/linear_elasticity/linear_elastic.py",
"copies": "1",
"size": "3615",
"license": "bsd-3-clause",
"hash": 3724869877416006700,
"line_mean": 33.4285714286,
"line_max": 79,
"alpha_frac": 0.5587828492,
"autogenerated": false,
"ratio": 3.2190560997328586,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4277838948932859,
"avg_score": null,
"num_lines": null
} |
# 30.05.2007, c
# last revision: 25.02.2008
from __future__ import absolute_import
from sfepy import data_dir
import six
filename_mesh = data_dir + '/meshes/2d/square_unit_tri.mesh'
material_1 = {
'name' : 'coef',
'values' : {
'val' : 1.0,
},
}
material_2 = {
'name' : 'm',
'values' : {
'K' : [[1.0, 0.0], [0.0, 1.0]],
},
}
field_1 = {
'name' : 'a_harmonic_field',
'dtype' : 'real',
'shape' : 'scalar',
'region' : 'Omega',
'approx_order' : 2,
}
variable_1 = {
'name' : 't',
'kind' : 'unknown field',
'field' : 'a_harmonic_field',
'order' : 0,
}
variable_2 = {
'name' : 's',
'kind' : 'test field',
'field' : 'a_harmonic_field',
'dual' : 't',
}
region_1000 = {
'name' : 'Omega',
'select' : 'all',
}
region_1 = {
'name' : 'Left',
'select' : 'vertices in (x < -0.499)',
'kind' : 'facet',
}
region_2 = {
'name' : 'Right',
'select' : 'vertices in (x > 0.499)',
'kind' : 'facet',
}
region_3 = {
'name' : 'Gamma',
'select' : 'vertices of surface',
'kind' : 'facet',
}
ebc_1 = {
'name' : 't_left',
'region' : 'Left',
'dofs' : {'t.0' : 5.0},
}
ebc_2 = {
'name' : 't_right',
'region' : 'Right',
'dofs' : {'t.0' : 0.0},
}
# 'Left' : ('T3', (30,), 'linear_y'),
integral_1 = {
'name' : 'i',
'order' : 2,
}
equations = {
'Temperature' : """dw_laplace.i.Omega( coef.val, s, t ) = 0"""
}
solution = {
't' : '- 5.0 * (x - 0.5)',
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
}
lin_min, lin_max = 0.0, 2.0
##
# 31.05.2007, c
def linear( bc, ts, coor, which ):
vals = coor[:,which]
min_val, max_val = vals.min(), vals.max()
vals = (vals - min_val) / (max_val - min_val) * (lin_max - lin_min) + lin_min
return vals
##
# 31.05.2007, c
def linear_x( bc, ts, coor ):
return linear( bc, ts, coor, 0 )
def linear_y( bc, ts, coor ):
return linear( bc, ts, coor, 1 )
def linear_z( bc, ts, coor ):
return linear( bc, ts, coor, 2 )
from sfepy.base.testing import TestCommon
##
# 30.05.2007, c
class Test( TestCommon ):
##
# 30.05.2007, c
def from_conf( conf, options ):
from sfepy.applications import solve_pde
problem, state = solve_pde(conf, save_results=False)
test = Test(problem=problem, state=state, conf=conf, options=options)
return test
from_conf = staticmethod( from_conf )
##
# 30.05.2007, c
def test_solution( self ):
sol = self.conf.solution
vec = self.state()
problem = self.problem
variables = problem.get_variables()
ok = True
for var_name, expression in six.iteritems(sol):
coor = variables[var_name].field.get_coor()
ana_sol = self.eval_coor_expression( expression, coor )
num_sol = variables.get_state_part_view( vec, var_name )
ret = self.compare_vectors( ana_sol, num_sol,
label1 = 'analytical %s' % var_name,
label2 = 'numerical %s' % var_name )
if not ret:
self.report( 'variable %s: failed' % var_name )
ok = ok and ret
return ok
##
# c: 30.05.2007, r: 19.02.2008
def test_boundary_fluxes( self ):
import os.path as op
from sfepy.linalg import rotation_matrix2d
from sfepy.discrete import Material
problem = self.problem
angles = [0, 30, 45]
region_names = ['Left', 'Right', 'Gamma']
values = [5.0, -5.0, 0.0]
variables = problem.get_variables()
get_state = variables.get_state_part_view
state = self.state.copy(deep=True)
problem.time_update(ebcs={}, epbcs={})
# problem.save_ebc( 'aux.vtk' )
state.apply_ebc()
nls = problem.get_nls()
aux = nls.fun(state())
field = variables['t'].field
conf_m = problem.conf.get_item_by_name('materials', 'm')
m = Material.from_conf(conf_m, problem.functions)
name = op.join( self.options.out_dir,
op.split( problem.domain.mesh.name )[1] + '_%02d.mesh' )
orig_coors = problem.get_mesh_coors().copy()
ok = True
for ia, angle in enumerate( angles ):
self.report( '%d: mesh rotation %d degrees' % (ia, angle) )
problem.domain.mesh.transform_coors( rotation_matrix2d( angle ),
ref_coors = orig_coors )
problem.set_mesh_coors(problem.domain.mesh.coors,
update_fields=True)
problem.domain.mesh.write( name % angle, io = 'auto' )
for ii, region_name in enumerate( region_names ):
flux_term = 'd_surface_flux.i.%s( m.K, t )' % region_name
val1 = problem.evaluate(flux_term, t=variables['t'], m=m)
rvec = get_state( aux, 't', True )
reg = problem.domain.regions[region_name]
nods = field.get_dofs_in_region(reg, merge=True)
val2 = rvec[nods].sum() # Assume 1 dof per node.
ok = ok and ((abs( val1 - values[ii] ) < 1e-10) and
(abs( val2 - values[ii] ) < 1e-10))
self.report( ' %d. %s: %e == %e == %e'\
% (ii, region_name, val1, val2, values[ii]) )
# Restore original coordinates.
problem.domain.mesh.transform_coors(rotation_matrix2d(0),
ref_coors=orig_coors)
problem.set_mesh_coors(problem.domain.mesh.coors,
update_fields=True)
return ok
| {
"repo_name": "lokik/sfepy",
"path": "tests/test_laplace_unit_square.py",
"copies": "3",
"size": "5870",
"license": "bsd-3-clause",
"hash": -1831525260669356800,
"line_mean": 25.4414414414,
"line_max": 81,
"alpha_frac": 0.5052810903,
"autogenerated": false,
"ratio": 3.0652741514360313,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5070555241736032,
"avg_score": null,
"num_lines": null
} |
# 30.05.2007, c
# last revision: 25.02.2008
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/2d/square_unit_tri.mesh'
material_1 = {
'name' : 'coef',
'values' : {
'val' : 1.0,
},
}
material_2 = {
'name' : 'm',
'values' : {
'K' : [[1.0, 0.0], [0.0, 1.0]],
},
}
field_1 = {
'name' : 'a_harmonic_field',
'dtype' : 'real',
'shape' : 'scalar',
'region' : 'Omega',
'approx_order' : 2,
}
variable_1 = {
'name' : 't',
'kind' : 'unknown field',
'field' : 'a_harmonic_field',
'order' : 0,
}
variable_2 = {
'name' : 's',
'kind' : 'test field',
'field' : 'a_harmonic_field',
'dual' : 't',
}
region_1000 = {
'name' : 'Omega',
'select' : 'all',
}
region_1 = {
'name' : 'Left',
'select' : 'nodes in (x < -0.499)',
'can_cells' : True,
}
region_2 = {
'name' : 'Right',
'select' : 'nodes in (x > 0.499)',
'can_cells' : True,
}
region_3 = {
'name' : 'Gamma',
'select' : 'nodes of surface',
'can_cells' : True,
}
ebc_1 = {
'name' : 't_left',
'region' : 'Left',
'dofs' : {'t.0' : 5.0},
}
ebc_2 = {
'name' : 't_right',
'region' : 'Right',
'dofs' : {'t.0' : 0.0},
}
# 'Left' : ('T3', (30,), 'linear_y'),
integral_1 = {
'name' : 'i1',
'kind' : 'v',
'quadrature' : 'gauss_o2_d2',
}
integral_2 = {
'name' : 'i2',
'kind' : 's3',
'quadrature' : 'gauss_o1_d1',
}
equations = {
'Temperature' : """dw_laplace.i1.Omega( coef.val, s, t ) = 0"""
# 'Temperature' : """dw_hdpm_d.i1.Omega( m.K, s, t ) = 0"""
}
solution = {
't' : '- 5.0 * (x - 0.5)',
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.umfpack',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
'eps_r' : 1.0,
'macheps' : 1e-16,
'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red).
'ls_red' : 0.1,
'ls_red_warp' : 0.001,
'ls_on' : 1.1,
'ls_min' : 1e-5,
'check' : 0,
'delta' : 1e-6,
'is_plot' : False,
'lin_solver' : 'umfpack',
'matrix' : 'internal', # 'external' or 'internal'
'problem' : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max)
}
fe = {
'chunk_size' : 1000
}
lin_min, lin_max = 0.0, 2.0
##
# 31.05.2007, c
def linear( bc, ts, coor, which ):
vals = coor[:,which]
min_val, max_val = vals.min(), vals.max()
vals = (vals - min_val) / (max_val - min_val) * (lin_max - lin_min) + lin_min
return vals
##
# 31.05.2007, c
def linear_x( bc, ts, coor ):
return linear( bc, ts, coor, 0 )
def linear_y( bc, ts, coor ):
return linear( bc, ts, coor, 1 )
def linear_z( bc, ts, coor ):
return linear( bc, ts, coor, 2 )
from sfepy.base.testing import TestCommon
##
# 30.05.2007, c
class Test( TestCommon ):
##
# 30.05.2007, c
def from_conf( conf, options ):
from sfepy.solvers.generic import solve_stationary
problem, vec = solve_stationary(conf)
test = Test(problem=problem, vec=vec, conf=conf, options=options)
return test
from_conf = staticmethod( from_conf )
##
# 30.05.2007, c
def test_solution( self ):
sol = self.conf.solution
vec = self.vec
problem = self.problem
variables = problem.get_variables()
ok = True
for var_name, expression in sol.iteritems():
coor = variables[var_name].field.get_coor()
ana_sol = self.eval_coor_expression( expression, coor )
num_sol = variables.get_state_part_view( vec, var_name )
ret = self.compare_vectors( ana_sol, num_sol,
label1 = 'analytical %s' % var_name,
label2 = 'numerical %s' % var_name )
if not ret:
self.report( 'variable %s: failed' % var_name )
ok = ok and ret
return ok
##
# c: 30.05.2007, r: 19.02.2008
def test_boundary_fluxes( self ):
import os.path as op
from sfepy.base.base import Struct
from sfepy.linalg import rotation_matrix2d
from sfepy.fem.evaluate import BasicEvaluator
problem = self.problem
vec = self.vec
angles = [0, 30, 45]
region_names = ['Left', 'Right', 'Gamma']
values = [5.0, -5.0, 0.0]
variables = problem.get_variables()
get_state = variables.get_state_part_view
state = vec.copy()
problem.time_update(ebcs={}, epbcs={})
# problem.save_ebc( 'aux.vtk' )
problem.apply_ebc( state )
ev = BasicEvaluator( problem )
aux = ev.eval_residual( state )
field = variables['t'].field
name = op.join( self.options.out_dir,
op.split( problem.domain.mesh.name )[1] + '_%02d.mesh' )
orig_coors = problem.get_mesh_coors().copy()
ok = True
for ia, angle in enumerate( angles ):
self.report( '%d: mesh rotation %d degrees' % (ia, angle) )
problem.domain.mesh.transform_coors( rotation_matrix2d( angle ),
ref_coors = orig_coors )
problem.domain.mesh.write( name % angle, io = 'auto' )
for ii, region_name in enumerate( region_names ):
flux_term = 'd_hdpm_surfdvel.i2.%s( m.K, t )' % region_name
val1 = problem.evaluate(flux_term, t=variables['t'])
rvec = get_state( aux, 't', True )
reg = problem.domain.regions[region_name]
nods = reg.get_field_nodes( field, merge = True )
val2 = rvec[nods].sum() # Assume 1 dof per node.
ok = ok and ((abs( val1 - values[ii] ) < 1e-10) and
(abs( val2 - values[ii] ) < 1e-10))
self.report( ' %d. %s: %e == %e == %e'\
% (ii, region_name, val1, val2, values[ii]) )
return ok
| {
"repo_name": "olivierverdier/sfepy",
"path": "tests/test_laplace_unit_square.py",
"copies": "1",
"size": "6037",
"license": "bsd-3-clause",
"hash": 8304519282148385000,
"line_mean": 24.5805084746,
"line_max": 81,
"alpha_frac": 0.4956104025,
"autogenerated": false,
"ratio": 2.899615754082613,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3895226156582613,
"avg_score": null,
"num_lines": null
} |
# 300. Longest Increasing Subsequence
#
# Given an unsorted array of integers, find the length of longest increasing subsequence.
# For example,
# Given [10, 9, 2, 5, 3, 7, 101, 18],
# The longest increasing subsequence is [2, 3, 7, 101], therefore the length is 4.
# Note that there may be more than one LIS combination,
# it is only necessary for you to return the length.
# Your algorithm should run in O(n^2) complexity.
# Follow up: Could you improve it to O(n log n) time complexity?
class Solution(object):
# https://gengwg.blogspot.com/2018/02/leetcode-300-longest-increasing.html
# note this is subsequence not sub array.
def lengthOfLIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# can simply return max(dp) if with this line
# if not nums:
# return 0
sz = len(nums)
# dp[i]: length of LIS ending with nums[i]
dp = [1] * sz
for i in range(sz):
for j in range(i):
if nums[i] > nums[j]:
# max of multiple possible sub problems
dp[i] = max(dp[i], dp[j] + 1)
return max(dp) if dp else 0
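# A hedged follow-up sketch (not part of the original solution), addressing the
# O(n log n) question above: keep `tails`, where tails[k] is the smallest
# possible tail of an increasing subsequence of length k+1, and place each
# number with a binary search. length_of_lis_nlogn is an illustrative name.
import bisect
def length_of_lis_nlogn(nums):
    tails = []
    for x in nums:
        i = bisect.bisect_left(tails, x)
        if i == len(tails):
            tails.append(x)   # x extends the longest subsequence found so far
        else:
            tails[i] = x      # x becomes a smaller tail for subsequences of length i+1
    return len(tails)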
if __name__ == '__main__':
print(Solution().lengthOfLIS([10, 9, 2, 5, 3, 7, 101, 18]))
| {
"repo_name": "gengwg/leetcode",
"path": "300_longest_increasing_subsequence.py",
"copies": "1",
"size": "1282",
"license": "apache-2.0",
"hash": -8852912291516857000,
"line_mean": 31.8717948718,
"line_max": 89,
"alpha_frac": 0.5889235569,
"autogenerated": false,
"ratio": 3.40053050397878,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.448945406087878,
"avg_score": null,
"num_lines": null
} |
# 303 - Range Sum Query Immutable (Easy)
# https://leetcode.com/problems/range-sum-query-immutable/
class NumArray(object):
def __init__(self, nums):
"""
initialize your data structure here.
:type nums: List[int]
"""
self.arr = []
acum = 0
self.arr.append(0)
for i in range(len(nums)):
acum += nums[i]
self.arr.append(acum)
def sumRange(self, i, j):
"""
sum of elements nums[i..j], inclusive.
:type i: int
:type j: int
:rtype: int
"""
return self.arr[j+1] - self.arr[i]
# Your NumArray object will be instantiated and called as such:
# numArray = NumArray(nums)
# numArray.sumRange(0, 1)
# numArray.sumRange(1, 2)
"""
Given nums = [-2, 0, 3, -5, 2, -1]
sumRange(0, 2) -> 1
sumRange(2, 5) -> -1
sumRange(0, 5) -> -3
Time complexity : O(1) time per query, O(n) time pre-computation. Since the
cumulative sum is cached, each sumRange query can be calculated in O(1)
time. Space complexity : O(n).
""" | {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/01_Easy/lc_303.py",
"copies": "1",
"size": "1081",
"license": "mit",
"hash": -5537046844776160000,
"line_mean": 26.7435897436,
"line_max": 79,
"alpha_frac": 0.5605920444,
"autogenerated": false,
"ratio": 3.2560240963855422,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9199094773264175,
"avg_score": 0.023504273504273504,
"num_lines": 39
} |
# 303. Range Sum Query - Immutable
# Given an integer array nums,
# find the sum of the elements between indices i and j (i ≤ j), inclusive.
# Example:
# Given nums = [-2, 0, 3, -5, 2, -1]
# sumRange(0, 2) -> 1
# sumRange(2, 5) -> -1
# sumRange(0, 5) -> -3
# Note:
# You may assume that the array does not change.
# There are many calls to sumRange function.
# http://bookshadow.com/weblog/2015/11/10/leetcode-range-sum-query-immutable/
# Compute an auxiliary array sums:
# sums[0] = 0, for k = 0
# sums[k] = nums[0] + nums[1] + ... + nums[k-1], for k > 0
# Then sumRange(i, j) = sums[j+1] - sums[i]
# e.g.
# sums[5] = nums[0] + nums[1] + nums[2] + nums[3] + nums[4]
# sums[2] = nums[0] + nums[1]
# sumRange(2,4) = sums[5] - sums[2] = nums[2] + nums[3] + nums[4]
class NumArray(object):
def __init__(self, nums):
"""
:type nums: List[int]
"""
size = len(nums)
self.sums = [0] * (size + 1)
for i in range(size):
self.sums[i+1] = self.sums[i] + nums[i]
# for i in range(1, size+1):
# self.sums[i] = self.sums[i-1] + nums[i-1]
def sumRange(self, i, j):
"""
:type i: int
:type j: int
:rtype: int
"""
return self.sums[j+1] - self.sums[i]
# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# param_1 = obj.sumRange(i,j)
| {
"repo_name": "gengwg/leetcode",
"path": "303_range_sum_query_immutable.py",
"copies": "1",
"size": "1403",
"license": "apache-2.0",
"hash": -5491774437696001000,
"line_mean": 23.298245614,
"line_max": 77,
"alpha_frac": 0.5415162455,
"autogenerated": false,
"ratio": 2.564814814814815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8604960446279727,
"avg_score": 0.00027412280701754384,
"num_lines": 57
} |
# 304 Range Sum Query 2D - Immutable
#
# Given a 2D matrix matrix,
# find the sum of the elements inside the rectangle defined by
# its upper left corner (row1, col1) and lower right corner (row2, col2).
# Example:
# Given matrix = [
# [3, 0, 1, 4, 2],
# [5, 6, 3, 2, 1],
# [1, 2, 0, 1, 5],
# [4, 1, 0, 1, 7],
# [1, 0, 3, 0, 5]
# ]
# sumRegion(2, 1, 4, 3) -> 8
# sumRegion(1, 1, 2, 2) -> 11
# sumRegion(1, 2, 2, 4) -> 12
# Note:
# You may assume that the matrix does not change.
# There are many calls to sumRegion function.
# You may assume that row1 ≤ row2 and col1 ≤ col2.
# http://bookshadow.com/weblog/2015/11/12/leetcode-range-sum-query-2d-immutable/
# Build an auxiliary 2D array sums
# sums[x][y] is the sum of the sub-matrix from (0, 0) to (x, y)
# By the inclusion-exclusion principle:
# sumRange(row1, col1, row2, col2)
# = sums[row2][col2] + sums[row1 - 1][col1 - 1] - sums[row1 - 1][col2] - sums[row2][col1 - 1]
# Padding the auxiliary matrix with one extra row and column simplifies handling of the matrix boundary.
class NumMatrix(object):
def __init__(self, matrix):
"""
:type matrix: List[List[int]]
"""
m = len(matrix)
n = len(matrix[0]) if m else 0
self.sums = [[0 for _ in range(n+1)] for __ in range(m+1)]
for i in range(1, m+1):
for j in range(1, n+1):
self.sums[i][j] = matrix[i-1][j-1] + self.sums[i][j-1] \
+ self.sums[i-1][j] - self.sums[i-1][j-1]
def sumRegion(self, row1, col1, row2, col2):
"""
:type row1: int
:type col1: int
:type row2: int
:type col2: int
:rtype: int
"""
return self.sums[row2 + 1][col2 + 1] + self.sums[row1][col1] \
- self.sums[row1][col2 + 1] - self.sums[row2 + 1][col1]
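# A hedged usage sketch (not part of the original file), checking the example
# queries from the problem statement above.
if __name__ == '__main__':
    matrix = [
        [3, 0, 1, 4, 2],
        [5, 6, 3, 2, 1],
        [1, 2, 0, 1, 5],
        [4, 1, 0, 1, 7],
        [1, 0, 3, 0, 5]
    ]
    nm = NumMatrix(matrix)
    print(nm.sumRegion(2, 1, 4, 3))  # expected 8
    print(nm.sumRegion(1, 1, 2, 2))  # expected 11
    print(nm.sumRegion(1, 2, 2, 4))  # expected 12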
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
| {
"repo_name": "gengwg/leetcode",
"path": "304_range_sum_query_2d_immutable.py",
"copies": "1",
"size": "1941",
"license": "apache-2.0",
"hash": -4026733841934629000,
"line_mean": 25.5362318841,
"line_max": 93,
"alpha_frac": 0.555434189,
"autogenerated": false,
"ratio": 2.3656330749354004,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.34210672639354,
"avg_score": null,
"num_lines": null
} |
# 30
# l 7
class Solution:
"""
@param A : a list of integers
@param target : an integer to be searched
@return : a list of length 2, [index1, index2]
"""
def searchRange(self, A, target):
# write your code here
if A is None or A == []:
return [-1, -1]
start = 0
end = len(A) - 1
mid = start - (start - end) / 2
while start + 1 < end:
if A[mid] > target:
end = mid
if A[mid] < target:
start = mid
if A[mid] == target:
break
mid = start - (start - end) / 2
else:
if A[start] == target:
if A[end] == target:
return [start, end]
if A[end] != target:
return [start, start]
else:
if A[end] == target:
return [end, end]
else:
return [-1, -1]
new_end = new_start = mid
# find start
mid = start - (start - new_end) / 2
while start + 1 < new_end:
if A[mid] >= target:
new_end = mid
if A[mid] < target:
start = mid
mid = start - (start - new_end) / 2
if A[start] != target:
start = new_end
# find end
mid = new_start - (new_start - end) / 2
while new_start + 1 < end:
if A[mid] <= target:
new_start = mid
else:
end = mid
mid = new_start - (new_start - end) / 2
if A[end] != target:
end = new_start
return [start, end]
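# A hedged alternative sketch (not part of the original solution): the same
# range can be found with the standard-library bisect module, which performs
# the two binary searches directly; search_range_bisect is an illustrative name.
from bisect import bisect_left, bisect_right
def search_range_bisect(A, target):
    if not A:
        return [-1, -1]
    left = bisect_left(A, target)
    if left == len(A) or A[left] != target:
        return [-1, -1]
    return [left, bisect_right(A, target) - 1]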
| {
"repo_name": "stonemary/lintcode_solutions",
"path": "search-for-a-range/1.py",
"copies": "1",
"size": "1795",
"license": "apache-2.0",
"hash": -7719765717984588000,
"line_mean": 27.046875,
"line_max": 51,
"alpha_frac": 0.3799442897,
"autogenerated": false,
"ratio": 4.1841491841491845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5064093473849185,
"avg_score": null,
"num_lines": null
} |
"""31.0/.2015 PyOSE: Stacked exomoons with the Orbital Sampling Effect."""
import PyOSE
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import rc
from numpy import pi
# Set stellar parameters
StellarRadius = 696342. # km
limb1 = 0.3643
limb2 = 0.2807
# Set planet parameters
#PlanetRadius = 63700. # [km]
PlanetRadius = 63750.
PlanetAxis = 149597870.700 # [km]
PlanetImpact = 0.4 # [0..1.x]; central transit is 0.
PlanetPeriod = 365.25 # [days]
# Set moon parameters
MoonRadius = 17500. # [km]
MoonAxis = 384000. # [km]
MoonEccentricity = 0.7 # 0..1
MoonAscendingNode = 30.0 # degrees
MoonLongitudePeriastron = 50.0 # degrees
MoonInclination = 83.0 # 0..90 in degrees. 0 is the reference plain (no incl).
# Set other parameters
ShowPlanetMoonEclipses = True # True: the reality; False would be no mutual
# eclipses. Of course unphysical, but useful for tests and comparisons)
ShowPlanet = False # True: Planet+Moon; False: Moon only
Noise = 0 # [ppm per minute]; 0 = no noise is added
NumberOfTransits = 0 # How many (randomly chosen) transits are observed;
# if 0 then all available are sampled (and their number is 10*Quality).
PhaseToHighlight = 0.09 # If no highlighting is desired, choose value < 0
Quality = 250 # Radius of star in pixels --> size of numerical sampling grid
NumberOfSamples = 250 # How many transits are to be sampled
# 3D model
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
MyModelview = PyOSE.modelview(
StellarRadius, limb1, limb2, PlanetRadius, PlanetImpact,
MoonRadius, MoonAxis, MoonEccentricity, MoonAscendingNode,
MoonLongitudePeriastron, MoonInclination, PhaseToHighlight, Quality)
ax = plt.axes()
ax.arrow(0, 0.4, 0.364, 0, head_width=0.0, head_length=0.0, fc='k', ec='k',
zorder = 10) # direction
#ax.arrow(0.364, 0.4, 0, -0.1, head_width=0.0, head_length=0.0, fc='k', ec='k')
ax.arrow(0.356, 0.0, 0.0, 0.205, head_width=0.0, head_length=0.0, fc='k', ec='k',
zorder = 10) # b_S
ax.tick_params(direction='out')
plt.tick_params(axis='both', which='major', labelsize=16)
plt.annotate(r"direction", xy=(0.17, 0.42), size=16)
plt.annotate(r"$b_S$", xy=(0.4, 0.05), size=16)
#plt.annotate(r"$b_P$ ", xy=(-0.12, 0.1), size=16)
plt.xlabel('distance [stellar radii]',fontsize=16)
plt.ylabel('distance [stellar radii]',fontsize=16)
ax.set_aspect('equal')
plt.savefig("figure3-3dview.pdf", bbox_inches='tight')
MyModelview.show()
# River
MyRiverKepler = PyOSE.river(
StellarRadius, limb1, limb2,
PlanetRadius, PlanetAxis, PlanetImpact, PlanetPeriod,
MoonRadius, MoonAxis, MoonEccentricity, MoonAscendingNode,
MoonLongitudePeriastron, MoonInclination,
ShowPlanetMoonEclipses, Quality, NumberOfSamples, Noise)
# River function returns pixel map. To plot time axis, call function timeaxis
MyTime = PyOSE.timeaxis(
PlanetPeriod, PlanetAxis, MoonRadius, StellarRadius, Quality)
plt.imshow(MyRiverKepler, cmap=cm.gray, interpolation='none',
extent=[MyTime[0], -MyTime[0], 1, 0])
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
ax = plt.axes()
ax.arrow(MyTime[0], PhaseToHighlight, 0.001, 0, head_width=0.1, head_length=0.1,
fc='k', ec='k')
ax.arrow(-0.5, PhaseToHighlight, 0.001, 0, head_width=0.05, head_length=0.05,
fc='k', ec='k')
plt.tick_params(axis='both', which='major', labelsize=16)
plt.xlabel('time around planetary mid-transit [days]',fontsize=16)
plt.ylabel('phase [0..1]',fontsize=16)
plt.axis([-0.5, 0.5, 1, 0], fontsize=16)
ax.tick_params(direction='out')
plt.savefig("figure3-river.pdf", bbox_inches='tight')
plt.show()
# Curve
MyNewCurve = PyOSE.curve(StellarRadius, limb1, limb2, PlanetRadius, PlanetAxis,
PlanetImpact, PlanetPeriod, MoonRadius, MoonAxis, MoonEccentricity,
MoonAscendingNode, MoonLongitudePeriastron, MoonInclination,
ShowPlanetMoonEclipses, ShowPlanet, Quality, NumberOfSamples, Noise,
NumberOfTransits)
Time = MyNewCurve[0][1:]
Flux = MyNewCurve[1][1:]
plt.plot(Time, Flux, color = 'k')
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.tick_params(axis='both', which='major', labelsize=16)
plt.xlabel('time around planetary mid-transit [days]',fontsize=16)
plt.ylabel('normalized stellar brightness [$10^{-6}$]',fontsize=16)
plt.axis([-0.5, +0.5, -700, 1], set_aspect='equal', fontsize=16)
ax.tick_params(direction='out')
plt.savefig("figure3-curve.pdf", bbox_inches='tight')
plt.show()
# Integral
MyIntegral = PyOSE.integral(StellarRadius, limb1, limb2, PlanetRadius,
PlanetAxis, PlanetImpact, PlanetPeriod, MoonRadius, MoonAxis,
MoonEccentricity, MoonAscendingNode, MoonLongitudePeriastron,
MoonInclination, ShowPlanetMoonEclipses, ShowPlanet,
Quality, NumberOfSamples, Noise, NumberOfTransits)
print 'Total occulted stellar flux:', MyIntegral, 'ppm hrs'
#Save curve to Excel file
#book = xlwt.Workbook()
#sheet1 = book.add_sheet('sheet1')
#for i in range(len(Time)):
# sheet1.write(i,0,Time[i])
# sheet1.write(i,1,Flux[i])
#book.save("curve.xls")
#book.save(TemporaryFile())
# Debug OSE duration. Use only with noiseless data and eccentricity = 0
v = 2 * pi * PlanetAxis / PlanetPeriod
D = (2 * MoonAxis + 2 * MoonRadius + 2 * StellarRadius) / v
print 'Moon transit duration (analytical):', D, '[days]'
#Get first data point that contains negative flux value
for i in range(len(Time)):
if Flux[i] < 0:
FirstDataPoint = Time[i]
break
#Get last data point that contains negative flux value
for i in range(int(0.5 * len(Time)), len(Time)):
if Flux[i] == 0:
LastDataPoint = Time[i-1]
break
MoonTransitDurationFromData = LastDataPoint - FirstDataPoint
print 'Moon transit duration (from data): ', \
MoonTransitDurationFromData, '[days]'
ErrorInDuration = ((MoonTransitDurationFromData / D) - 1) * 100
print 'Error:', ErrorInDuration, '[%]. Note: Assumes eccentricity = impact = 0.'
| {
"repo_name": "hippke/PyOSE",
"path": "CreateFigure3.py",
"copies": "1",
"size": "5893",
"license": "mit",
"hash": -5215666870820811000,
"line_mean": 34.7151515152,
"line_max": 81,
"alpha_frac": 0.7113524521,
"autogenerated": false,
"ratio": 2.7409302325581395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8903281491398676,
"avg_score": 0.009800238651892805,
"num_lines": 165
} |
# 31.05.2007, c
# last revision: 25.02.2008
from __future__ import absolute_import
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/2d/circle_sym.mesh'
material_1 = {
'name' : 'coef',
'values' : {
'val' : 1.0,
},
}
material_2 = {
'name' : 'm',
'values' : {
'K' : [[1.0, 0.0], [0.0, 1.0]],
},
}
field_1 = {
'name' : 'a_harmonic_field',
'dtype' : 'real',
'shape' : 'scalar',
'region' : 'Omega',
'approx_order' : 2,
}
variable_1 = {
'name' : 't',
'kind' : 'unknown field',
'field' : 'a_harmonic_field',
'order' : 0,
}
variable_2 = {
'name' : 's',
'kind' : 'test field',
'field' : 'a_harmonic_field',
'dual' : 't',
}
region_1000 = {
'name' : 'Omega',
'select' : 'all',
}
region_1 = {
'name' : 'Centre',
'select' : 'vertices in (x < 1e-8) & (x > -1e-8) & (y < 1e-8) & (y > -1e-8)',
'kind' : 'vertex'
}
region_2 = {
'name' : 'Gamma',
'select' : 'vertices of surface',
'kind' : 'facet',
}
ebc_1 = {
'name' : 't_centre',
'region' : 'Centre',
'dofs' : {'t.0' : 1.0},
}
ebc_2 = {
'name' : 't_gamma',
'region' : 'Gamma',
'dofs' : {'t.0' : 0.0},
}
integral_1 = {
'name' : 'i',
'order' : 2,
}
equations = {
'Temperature' : """dw_laplace.i.Omega( coef.val, s, t ) = 0"""
}
solution = {
't' : '- 5.0 * (x - 0.5)',
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
}
from sfepy.base.testing import TestCommon
##
# 31.05.2007, c
class Test( TestCommon ):
##
# 30.05.2007, c
def from_conf( conf, options ):
from sfepy.applications import solve_pde
problem, state = solve_pde(conf, save_results=False)
test = Test(problem=problem, state=state, conf=conf, options=options)
return test
from_conf = staticmethod( from_conf )
##
# 31.05.2007, c
# 02.10.2007
def test_boundary_fluxes( self ):
from sfepy.discrete import Material
problem = self.problem
region_names = ['Gamma']
variables = problem.get_variables()
get_state = variables.get_state_part_view
state = self.state.copy(deep=True)
problem.time_update(ebcs={}, epbcs={})
## problem.save_ebc( 'aux.vtk' )
state.apply_ebc()
nls = problem.get_nls()
aux = nls.fun(state())
field = variables['t'].field
conf_m = problem.conf.get_item_by_name('materials', 'm')
m = Material.from_conf(conf_m, problem.functions)
ok = True
for ii, region_name in enumerate( region_names ):
flux_term = 'ev_surface_flux.1.%s( m.K, t )' % region_name
val1 = problem.evaluate(flux_term, t=variables['t'], m=m)
rvec = get_state( aux, 't', True )
reg = problem.domain.regions[region_name]
nods = field.get_dofs_in_region(reg, merge=True)
val2 = rvec[nods].sum() # Assume 1 dof per node.
eps = 1e-2
ok = ok and ((abs( val1 - val2 ) < eps))
self.report( '%d. %s: |%e - %e| = %e < %.2e'\
% (ii, region_name, val1, val2, abs( val1 - val2 ),
eps) )
return ok
| {
"repo_name": "sfepy/sfepy",
"path": "tests/test_laplace_unit_disk.py",
"copies": "2",
"size": "3356",
"license": "bsd-3-clause",
"hash": -5126609553571748000,
"line_mean": 20.7922077922,
"line_max": 81,
"alpha_frac": 0.4988081049,
"autogenerated": false,
"ratio": 2.8013355592654423,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43001436641654417,
"avg_score": null,
"num_lines": null
} |
# 310. Minimum Height Trees
# For an undirected graph with tree characteristics, we can choose any node as the root.
# The result graph is then a rooted tree. Among all possible rooted trees,
# those with minimum height are called minimum height trees (MHTs).
# Given such a graph, write a function to find all the MHTs and return a list of their root labels.
#
# Format
# The graph contains n nodes which are labeled from 0 to n - 1.
# You will be given the number n and a list of undirected edges (each edge is a pair of labels).
#
# You can assume that no duplicate edges will appear in edges.
# Since all edges are undirected, [0, 1] is the same as [1, 0] and thus will not appear together in edges.
#
# Example 1 :
#
# Input: n = 4, edges = [[1, 0], [1, 2], [1, 3]]
#
# 0
# |
# 1
# / \
# 2 3
#
# Output: [1]
#
# Example 2 :
#
# Input: n = 6, edges = [[0, 3], [1, 3], [2, 3], [4, 3], [5, 4]]
#
# 0 1 2
# \ | /
# 3
# |
# 4
# |
# 5
#
# Output: [3, 4]
#
# Note:
#
# According to the definition of tree on Wikipedia: “a tree is an undirected graph in which any two vertices are connected by exactly one path. In other words, any connected graph without simple cycles is a tree.”
# The height of a rooted tree is the number of edges on the longest downward path between the root and a leaf.
#
class Solution:
# https://leetcode.com/problems/minimum-height-trees/discuss/76055/Share-some-thoughts
def findMinHeightTrees(self, n, edges):
"""
:type n: int
:type edges: List[List[int]]
:rtype: List[int]
"""
if n == 1:
return [0]
adj = [set() for _ in range(n)]
for i, j in edges:
adj[i].add(j)
adj[j].add(i)
leaves = [i for i in range(n) if len(adj[i]) == 1]
while n > 2:
n -= len(leaves)
newLeaves = []
for i in leaves:
j = adj[i].pop()
adj[j].remove(i)
if len(adj[j]) == 1: newLeaves.append(j)
leaves = newLeaves
return leaves
print(Solution().findMinHeightTrees(4, [[1, 0], [1, 2], [1, 3]]))
print(Solution().findMinHeightTrees(6, [[0, 3], [1, 3], [2, 3], [4, 3], [5, 4]]))
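# The solution above repeatedly peels off leaf layers (topological trimming);
# the one or two nodes left are the tree's centers, i.e. the MHT roots. Below
# is a small brute-force cross-check (added sketch, not part of the original
# file): it BFS-computes the height of the tree rooted at every node and keeps
# the nodes of minimal height. O(n^2), only meant for tiny inputs.
def _brute_force_mht(n, edges):
    from collections import deque
    adj = [[] for _ in range(n)]
    for i, j in edges:
        adj[i].append(j)
        adj[j].append(i)
    def height(root):
        seen = {root}
        q = deque([(root, 0)])
        h = 0
        while q:
            node, d = q.popleft()
            h = max(h, d)
            for nxt in adj[node]:
                if nxt not in seen:
                    seen.add(nxt)
                    q.append((nxt, d + 1))
        return h
    heights = [height(r) for r in range(n)]
    best = min(heights)
    return [r for r in range(n) if heights[r] == best]
print(_brute_force_mht(6, [[0, 3], [1, 3], [2, 3], [4, 3], [5, 4]]))  # [3, 4]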
| {
"repo_name": "gengwg/leetcode",
"path": "310_minimum_height_trees.py",
"copies": "1",
"size": "2312",
"license": "apache-2.0",
"hash": -7914796982830569000,
"line_mean": 28.974025974,
"line_max": 217,
"alpha_frac": 0.5619584055,
"autogenerated": false,
"ratio": 3.1878453038674035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42498037093674035,
"avg_score": null,
"num_lines": null
} |
# 3/13/2016 copy from tcor_030525.py
# cd /Volumes/Transcend/SCAN_PROGRAM3
# python
import os
import sys
import cv2
import numpy as np
fscene=os.getcwd()
os.chdir('../Utility')
cwd=os.getcwd()
sys.path.append(cwd)
import rtc_util as ut
import tcor_util as tc
os.chdir(fscene)
#----------------------------
# Initialize
#----------------------------
print "#### Initialize ####"
# parameter input
fold=sys.argv[1]
subf,tdec=fold.split('_')
depth=float(subf[3:5])/100
nmax=int(subf[6:])
nstr=int(sys.argv[2])
nend=int(sys.argv[3])
dec=float(tdec)/10.0
model=subf[5]
print 'depth:',depth
print 'dec:',dec
print 'nmax:',nmax
print nstr,'-',nend
f=open(fold +'/aparm.txt')
text=f.readlines()
f.close()
el=tc.read_parm(text,'el',1)[0]
az=tc.read_parm(text,'az',1)[0]
nband=int(tc.read_parm(text,'nband',1)[0])
offset=tc.read_parm(text,'offset',nband)
gain=tc.read_parm(text,'gain',nband)
if fscene.find('OLI') == -1:
penv=tc.read_parm(text,'penv',3)
else:
penv=tc.read_parm(text,'penv',4)
depth=tc.read_parm(text,'depth',1)[0]
wsize=tc.read_parm(text,'wsize',2)
wsize=[int(x) for x in wsize]
dec=tc.read_parm(text,'dec',1)[0]
twid=tc.read_parm(text,'twid',15)
cls_name=text[-1][:-1]
fun_name=text[-2][:-1]
ut.r_set0=float(text[-3][:-1])
ntau,nhigh,nsang=text[-4].split()
ut.t_set=np.arange(int(ntau))*0.2
print cls_name
print fun_name
print ut.r_set0
print ut.t_set
#------------------------------
# DEM Input and INC Calculation
#------------------------------
# dem input and calc inc
os.chdir('DATA')
dem=cv2.imread('dem.tif',-1)
dem[dem < 0.0]=0.0
#dem=tc.read_tif(fold+'/'+'dem.tif')
tc.jmax,tc.imax=dem.shape
inc=tc.incident(dem,el,az,30.0,30.0)
slp=tc.slope(dem,30.0,30.0)
#s_ang=np.load('sangle.npy')
#veg=np.ones(tc.imax*tc.jmax,dtype=np.uint16).reshape(tc.jmax,tc.imax)
#------------------------------
# Main Processing
#------------------------------
if fscene.find('OLI') == -1:
b_list=[1,2,3]
else:
b_list=[1,2,3,4]
for band in b_list:
print "#### Processing of Band"+str(band)+" ####"
# tau-roh function for every pixel
ut.cosb0=np.cos((90.0-el)*np.pi/180); ut.cosb0
print "--- function list ---"
f_list=np.load(fun_name+'_'+str(band)+'.npy')
cls=np.load(cls_name+'.npy')
os.chdir('../'+fold)
tau=depth*np.ones(tc.imax*tc.jmax).reshape(tc.jmax,tc.imax)
eref=penv[0]*np.ones(tc.imax*tc.jmax).reshape(tc.jmax,tc.imax)
for n in range(nstr):
print "--- "+str(n)+ " iteration ---"
t_ref=np.load('ref'+str(band)+str(n)+'.npy')
temp=np.where(t_ref==1.0)
cls[temp]=-1
eref=(1.0-dec)*eref+dec*tc.xmedian(t_ref,wsize[0])
t_taux=np.load('tau'+str(band)+str(n)+'.npy')
temp=np.where(t_taux==1.8)
cls[temp]=-1
tau=(1.0-dec)*tau+dec*tc.ymedian(t_taux,cls,wsize[1],twid[n])
temp=np.where(cls==-1)
print 100.0*len(temp[0])/float(tc.imax*tc.jmax)
cls=tc.mclass(256*inc,128*slp,256*256*t_ref,nmax)
np.save('cls'+str(band),cls)
iters=np.arange(nend-nstr)+nstr
for iter in iters:
print "--- "+str(iter)+ " iteration ---"
print " > reflectance "
ref=ut.mk_ref(tc.jmax,tc.imax,f_list,tau,eref)
temp=np.where(ref==1.0)
cls[temp]=-1
if model == 'M' : cref=tc.aestm(ref,cls)
elif model == 'P' : cref=tc.aesth(ref,20,cls)
else : cref=tc.aest(ref,cls)
print " > aerosol "
eref=(1.0-dec)*eref+dec*tc.xmedian(ref,wsize[0])
taux=ut.mk_tau(tc.jmax,tc.imax,f_list,eref,cref)
temp=np.where(taux==1.8)
cls[temp]=-1
print " > median filter "
tau=(1.0-dec)*tau+dec*tc.ymedian(taux,cls,wsize[1],twid[iter])
temp=np.where(cls==-1) ; print 100.0*len(temp[0])/float(tc.imax*tc.jmax)
np.save('tau'+str(band)+str(iter),taux)
np.save('ref'+str(band)+str(iter),ref)
#temp=np.where(cls == -1)
#np.save('cls'+str(band)+str(iter)+'x',temp)
os.chdir('../DATA')
exit()
| {
"repo_name": "y-iikura/AtmosphericCorrection",
"path": "PostProcessing/post_reclss.py",
"copies": "1",
"size": "3811",
"license": "mit",
"hash": 8197105201693837000,
"line_mean": 26.615942029,
"line_max": 76,
"alpha_frac": 0.607189714,
"autogenerated": false,
"ratio": 2.3308868501529054,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.34380765641529054,
"avg_score": null,
"num_lines": null
} |
# 313. Super Ugly Number
# Write a program to find the nth super ugly number.
#
# Super ugly numbers are positive numbers whose all prime factors are in the given prime list primes of size k.
#
# Example:
#
# Input: n = 12, primes = [2,7,13,19]
# Output: 32
# Explanation: [1,2,4,7,8,13,14,16,19,26,28,32] is the sequence of the first 12
# super ugly numbers given primes = [2,7,13,19] of size 4.
#
# Note:
#
# 1 is a super ugly number for any given primes.
# The given numbers in primes are in ascending order.
# 0 < k ≤ 100, 0 < n ≤ 106, 0 < primes[i] < 1000.
# The nth super ugly number is guaranteed to fit in a 32-bit signed integer.
#
class Solution:
# https://leetcode.com/problems/super-ugly-number/discuss/76291/Java-three-methods-23ms-36-ms-58ms(with-heap)-performance-explained
    # Basic idea is the same as Ugly Number II: a new ugly number is generated by multiplying a prime
    # with a previously generated ugly number. One catch is the need to remove duplicates.
def nthSuperUglyNumber(self, n, primes):
"""
:type n: int
:type primes: List[int]
:rtype: int
"""
import sys
ugly = [0] * n
idx = [0] * len(primes)
ugly[0] = 1
for i in range(1, n):
# find next
# ugly[i] = sys.maxint # py2
ugly[i] = sys.maxsize
for j in range(0, len(primes)):
ugly[i] = min(ugly[i], primes[j] * ugly[idx[j]])
            # skip duplicates
for j in range(0, len(primes)):
while primes[j] * ugly[idx[j]] <= ugly[i]:
idx[j] += 1
return ugly[n - 1]
sol = Solution()
print(sol.nthSuperUglyNumber(12, [2,7,13,19]))
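# The discussion link above also mentions a heap-based method. The sketch
# below is a hypothetical illustration (not part of the original file): the
# heap holds (candidate value, prime index, index into the ugly list); the
# smallest candidate is popped, appended unless it duplicates the last ugly
# number, and the popped prime then advances to the next ugly index.
def nth_super_ugly_number_heap(n, primes):
    import heapq
    ugly = [1]
    heap = [(p, i, 0) for i, p in enumerate(primes)]
    heapq.heapify(heap)
    while len(ugly) < n:
        val, i, idx = heapq.heappop(heap)
        if val != ugly[-1]:  # skip duplicates produced by different primes
            ugly.append(val)
        heapq.heappush(heap, (primes[i] * ugly[idx + 1], i, idx + 1))
    return ugly[n - 1]
print(nth_super_ugly_number_heap(12, [2, 7, 13, 19]))  # 32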
| {
"repo_name": "gengwg/leetcode",
"path": "313_super_ugly_number.py",
"copies": "1",
"size": "1740",
"license": "apache-2.0",
"hash": 3546217102709732400,
"line_mean": 31.1481481481,
"line_max": 135,
"alpha_frac": 0.5852534562,
"autogenerated": false,
"ratio": 3.1853211009174314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42705745571174314,
"avg_score": null,
"num_lines": null
} |
# 314. Binary Tree Vertical Order Traversal
# Given a binary tree, return the vertical order traversal of its nodes' values.
# (ie, from top to bottom, column by column).
# If two nodes are in the same row and column, the order should be from left to right.
# Examples:
#
# Given binary tree [3,9,20,null,null,15,7],
#
# 3
# /\
# / \
# 9 20
# /\
# / \
# 15 7
#
# return its vertical order traversal as:
#
# [
# [9],
# [3,15],
# [20],
# [7]
# ]
#
# Given binary tree [3,9,8,4,0,1,7],
#
# 3
# /\
# / \
# 9 8
# /\ /\
# / \/ \
# 4 01 7
#
# return its vertical order traversal as:
#
# [
# [4],
# [9],
# [3,0,1],
# [8],
# [7]
# ]
#
# Given binary tree [3,9,8,4,0,1,7,null,null,null,2,5] (0's right child is 2 and 1's left child is 5),
#
# 3
# /\
# / \
# 9 8
# /\ /\
# / \/ \
# 4 01 7
# /\
# / \
# 5 2
#
# return its vertical order traversal as:
#
# [
# [4],
# [9,5],
# [3,0,1],
# [8,2],
# [7]
# ]
#
# https://www.geeksforgeeks.org/print-binary-tree-vertical-order/
# The idea is to traverse the tree once and get the minimum and maximum horizontal distance with respect to root.
# For the tree shown above, minimum distance is -2 (for node with value 4) and maximum distance is 3 (For node with value 9).
# Once we have maximum and minimum distances from root, we iterate for each vertical line at distance minimum to maximum from root,
# and for each vertical line traverse the tree and print the nodes which lie on that vertical line.
class Node:
def __init__(self, key):
self.data = key
self.left = None
self.right = None
class Solution:
# A utility function to find min and max distances with
# respect to root
def findMinMax(self, node, minimum, maximum, hd):
if node is None:
return
if hd < minimum[0]:
minimum[0] = hd
elif hd > maximum[0]:
maximum[0] = hd
# recur for left and right subtrees
self.findMinMax(node.left, minimum, maximum, hd-1)
self.findMinMax(node.right, minimum, maximum, hd+1)
# A utility function to print all nodes on a given line_no
# hd is horizontal distance of current node with respect to root
def printVerticalLine(self, node, line_no, hd):
if node is None:
return
if hd == line_no:
print node.data,
self.printVerticalLine(node.left, line_no, hd-1)
self.printVerticalLine(node.right, line_no, hd+1)
def verticalOrder(self, root):
minimum = [0]
maximum = [0]
self.findMinMax(root, minimum, maximum, 0)
print minimum[0], maximum[0]
for line_no in range(minimum[0], maximum[0]+1):
self.printVerticalLine(root, line_no, 0)
print
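# The method above re-walks the whole tree once per vertical line and prints
# the values. A hypothetical alternative sketch (not part of the original
# file): a single BFS that tags each node with a column index and groups
# values in a dict, returning the list-of-lists format the problem asks for.
def vertical_order_bfs(root):
    if root is None:
        return []
    from collections import defaultdict, deque
    columns = defaultdict(list)
    queue = deque([(root, 0)])
    while queue:
        node, col = queue.popleft()
        columns[col].append(node.data)
        if node.left:
            queue.append((node.left, col - 1))
        if node.right:
            queue.append((node.right, col + 1))
    # BFS (not DFS) keeps the required top-to-bottom order within each column
    return [columns[c] for c in sorted(columns)]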
if __name__ == '__main__':
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
root.right.left = Node(6)
root.right.right = Node(7)
root.right.left.right = Node(8)
root.right.right.right = Node(9)
print "Vertical order traversal is"
Solution().verticalOrder(root)
| {
"repo_name": "gengwg/leetcode",
"path": "314_binary_tree_vertical_order_traversal.py",
"copies": "1",
"size": "3410",
"license": "apache-2.0",
"hash": 1164314591294910200,
"line_mean": 23.8905109489,
"line_max": 131,
"alpha_frac": 0.5451612903,
"autogenerated": false,
"ratio": 3.178005591798695,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4223166882098695,
"avg_score": null,
"num_lines": null
} |
# 318. Maximum Product of Word Lengths
# Given a string array words, find the maximum value of length(word[i]) * length(word[j])
# where the two words do not share common letters.
# You may assume that each word will contain only lower case letters.
# If no such two words exist, return 0.
# Example 1:
# Input: ["abcw","baz","foo","bar","xtfn","abcdef"]
# Output: 16
# Explanation: The two words can be "abcw", "xtfn".
# Example 2:
# Input: ["a","ab","abc","d","cd","bcd","abcd"]
# Output: 4
# Explanation: The two words can be "ab", "cd".
# Example 3:
# Input: ["a","aa","aaa","aaaa"]
# Output: 0
# Explanation: No such pair of words.
class Solution(object):
# https://www.hrwhisper.me/leetcode-maximum-product-of-word-lengths/
    # elements[i] |= 1 << (ord(c) - ord('a')) uses one bit per letter to record whether that letter occurs in word i.
    # For example, if 'a' and 'b' both occur, the mask is 11 (binary).
    # (elements[i] & elements[j]) == 0 checks that the two words share no common letters.
def maxProduct(self, words):
"""
:type words: List[str]
:rtype: int
"""
n = len(words)
elements = [0] * n
for i, s in enumerate(words):
for c in s:
elements[i] |= 1 << (ord(c) - 97)
ans = 0
for i in range(n):
for j in range(i+1, n):
if not (elements[i] & elements[j]):
ans = max(ans, len(words[i]) * len(words[j]))
return ans
s = Solution()
print(s.maxProduct(["abcw", "baz", "foo", "bar", "xtfn", "abcdef"]))
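# A small illustrative check of the bitmask idea (added sketch, not part of
# the original file): word_mask is a hypothetical helper that sets one bit per
# distinct letter, so a zero AND between two masks means no shared letters.
def word_mask(word):
    mask = 0
    for c in word:
        mask |= 1 << (ord(c) - ord('a'))
    return mask
print(word_mask("abcw") & word_mask("xtfn"))  # 0 -> no common letters
print(word_mask("abcw") & word_mask("baz"))   # nonzero -> shares 'a' and 'b'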
| {
"repo_name": "gengwg/leetcode",
"path": "318_Maximum_Product_of_Word_Lengths.py",
"copies": "1",
"size": "1557",
"license": "apache-2.0",
"hash": 8721006329104645000,
"line_mean": 27.3090909091,
"line_max": 90,
"alpha_frac": 0.5651894669,
"autogenerated": false,
"ratio": 3.1905737704918034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42557632373918036,
"avg_score": null,
"num_lines": null
} |
## 3.1 A First Individual
import random
from deap import base
from deap import creator
from deap import tools
IND_SIZE = 5
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", list, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
toolbox.register("attr_float", random.random)
toolbox.register("individual", tools.initRepeat, creator.Individual,
toolbox.attr_float, n=IND_SIZE)
ind1 = toolbox.individual()
print ind1 # [0.86..., 0.27..., 0.70..., 0.03..., 0.87...]
print ind1.fitness.valid # False
## 3.2 Evaluation
def evaluate(individual):
# Do some hard computing on the individual
a = sum(individual)
b = len(individual)
return a, 1. / b
ind1.fitness.values = evaluate(ind1)
print ind1.fitness.valid # True
print ind1.fitness # (2.73, 0.2)
## 3.3 Mutation
mutant = toolbox.clone(ind1)
ind2, = tools.mutGaussian(mutant, mu=0.0, sigma=0.2, indpb=0.2)
del mutant.fitness.values
print ind2 is mutant # True
print mutant is ind1 # False
## 3.4 Crossover
child1, child2 = [toolbox.clone(ind) for ind in (ind1, ind2)]
tools.cxBlend(child1, child2, 0.5)
del child1.fitness.values
del child2.fitness.values
## 3.5 Selection
selected = tools.selBest([child1, child2], 2)
print child1 in selected # True
## 3.5 Note
LAMBDA = 10
toolbox.register("select", tools.selRandom)
population = [ind1, ind2] * 10
selected = toolbox.select(population, LAMBDA)
offspring = [toolbox.clone(ind) for ind in selected]
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "docs/code/tutorials/part_3/3_next_step.py",
"copies": "2",
"size": "1495",
"license": "mit",
"hash": 6748982779075502000,
"line_mean": 24.3389830508,
"line_max": 68,
"alpha_frac": 0.7070234114,
"autogenerated": false,
"ratio": 2.8207547169811322,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4527778128381132,
"avg_score": null,
"num_lines": null
} |
#Copyright (c) 2007-8, Playful Invention Company.
#Copyright (c) 2008-11, Walter Bender
#Copyright (c) 2011 Collabora Ltd. <http://www.collabora.co.uk/>
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import gtk
from math import pi
import os
import pango
import cairo
import pangocairo
from tautils import get_path
from taconstants import COLORDICT, TMP_SVG_PATH
def wrap100(n):
''' A variant on mod... 101 -> 99; 199 -> 1 '''
n = int(n)
n %= 200
if n > 99:
n = 199 - n
return n
def calc_shade(c, s, invert=False):
''' Convert a color to the current shade (lightness/darkness). '''
# Assumes 16 bit input values
if invert:
if s == -1:
return int(c)
elif s < 0:
return int(c / (1 + s))
return int((c - 65536 * s) / (1 - s))
else:
if s < 0:
return int(c * (1 + s))
return int(c + (65536 - c) * s)
def calc_gray(c, g, invert=False):
    ''' Gray is a pseudo-saturation calculation. '''
# Assumes 16 bit input values
if g == 100:
return int(c)
if invert:
if g == 0:
return int(c)
else:
return int(((c * 100) - (32768 * (100 - g))) / g)
else:
return int(((c * g) + (32768 * (100 - g))) / 100)
colors = {}
DEGTOR = pi / 180.
RTODEG = 180. / pi
COLOR_TABLE = (
0xFF0000, 0xFF0D00, 0xFF1A00, 0xFF2600, 0xFF3300,
0xFF4000, 0xFF4D00, 0xFF5900, 0xFF6600, 0xFF7300,
0xFF8000, 0xFF8C00, 0xFF9900, 0xFFA600, 0xFFB300,
0xFFBF00, 0xFFCC00, 0xFFD900, 0xFFE600, 0xFFF200,
0xFFFF00, 0xE6FF00, 0xCCFF00, 0xB3FF00, 0x99FF00,
0x80FF00, 0x66FF00, 0x4DFF00, 0x33FF00, 0x1AFF00,
0x00FF00, 0x00FF0D, 0x00FF1A, 0x00FF26, 0x00FF33,
0x00FF40, 0x00FF4D, 0x00FF59, 0x00FF66, 0x00FF73,
0x00FF80, 0x00FF8C, 0x00FF99, 0x00FFA6, 0x00FFB3,
0x00FFBF, 0x00FFCC, 0x00FFD9, 0x00FFE6, 0x00FFF2,
0x00FFFF, 0x00F2FF, 0x00E6FF, 0x00D9FF, 0x00CCFF,
0x00BFFF, 0x00B3FF, 0x00A6FF, 0x0099FF, 0x008CFF,
0x0080FF, 0x0073FF, 0x0066FF, 0x0059FF, 0x004DFF,
0x0040FF, 0x0033FF, 0x0026FF, 0x001AFF, 0x000DFF,
0x0000FF, 0x0D00FF, 0x1A00FF, 0x2600FF, 0x3300FF,
0x4000FF, 0x4D00FF, 0x5900FF, 0x6600FF, 0x7300FF,
0x8000FF, 0x8C00FF, 0x9900FF, 0xA600FF, 0xB300FF,
0xBF00FF, 0xCC00FF, 0xD900FF, 0xE600FF, 0xF200FF,
0xFF00FF, 0xFF00E6, 0xFF00CC, 0xFF00B3, 0xFF0099,
0xFF0080, 0xFF0066, 0xFF004D, 0xFF0033, 0xFF001A)
class TurtleGraphics:
''' A class for the Turtle graphics canvas '''
def __init__(self, turtle_window, width, height):
''' Create a sprite to hold the canvas. '''
self.turtle_window = turtle_window
self.width = width
self.height = height
self.textsize = 48
self._fgrgb = [255, 0, 0]
self._bgrgb = [255, 248, 222]
self._shade = 0
self._color = 0
self._gray = 100
self.cr_svg = None # Surface used for saving to SVG
# Build a cairo.Context from a cairo.XlibSurface
self.canvas = cairo.Context(self.turtle_window.turtle_canvas)
cr = gtk.gdk.CairoContext(self.canvas)
cr.set_line_cap(1) # Set the line cap to be round
self.set_pen_size(5)
def setup_svg_surface(self):
''' Set up a surface for saving to SVG '''
if self.turtle_window.running_sugar:
svg_surface = cairo.SVGSurface(
os.path.join(get_path(self.turtle_window.activity, 'instance'),
'output.svg'), self.width, self.height)
else:
svg_surface = cairo.SVGSurface(
TMP_SVG_PATH, self.width, self.height)
self.cr_svg = cairo.Context(svg_surface)
self.cr_svg.set_line_cap(1) # Set the line cap to be round
def fill_polygon(self, poly_points):
''' Draw the polygon... '''
def _fill_polygon(cr, poly_points):
cr.new_path()
for i, p in enumerate(poly_points):
if p[0] == 'move':
cr.move_to(p[1], p[2])
elif p[0] == 'rarc':
cr.arc(p[1], p[2], p[3], p[4], p[5])
elif p[0] == 'larc':
cr.arc_negative(p[1], p[2], p[3], p[4], p[5])
else: # line
cr.line_to(p[1], p[2])
cr.close_path()
cr.fill()
_fill_polygon(self.canvas, poly_points)
self.inval()
if self.cr_svg is not None:
_fill_polygon(self.cr_svg, poly_points)
def clearscreen(self, share=True):
'''Clear the canvas and reset most graphics attributes to defaults.'''
def _clearscreen(cr):
cr.move_to(0, 0)
self._bgrgb = [255, 248, 222]
cr.set_source_rgb(self._bgrgb[0] / 255.,
self._bgrgb[1] / 255.,
self._bgrgb[2] / 255.)
cr.rectangle(0, 0, self.width * 2, self.height * 2)
cr.fill()
_clearscreen(self.canvas)
self.inval()
if self.cr_svg is not None:
_clearscreen(self.cr_svg)
def rarc(self, x, y, r, a, heading):
''' draw a clockwise arc '''
def _rarc(cr, x, y, r, a, h):
cr.arc(x, y, r, (h - 180) * DEGTOR, (h - 180 + a) * DEGTOR)
cr.stroke()
_rarc(self.canvas, x, y, r, a, heading)
self.inval()
if self.cr_svg is not None:
_rarc(self.cr_svg, x, y, r, a, heading)
def larc(self, x, y, r, a, heading):
''' draw a counter-clockwise arc '''
def _larc(cr, x, y, r, a, h):
cr.arc_negative(x, y, r, h * DEGTOR, (h - a) * DEGTOR)
cr.stroke()
_larc(self.canvas, x, y, r, a, heading)
self.inval()
if self.cr_svg is not None:
_larc(self.cr_svg, x, y, r, a, heading)
def set_pen_size(self, pen_size):
''' Set the pen size '''
self.canvas.set_line_width(pen_size)
if self.cr_svg is not None:
self.cr_svg.set_line_width(pen_size)
def fillscreen(self, c, s):
''' Deprecated method: Fill screen with color/shade '''
self.fillscreen_with_gray(c, s, self._gray)
def fillscreen_with_gray(self, color, shade, gray):
''' Fill screen with color/shade/gray and reset to defaults '''
save_rgb = self._fgrgb[:]
# Special case for color blocks
if color in COLORDICT:
if COLORDICT[color][0] is None:
self._shade = COLORDICT[color][1]
else:
self._color = COLORDICT[color][0]
else:
self._color = color
if shade in COLORDICT:
self._shade = COLORDICT[shade][1]
else:
self._shade = shade
if gray in COLORDICT:
self._gray = COLORDICT[gray][2]
else:
self._gray = gray
if self._gray < 0:
self._gray = 0
if self._gray > 100:
self._gray = 100
self.set_fgcolor(shade=self._shade, gray=self._gray, color=self._color)
self._bgrgb = self._fgrgb[:]
def _fillscreen(cr, rgb, w, h):
cr.set_source_rgb(rgb[0] / 255., rgb[1] / 255., rgb[2] / 255.)
cr.rectangle(0, 0, w * 2, h * 2)
cr.fill()
_fillscreen(self.canvas, self._fgrgb, self.width, self.height)
self.inval()
if self.cr_svg is not None:
_fillscreen(self.cr_svg, self._fgrgb, self.width, self.height)
self._fgrgb = save_rgb[:]
def set_fgcolor(self, shade=None, gray=None, color=None):
''' Set the foreground color '''
if shade is not None:
self._shade = shade
if gray is not None:
self._gray = gray
if color is not None:
self._color = color
sh = (wrap100(self._shade) - 50) / 50.0
rgb = COLOR_TABLE[wrap100(self._color)]
r = (rgb >> 8) & 0xff00
r = calc_gray(r, self._gray)
r = calc_shade(r, sh)
g = rgb & 0xff00
g = calc_gray(g, self._gray)
g = calc_shade(g, sh)
b = (rgb << 8) & 0xff00
b = calc_gray(b, self._gray)
b = calc_shade(b, sh)
self._fgrgb = [r >> 8, g >> 8, b >> 8]
def draw_surface(self, surface, x, y, w, h):
''' Draw a surface '''
def _draw_surface(cr, surface, x, y, w, h):
cc = gtk.gdk.CairoContext(cr)
cc.set_source_surface(surface, x, y)
cc.rectangle(x, y, w, h)
cc.fill()
_draw_surface(self.canvas, surface, x, y, w, h)
self.inval()
if self.cr_svg is not None:
_draw_surface(self.cr_svg, surface, x, y, w, h)
def draw_pixbuf(self, pixbuf, a, b, x, y, w, h, heading):
''' Draw a pixbuf '''
def _draw_pixbuf(cr, pixbuf, a, b, x, y, w, h, heading):
# Build a gtk.gdk.CairoContext from a cairo.Context to access
# the set_source_pixbuf attribute.
cc = gtk.gdk.CairoContext(cr)
cc.save()
# center the rotation on the center of the image
cc.translate(x + w / 2., y + h / 2.)
cc.rotate(heading * DEGTOR)
cc.translate(-x - w / 2., -y - h / 2.)
cc.set_source_pixbuf(pixbuf, x, y)
cc.rectangle(x, y, w, h)
cc.fill()
cc.restore()
_draw_pixbuf(self.canvas, pixbuf, a, b, x, y, w, h, heading)
self.inval()
if self.cr_svg is not None:
_draw_pixbuf(self.cr_svg, pixbuf, a, b, x, y, w, h, heading)
def draw_text(self, label, x, y, size, width, heading, scale):
''' Draw text '''
def _draw_text(cr, label, x, y, size, width, scale, heading, rgb):
cc = pangocairo.CairoContext(cr)
pl = cc.create_layout()
fd = pango.FontDescription('Sans')
fd.set_size(int(size * scale) * pango.SCALE)
pl.set_font_description(fd)
if isinstance(label, (str, unicode)):
pl.set_text(label.replace('\0', ' '))
elif isinstance(label, (float, int)):
pl.set_text(str(label))
else:
pl.set_text(str(label))
pl.set_width(int(width) * pango.SCALE)
cc.save()
cc.translate(x, y)
cc.rotate(heading * DEGTOR)
cr.set_source_rgb(rgb[0] / 255., rgb[1] / 255., rgb[2] / 255.)
cc.update_layout(pl)
cc.show_layout(pl)
cc.restore()
width *= scale
_draw_text(self.canvas, label, x, y, size, width, scale, heading,
self._fgrgb)
self.inval()
if self.cr_svg is not None: # and self.pendown:
_draw_text(self.cr_svg, label, x, y, size, width, scale, heading,
self._fgrgb)
def set_source_rgb(self):
r = self._fgrgb[0] / 255.
g = self._fgrgb[1] / 255.
b = self._fgrgb[2] / 255.
self.canvas.set_source_rgb(r, g, b)
if self.cr_svg is not None:
self.cr_svg.set_source_rgb(r, g, b)
def draw_line(self, x1, y1, x2, y2):
''' Draw a line '''
def _draw_line(cr, x1, y1, x2, y2):
cr.move_to(x1, y1)
cr.line_to(x2, y2)
cr.stroke()
_draw_line(self.canvas, x1, y1, x2, y2)
if self.cr_svg is not None:
_draw_line(self.cr_svg, x1, y1, x2, y2)
self.inval()
def get_color_index(self, r, g, b, a=0):
''' Find the closest palette entry to the rgb triplet '''
if self._shade != 50 or self._gray != 100:
r <<= 8
g <<= 8
b <<= 8
if self._shade != 50:
sh = (wrap100(self._shade) - 50) / 50.
r = calc_shade(r, sh, True)
g = calc_shade(g, sh, True)
b = calc_shade(b, sh, True)
if self._gray != 100:
r = calc_gray(r, self._gray, True)
g = calc_gray(g, self._gray, True)
b = calc_gray(b, self._gray, True)
r >>= 8
g >>= 8
b >>= 8
min_distance = 1000000
closest_color = -1
for i, c in enumerate(COLOR_TABLE):
cr = int((c & 0xff0000) >> 16)
cg = int((c & 0x00ff00) >> 8)
cb = int((c & 0x0000ff))
distance_squared = \
((cr - r) ** 2) + ((cg - g) ** 2) + ((cb - b) ** 2)
if distance_squared == 0:
return i
if distance_squared < min_distance:
min_distance = distance_squared
closest_color = i
return closest_color
def get_pixel(self, x, y):
''' Read the pixel at x, y '''
if self.turtle_window.interactive_mode:
x = int(x)
y = int(y)
w = self.turtle_window.turtle_canvas.get_width()
h = self.turtle_window.turtle_canvas.get_height()
if x < 0 or x > (w - 1) or y < 0 or y > (h - 1):
return(-1, -1, -1, -1)
# create a new 1x1 cairo surface
cs = cairo.ImageSurface(cairo.FORMAT_RGB24, 1, 1)
cr = cairo.Context(cs)
cr.set_source_surface(self.turtle_window.turtle_canvas, -x, -y)
cr.rectangle(0, 0, 1, 1)
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.fill()
cs.flush() # ensure all writing is done
pixels = cs.get_data() # Read the pixel
return (ord(pixels[2]), ord(pixels[1]), ord(pixels[0]), 0)
else:
return(-1, -1, -1, -1)
def svg_close(self):
''' Close current SVG graphic '''
self.cr_svg.show_page()
def svg_reset(self):
''' Reset svg flags '''
self.cr_svg = None
def inval(self):
''' Invalidate a region for gtk '''
self.turtle_window.inval_all()
| {
"repo_name": "walterbender/AmazonasTortuga",
"path": "TurtleArt/tacanvas.py",
"copies": "2",
"size": "14965",
"license": "mit",
"hash": -1116941944366732400,
"line_mean": 34.4620853081,
"line_max": 79,
"alpha_frac": 0.5347143334,
"autogenerated": false,
"ratio": 3.171894870707927,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4706609204107927,
"avg_score": null,
"num_lines": null
} |
#31-Game
#PythonLab
import random as R
def createDeck():
return [i for i in range(1,15)]
deck = [ createDeck(),createDeck(),
createDeck(),createDeck() ]
Exit = False
while not Exit:
cards = []
mother = []
s = 0
choice = raw_input("Do you want to play? ")
if choice == 'yes' or choice == 'y':
firstCard = R.randint(1,14)
secondCard = R.randint(1,14)
cards.append(firstCard)
cards.append(secondCard)
mCard = R.randint(1,14)
mSCard = R.randint(1,14)
        mother.append(mCard)
        mother.append(mSCard)
ms = sum(mother)
s = sum(cards)
print "Current sum: ",s
while True:
choice = raw_input("Another card?y/n: ")
if choice == 'y':
cards.append(R.randint(1,14))
                s = sum(cards)
if s > 31:
print "You lost with ",s
break
else:
print "Your current number: ",sum(cards)
else:
                print "Your hand: ",sum(cards)
while True:
mother.append(R.randint(1,14))
                    ms = sum(mother)
                    if ms == 31 or (ms < 31 and ms > s):
                        print "Mother Wins"
                        print "Mother had ",ms
                        break
                    elif ms > 31:
                        print "You Won!"
                        print "Mother had ",ms
                        break
                    # otherwise the mother is still below the player's hand: keep drawing
break
    else:
        print "There's the door!"
        Exit = True
| {
"repo_name": "georgezafiris/python-lab",
"path": "cs2_thirtyone_game.py",
"copies": "1",
"size": "1729",
"license": "mit",
"hash": -8573842265179196000,
"line_mean": 24.8059701493,
"line_max": 70,
"alpha_frac": 0.4117987276,
"autogenerated": false,
"ratio": 4.227383863080685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5139182590680684,
"avg_score": null,
"num_lines": null
} |
# #3.1
# names=['zhang dongzhou','jiang miaoshan','zhang qiyang','zhang yifeng','zhang tong']
# def T(a):
# return str(a.title())
# message=T(names[0])+"\tand\t"+T(names[-4])+"\tis\t"+T(names[2])+","+T(names[3])+"\tand\t"+T(names[-1])+"'s parents!"
# print("It is the most important that\n",message)
# #3.2 [adding, modifying and deleting list elements (use del when the element will not be used again after deletion)]
# families=['zhang dongzhou','jiang miaoshan','zhang yi','sssss']
# del families[-1]#(try swapping the order of this line -
# families[-1]='zhang yifeng'#-
# families.append('zhang tong')# -
# families.insert(2,'zhang qiyang')#-and this line to shuffle the order and see what changes)
# print(families)
# #pop deletion [use pop() if you still need a list element after deleting it]; remove() also lets you reuse the deleted element, just like pop
# f=['a','b','c','d']
# popped_f=f.pop(0)
# popped_F=f.pop()#pop with nothing in the parentheses defaults to the last element
# the_third_alphabet='c'#first assign the element to a name
# f.remove(the_third_alphabet)#after deleting this element with remove,
# print(the_third_alphabet)#the name can still be printed to bring the element back
# print(f)
# print(popped_f)
# print(popped_F)
# print("The third alphabet is "+the_third_alphabet.upper()+".")
# message="The\tfirst\tEnglish\talphabet\tis\t"+popped_f.upper()+",\nthe\tsecond\talphabet\tis\t"+f[0].upper()+",\nthe\tforth\tis\t"+popped_F.upper()+"."
# print(message)
# msg= 'sahdhsahdias,{},dhasidhiaoshdioas,{},dasdhasdhasi'.format('88888','0000')
# print(msg)
# #3.3
| {
"repo_name": "Tiger-C/python",
"path": "3/3.py",
"copies": "1",
"size": "1562",
"license": "mit",
"hash": -8494961630740106000,
"line_mean": 29.8780487805,
"line_max": 153,
"alpha_frac": 0.6753554502,
"autogenerated": false,
"ratio": 1.8059914407988589,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.29813468909988594,
"avg_score": null,
"num_lines": null
} |
# 320. Generalized Abbreviation
# Write a function to generate the generalized abbreviations of a word.
# Example:
# Given word = "word", return the following list (order does not matter):
# ["word", "1ord", "w1rd", "wo1d", "wor1", "2rd", "w2d", "wo2", "1o1d", "1or1", "w1r1", "1o2", "2r1", "3d", "w3", "4"]
class Solution(object):
def generateAbbreviations(self, word):
"""
:type word: str
:rtype: List[str]
"""
def dfs(word, i, cur, res):
if i == len(word):
res.append(''.join(cur))
return
cur.append(word[i])
dfs(word, i+1, cur, res)
cur.pop()
if not cur or not cur[-1][-1].isdigit():
for l in range(1, len(word)-i+1):
cur.append(str(l))
dfs(word, i+l, cur, res)
cur.pop()
res, cur = [], []
dfs(word, 0, cur, res)
return res
# https://github.com/jzysheep/LeetCode/blob/master/320.%20Generalized%20Abbreviation.cpp
# https://gengwg.blogspot.com/2018/06/leetcode-320-generalized-abbreviation.html
def generateAbbreviations(self, word):
"""
:type word: str
:rtype: List[str]
time O(2^n)
pos: points to the current character
cur: current string formed
count: how many letters are abbreviated in the current streak
At each step:
Abbreviate the current letter
Keep the current letter and summarize the abbreviation in the current streak
"""
def dfs(res, word, i, cur, count):
if i == len(word):
if count > 0:
cur += str(count)
res.append(cur)
return
# abbreviate this letter
dfs(res, word, i+1, cur, count+1)
            # keep this letter; summarize the abbreviation in the current streak
if count > 0:
cur += str(count)
cur += word[i]
dfs(res, word, i+1, cur, 0)
res = []
dfs(res, word, 0, '', 0)
return res
print Solution().generateAbbreviations('word')
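# A hypothetical alternative sketch (not part of the original file): every
# abbreviation corresponds to one of the 2^n bitmasks, where bit i set means
# "abbreviate word[i]"; runs of abbreviated letters collapse into one count.
def generate_abbreviations_bitmask(word):
    n = len(word)
    res = []
    for mask in range(1 << n):
        cur, count = [], 0
        for i in range(n):
            if mask & (1 << i):  # abbreviate this letter
                count += 1
            else:                # keep this letter; flush any pending count
                if count:
                    cur.append(str(count))
                    count = 0
                cur.append(word[i])
        if count:
            cur.append(str(count))
        res.append(''.join(cur))
    return res
print(sorted(generate_abbreviations_bitmask('word')) ==
      sorted(Solution().generateAbbreviations('word')))  # True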
| {
"repo_name": "gengwg/leetcode",
"path": "320_generalized_abbreviation.py",
"copies": "1",
"size": "2200",
"license": "apache-2.0",
"hash": 2368717046090383000,
"line_mean": 30.4285714286,
"line_max": 118,
"alpha_frac": 0.5181818182,
"autogenerated": false,
"ratio": 3.536977491961415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9549937093440228,
"avg_score": 0.0010444433442373994,
"num_lines": 70
} |
# 322. Coin Change
# You are given coins of different denominations and a total amount of money amount. Write a function to compute the fewest number of coins that you need to make up that amount. If that amount of money cannot be made up by any combination of the coins, return -1.
# Example 1:
# coins = [1, 2, 5], amount = 11
# return 3 (11 = 5 + 5 + 1)
# Example 2:
# coins = [2], amount = 3
# return -1.
# Note:
# You may assume that you have an infinite number of each kind of coin.
class Solution(object):
# https://gengwg.blogspot.com/2018/03/leetcode-322-coin-change.html
def coinChange(self, coins, amount):
"""
:type coins: List[int]
:type amount: int
:rtype: int
"""
dp = [0] + [-1] * amount
for i in range(1, amount + 1):
# set min to a large impossible value
# note it must be inside the loop
mincoins = i + 1
for j in [c for c in coins if c <= i]: # only possible if coin value smaller than i
if dp[i - j] != -1: # if i-j is impossible, i is also impossible.
mincoins = min(mincoins, dp[i - j] + 1)
            # if mincoins is still the impossible sentinel, amount i cannot be formed: set dp[i] to -1
dp[i] = -1 if mincoins == i + 1 else mincoins
return dp[amount]
if __name__ == '__main__':
s = Solution()
print s.coinChange([1], 1) # 1
print s.coinChange([1, 2, 5], 11) # 3
print s.coinChange([1, 5, 10, 25], 63) # 6
print s.coinChange([1, 5, 10, 21, 25], 63) # 3
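# A hypothetical top-down version of the same recurrence (added sketch, not
# part of the original file): fewest(a) = 1 + min(fewest(a - c)) over coins c,
# memoized in a dict; -1 marks amounts that cannot be formed.
def coin_change_topdown(coins, amount):
    memo = {0: 0}
    def fewest(rem):
        if rem < 0:
            return -1
        if rem in memo:
            return memo[rem]
        best = float('inf')
        for c in coins:
            sub = fewest(rem - c)
            if sub >= 0:
                best = min(best, sub + 1)
        memo[rem] = -1 if best == float('inf') else best
        return memo[rem]
    return fewest(amount)
print coin_change_topdown([1, 5, 10, 21, 25], 63)  # 3, matches the DP above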
| {
"repo_name": "gengwg/leetcode",
"path": "322_coin_change.py",
"copies": "1",
"size": "1535",
"license": "apache-2.0",
"hash": 5479912583752918000,
"line_mean": 36.4390243902,
"line_max": 264,
"alpha_frac": 0.5732899023,
"autogenerated": false,
"ratio": 3.238396624472574,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9308499812622908,
"avg_score": 0.0006373428299331751,
"num_lines": 41
} |
# 323 Number of Connected Components in an Undirected Graph
# Given n nodes labeled from 0 to n-1 and a list of undirected edges (each edge is a pair of nodes),
# write a function to find the number of connected components in an undirected graph.
#
# Example 1:
#
# 0 3
# | |
# 1 --- 2 4
#
# Given n = 5 and edges = [[0, 1], [1, 2], [3, 4]], return 2.
#
# Example 2:
#
# 0 4
# | |
# 1 --- 2 --- 3
#
# Given n = 5 and edges = [[0, 1], [1, 2], [2, 3], [3, 4]], return 1.
#
# Note:
# You can assume that no duplicate edges will appear in edges. Since all edges are undirected,
# [0, 1] is the same as [1, 0] and thus will not appear together in edges.
class Solution:
# https://all4win78.wordpress.com/2016/06/27/leetcode-323-number-of-connected-components-in-an-undirected-graph/
def countComponents(self, n, edges):
count = n
idx = [i for i in range(n)]
#for i in range(n):
# idx[i] = i
for edge in edges:
a = edge[0]
b = edge[1]
fa = idx[a]
fb = idx[b]
if fa != fb:
for i in range(n):
if idx[i] == fb:
idx[i] = fa
count -= 1
return count
print(Solution().countComponents(5, [[0, 1], [1, 2], [3, 4]]))
print(Solution().countComponents(5, [[0, 1], [1, 2], [2, 3], [3, 4]]))
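# The label propagation above rewrites the whole idx array for every merging
# edge, i.e. O(n * e) overall. A hypothetical union-find sketch with path
# compression (not part of the original file) gets the same count in
# near-linear time.
def count_components_union_find(n, edges):
    parent = list(range(n))
    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x
    count = n
    for a, b in edges:
        ra, rb = find(a), find(b)
        if ra != rb:
            parent[ra] = rb
            count -= 1
    return count
print(count_components_union_find(5, [[0, 1], [1, 2], [3, 4]]))         # 2
print(count_components_union_find(5, [[0, 1], [1, 2], [2, 3], [3, 4]])) # 1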
| {
"repo_name": "gengwg/leetcode",
"path": "323_number_of_connected_components_in_an_undirected_graph.py",
"copies": "1",
"size": "1439",
"license": "apache-2.0",
"hash": 1300470461648253700,
"line_mean": 29.6170212766,
"line_max": 116,
"alpha_frac": 0.510771369,
"autogenerated": false,
"ratio": 3.1766004415011038,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9178473670665728,
"avg_score": 0.0017796279670750925,
"num_lines": 47
} |
# 324. Wiggle Sort II
# Given an unsorted array nums, reorder it such that nums[0] < nums[1] > nums[2] < nums[3]....
#
# Example 1:
#
# Input: nums = [1, 5, 1, 1, 6, 4]
# Output: One possible answer is [1, 4, 1, 5, 1, 6].
#
# Example 2:
#
# Input: nums = [1, 3, 2, 2, 3, 1]
# Output: One possible answer is [2, 3, 1, 3, 1, 2].
#
# Note:
# You may assume all input has valid answer.
#
# Follow Up:
# Can you do it in O(n) time and/or in-place with O(1) extra space?
class Solution(object):
# 1. sort
# 2. find mid point
# 3. take one from end of two lists one by one
def wiggleSort(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
tmp = sorted(nums)
s = (len(nums) + 1) >> 1
t = len(nums)
for i in range(len(nums)):
if i & 1 == 0: # even number
s -= 1
nums[i] = tmp[s]
else: # odd number
t -= 1
nums[i] = tmp[t]
sol = Solution()
nums = [1, 5, 1, 1, 6, 4]
sol.wiggleSort(nums)
print(nums)
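# A small hypothetical check of the wiggle property (added sketch, not part
# of the original file): every even index must be strictly smaller than its
# right neighbour and every odd index strictly larger.
def is_wiggle(nums):
    return all(nums[i] < nums[i + 1] if i % 2 == 0 else nums[i] > nums[i + 1]
               for i in range(len(nums) - 1))
print(is_wiggle(nums))  # True for the example above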
| {
"repo_name": "gengwg/leetcode",
"path": "324_wiggle_sort_ii.py",
"copies": "1",
"size": "1131",
"license": "apache-2.0",
"hash": 33473013736526304,
"line_mean": 24.1333333333,
"line_max": 94,
"alpha_frac": 0.5128205128,
"autogenerated": false,
"ratio": 2.9149484536082473,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8920063428052554,
"avg_score": 0.001541107671138631,
"num_lines": 45
} |
# 329-longest-increasing-path-in-matrix.py
class Solution(object):
def longestIncreasingPath_wa(self, matrix): # Wrong Answer, need 4 directions
"""
:type matrix: List[List[int]]
:rtype: int
"""
if len(matrix) == 0: return 0
d1 = [[1] * len(matrix[0]) for _ in matrix] # Greater
d2 = [[1] * len(matrix[0]) for _ in matrix] # Lesser
maxLength = 1
for i in xrange(len(matrix)):
for j in xrange(len(matrix[0])):
if i == 0 and j == 0: continue
if i == 0:
if matrix[i][j] > matrix[i][j-1]:
d1[i][j] = d1[i][j-1] + 1
elif matrix[i][j] < matrix[i][j-1]:
d2[i][j] = d2[i][j-1] + 1
elif j == 0:
if matrix[i][j] > matrix[i-1][j]:
d1[i][j] = d1[i-1][j] + 1
elif matrix[i][j] < matrix[i-1][j]:
d2[i][j] = d2[i-1][j] + 1
else:
if matrix[i][j] > matrix[i-1][j]:
d1[i][j] = d1[i-1][j] + 1
elif matrix[i][j] < matrix[i-1][j]:
d2[i][j] = d2[i-1][j] + 1
if matrix[i][j] > matrix[i][j-1]:
d1[i][j] = max(d1[i][j], d1[i][j-1]+1)
elif matrix[i][j] < matrix[i][j-1]:
d2[i][j] = max(d2[i][j], d2[i][j-1]+1)
maxLength = max(maxLength, d1[i][j], d2[i][j])
return maxLength
def longestIncreasingPath(self, matrix):
def dfs(x, y):
if not d[x][y]: # Only calculate once.
curr = matrix[x][y]
d[x][y] = 1 + max(
dfs(x-1, y) if x > 0 and curr > matrix[x-1][y] else 0,
dfs(x+1, y) if x < lx-1 and curr > matrix[x+1][y] else 0,
dfs(x, y-1) if y > 0 and curr > matrix[x][y-1] else 0,
dfs(x, y+1) if y < ly-1 and curr > matrix[x][y+1] else 0)
return d[x][y]
if not matrix or not matrix[0]: return 0
lx, ly = len(matrix), len(matrix[0])
d = [[0] * ly for _ in xrange(lx)]
return max(dfs(x, y) for x in range(lx) for y in range(ly))
s = Solution()
print s.longestIncreasingPath([[9,9,4],[6,6,8],[2,1,1]])
print s.longestIncreasingPath([[0,1,2,3,4,5,6,7,8,9],[19,18,17,16,15,14,13,12,11,10],[20,21,22,23,24,25,26,27,28,29],[39,38,37,36,35,34,33,32,31,30],[40,41,42,43,44,45,46,47,48,49],[59,58,57,56,55,54,53,52,51,50],[60,61,62,63,64,65,66,67,68,69],[79,78,77,76,75,74,73,72,71,70],[80,81,82,83,84,85,86,87,88,89],[99,98,97,96,95,94,93,92,91,90],[100,101,102,103,104,105,106,107,108,109],[119,118,117,116,115,114,113,112,111,110],[120,121,122,123,124,125,126,127,128,129],[139,138,137,136,135,134,133,132,131,130],[0,0,0,0,0,0,0,0,0,0]])
| {
"repo_name": "daicang/Leetcode-solutions",
"path": "329-longest-increasing-path-in-matrix.py",
"copies": "1",
"size": "2949",
"license": "mit",
"hash": -2003808647158119700,
"line_mean": 48.15,
"line_max": 532,
"alpha_frac": 0.4513394371,
"autogenerated": false,
"ratio": 2.676043557168784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8562043356222356,
"avg_score": 0.01306792760928549,
"num_lines": 60
} |
"""32bit ARM/NEON assembly emitter.
Used by code generators to produce ARM assembly with NEON simd code.
Provides tools for easier register management: named register variable
allocation/deallocation, and offers a more procedural/structured approach
to generating assembly.
TODO: right now neon emitter prints out assembly instructions immediately,
it might be beneficial to keep the whole structure and emit the assembly after
applying some optimizations like: instruction reordering or register reuse.
TODO: NeonRegister object assigns explicit registers at allocation time.
Similarly to emitting code, register mapping and reuse can be performed and
optimized lazily.
"""
class Error(Exception):
"""Module level error."""
class RegisterAllocationError(Error):
  """Cannot allocate registers."""
class LaneError(Error):
"""Wrong lane number."""
class ArgumentError(Error):
  """Wrong argument."""
class RegisterDeallocationError(Error):
  """Cannot free a register that was not allocated."""
def _Low(register):
assert register[0] == 'q'
num = int(register[1:])
return 'd%d' % (num * 2)
def _High(register):
assert register[0] == 'q'
num = int(register[1:])
return 'd%d' % (num * 2 + 1)
def _ExpandQuads(registers):
doubles = []
for register in registers:
if register[0] == 'q':
doubles.append(_Low(register))
doubles.append(_High(register))
else:
doubles.append(register)
return doubles
def _MakeCompatible(op1, op2, op3):
if op1[0] == 'd' or op2[0] == 'd' or op3[0] == 'd':
if op1[0] == 'q':
op1 = _Low(op1)
if op2[0] == 'q':
op2 = _Low(op2)
if op3[0] == 'q':
op3 = _Low(op3)
return (op1, op2, op3)
class _NeonRegisters32Bit(object):
"""Utility that keeps track of used 32bit ARM/NEON registers."""
def __init__(self):
self.double = set()
self.double_ever = set()
self.general = set()
self.general_ever = set()
self.parameters = set()
def MapParameter(self, parameter):
self.parameters.add(parameter)
return '%%[%s]' % parameter
def DoubleRegister(self, min_val=0):
for i in range(min_val, 32):
if i not in self.double:
self.double.add(i)
self.double_ever.add(i)
return 'd%d' % i
raise RegisterAllocationError('Not enough double registers.')
def QuadRegister(self, min_val=0):
for i in range(min_val, 16):
if ((i * 2) not in self.double) and ((i * 2 + 1) not in self.double):
self.double.add(i * 2)
self.double.add(i * 2 + 1)
self.double_ever.add(i * 2)
self.double_ever.add(i * 2 + 1)
return 'q%d' % i
raise RegisterAllocationError('Not enough quad registers.')
def GeneralRegister(self):
for i in range(0, 16):
if i not in self.general:
self.general.add(i)
self.general_ever.add(i)
return 'r%d' % i
raise RegisterAllocationError('Not enough general registers.')
def MappedParameters(self):
return [x for x in self.parameters]
def Clobbers(self):
return (['r%d' % i
for i in self.general_ever] + ['d%d' % i
for i in self.DoubleClobbers()])
def DoubleClobbers(self):
return sorted(self.double_ever)
def FreeRegister(self, register):
assert len(register) > 1
num = int(register[1:])
if register[0] == 'r':
assert num in self.general
self.general.remove(num)
elif register[0] == 'd':
assert num in self.double
self.double.remove(num)
elif register[0] == 'q':
assert num * 2 in self.double
assert num * 2 + 1 in self.double
self.double.remove(num * 2)
self.double.remove(num * 2 + 1)
else:
raise RegisterDeallocationError('Register not allocated: %s' % register)
def FreeRegisters(self, registers):
for register in registers:
self.FreeRegister(register)
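# A hypothetical usage sketch (not part of the original module): a code
# generator typically drives the register tracker and the emitter together,
# allocating named registers, wrapping opcodes in an asm block and freeing
# the registers when done, e.g.:
#
#   registers = _NeonRegisters32Bit()
#   emitter = NeonEmitter()
#   source = registers.MapParameter('source')
#   count = registers.GeneralRegister()
#   acc = registers.QuadRegister()
#   emitter.EmitAsmBegin()
#   emitter.EmitMov(count, '#4')
#   emitter.EmitNumericalLabel(1)
#   emitter.EmitVLoad(1, 32, acc, emitter.DereferenceIncrement(source, 64))
#   emitter.EmitVAdd('i32', acc, acc, acc)
#   emitter.EmitSubs(count, count, '#1')
#   emitter.EmitBneBack(1)
#   emitter.EmitAsmEnd([], registers.MappedParameters(), registers.Clobbers())
#   registers.FreeRegisters([count, acc])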
class NeonEmitter(object):
"""Emits ARM/NEON assembly opcodes."""
def __init__(self, debug=False):
self.ops = {}
self.indent = ''
self.debug = debug
def PushIndent(self):
self.indent += ' '
def PopIndent(self):
self.indent = self.indent[:-2]
def EmitIndented(self, what):
print self.indent + what
def PushOp(self, op):
if op in self.ops.keys():
self.ops[op] += 1
else:
self.ops[op] = 1
def ClearCounters(self):
self.ops.clear()
def EmitNewline(self):
print ''
def EmitPreprocessor1(self, op, param):
print '#%s %s' % (op, param)
def EmitPreprocessor(self, op):
print '#%s' % op
def EmitInclude(self, include):
self.EmitPreprocessor1('include', include)
def EmitCall1(self, function, param):
self.EmitIndented('%s(%s);' % (function, param))
def EmitAssert(self, assert_expression):
if self.debug:
self.EmitCall1('assert', assert_expression)
def EmitHeaderBegin(self, header_name, includes):
self.EmitPreprocessor1('ifndef', (header_name + '_H_').upper())
self.EmitPreprocessor1('define', (header_name + '_H_').upper())
self.EmitNewline()
if includes:
for include in includes:
self.EmitInclude(include)
self.EmitNewline()
def EmitHeaderEnd(self):
self.EmitPreprocessor('endif')
def EmitCode(self, code):
self.EmitIndented('%s;' % code)
def EmitFunctionBeginA(self, function_name, params, return_type):
self.EmitIndented('%s %s(%s) {' %
(return_type, function_name,
', '.join(['%s %s' % (t, n) for (t, n) in params])))
self.PushIndent()
def EmitFunctionEnd(self):
self.PopIndent()
self.EmitIndented('}')
def EmitAsmBegin(self):
self.EmitIndented('asm volatile(')
self.PushIndent()
def EmitAsmMapping(self, elements, modifier):
if elements:
self.EmitIndented(': ' + ', '.join(['[%s] "%s"(%s)' % (d, modifier, d)
for d in elements]))
else:
self.EmitIndented(':')
def EmitClobbers(self, elements):
if elements:
self.EmitIndented(': ' + ', '.join(['"%s"' % c for c in elements]))
else:
self.EmitIndented(':')
def EmitAsmEnd(self, outputs, inputs, clobbers):
self.EmitAsmMapping(outputs, '+r')
self.EmitAsmMapping(inputs, 'r')
self.EmitClobbers(clobbers)
self.PopIndent()
self.EmitIndented(');')
def EmitComment(self, comment):
self.EmitIndented('// ' + comment)
def EmitNumericalLabel(self, label):
self.EmitIndented('"%d:"' % label)
def EmitOp1(self, op, param1):
self.PushOp(op)
self.EmitIndented('"%s %s\\n"' % (op, param1))
def EmitOp2(self, op, param1, param2):
self.PushOp(op)
self.EmitIndented('"%s %s, %s\\n"' % (op, param1, param2))
def EmitOp3(self, op, param1, param2, param3):
self.PushOp(op)
self.EmitIndented('"%s %s, %s, %s\\n"' % (op, param1, param2, param3))
def EmitAdd(self, destination, source, param):
self.EmitOp3('add', destination, source, param)
def EmitSubs(self, destination, source, param):
self.EmitOp3('subs', destination, source, param)
def EmitSub(self, destination, source, param):
self.EmitOp3('sub', destination, source, param)
def EmitMul(self, destination, source, param):
self.EmitOp3('mul', destination, source, param)
def EmitMov(self, param1, param2):
self.EmitOp2('mov', param1, param2)
def EmitBeqBack(self, label):
self.EmitOp1('beq', '%db' % label)
def EmitBeqFront(self, label):
self.EmitOp1('beq', '%df' % label)
def EmitBneBack(self, label):
self.EmitOp1('bne', '%db' % label)
def EmitBneFront(self, label):
self.EmitOp1('bne', '%df' % label)
def EmitVAdd(self, add_type, destination, source_1, source_2):
destination, source_1, source_2 = _MakeCompatible(destination, source_1,
source_2)
self.EmitOp3('vadd.%s' % add_type, destination, source_1, source_2)
def EmitVAddw(self, add_type, destination, source_1, source_2):
self.EmitOp3('vaddw.%s' % add_type, destination, source_1, source_2)
def EmitVCvt(self, cvt_to, cvt_from, destination, source):
self.EmitOp2('vcvt.%s.%s' % (cvt_to, cvt_from), destination, source)
def EmitVDup(self, dup_type, destination, source):
self.EmitOp2('vdup.%s' % dup_type, destination, source)
def EmitVMov(self, mov_type, destination, source):
self.EmitOp2('vmov.%s' % mov_type, destination, source)
def EmitVQmovn(self, mov_type, destination, source):
if destination[0] == 'q':
destination = _Low(destination)
self.EmitOp2('vqmovn.%s' % mov_type, destination, source)
def EmitVQmovn2(self, mov_type, destination, source_1, source_2):
self.EmitVQmovn(mov_type, _Low(destination), source_1)
self.EmitVQmovn(mov_type, _High(destination), source_2)
def EmitVQmovun(self, mov_type, destination, source):
if destination[0] == 'q':
destination = _Low(destination)
self.EmitOp2('vqmovun.%s' % mov_type, destination, source)
def EmitVMul(self, mul_type, destination, source_1, source_2):
destination, source_1, source_2 = _MakeCompatible(destination, source_1,
source_2)
self.EmitOp3('vmul.%s' % mul_type, destination, source_1, source_2)
def EmitVMulScalar(self, mul_type, destination, source_1, source_2):
self.EmitOp3('vmul.%s' % mul_type, destination, source_1, source_2)
def EmitVMull(self, mul_type, destination, source_1, source_2):
self.EmitOp3('vmull.%s' % mul_type, destination, source_1, source_2)
def EmitVPadd(self, add_type, destination, source_1, source_2):
self.EmitOp3('vpadd.%s' % add_type, destination, source_1, source_2)
def EmitVPaddl(self, add_type, destination, source):
self.EmitOp2('vpaddl.%s' % add_type, destination, source)
def EmitVPadal(self, add_type, destination, source):
self.EmitOp2('vpadal.%s' % add_type, destination, source)
def EmitVLoad(self, load_no, load_type, destination, source):
self.EmitVLoadA(load_no, load_type, [destination], source)
def EmitVLoadA(self, load_no, load_type, destinations, source):
self.EmitOp2('vld%d.%d' % (load_no, load_type),
'{%s}' % ', '.join(_ExpandQuads(destinations)), source)
def EmitVLoadAE(self,
load_type,
elem_count,
destinations,
source,
alignment=None):
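    """Emit loads for elem_count elements of load_type bits from source.

    Full 256/192/128/64-bit register-list loads are emitted first; any
    remainder smaller than 64 bits is loaded lane by lane.
    """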
bits_to_load = load_type * elem_count
destinations = _ExpandQuads(destinations)
if len(destinations) * 64 < bits_to_load:
      raise ArgumentError('Too few destinations: %d to load %d bits.' %
(len(destinations), bits_to_load))
while bits_to_load > 0:
if bits_to_load >= 256:
self.EmitVLoadA(1, 32, destinations[:4],
self.DereferenceIncrement(source, alignment))
bits_to_load -= 256
destinations = destinations[4:]
elif bits_to_load >= 192:
self.EmitVLoadA(1, 32, destinations[:3],
self.DereferenceIncrement(source, alignment))
bits_to_load -= 192
destinations = destinations[3:]
elif bits_to_load >= 128:
self.EmitVLoadA(1, 32, destinations[:2],
self.DereferenceIncrement(source, alignment))
bits_to_load -= 128
destinations = destinations[2:]
elif bits_to_load >= 64:
self.EmitVLoad(1, 32, destinations[0],
self.DereferenceIncrement(source, alignment))
bits_to_load -= 64
destinations = destinations[1:]
else:
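        # Fewer than 64 bits remain: load the tail lane by lane.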
destination = destinations[0]
if bits_to_load == 56:
self.EmitVLoad(1, 32, self.Lane(32, destination, 0),
self.DereferenceIncrement(source))
self.EmitVLoad(1, 16, self.Lane(16, destination, 2),
self.DereferenceIncrement(source))
self.EmitVLoad(1, 8, self.Lane(8, destination, 6),
self.DereferenceIncrement(source))
elif bits_to_load == 48:
self.EmitVLoad(1, 32, self.Lane(32, destination, 0),
self.DereferenceIncrement(source))
self.EmitVLoad(1, 16, self.Lane(16, destination, 2),
self.DereferenceIncrement(source))
elif bits_to_load == 40:
self.EmitVLoad(1, 32, self.Lane(32, destination, 0),
self.DereferenceIncrement(source))
self.EmitVLoad(1, 8, self.Lane(8, destination, 4),
self.DereferenceIncrement(source))
elif bits_to_load == 32:
self.EmitVLoad(1, 32, self.Lane(32, destination, 0),
self.DereferenceIncrement(source))
elif bits_to_load == 24:
self.EmitVLoad(1, 16, self.Lane(16, destination, 0),
self.DereferenceIncrement(source))
self.EmitVLoad(1, 8, self.Lane(8, destination, 2),
self.DereferenceIncrement(source))
elif bits_to_load == 16:
self.EmitVLoad(1, 16, self.Lane(16, destination, 0),
self.DereferenceIncrement(source))
elif bits_to_load == 8:
self.EmitVLoad(1, 8, self.Lane(8, destination, 0),
self.DereferenceIncrement(source))
else:
raise ArgumentError('Wrong leftover: %d' % bits_to_load)
return
def EmitVLoadE(self, load_type, count, destination, source, alignment=None):
self.EmitVLoadAE(load_type, count, [destination], source, alignment)
def EmitVLoadAllLanes(self, load_no, load_type, destination, source):
destinations = []
if destination[0] == 'q':
destinations.append(self.AllLanes(_Low(destination)))
destinations.append(self.AllLanes(_High(destination)))
else:
destinations.append(self.AllLanes(destination))
self.EmitVLoadA(load_no, load_type, destinations, source)
def EmitPld(self, load_address_register):
self.EmitOp1('pld', '[%s]' % load_address_register)
def EmitPldOffset(self, load_address_register, offset):
self.EmitOp1('pld', '[%s, %s]' % (load_address_register, offset))
def EmitVShl(self, shift_type, destination, source, shift):
self.EmitOp3('vshl.%s' % shift_type, destination, source, shift)
def EmitVStore(self, store_no, store_type, source, destination):
self.EmitVStoreA(store_no, store_type, [source], destination)
def EmitVStoreA(self, store_no, store_type, sources, destination):
self.EmitOp2('vst%d.%d' % (store_no, store_type),
'{%s}' % ', '.join(_ExpandQuads(sources)), destination)
def EmitVStoreAE(self,
store_type,
elem_count,
sources,
destination,
alignment=None):
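    """Emit stores for elem_count elements of store_type bits to destination.

    Full 256/192/128/64-bit register-list stores are emitted first; any
    remainder smaller than 64 bits is stored lane by lane.
    """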
bits_to_store = store_type * elem_count
sources = _ExpandQuads(sources)
if len(sources) * 64 < bits_to_store:
      raise ArgumentError('Too few sources: %d to store %d bits.' %
(len(sources), bits_to_store))
while bits_to_store > 0:
if bits_to_store >= 256:
self.EmitVStoreA(1, 32, sources[:4],
self.DereferenceIncrement(destination, alignment))
bits_to_store -= 256
sources = sources[4:]
elif bits_to_store >= 192:
self.EmitVStoreA(1, 32, sources[:3],
self.DereferenceIncrement(destination, alignment))
bits_to_store -= 192
sources = sources[3:]
elif bits_to_store >= 128:
self.EmitVStoreA(1, 32, sources[:2],
self.DereferenceIncrement(destination, alignment))
bits_to_store -= 128
sources = sources[2:]
elif bits_to_store >= 64:
self.EmitVStore(1, 32, sources[0],
self.DereferenceIncrement(destination, alignment))
bits_to_store -= 64
sources = sources[1:]
else:
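        # Fewer than 64 bits remain: store the tail lane by lane.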
source = sources[0]
if bits_to_store == 56:
self.EmitVStore(1, 32, self.Lane(32, source, 0),
self.DereferenceIncrement(destination))
self.EmitVStore(1, 16, self.Lane(16, source, 2),
self.DereferenceIncrement(destination))
self.EmitVStore(1, 8, self.Lane(8, source, 6),
self.DereferenceIncrement(destination))
elif bits_to_store == 48:
self.EmitVStore(1, 32, self.Lane(32, source, 0),
self.DereferenceIncrement(destination))
self.EmitVStore(1, 16, self.Lane(16, source, 2),
self.DereferenceIncrement(destination))
elif bits_to_store == 40:
self.EmitVStore(1, 32, self.Lane(32, source, 0),
self.DereferenceIncrement(destination))
self.EmitVStore(1, 8, self.Lane(8, source, 4),
self.DereferenceIncrement(destination))
elif bits_to_store == 32:
self.EmitVStore(1, 32, self.Lane(32, source, 0),
self.DereferenceIncrement(destination))
elif bits_to_store == 24:
self.EmitVStore(1, 16, self.Lane(16, source, 0),
self.DereferenceIncrement(destination))
self.EmitVStore(1, 8, self.Lane(8, source, 2),
self.DereferenceIncrement(destination))
elif bits_to_store == 16:
self.EmitVStore(1, 16, self.Lane(16, source, 0),
self.DereferenceIncrement(destination))
elif bits_to_store == 8:
self.EmitVStore(1, 8, self.Lane(8, source, 0),
self.DereferenceIncrement(destination))
else:
raise ArgumentError('Wrong leftover: %d' % bits_to_store)
return
def EmitVStoreE(self, store_type, count, source, destination, alignment=None):
self.EmitVStoreAE(store_type, count, [source], destination, alignment)
def EmitVStoreOffset(self, store_no, store_type, source, destination, offset):
self.EmitVStoreOffsetA(store_no, store_type, [source], destination, offset)
def EmitVStoreOffsetA(self, store_no, store_type, sources, destination,
offset):
self.EmitOp3('vst%d.%d' % (store_no, store_type),
'{%s}' % ', '.join(_ExpandQuads(sources)), destination, offset)
def EmitVStoreOffsetE(self, store_type, count, source, destination, offset):
"""Emit assembly to store a number elements from the source registers."""
    if store_type != 32:
raise ArgumentError('Unsupported store_type: %d' % store_type)
sources = []
if source[0] == 'q':
sources.append(_Low(source))
sources.append(_High(source))
if count * store_type > 128:
        raise ArgumentError('Too many %d-bit elements in a q register: %d' %
(store_type, count))
else:
sources.append(source)
if count * store_type > 64:
        raise ArgumentError('Too many %d-bit elements in a d register: %d' %
(store_type, count))
if count == 1:
self.EmitVStoreOffset(1, store_type, self.Lane(store_type, sources[0], 0),
self.Dereference(destination, None), offset)
elif count == 2:
self.EmitVStoreOffset(1, store_type, sources[0],
self.Dereference(destination, None), offset)
elif count == 3:
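      # Store two elements with post-increment addressing, the third via the
      # offset form, then undo the 8-byte post-increment.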
self.EmitVStore(1, store_type, sources[0],
self.DereferenceIncrement(destination, None))
self.EmitVStoreOffset(1, store_type, self.Lane(store_type, sources[1], 0),
self.Dereference(destination, None), offset)
self.EmitSub(destination, destination, self.ImmediateConstant(8))
elif count == 4:
self.EmitVStoreOffsetA(1, store_type, sources,
self.Dereference(destination, None), offset)
else:
      raise ArgumentError('Too many elements: %d' % count)
def EmitVSumReduce(self, reduce_type, elem_count, reduce_count, destinations,
sources):
"""Emit assembly for n-fold horizontal sum reduction."""
    if reduce_type != 'u32':
raise ArgumentError('Unsupported reduce: %s' % reduce_type)
sources = _ExpandQuads(sources)
destinations = _ExpandQuads(destinations)
if len(destinations) * 2 < elem_count:
raise ArgumentError('Not enough space in destination: %d vs %d' %
(len(destinations) * 2, elem_count))
if len(sources) * 2 != elem_count * reduce_count:
raise ArgumentError('Wrong number of sources: %d vs %d' %
(len(sources) * 2, elem_count * reduce_count))
if reduce_count <= 1:
raise ArgumentError('Unsupported reduce_count: %d' % reduce_count)
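    # Each pass pairwise-adds adjacent registers with vpadd, halving
    # reduce_count, until the destinations hold the reduced results.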
while reduce_count > 1:
if len(sources) % 2 == 1:
sources.append(sources[-1])
if reduce_count == 2:
for i in range(len(destinations)):
self.EmitVPadd(reduce_type, destinations[i], sources[2 * i],
sources[2 * i + 1])
return
else:
sources_2 = []
        for i in range(len(sources) // 2):
self.EmitVPadd(reduce_type, sources[2 * i], sources[2 * i],
sources[2 * i + 1])
sources_2.append(sources[2 * i])
        reduce_count //= 2
sources = sources_2
def Dereference(self, value, alignment=None):
if alignment:
return '[%s:%d]' % (value, alignment)
else:
return '[%s]' % value
def DereferenceIncrement(self, value, alignment=None):
return '%s!' % self.Dereference(value, alignment)
def ImmediateConstant(self, value):
return '#%d' % value
def AllLanes(self, value):
return '%s[]' % value
def Lane(self, bits, value, lane):
"""Get the proper n-bit lane from the given register."""
registers = []
if value[0] == 'q':
registers.append(_Low(value))
registers.append(_High(value))
else:
registers.append(value)
    elems_per_register = 64 // bits
    register = lane // elems_per_register
lane %= elems_per_register
return '%s[%d]' % (registers[register], lane)
def CreateRegisters(self):
return _NeonRegisters32Bit()
| {
"repo_name": "shishaochen/TensorFlow-0.8-Win",
"path": "third_party/gemmlowp/meta/generators/neon_emitter.py",
"copies": "5",
"size": "22203",
"license": "apache-2.0",
"hash": 8112293378004539000,
"line_mean": 34.5817307692,
"line_max": 80,
"alpha_frac": 0.604152592,
"autogenerated": false,
"ratio": 3.454107031736154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6558259623736155,
"avg_score": null,
"num_lines": null
} |
32"""
Creates th WAVES.for mesh for a
square domain under a point load.
@autor Juan Gomez
"""
from __future__ import division
import meshio
import mesh_waves as msw
import numpy as np
import fileinput
import glob
#%%
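# Read the Gmsh mesh; meshio returns the nodes, cells and their attached data.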
points, cells, point_data, cell_data , field_data = \
meshio.read("transparency.msh")
#
nodes_array = msw.node_writer(points , point_data)
print len(nodes_array)
#
#%%
nfin , els1_array = msw.ele_writer(cells , cell_data , 'quad9' , 11000 , 3 , 5 , 18 , 9 , 0 )
nini = nfin
nfin , els2_array = msw.face_recognition(cells , cell_data , 10000 , 5000 , nini)
nini = nfin
nfin , els3_array = msw.boundary_writer(cells , cell_data , 7000 , 6 , nini)
#
#%%
#
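# Write nodal and element blocks to intermediate text files, then concatenate
# them into the final WAVES input file.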
np.savetxt("2nodes.txt", nodes_array,
fmt=("%d", "%d", "%d" , "%d" , "%.4f", "%.4f"))
np.savetxt("5eles.txt", els1_array , fmt="%d")
np.savetxt("6eles.txt", els2_array , fmt="%d")
np.savetxt("7eles.txt", els3_array , fmt="%d")
file_list = glob.glob("*.txt")
with open('transparency.inp', 'w') as out_file:
    input_lines = fileinput.input(file_list)
    out_file.writelines(input_lines)
| {
"repo_name": "jgomezc1/WAVES",
"path": "MODELS/MESHER/transparency_input.py",
"copies": "1",
"size": "1076",
"license": "mit",
"hash": 5631720392737371000,
"line_mean": 26.6153846154,
"line_max": 94,
"alpha_frac": 0.6459107807,
"autogenerated": false,
"ratio": 2.5619047619047617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8510438313700681,
"avg_score": 0.039475445780816076,
"num_lines": 39
} |
#32 - Top Scores.py
# You rank players in the game from highest to lowest score. So far you're using an algorithm that sorts in O(n lg n) time, but players are complaining that their rankings aren't updated fast enough. You need a faster sorting algorithm.
# Write a function that takes:
# a list of unsorted_scores
# the highest_possible_score in the game
# and returns a sorted list of scores in less than O(n log n) time.
unsorted_scores = [5, 20, 3, 89, 89, 99, 98]
highest_possible_score = 100
def get_highest_scores(unsorted_scores, highest_possible_score):
# define a dictionary
scores_to_counts = {}
for score in unsorted_scores:
if score in scores_to_counts:
scores_to_counts[score] += 1
else:
scores_to_counts[score] = 1
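    # sorted() here is O(k log k) in the number of distinct scores k, not in
    # the full input size n.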
    sorted_scores = sorted(scores_to_counts, reverse=True)
full_sorted_list = []
for score in sorted_scores:
j = scores_to_counts[score]
while j > 0:
full_sorted_list.append(score)
j = j - 1
    return full_sorted_list
print get_highest_scores(unsorted_scores, highest_possible_score)
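
# A sketch of an alternative (the helper name below is hypothetical, not from
# the original exercise): a counting-sort pass that actually uses
# highest_possible_score and runs in O(n + highest_possible_score).
def get_highest_scores_counting(unsorted_scores, highest_possible_score):
    # One counter slot per possible score.
    counts = [0] * (highest_possible_score + 1)
    for score in unsorted_scores:
        counts[score] += 1
    full_sorted_list = []
    # Walk the counters from highest score to lowest, emitting each score as
    # many times as it occurred.
    for score in range(highest_possible_score, -1, -1):
        full_sorted_list.extend([score] * counts[score])
    return full_sorted_list

print get_highest_scores_counting(unsorted_scores, highest_possible_score)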
| {
"repo_name": "bruno615/one-off-analysis",
"path": "Python/Inteview Cake/32 - Top Scores.py",
"copies": "1",
"size": "1084",
"license": "mit",
"hash": 6049270699746267000,
"line_mean": 28.2972972973,
"line_max": 244,
"alpha_frac": 0.7029520295,
"autogenerated": false,
"ratio": 3.335384615384615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4538336644884615,
"avg_score": null,
"num_lines": null
} |