repo_name (string, 7-90 chars) | path (string, 5-191 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 976-581k chars) | license (string, 15 classes)
---|---|---|---|---|---|
seaotterman/tensorflow | tensorflow/examples/learn/iris_run_config.py | 86 | 2087 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with run config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# You can define your configuration by providing a RunConfig object to the
# estimator to control session configurations, e.g. num_cores
# and gpu_memory_fraction.
run_config = tf.contrib.learn.estimators.RunConfig(
num_cores=3, gpu_memory_fraction=0.6)
# Build a 3-layer DNN with 10, 20, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
config=run_config)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
sinhrks/expandas | pandas_ml/skaccessors/test/test_svm.py | 2 | 2995 | #!/usr/bin/env python
import pytest
import numpy as np
import sklearn.datasets as datasets
import sklearn.svm as svm
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestSVM(tm.TestCase):
def test_objectmapper(self):
df = pdml.ModelFrame([])
self.assertIs(df.svm.SVC, svm.SVC)
self.assertIs(df.svm.LinearSVC, svm.LinearSVC)
self.assertIs(df.svm.NuSVC, svm.NuSVC)
self.assertIs(df.svm.SVR, svm.SVR)
self.assertIs(df.svm.NuSVR, svm.NuSVR)
self.assertIs(df.svm.OneClassSVM, svm.OneClassSVM)
def test_l1_min_c(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
result = df.svm.l1_min_c()
expected = svm.l1_min_c(iris.data, iris.target)
self.assertAlmostEqual(result, expected)
@pytest.mark.parametrize("algo", ['SVR', 'NuSVR'])
def test_Regressions_curve(self, algo):
# http://scikit-learn.org/stable/auto_examples/plot_kernel_ridge_regression.html
X = 5 * np.random.rand(1000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(X.shape[0] // 5))
df = pdml.ModelFrame(data=X, target=y)
mod1 = getattr(df.svm, algo)()
mod2 = getattr(svm, algo)()
df.fit(mod1)
mod2.fit(X, y)
result = df.predict(mod1)
expected = mod2.predict(X)
self.assertIsInstance(result, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(result.values, expected)
self.assertIsInstance(df.predicted, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(df.predicted.values, expected)
@pytest.mark.parametrize("algo", ['SVR', 'NuSVR'])
def test_Regressions_iris(self, algo):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
mod1 = getattr(df.svm, algo)()
mod2 = getattr(svm, algo)()
df.fit(mod1)
mod2.fit(iris.data, iris.target)
result = df.predict(mod1)
expected = mod2.predict(iris.data)
self.assertIsInstance(result, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(result.values, expected)
self.assertIsInstance(df.predicted, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(df.predicted.values, expected)
@pytest.mark.parametrize("algo", ['LinearSVC', 'NuSVC'])
def test_Classifications(self, algo):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
mod1 = getattr(df.svm, algo)(random_state=self.random_state)
mod2 = getattr(svm, algo)(random_state=self.random_state)
df.fit(mod1)
mod2.fit(iris.data, iris.target)
result = df.predict(mod1)
expected = mod2.predict(iris.data)
self.assertIsInstance(result, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(result.values, expected)
| bsd-3-clause |
arahuja/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machine classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
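# Illustrative sketch (not part of the original example): the per-point loop above can be
# replaced with a single vectorized call; np.c_ stacks the meshgrid coordinates column-wise.
# This should reproduce the Z filled in by the loop above, up to floating-point noise.
Z_vectorized = clf.decision_function(np.c_[X1.ravel(), X2.ravel()]).reshape(X1.shape)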
| bsd-3-clause |
vybstat/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
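# Illustrative sketch (not part of the upstream test suite): how IncrementalPCA is typically
# driven with `partial_fit` over chunks that never have to be in memory at once.  The data
# and chunk size below are hypothetical; the pattern mirrors test_incremental_pca_partial_fit.
if __name__ == "__main__":
    demo_rng = np.random.RandomState(0)
    X_stream = demo_rng.randn(500, 10)
    ipca_demo = IncrementalPCA(n_components=3)
    for start in range(0, X_stream.shape[0], 100):
        ipca_demo.partial_fit(X_stream[start:start + 100])
    print("explained variance ratios:", ipca_demo.explained_variance_ratio_)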
| bsd-3-clause |
scipy/scipy | scipy/signal/bsplines.py | 12 | 19509 | from numpy import (logical_and, asarray, pi, zeros_like,
piecewise, array, arctan2, tan, zeros, arange, floor)
from numpy.core.umath import (sqrt, exp, greater, less, cos, add, sin,
less_equal, greater_equal)
# From splinemodule.c
from .spline import cspline2d, sepfir2d
from scipy.special import comb
from scipy._lib._util import float_factorial
__all__ = ['spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic',
'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval']
def spline_filter(Iin, lmbda=5.0):
"""Smoothing spline (cubic) filtering of a rank-2 array.
Filter an input data set, `Iin`, using a (cubic) smoothing spline of
fall-off `lmbda`.
Parameters
----------
Iin : array_like
input data set
lmbda : float, optional
spline smoothing fall-off value, default is `5.0`.
Returns
-------
res : ndarray
filtered input data
Examples
--------
We can filter a multi-dimensional signal (e.g., a 2D image) using a cubic
B-spline filter:
>>> from scipy.signal import spline_filter
>>> import matplotlib.pyplot as plt
>>> orig_img = np.eye(20) # create an image
>>> orig_img[10, :] = 1.0
>>> sp_filter = spline_filter(orig_img, lmbda=0.1)
>>> f, ax = plt.subplots(1, 2, sharex=True)
>>> for ind, data in enumerate([[orig_img, "original image"],
... [sp_filter, "spline filter"]]):
... ax[ind].imshow(data[0], cmap='gray_r')
... ax[ind].set_title(data[1])
>>> plt.tight_layout()
>>> plt.show()
"""
intype = Iin.dtype.char
hcol = array([1.0, 4.0, 1.0], 'f') / 6.0
if intype in ['F', 'D']:
Iin = Iin.astype('F')
ckr = cspline2d(Iin.real, lmbda)
cki = cspline2d(Iin.imag, lmbda)
outr = sepfir2d(ckr, hcol, hcol)
outi = sepfir2d(cki, hcol, hcol)
out = (outr + 1j * outi).astype(intype)
elif intype in ['f', 'd']:
ckr = cspline2d(Iin, lmbda)
out = sepfir2d(ckr, hcol, hcol)
out = out.astype(intype)
else:
raise TypeError("Invalid data type for Iin")
return out
_splinefunc_cache = {}
def _bspline_piecefunctions(order):
"""Returns the function defined over the left-side pieces for a bspline of
a given order.
The 0th piece is the first one less than 0. The last piece is a function
identical to 0 (returned as the constant 0). (There are order//2 + 2 total
pieces).
Also returns the condition functions that when evaluated return boolean
arrays for use with `numpy.piecewise`.
"""
try:
return _splinefunc_cache[order]
except KeyError:
pass
def condfuncgen(num, val1, val2):
if num == 0:
return lambda x: logical_and(less_equal(x, val1),
greater_equal(x, val2))
elif num == 2:
return lambda x: less_equal(x, val2)
else:
return lambda x: logical_and(less(x, val1),
greater_equal(x, val2))
last = order // 2 + 2
if order % 2:
startbound = -1.0
else:
startbound = -0.5
condfuncs = [condfuncgen(0, 0, startbound)]
bound = startbound
for num in range(1, last - 1):
condfuncs.append(condfuncgen(1, bound, bound - 1))
bound = bound - 1
condfuncs.append(condfuncgen(2, 0, -(order + 1) / 2.0))
# final value of bound is used in piecefuncgen below
# the functions to evaluate are taken from the left-hand side
# in the general expression derived from the central difference
# operator (because they involve fewer terms).
fval = float_factorial(order)
def piecefuncgen(num):
Mk = order // 2 - num
if (Mk < 0):
return 0 # final function is 0
coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
for k in range(Mk + 1)]
shifts = [-bound - k for k in range(Mk + 1)]
def thefunc(x):
res = 0.0
for k in range(Mk + 1):
res += coeffs[k] * (x + shifts[k]) ** order
return res
return thefunc
funclist = [piecefuncgen(k) for k in range(last)]
_splinefunc_cache[order] = (funclist, condfuncs)
return funclist, condfuncs
def bspline(x, n):
"""B-spline basis function of order n.
Parameters
----------
x : array_like
a knot vector
n : int
The order of the spline. Must be non-negative, i.e., n >= 0
Returns
-------
res : ndarray
B-spline basis function values
See Also
--------
cubic : A cubic B-spline.
quadratic : A quadratic B-spline.
Notes
-----
Uses numpy.piecewise and automatic function-generator.
Examples
--------
We can calculate B-Spline basis function of several orders:
>>> from scipy.signal import bspline, cubic, quadratic
>>> bspline(0.0, 1)
1
>>> knots = [-1.0, 0.0, -1.0]
>>> bspline(knots, 2)
array([0.125, 0.75, 0.125])
>>> np.array_equal(bspline(knots, 2), quadratic(knots))
True
>>> np.array_equal(bspline(knots, 3), cubic(knots))
True
"""
ax = -abs(asarray(x))
# number of pieces on the left-side is (n+1)/2
funclist, condfuncs = _bspline_piecefunctions(n)
condlist = [func(ax) for func in condfuncs]
return piecewise(ax, condlist, funclist)
def gauss_spline(x, n):
r"""Gaussian approximation to B-spline basis function of order n.
Parameters
----------
x : array_like
a knot vector
n : int
The order of the spline. Must be non-negative, i.e., n >= 0
Returns
-------
res : ndarray
B-spline basis function values approximated by a zero-mean Gaussian
function.
Notes
-----
The B-spline basis function can be approximated well by a zero-mean
Gaussian function with variance equal to :math:`\sigma^2 = (n+1)/12`
for large `n`:
.. math:: \frac{1}{\sqrt{2\pi\sigma^2}}\exp\left(-\frac{x^2}{2\sigma^2}\right)
References
----------
.. [1] Bouma H., Vilanova A., Bescos J.O., ter Haar Romeny B.M., Gerritsen
F.A. (2007) Fast and Accurate Gaussian Derivatives Based on B-Splines. In:
Sgallari F., Murli A., Paragios N. (eds) Scale Space and Variational
Methods in Computer Vision. SSVM 2007. Lecture Notes in Computer
Science, vol 4485. Springer, Berlin, Heidelberg
.. [2] http://folk.uio.no/inf3330/scripting/doc/python/SciPy/tutorial/old/node24.html
Examples
--------
We can calculate B-Spline basis functions approximated by a Gaussian
distribution:
>>> from scipy.signal import gauss_spline, bspline
>>> knots = np.array([-1.0, 0.0, -1.0])
>>> gauss_spline(knots, 3)
array([0.15418033, 0.6909883, 0.15418033]) # may vary
>>> bspline(knots, 3)
array([0.16666667, 0.66666667, 0.16666667]) # may vary
"""
x = asarray(x)
signsq = (n + 1) / 12.0
return 1 / sqrt(2 * pi * signsq) * exp(-x ** 2 / 2 / signsq)
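# Illustrative sketch (not part of the upstream module): `gauss_spline` approximates the exact
# B-spline basis function with a zero-mean Gaussian of variance (n + 1) / 12, so for moderate
# orders the two curves differ only by a few hundredths in absolute value.  The helper below is
# hypothetical and only reports the maximum absolute difference on a small grid.
def _gauss_spline_max_abs_error(n=3, num=101):
    xs = arange(num) / (num - 1.0) * 4.0 - 2.0  # evenly spaced grid on [-2, 2]
    return abs(gauss_spline(xs, n) - bspline(xs, n)).max()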
def cubic(x):
"""A cubic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 3)``.
Parameters
----------
x : array_like
a knot vector
Returns
-------
res : ndarray
Cubic B-spline basis function values
See Also
--------
bspline : B-spline basis function of order n
quadratic : A quadratic B-spline.
Examples
--------
We can calculate B-Spline basis function of several orders:
>>> from scipy.signal import bspline, cubic, quadratic
>>> bspline(0.0, 1)
1
>>> knots = [-1.0, 0.0, -1.0]
>>> bspline(knots, 2)
array([0.125, 0.75, 0.125])
>>> np.array_equal(bspline(knots, 2), quadratic(knots))
True
>>> np.array_equal(bspline(knots, 3), cubic(knots))
True
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 1)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 2.0 / 3 - 1.0 / 2 * ax1 ** 2 * (2 - ax1)
cond2 = ~cond1 & less(ax, 2)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = 1.0 / 6 * (2 - ax2) ** 3
return res
def quadratic(x):
"""A quadratic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``.
Parameters
----------
x : array_like
a knot vector
Returns
-------
res : ndarray
Quadratic B-spline basis function values
See Also
--------
bspline : B-spline basis function of order n
cubic : A cubic B-spline.
Examples
--------
We can calculate B-Spline basis function of several orders:
>>> from scipy.signal import bspline, cubic, quadratic
>>> bspline(0.0, 1)
1
>>> knots = [-1.0, 0.0, -1.0]
>>> bspline(knots, 2)
array([0.125, 0.75, 0.125])
>>> np.array_equal(bspline(knots, 2), quadratic(knots))
True
>>> np.array_equal(bspline(knots, 3), cubic(knots))
True
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 0.5)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 0.75 - ax1 ** 2
cond2 = ~cond1 & less(ax, 1.5)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = (ax2 - 1.5) ** 2 / 2.0
return res
def _coeff_smooth(lam):
xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam)
omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi))
rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam)
rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi)
return rho, omeg
def _hc(k, cs, rho, omega):
return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) *
greater(k, -1))
def _hs(k, cs, rho, omega):
c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) /
(1 - 2 * rho * rho * cos(2 * omega) + rho ** 4))
gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega)
ak = abs(k)
return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak))
def _cubic_smooth_coeff(signal, lamb):
rho, omega = _coeff_smooth(lamb)
cs = 1 - 2 * rho * cos(omega) + rho * rho
K = len(signal)
yp = zeros((K,), signal.dtype.char)
k = arange(K)
yp[0] = (_hc(0, cs, rho, omega) * signal[0] +
add.reduce(_hc(k + 1, cs, rho, omega) * signal))
yp[1] = (_hc(0, cs, rho, omega) * signal[0] +
_hc(1, cs, rho, omega) * signal[1] +
add.reduce(_hc(k + 2, cs, rho, omega) * signal))
for n in range(2, K):
yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] -
rho * rho * yp[n - 2])
y = zeros((K,), signal.dtype.char)
y[K - 1] = add.reduce((_hs(k, cs, rho, omega) +
_hs(k + 1, cs, rho, omega)) * signal[::-1])
y[K - 2] = add.reduce((_hs(k - 1, cs, rho, omega) +
_hs(k + 2, cs, rho, omega)) * signal[::-1])
for n in range(K - 3, -1, -1):
y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] -
rho * rho * y[n + 2])
return y
def _cubic_coeff(signal):
zi = -2 + sqrt(3)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 6.0
def _quadratic_coeff(signal):
zi = -3 + 2 * sqrt(2.0)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype.char)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 8.0
def cspline1d(signal, lamb=0.0):
"""
Compute cubic spline coefficients for rank-1 array.
Find the cubic spline coefficients for a 1-D signal assuming
mirror-symmetric boundary conditions. To obtain the signal back from the
spline representation mirror-symmetric-convolve these coefficients with a
length 3 FIR window [1.0, 4.0, 1.0]/ 6.0 .
Parameters
----------
signal : ndarray
A rank-1 array representing samples of a signal.
lamb : float, optional
Smoothing coefficient, default is 0.0.
Returns
-------
c : ndarray
Cubic spline coefficients.
See Also
--------
cspline1d_eval : Evaluate a cubic spline at the new set of points.
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a cubic spline:
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import cspline1d, cspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = cspline1d_eval(cspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
if lamb != 0.0:
return _cubic_smooth_coeff(signal, lamb)
else:
return _cubic_coeff(signal)
def qspline1d(signal, lamb=0.0):
"""Compute quadratic spline coefficients for rank-1 array.
Parameters
----------
signal : ndarray
A rank-1 array representing samples of a signal.
lamb : float, optional
Smoothing coefficient (must be zero for now).
Returns
-------
c : ndarray
Quadratic spline coefficients.
See Also
--------
qspline1d_eval : Evaluate a quadratic spline at the new set of points.
Notes
-----
Find the quadratic spline coefficients for a 1-D signal assuming
mirror-symmetric boundary conditions. To obtain the signal back from the
spline representation mirror-symmetric-convolve these coefficients with a
length 3 FIR window [1.0, 6.0, 1.0]/ 8.0 .
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a quadratic spline:
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import qspline1d, qspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = qspline1d_eval(qspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
if lamb != 0.0:
raise ValueError("Smoothing quadratic splines not supported yet.")
else:
return _quadratic_coeff(signal)
def cspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a cubic spline at the new set of points.
`dx` is the old sample-spacing while `x0` was the old origin. In
other words, the old sample points (knot points) for which the `cj`
represent spline coefficients were at equally-spaced points of::
oldx = x0 + j*dx  j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
Parameters
----------
cj : ndarray
cubic spline coefficients
newx : ndarray
New set of points.
dx : float, optional
Old sample-spacing, the default value is 1.0.
x0 : int, optional
Old origin, the default value is 0.
Returns
-------
res : ndarray
Evaluated cubic spline points.
See Also
--------
cspline1d : Compute cubic spline coefficients for rank-1 array.
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a cubic spline:
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import cspline1d, cspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = cspline1d_eval(cspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
newx = (asarray(newx) - x0) / float(dx)
res = zeros_like(newx, dtype=cj.dtype)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = cspline1d_eval(cj, -newx[cond1])
res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx, dtype=cj.dtype)
jlower = floor(newx - 2).astype(int) + 1
for i in range(4):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * cubic(newx - thisj)
res[cond3] = result
return res
def qspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a quadratic spline at the new set of points.
Parameters
----------
cj : ndarray
Quadratic spline coefficients
newx : ndarray
New set of points.
dx : float, optional
Old sample-spacing, the default value is 1.0.
x0 : int, optional
Old origin, the default value is 0.
Returns
-------
res : ndarray
Evaluated quadratic spline points.
See Also
--------
qspline1d : Compute quadratic spline coefficients for rank-1 array.
Notes
-----
`dx` is the old sample-spacing while `x0` was the old origin. In
other words, the old sample points (knot points) for which the `cj`
represent spline coefficients were at equally-spaced points of::
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a quadratic spline:
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import qspline1d, qspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = qspline1d_eval(qspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
newx = (asarray(newx) - x0) / dx
res = zeros_like(newx)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = qspline1d_eval(cj, -newx[cond1])
res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx)
jlower = floor(newx - 1.5).astype(int) + 1
for i in range(3):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * quadratic(newx - thisj)
res[cond3] = result
return res
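# Illustrative sketch (not part of the upstream module): with the default ``lamb=0`` the cubic
# spline interpolates its samples, so evaluating the coefficients back at the interior knot
# positions should reproduce the original signal up to floating-point error (the two endpoints
# use clipped indices and are only approximate).  The demo data below is hypothetical.
if __name__ == "__main__":
    import numpy as np
    demo_sig = np.cos(np.linspace(0.0, 3.0, 40))
    demo_cj = cspline1d(demo_sig)
    demo_back = cspline1d_eval(demo_cj, np.arange(len(demo_sig), dtype=float))
    print("max interior reconstruction error:",
          np.max(np.abs(demo_back[1:-1] - demo_sig[1:-1])))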
| bsd-3-clause |
mmaraya/nd101 | ch02/lesson04/and.perceptron.py | 1 | 1065 | #!/usr/bin/env python
import pandas as pd
# Set weight1, weight2, and bias
weight1 = 0.5
weight2 = 0.5
bias = -1.0
# DON'T CHANGE ANYTHING BELOW
# Inputs and outputs
test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]
correct_outputs = [False, False, False, True]
outputs = []
# Generate and check output
for test_input, correct_output in zip(test_inputs, correct_outputs):
linear_combination = weight1 * test_input[0] + weight2 * test_input[1] + bias
output = int(linear_combination >= 0)
is_correct_string = 'Yes' if output == correct_output else 'No'
outputs.append([test_input[0], test_input[1], linear_combination, output, is_correct_string])
# Print output
num_wrong = len([output[4] for output in outputs if output[4] == 'No'])
output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct'])
if not num_wrong:
print('Nice! You got it all correct.\n')
else:
print('You got {} wrong. Keep trying!\n'.format(num_wrong))
print(output_frame.to_string(index=False))
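# Illustrative check (not part of the original exercise): with weight1 = weight2 = 0.5 and
# bias = -1.0, only the (1, 1) input reaches the activation threshold of 0, which is exactly
# the AND truth table.  These weights are one valid choice, not the only one.
assert 0.5 * 1 + 0.5 * 1 + (-1.0) >= 0   # (1, 1) -> True
assert 0.5 * 1 + 0.5 * 0 + (-1.0) < 0    # (1, 0) -> False
assert 0.5 * 0 + 0.5 * 1 + (-1.0) < 0    # (0, 1) -> False
assert 0.5 * 0 + 0.5 * 0 + (-1.0) < 0    # (0, 0) -> False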
| mit |
mahak/spark | python/pyspark/pandas/base.py | 3 | 55960 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Base and utility classes for pandas-on-Spark objects.
"""
from abc import ABCMeta, abstractmethod
from functools import wraps, partial
from itertools import chain
from typing import Any, Callable, Optional, Sequence, Tuple, Union, cast, TYPE_CHECKING
import numpy as np
import pandas as pd # noqa: F401
from pandas.api.types import is_list_like, CategoricalDtype
from pyspark.sql import functions as F, Column, Window
from pyspark.sql.types import (
DoubleType,
FloatType,
LongType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, Dtype, IndexOpsLike, Label, SeriesOrIndex
from pyspark.pandas.config import get_option, option_context
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
NATURAL_ORDER_COLUMN_NAME,
SPARK_DEFAULT_INDEX_NAME,
)
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.spark.accessors import SparkIndexOpsMethods
from pyspark.pandas.typedef import extension_dtypes
from pyspark.pandas.utils import (
combine_frames,
same_anchor,
scol_for,
validate_axis,
ERROR_MESSAGE_CANNOT_COMBINE,
)
from pyspark.pandas.frame import DataFrame
if TYPE_CHECKING:
from pyspark.sql._typing import ColumnOrName # noqa: F401 (SPARK-34943)
from pyspark.pandas.data_type_ops.base import DataTypeOps # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
def should_alignment_for_column_op(self: SeriesOrIndex, other: SeriesOrIndex) -> bool:
from pyspark.pandas.series import Series
if isinstance(self, Series) and isinstance(other, Series):
return not same_anchor(self, other)
else:
return self._internal.spark_frame is not other._internal.spark_frame
def align_diff_index_ops(
func: Callable[..., Column], this_index_ops: SeriesOrIndex, *args: Any
) -> SeriesOrIndex:
"""
Align the `IndexOpsMixin` objects and apply the function.
Parameters
----------
func : The function to apply
this_index_ops : IndexOpsMixin
A base `IndexOpsMixin` object
args : list of other arguments including other `IndexOpsMixin` objects
Returns
-------
`Index` if all `this_index_ops` and arguments are `Index`; otherwise `Series`
"""
from pyspark.pandas.indexes import Index
from pyspark.pandas.series import Series, first_series
cols = [arg for arg in args if isinstance(arg, IndexOpsMixin)]
if isinstance(this_index_ops, Series) and all(isinstance(col, Series) for col in cols):
combined = combine_frames(
this_index_ops.to_frame(),
*[cast(Series, col).rename(i) for i, col in enumerate(cols)],
how="full"
)
return column_op(func)(
combined["this"]._psser_for(combined["this"]._internal.column_labels[0]),
*[
combined["that"]._psser_for(label)
for label in combined["that"]._internal.column_labels
]
).rename(this_index_ops.name)
else:
# This could cause as many counts, reset_index calls, joins for combining
# as the number of `Index`s in `args`. So far it's fine since we can assume the ops
# only work between at most two `Index`s. We might need to fix it in the future.
self_len = len(this_index_ops)
if any(len(col) != self_len for col in args if isinstance(col, IndexOpsMixin)):
raise ValueError("operands could not be broadcast together with shapes")
with option_context("compute.default_index_type", "distributed-sequence"):
if isinstance(this_index_ops, Index) and all(isinstance(col, Index) for col in cols):
return Index(
column_op(func)(
this_index_ops.to_series().reset_index(drop=True),
*[
arg.to_series().reset_index(drop=True)
if isinstance(arg, Index)
else arg
for arg in args
]
).sort_index(),
name=this_index_ops.name,
)
elif isinstance(this_index_ops, Series):
this = cast(DataFrame, this_index_ops.reset_index())
that = [
cast(Series, col.to_series() if isinstance(col, Index) else col)
.rename(i)
.reset_index(drop=True)
for i, col in enumerate(cols)
]
combined = combine_frames(this, *that, how="full").sort_index()
combined = combined.set_index(
combined._internal.column_labels[: this_index_ops._internal.index_level]
)
combined.index.names = this_index_ops._internal.index_names
return column_op(func)(
first_series(combined["this"]),
*[
combined["that"]._psser_for(label)
for label in combined["that"]._internal.column_labels
]
).rename(this_index_ops.name)
else:
this = cast(Index, this_index_ops).to_frame().reset_index(drop=True)
that_series = next(col for col in cols if isinstance(col, Series))
that_frame = that_series._psdf[
[
cast(Series, col.to_series() if isinstance(col, Index) else col).rename(i)
for i, col in enumerate(cols)
]
]
combined = combine_frames(this, that_frame.reset_index()).sort_index()
self_index = (
combined["this"].set_index(combined["this"]._internal.column_labels).index
)
other = combined["that"].set_index(
combined["that"]._internal.column_labels[: that_series._internal.index_level]
)
other.index.names = that_series._internal.index_names
return column_op(func)(
self_index,
*[
other._psser_for(label)
for label, col in zip(other._internal.column_labels, cols)
]
).rename(that_series.name)
def booleanize_null(scol: Column, f: Callable[..., Column]) -> Column:
"""
Booleanize Null in Spark Column
"""
comp_ops = [
getattr(Column, "__{}__".format(comp_op))
for comp_op in ["eq", "ne", "lt", "le", "ge", "gt"]
]
if f in comp_ops:
# if `f` is "!=", fill null with True otherwise False
filler = f == Column.__ne__
scol = F.when(scol.isNull(), filler).otherwise(scol)
return scol
def column_op(f: Callable[..., Column]) -> Callable[..., SeriesOrIndex]:
"""
A decorator that wraps APIs taking/returning Spark Column so that pandas-on-Spark Series can be
supported too. If this decorator is used for the `f` function that takes Spark Column and
returns Spark Column, decorated `f` takes pandas-on-Spark Series as well and returns
pandas-on-Spark Series.
:param f: a function that takes Spark Column and returns Spark Column.
:param self: pandas-on-Spark Series
:param args: arguments that the function `f` takes.
"""
@wraps(f)
def wrapper(self: SeriesOrIndex, *args: Any) -> SeriesOrIndex:
from pyspark.pandas.indexes.base import Index
from pyspark.pandas.series import Series
# It is possible for the function `f` to take arguments other than Spark Column.
# To cover this case, explicitly check if the argument is a pandas-on-Spark Series and
# extract the Spark Column. Other arguments are used as they are.
cols = [arg for arg in args if isinstance(arg, (Series, Index))]
if all(not should_alignment_for_column_op(self, col) for col in cols):
# Same DataFrame anchors
scol = f(
self.spark.column,
*[arg.spark.column if isinstance(arg, IndexOpsMixin) else arg for arg in args]
)
field = InternalField.from_struct_field(
self._internal.spark_frame.select(scol).schema[0],
use_extension_dtypes=any(
isinstance(col.dtype, extension_dtypes) for col in [self] + cols
),
)
if not field.is_extension_dtype:
scol = booleanize_null(scol, f).alias(field.name)
if isinstance(self, Series) or not any(isinstance(col, Series) for col in cols):
index_ops = self._with_new_scol(scol, field=field)
else:
psser = next(col for col in cols if isinstance(col, Series))
index_ops = psser._with_new_scol(scol, field=field)
elif get_option("compute.ops_on_diff_frames"):
index_ops = align_diff_index_ops(f, self, *args)
else:
raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)
if not all(self.name == col.name for col in cols):
index_ops = index_ops.rename(None)
return index_ops
return wrapper
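# Illustrative sketch (not part of the upstream module): `column_op` turns a function on Spark
# Columns into one that also accepts pandas-on-Spark Series/Index operands, handling naming and
# alignment.  The helper below is hypothetical and only shows the usage pattern; it assumes the
# two operands are anchored to the same DataFrame so no cross-frame alignment is triggered.
def _example_scaled_sum(left: SeriesOrIndex, right: SeriesOrIndex, factor: float = 2.0) -> SeriesOrIndex:
    return column_op(lambda l, r: (l + r) * SF.lit(factor))(left, right)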
def numpy_column_op(f: Callable[..., Column]) -> Callable[..., SeriesOrIndex]:
@wraps(f)
def wrapper(self: SeriesOrIndex, *args: Any) -> SeriesOrIndex:
# PySpark does not support NumPy type out of the box. For now, we convert NumPy types
# into some primitive types understandable in PySpark.
new_args = []
for arg in args:
# TODO: This is a quick hack to support NumPy type. We should revisit this.
if isinstance(self.spark.data_type, LongType) and isinstance(arg, np.timedelta64):
new_args.append(float(arg / np.timedelta64(1, "s")))
else:
new_args.append(arg)
return column_op(f)(self, *new_args)
return wrapper
class IndexOpsMixin(object, metaclass=ABCMeta):
"""common ops mixin to support a unified interface / docs for Series / Index
Assuming the following attributes, properties, and functions are defined.
"""
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@property
@abstractmethod
def _psdf(self) -> DataFrame:
pass
@abstractmethod
def _with_new_scol(
self: IndexOpsLike, scol: Column, *, field: Optional[InternalField] = None
) -> IndexOpsLike:
pass
@property
@abstractmethod
def _column_label(self) -> Optional[Label]:
pass
@property
@abstractmethod
def spark(self: IndexOpsLike) -> SparkIndexOpsMethods[IndexOpsLike]:
pass
@property
def _dtype_op(self) -> "DataTypeOps":
from pyspark.pandas.data_type_ops.base import DataTypeOps
return DataTypeOps(self.dtype, self.spark.data_type)
@abstractmethod
def copy(self: IndexOpsLike) -> IndexOpsLike:
pass
# arithmetic operators
def __neg__(self: IndexOpsLike) -> IndexOpsLike:
return cast(IndexOpsLike, column_op(Column.__neg__)(self))
def __add__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.add(self, other)
def __sub__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.sub(self, other)
def __mul__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.mul(self, other)
def __truediv__(self, other: Any) -> SeriesOrIndex:
"""
__truediv__ behaves differently in pandas and PySpark in several cases.
1. When dividing np.inf by zero, PySpark returns null whereas pandas returns np.inf
2. When dividing a positive number by zero, PySpark returns null whereas pandas returns np.inf
3. When dividing -np.inf by zero, PySpark returns null whereas pandas returns -np.inf
4. When dividing a negative number by zero, PySpark returns null whereas pandas returns -np.inf
+-------------------------------------------+
| dividend (divisor: 0) | PySpark | pandas |
|-----------------------|---------|---------|
| np.inf | null | np.inf |
| -np.inf | null | -np.inf |
| 10 | null | np.inf |
| -10 | null | -np.inf |
+-----------------------|---------|---------+
"""
return self._dtype_op.truediv(self, other)
def __mod__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.mod(self, other)
def __radd__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.radd(self, other)
def __rsub__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rsub(self, other)
def __rmul__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rmul(self, other)
def __rtruediv__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rtruediv(self, other)
def __floordiv__(self, other: Any) -> SeriesOrIndex:
"""
__floordiv__ behaves differently in pandas and PySpark in several cases.
1. When dividing np.inf by zero, PySpark returns null whereas pandas returns np.inf
2. When dividing a positive number by zero, PySpark returns null whereas pandas returns np.inf
3. When dividing -np.inf by zero, PySpark returns null whereas pandas returns -np.inf
4. When dividing a negative number by zero, PySpark returns null whereas pandas returns -np.inf
+-------------------------------------------+
| dividend (divisor: 0) | PySpark | pandas |
|-----------------------|---------|---------|
| np.inf | null | np.inf |
| -np.inf | null | -np.inf |
| 10 | null | np.inf |
| -10 | null | -np.inf |
+-----------------------|---------|---------+
"""
return self._dtype_op.floordiv(self, other)
def __rfloordiv__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rfloordiv(self, other)
def __rmod__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rmod(self, other)
def __pow__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.pow(self, other)
def __rpow__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rpow(self, other)
def __abs__(self: IndexOpsLike) -> IndexOpsLike:
return cast(IndexOpsLike, column_op(F.abs)(self))
# comparison operators
def __eq__(self, other: Any) -> SeriesOrIndex: # type: ignore[override]
return column_op(Column.__eq__)(self, other)
def __ne__(self, other: Any) -> SeriesOrIndex: # type: ignore[override]
return column_op(Column.__ne__)(self, other)
__lt__ = column_op(Column.__lt__)
__le__ = column_op(Column.__le__)
__ge__ = column_op(Column.__ge__)
__gt__ = column_op(Column.__gt__)
def __invert__(self: IndexOpsLike) -> IndexOpsLike:
return cast(IndexOpsLike, column_op(Column.__invert__)(self))
# `and`, `or`, `not` cannot be overloaded in Python,
# so use bitwise operators as boolean operators
def __and__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.__and__(self, other)
def __or__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.__or__(self, other)
def __rand__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.rand(self, other)
def __ror__(self, other: Any) -> SeriesOrIndex:
return self._dtype_op.ror(self, other)
def __len__(self) -> int:
return len(self._psdf)
# NDArray Compat
def __array_ufunc__(
self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any
) -> SeriesOrIndex:
from pyspark.pandas import numpy_compat
# Try dunder methods first.
result = numpy_compat.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
# After that, we try with PySpark APIs.
if result is NotImplemented:
result = numpy_compat.maybe_dispatch_ufunc_to_spark_func(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return cast(SeriesOrIndex, result)
else:
# TODO: support more APIs?
raise NotImplementedError(
"pandas-on-Spark objects currently do not support %s." % ufunc
)
@property
def dtype(self) -> Dtype:
"""Return the dtype object of the underlying data.
Examples
--------
>>> s = ps.Series([1, 2, 3])
>>> s.dtype
dtype('int64')
>>> s = ps.Series(list('abc'))
>>> s.dtype
dtype('O')
>>> s = ps.Series(pd.date_range('20130101', periods=3))
>>> s.dtype
dtype('<M8[ns]')
>>> s.rename("a").to_frame().set_index("a").index.dtype
dtype('<M8[ns]')
"""
return self._internal.data_fields[0].dtype
@property
def empty(self) -> bool:
"""
Returns true if the current object is empty. Otherwise, returns false.
>>> ps.range(10).id.empty
False
>>> ps.range(0).id.empty
True
>>> ps.DataFrame({}, index=list('abc')).index.empty
False
"""
return self._internal.resolved_copy.spark_frame.rdd.isEmpty()
@property
def hasnans(self) -> bool:
"""
Return True if it has any missing values. Otherwise, it returns False.
>>> ps.DataFrame({}, index=list('abc')).index.hasnans
False
>>> ps.Series(['a', None]).hasnans
True
>>> ps.Series([1.0, 2.0, np.nan]).hasnans
True
>>> ps.Series([1, 2, 3]).hasnans
False
>>> (ps.Series([1.0, 2.0, np.nan]) + 1).hasnans
True
>>> ps.Series([1, 2, 3]).rename("a").to_frame().set_index("a").index.hasnans
False
"""
sdf = self._internal.spark_frame
scol = self.spark.column
if isinstance(self.spark.data_type, (DoubleType, FloatType)):
return sdf.select(F.max(scol.isNull() | F.isnan(scol))).collect()[0][0]
else:
return sdf.select(F.max(scol.isNull())).collect()[0][0]
@property
def is_monotonic(self) -> bool:
"""
Return boolean if values in the object are monotonically increasing.
.. note:: the current implementation of is_monotonic requires shuffling
and aggregating multiple times to check the order locally and globally,
which is potentially expensive. In the case of a multi-index, all data are
transferred to a single node, which can easily cause an out-of-memory error currently.
.. note:: Disable the Spark config `spark.sql.optimizer.nestedSchemaPruning.enabled`
for multi-index if you're using pandas-on-Spark < 1.7.0 with PySpark 3.1.1.
Returns
-------
is_monotonic : bool
Examples
--------
>>> ser = ps.Series(['1/1/2018', '3/1/2018', '4/1/2018'])
>>> ser.is_monotonic
True
>>> df = ps.DataFrame({'dates': [None, '1/1/2018', '2/1/2018', '3/1/2018']})
>>> df.dates.is_monotonic
False
>>> df.index.is_monotonic
True
>>> ser = ps.Series([1])
>>> ser.is_monotonic
True
>>> ser = ps.Series([])
>>> ser.is_monotonic
True
>>> ser.rename("a").to_frame().set_index("a").index.is_monotonic
True
>>> ser = ps.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])
>>> ser.is_monotonic
False
>>> ser.index.is_monotonic
True
Support for MultiIndex
>>> midx = ps.MultiIndex.from_tuples(
... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')])
>>> midx # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'b'),
('y', 'c'),
('y', 'd'),
('z', 'e')],
)
>>> midx.is_monotonic
True
>>> midx = ps.MultiIndex.from_tuples(
... [('z', 'a'), ('z', 'b'), ('y', 'c'), ('y', 'd'), ('x', 'e')])
>>> midx # doctest: +SKIP
MultiIndex([('z', 'a'),
('z', 'b'),
('y', 'c'),
('y', 'd'),
('x', 'e')],
)
>>> midx.is_monotonic
False
"""
return self._is_monotonic("increasing")
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self) -> bool:
"""
Return boolean if values in the object are monotonically decreasing.
.. note:: the current implementation of is_monotonic_decreasing requires shuffling
and aggregating multiple times to check the order locally and globally,
which is potentially expensive. In the case of a multi-index, all data are
transferred to a single node, which can easily cause an out-of-memory error currently.
.. note:: Disable the Spark config `spark.sql.optimizer.nestedSchemaPruning.enabled`
for multi-index if you're using pandas-on-Spark < 1.7.0 with PySpark 3.1.1.
Returns
-------
is_monotonic : bool
Examples
--------
>>> ser = ps.Series(['4/1/2018', '3/1/2018', '1/1/2018'])
>>> ser.is_monotonic_decreasing
True
>>> df = ps.DataFrame({'dates': [None, '3/1/2018', '2/1/2018', '1/1/2018']})
>>> df.dates.is_monotonic_decreasing
False
>>> df.index.is_monotonic_decreasing
False
>>> ser = ps.Series([1])
>>> ser.is_monotonic_decreasing
True
>>> ser = ps.Series([])
>>> ser.is_monotonic_decreasing
True
>>> ser.rename("a").to_frame().set_index("a").index.is_monotonic_decreasing
True
>>> ser = ps.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])
>>> ser.is_monotonic_decreasing
True
>>> ser.index.is_monotonic_decreasing
False
Support for MultiIndex
>>> midx = ps.MultiIndex.from_tuples(
... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')])
>>> midx # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'b'),
('y', 'c'),
('y', 'd'),
('z', 'e')],
)
>>> midx.is_monotonic_decreasing
False
>>> midx = ps.MultiIndex.from_tuples(
... [('z', 'e'), ('z', 'd'), ('y', 'c'), ('y', 'b'), ('x', 'a')])
>>> midx # doctest: +SKIP
MultiIndex([('z', 'e'),
('z', 'd'),
('y', 'c'),
('y', 'b'),
('x', 'a')],
)
>>> midx.is_monotonic_decreasing
True
"""
return self._is_monotonic("decreasing")
def _is_locally_monotonic_spark_column(self, order: str) -> Column:
window = (
Window.partitionBy(F.col("__partition_id"))
.orderBy(NATURAL_ORDER_COLUMN_NAME)
.rowsBetween(-1, -1)
)
if order == "increasing":
return (F.col("__origin") >= F.lag(F.col("__origin"), 1).over(window)) & F.col(
"__origin"
).isNotNull()
else:
return (F.col("__origin") <= F.lag(F.col("__origin"), 1).over(window)) & F.col(
"__origin"
).isNotNull()
def _is_monotonic(self, order: str) -> bool:
assert order in ("increasing", "decreasing")
sdf = self._internal.spark_frame
sdf = (
sdf.select(
F.spark_partition_id().alias(
"__partition_id"
), # Make sure we use the same partition id in the whole job.
F.col(NATURAL_ORDER_COLUMN_NAME),
self.spark.column.alias("__origin"),
)
.select(
F.col("__partition_id"),
F.col("__origin"),
self._is_locally_monotonic_spark_column(order).alias(
"__comparison_within_partition"
),
)
.groupby(F.col("__partition_id"))
.agg(
F.min(F.col("__origin")).alias("__partition_min"),
F.max(F.col("__origin")).alias("__partition_max"),
F.min(F.coalesce(F.col("__comparison_within_partition"), SF.lit(True))).alias(
"__comparison_within_partition"
),
)
)
# Now we're windowing the aggregation results without partition specification.
# The number of rows here will be as the same of partitions, which is expected
# to be small.
window = Window.orderBy(F.col("__partition_id")).rowsBetween(-1, -1)
if order == "increasing":
comparison_col = F.col("__partition_min") >= F.lag(F.col("__partition_max"), 1).over(
window
)
else:
comparison_col = F.col("__partition_min") <= F.lag(F.col("__partition_max"), 1).over(
window
)
sdf = sdf.select(
comparison_col.alias("__comparison_between_partitions"),
F.col("__comparison_within_partition"),
)
ret = sdf.select(
F.min(F.coalesce(F.col("__comparison_between_partitions"), SF.lit(True)))
& F.min(F.coalesce(F.col("__comparison_within_partition"), SF.lit(True)))
).collect()[0][0]
if ret is None:
return True
else:
return ret
@property
def ndim(self) -> int:
"""
Return an int representing the number of array dimensions.
Return 1 for Series / Index / MultiIndex.
Examples
--------
For Series
>>> s = ps.Series([None, 1, 2, 3, 4], index=[4, 5, 2, 1, 8])
>>> s.ndim
1
For Index
>>> s.index.ndim
1
For MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [1, 1, 1, 1, 1, 2, 1, 2, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s.index.ndim
1
"""
return 1
def astype(self: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
"""
Cast a pandas-on-Spark object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> ser = ps.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
>>> ser.rename("a").to_frame().set_index("a").index.astype('int64')
Int64Index([1, 2], dtype='int64', name='a')
"""
return self._dtype_op.astype(self, dtype)
def isin(self: IndexOpsLike, values: Sequence[Any]) -> IndexOpsLike:
"""
Check whether `values` are contained in Series or Index.
Return a boolean Series or Index showing whether each element in the Series
matches an element in the passed sequence of `values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test.
Returns
-------
isin : Series (bool dtype) or Index (bool dtype)
Examples
--------
>>> s = ps.Series(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'], name='animal')
>>> s.isin(['cow', 'lama'])
0 True
1 True
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
0 True
1 False
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
>>> s.rename("a").to_frame().set_index("a").index.isin(['lama'])
Index([True, False, True, False, True, False], dtype='object', name='a')
"""
if not is_list_like(values):
raise TypeError(
"only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]".format(values_type=type(values).__name__)
)
values = values.tolist() if isinstance(values, np.ndarray) else list(values)
return self._with_new_scol(self.spark.column.isin([SF.lit(v) for v in values]))
def isnull(self: IndexOpsLike) -> IndexOpsLike:
"""
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as None or numpy.NaN, get mapped to True values.
Everything else gets mapped to False values. Characters such as empty strings '' or
numpy.inf are not considered NA values
(unless you set pandas.options.mode.use_inf_as_na = True).
Returns
-------
Series or Index : Mask of bool values for each element in Series
that indicates whether an element is an NA value.
Examples
--------
>>> ser = ps.Series([5, 6, np.NaN])
>>> ser.isna() # doctest: +NORMALIZE_WHITESPACE
0 False
1 False
2 True
dtype: bool
>>> ser.rename("a").to_frame().set_index("a").index.isna()
Index([False, False, True], dtype='object', name='a')
"""
from pyspark.pandas.indexes import MultiIndex
if isinstance(self, MultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
return self._dtype_op.isnull(self)
isna = isnull
def notnull(self: IndexOpsLike) -> IndexOpsLike:
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True.
Characters such as empty strings '' or numpy.inf are not considered NA values
(unless you set pandas.options.mode.use_inf_as_na = True).
NA values, such as None or numpy.NaN, get mapped to False values.
Returns
-------
Series or Index : Mask of bool values for each element in Series
that indicates whether an element is not an NA value.
Examples
--------
Show which entries in a Series are not NA.
>>> ser = ps.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
>>> ser.rename("a").to_frame().set_index("a").index.notna()
Index([True, True, False], dtype='object', name='a')
"""
from pyspark.pandas.indexes import MultiIndex
if isinstance(self, MultiIndex):
raise NotImplementedError("notna is not defined for MultiIndex")
return (~self.isnull()).rename(self.name) # type: ignore
notna = notnull
# TODO: axis, skipna, and many arguments should be implemented.
def all(self, axis: Axis = 0) -> bool:
"""
Return whether all elements are True.
Returns True unless there is at least one element within a series that is
False or equivalent (e.g. zero or empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
>>> ps.Series([True, True]).all()
True
>>> ps.Series([True, False]).all()
False
>>> ps.Series([0, 1]).all()
False
>>> ps.Series([1, 2, 3]).all()
True
>>> ps.Series([True, True, None]).all()
True
>>> ps.Series([True, False, None]).all()
False
>>> ps.Series([]).all()
True
>>> ps.Series([np.nan]).all()
True
>>> df = ps.Series([True, False, None]).rename("a").to_frame()
>>> df.set_index("a").index.all()
False
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
sdf = self._internal.spark_frame.select(self.spark.column)
col = scol_for(sdf, sdf.columns[0])
# Note that we're ignoring `None`s here for now.
# any and every were added as of Spark 3.0
# ret = sdf.select(F.expr("every(CAST(`%s` AS BOOLEAN))" % sdf.columns[0])).collect()[0][0]
# Here we use min as its alternative:
ret = sdf.select(F.min(F.coalesce(col.cast("boolean"), SF.lit(True)))).collect()[0][0]
if ret is None:
return True
else:
return ret
# TODO: axis, skipna, and many arguments should be implemented.
def any(self, axis: Axis = 0) -> bool:
"""
Return whether any element is True.
Returns False unless there is at least one element within a series that is
True or equivalent (e.g. non-zero or non-empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
>>> ps.Series([False, False]).any()
False
>>> ps.Series([True, False]).any()
True
>>> ps.Series([0, 0]).any()
False
>>> ps.Series([0, 1, 2]).any()
True
>>> ps.Series([False, False, None]).any()
False
>>> ps.Series([True, False, None]).any()
True
>>> ps.Series([]).any()
False
>>> ps.Series([np.nan]).any()
False
>>> df = ps.Series([True, False, None]).rename("a").to_frame()
>>> df.set_index("a").index.any()
True
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
sdf = self._internal.spark_frame.select(self.spark.column)
col = scol_for(sdf, sdf.columns[0])
# Note that we're ignoring `None`s here for now.
# any and every were added as of Spark 3.0
# ret = sdf.select(F.expr("any(CAST(`%s` AS BOOLEAN))" % sdf.columns[0])).collect()[0][0]
# Here we use max as its alternative:
ret = sdf.select(F.max(F.coalesce(col.cast("boolean"), SF.lit(False)))).collect()[0][0]
if ret is None:
return False
else:
return ret
# TODO: add frep and axis parameter
def shift(
self: IndexOpsLike, periods: int = 1, fill_value: Optional[Any] = None
) -> IndexOpsLike:
"""
Shift Series/Index by desired number of periods.
.. note:: the current implementation of shift uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of self. For numeric data, np.nan is used.
Returns
-------
Copy of input Series/Index, shifted.
Examples
--------
>>> df = ps.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]},
... columns=['Col1', 'Col2', 'Col3'])
>>> df.Col1.shift(periods=3)
0 NaN
1 NaN
2 NaN
3 10.0
4 20.0
Name: Col1, dtype: float64
>>> df.Col2.shift(periods=3, fill_value=0)
0 0
1 0
2 0
3 13
4 23
Name: Col2, dtype: int64
>>> df.index.shift(periods=3, fill_value=0)
Int64Index([0, 0, 0, 0, 1], dtype='int64')
"""
return self._shift(periods, fill_value).spark.analyzed
def _shift(
self: IndexOpsLike,
periods: int,
fill_value: Any,
*,
part_cols: Sequence["ColumnOrName"] = ()
) -> IndexOpsLike:
if not isinstance(periods, int):
raise TypeError("periods should be an int; however, got [%s]" % type(periods).__name__)
col = self.spark.column
window = (
Window.partitionBy(*part_cols)
.orderBy(NATURAL_ORDER_COLUMN_NAME)
.rowsBetween(-periods, -periods)
)
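# The lag is computed over a single global window ordered by the natural row order,
# so the value `periods` rows back is pulled forward; rows with no such predecessor
# (e.g. the first `periods` rows for a positive shift) fall through to `fill_value` below.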
lag_col = F.lag(col, periods).over(window)
col = F.when(lag_col.isNull() | F.isnan(lag_col), fill_value).otherwise(lag_col)
return self._with_new_scol(col, field=self._internal.data_fields[0].copy(nullable=True))
# TODO: Update Documentation for Bins Parameter when its supported
def value_counts(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins: None = None,
dropna: bool = True,
) -> "Series":
"""
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : boolean, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : boolean, default True
Sort by values.
ascending : boolean, default False
Sort in ascending order.
bins : Not Yet Supported
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
Examples
--------
For Series
>>> df = ps.DataFrame({'x':[0, 0, 1, 1, 1, np.nan]})
>>> df.x.value_counts() # doctest: +NORMALIZE_WHITESPACE
1.0 3
0.0 2
Name: x, dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> df.x.value_counts(normalize=True) # doctest: +NORMALIZE_WHITESPACE
1.0 0.6
0.0 0.4
Name: x, dtype: float64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> df.x.value_counts(dropna=False) # doctest: +NORMALIZE_WHITESPACE
1.0 3
0.0 2
NaN 1
Name: x, dtype: int64
For Index
>>> idx = ps.Index([3, 1, 2, 3, 4, np.nan])
>>> idx
Float64Index([3.0, 1.0, 2.0, 3.0, 4.0, nan], dtype='float64')
>>> idx.value_counts().sort_index()
1.0 1
2.0 1
3.0 2
4.0 1
dtype: int64
**sort**
With `sort` set to `False`, the result would not be sorted by the counts.
>>> idx.value_counts(sort=False).sort_index()
1.0 1
2.0 1
3.0 2
4.0 1
dtype: int64
**normalize**
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> idx.value_counts(normalize=True).sort_index()
1.0 0.2
2.0 0.2
3.0 0.4
4.0 0.2
dtype: float64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> idx.value_counts(dropna=False).sort_index() # doctest: +SKIP
1.0 1
2.0 1
3.0 2
4.0 1
NaN 1
dtype: int64
For MultiIndex.
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [1, 1, 1, 1, 1, 2, 1, 2, 2]])
>>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s.index # doctest: +SKIP
MultiIndex([( 'lama', 'weight'),
( 'lama', 'weight'),
( 'lama', 'weight'),
( 'cow', 'weight'),
( 'cow', 'weight'),
( 'cow', 'length'),
('falcon', 'weight'),
('falcon', 'length'),
('falcon', 'length')],
)
>>> s.index.value_counts().sort_index()
(cow, length) 1
(cow, weight) 2
(falcon, length) 2
(falcon, weight) 1
(lama, weight) 3
dtype: int64
>>> s.index.value_counts(normalize=True).sort_index()
(cow, length) 0.111111
(cow, weight) 0.222222
(falcon, length) 0.222222
(falcon, weight) 0.111111
(lama, weight) 0.333333
dtype: float64
If Index has name, keep the name up.
>>> idx = ps.Index([0, 0, 0, 1, 1, 2, 3], name='pandas-on-Spark')
>>> idx.value_counts().sort_index()
0 3
1 2
2 1
3 1
Name: pandas-on-Spark, dtype: int64
"""
from pyspark.pandas.series import first_series
if bins is not None:
raise NotImplementedError("value_counts currently does not support bins")
if dropna:
sdf_dropna = self._internal.spark_frame.select(self.spark.column).dropna()
else:
sdf_dropna = self._internal.spark_frame.select(self.spark.column)
index_name = SPARK_DEFAULT_INDEX_NAME
column_name = self._internal.data_spark_column_names[0]
sdf = sdf_dropna.groupby(scol_for(sdf_dropna, column_name).alias(index_name)).count()
if sort:
if ascending:
sdf = sdf.orderBy(F.col("count"))
else:
sdf = sdf.orderBy(F.col("count").desc())
if normalize:
sum = sdf_dropna.count()
sdf = sdf.withColumn("count", F.col("count") / SF.lit(sum))
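# `sum` (shadowing the builtin) is the total number of rows considered after the
# optional dropna above, so the count column becomes a relative frequency.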
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, index_name)],
column_labels=self._internal.column_labels,
data_spark_columns=[scol_for(sdf, "count")],
column_label_names=self._internal.column_label_names,
)
return first_series(DataFrame(internal))
def nunique(self, dropna: bool = True, approx: bool = False, rsd: float = 0.05) -> int:
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : bool, default True
Don’t include NaN in the count.
approx: bool, default False
If False, will use the exact algorithm and return the exact number of unique values.
If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
for large amounts of data.
Note: This parameter is specific to pandas-on-Spark and is not found in pandas.
rsd: float, default 0.05
Maximum estimation error allowed in the HyperLogLog algorithm.
Note: Just like ``approx`` this parameter is specific to pandas-on-Spark.
Returns
-------
int
See Also
--------
DataFrame.nunique: Method nunique for DataFrame.
Series.count: Count non-NA/null observations in the Series.
Examples
--------
>>> ps.Series([1, 2, 3, np.nan]).nunique()
3
>>> ps.Series([1, 2, 3, np.nan]).nunique(dropna=False)
4
On big data, we recommend using the approximate algorithm to speed up this function.
The result will be very close to the exact unique count.
>>> ps.Series([1, 2, 3, np.nan]).nunique(approx=True)
3
>>> idx = ps.Index([1, 1, 2, None])
>>> idx
Float64Index([1.0, 1.0, 2.0, nan], dtype='float64')
>>> idx.nunique()
2
>>> idx.nunique(dropna=False)
3
"""
res = self._internal.spark_frame.select([self._nunique(dropna, approx, rsd)])
return res.collect()[0][0]
def _nunique(self, dropna: bool = True, approx: bool = False, rsd: float = 0.05) -> Column:
colname = self._internal.data_spark_column_names[0]
count_fn = cast(
Callable[[Column], Column],
partial(F.approx_count_distinct, rsd=rsd) if approx else F.countDistinct,
)
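# Both countDistinct and approx_count_distinct ignore nulls, so when dropna is False
# we add 1 to the result whenever the column contains at least one null.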
if dropna:
return count_fn(self.spark.column).alias(colname)
else:
return (
count_fn(self.spark.column)
+ F.when(
F.count(F.when(self.spark.column.isNull(), 1).otherwise(None)) >= 1, 1
).otherwise(0)
).alias(colname)
def take(self: IndexOpsLike, indices: Sequence[int]) -> IndexOpsLike:
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
Series
>>> psser = ps.Series([100, 200, 300, 400, 500])
>>> psser
0 100
1 200
2 300
3 400
4 500
dtype: int64
>>> psser.take([0, 2, 4]).sort_index()
0 100
2 300
4 500
dtype: int64
Index
>>> psidx = ps.Index([100, 200, 300, 400, 500])
>>> psidx
Int64Index([100, 200, 300, 400, 500], dtype='int64')
>>> psidx.take([0, 2, 4]).sort_values()
Int64Index([100, 300, 500], dtype='int64')
MultiIndex
>>> psmidx = ps.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("x", "c")])
>>> psmidx # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'b'),
('x', 'c')],
)
>>> psmidx.take([0, 2]) # doctest: +SKIP
MultiIndex([('x', 'a'),
('x', 'c')],
)
"""
if not is_list_like(indices) or isinstance(indices, (dict, set)):
raise TypeError("`indices` must be a list-like except dict or set")
if isinstance(self, ps.Series):
return cast(IndexOpsLike, self.iloc[indices])
else:
return cast(IndexOpsLike, self._psdf.iloc[indices].index)
def factorize(
self: IndexOpsLike, sort: bool = True, na_sentinel: Optional[int] = -1
) -> Tuple[IndexOpsLike, pd.Index]:
"""
Encode the object as an enumerated type or categorical variable.
This method is useful for obtaining a numeric representation of an
array when all that matters is identifying distinct values.
Parameters
----------
sort : bool, default True
na_sentinel : int or None, default -1
Value to mark "not found". If None, will not drop the NaN
from the uniques of the values.
Returns
-------
codes : Series or Index
A Series or Index that's an indexer into `uniques`.
``uniques.take(codes)`` will have the same values as `values`.
uniques : pd.Index
The unique valid values.
.. note ::
Even if there's a missing value in `values`, `uniques` will
*not* contain an entry for it.
Examples
--------
>>> psser = ps.Series(['b', None, 'a', 'c', 'b'])
>>> codes, uniques = psser.factorize()
>>> codes
0 1
1 -1
2 0
3 2
4 1
dtype: int32
>>> uniques
Index(['a', 'b', 'c'], dtype='object')
>>> codes, uniques = psser.factorize(na_sentinel=None)
>>> codes
0 1
1 3
2 0
3 2
4 1
dtype: int32
>>> uniques
Index(['a', 'b', 'c', None], dtype='object')
>>> codes, uniques = psser.factorize(na_sentinel=-2)
>>> codes
0 1
1 -2
2 0
3 2
4 1
dtype: int32
>>> uniques
Index(['a', 'b', 'c'], dtype='object')
For Index:
>>> psidx = ps.Index(['b', None, 'a', 'c', 'b'])
>>> codes, uniques = psidx.factorize()
>>> codes
Int64Index([1, -1, 0, 2, 1], dtype='int64')
>>> uniques
Index(['a', 'b', 'c'], dtype='object')
"""
from pyspark.pandas.series import first_series
assert (na_sentinel is None) or isinstance(na_sentinel, int)
assert sort is True
if isinstance(self.dtype, CategoricalDtype):
categories = self.dtype.categories
if len(categories) == 0:
scol = SF.lit(None)
else:
kvs = list(
chain(
*[
(SF.lit(code), SF.lit(category))
for code, category in enumerate(categories)
]
)
)
map_scol = F.create_map(*kvs)
scol = map_scol.getItem(self.spark.column)
codes, uniques = self._with_new_scol(
scol.alias(self._internal.data_spark_column_names[0])
).factorize(na_sentinel=na_sentinel)
return codes, uniques.astype(self.dtype)
uniq_sdf = self._internal.spark_frame.select(self.spark.column).distinct()
# Check the number of uniques and construct the sorted `uniques_list`
max_compute_count = get_option("compute.max_rows")
if max_compute_count is not None:
uniq_pdf = uniq_sdf.limit(max_compute_count + 1).toPandas()
if len(uniq_pdf) > max_compute_count:
raise ValueError(
"Current Series has more than {0} unique values. "
"Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' "
"to more than {0} rows. Note that, before changing the "
"'compute.max_rows', this operation is considerably expensive.".format(
max_compute_count
)
)
else:
uniq_pdf = uniq_sdf.toPandas()
# pandas takes both NaN and null in Spark to np.nan, so de-duplication is required
uniq_series = first_series(uniq_pdf).drop_duplicates()
uniques_list = uniq_series.tolist()
uniques_list = sorted(uniques_list, key=lambda x: (pd.isna(x), x))
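# The (pd.isna(x), x) key sorts missing values after all non-missing uniques, so the
# loop below assigns codes to the non-missing values in ascending order first.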
# Constructs `unique_to_code` mapping non-na unique to code
unique_to_code = {}
if na_sentinel is not None:
na_sentinel_code = na_sentinel
code = 0
for unique in uniques_list:
if pd.isna(unique):
if na_sentinel is None:
na_sentinel_code = code
else:
unique_to_code[unique] = code
code += 1
kvs = list(
chain(*([(SF.lit(unique), SF.lit(code)) for unique, code in unique_to_code.items()]))
)
if len(kvs) == 0: # uniques are all missing values
new_scol = SF.lit(na_sentinel_code)
else:
scol = self.spark.column
if isinstance(self.spark.data_type, (FloatType, DoubleType)):
cond = scol.isNull() | F.isnan(scol)
else:
cond = scol.isNull()
map_scol = F.create_map(*kvs)
null_scol = F.when(cond, SF.lit(na_sentinel_code))
new_scol = null_scol.otherwise(map_scol.getItem(scol))
codes = self._with_new_scol(new_scol.alias(self._internal.data_spark_column_names[0]))
if na_sentinel is not None:
# Drops the NaN from the uniques of the values
uniques_list = [x for x in uniques_list if not pd.isna(x)]
uniques = pd.Index(uniques_list)
return codes, uniques
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.base
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.base.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.base tests").getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.base,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
moutai/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 47 | 2495 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
useful in the case of class imbalance, giving a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
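# After row-wise normalization each row sums to 1: entry (i, j) is the fraction of
# samples of true class i that were predicted as class j.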
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
sppalkia/weld | python/grizzly/grizzly/seriesweld.py | 3 | 19541 | import pandas as pd
import grizzly_impl
from lazy_op import LazyOpResult, to_weld_type
from weld.weldobject import *
import utils
class SeriesWeld(LazyOpResult):
"""Summary
Attributes:
column_name (TYPE): Description
df (TYPE): Description
dim (int): Description
expr (TYPE): Description
weld_type (TYPE): Description
"""
def __init__(self, expr, weld_type, df=None, column_name=None, index_type=None, index_name=None):
"""Summary
TODO: Implement an actual Index Object like how Pandas does
Args:
expr (TYPE): Description
weld_type (TYPE): Description
df (None, optional): Description
column_name (None, optional): Description
"""
self.expr = expr
self.weld_type = weld_type
self.dim = 1
self.df = df
self.column_name = column_name
self.index_type = index_type
self.index_name = index_name
def __getitem__(self, key):
"""Summary
Args:
predicates (TYPE): Description
new_value (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(key, slice):
start = key.start
# TODO : We currently do nothing with step
step = key.step
stop = key.stop
if self.index_type is not None:
index_expr = grizzly_impl.get_field(self.expr, 0)
column_expr = grizzly_impl.get_field(self.expr, 1)
zip_expr = grizzly_impl.zip_columns([index_expr, column_expr])
sliced_expr = grizzly_impl.slice_vec(zip_expr, start, stop)
unzip_expr = grizzly_impl.unzip_columns(
sliced_expr,
[self.index_type, self.weld_type]
)
return SeriesWeld(
unzip_expr,
self.weld_type,
self.df,
self.column_name,
self.index_type,
self.index_name
)
else:
return SeriesWeld(
grizzly_impl.slice_vec(
self.expr,
start,
stop
)
)
else:
# By default we return as if the key were predicates to filter by
return self.filter(key)
def __setitem__(self, predicates, new_value):
"""Summary
Args:
predicates (TYPE): Description
new_value (TYPE): Description
Returns:
TYPE: Description
"""
if self.df is not None and self.column_name is not None:
self.df[self.column_name] = self.mask(predicates, new_value)
@property
def loc(self):
return WeldLocIndexer(
self
)
def __getattr__(self, key):
"""Summary
Args:
key (TYPE): Description
Returns:
TYPE: Description
Raises:
Exception: Description
"""
if key == 'str' and self.weld_type == WeldVec(WeldChar()):
return StringSeriesWeld(
self.expr,
self.weld_type,
self.df,
self.column_name
)
raise AttributeError("Attr %s does not exist" % key)
@property
def index(self):
if self.index_type is not None:
return SeriesWeld(
grizzly_impl.get_field(
self.expr,
0
),
self.index_type,
self.df,
self.index_name
)
# TODO : Make all series have a series attribute
raise Exception("No index present")
def evaluate(self, verbose=False, passes=None):
if self.index_type is not None:
index, column = LazyOpResult(
self.expr,
WeldStruct([WeldVec(self.index_type), WeldVec(self.weld_type)]),
0
).evaluate(verbose=verbose, passes=passes)
series = pd.Series(column, index)
series.index.rename(self.index_name, True)
return series
else:
column = LazyOpResult.evaluate(self, verbose=verbose, passes=passes)
return pd.Series(column)
def sort_values(self, ascending=False):
""" Sorts the values of this series
"""
if self.index_type is not None:
index_expr = grizzly_impl.get_field(self.expr, 0)
column_expr = grizzly_impl.get_field(self.expr, 1)
zip_expr = grizzly_impl.zip_columns([index_expr, column_expr])
result_expr = grizzly_impl.sort(zip_expr, 1, self.weld_type, ascending)
unzip_expr = grizzly_impl.unzip_columns(
result_expr,
[self.index_type, self.weld_type]
)
return SeriesWeld(
unzip_expr,
self.weld_type,
self.df,
self.column_name,
self.index_type,
self.index_name
)
else:
result_expr = grizzly_impl.sort(self.expr)
# TODO need to finish this
def unique(self):
"""Summary
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.unique(
self.expr,
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def lower(self):
"""Summary
Returns:
TYPE: Description
"""
# TODO : Bug in nested map operating on strings
# TODO : Check that self.weld_type is a string type
vectype = self.weld_type
if isinstance(vectype, WeldVec):
elem_type = vectype.elemType
if isinstance(elem_type, WeldChar):
return SeriesWeld(
grizzly_impl.to_lower(
self.expr,
elem_type
),
self.weld_type,
self.df,
self.column_name
)
raise Exception("Cannot call to_lower on non string type")
def contains(self, string):
"""Summary
Returns:
TYPE: Description
"""
# Check that self.weld_type is a string type
vectype = self.weld_type
if isinstance(vectype, WeldVec):
elem_type = vectype.elemType
if isinstance(elem_type, WeldChar):
return SeriesWeld(
grizzly_impl.contains(
self.expr,
elem_type,
string
),
WeldBit(),
self.df,
self.column_name
)
raise Exception("Cannot call contains on non string type")
def isin(self, ls):
if isinstance(ls, SeriesWeld):
if self.weld_type == ls.weld_type:
return SeriesWeld(
grizzly_impl.isin(self.expr,
ls.expr,
self.weld_type),
WeldBit(),
self.df,
self.column_name
)
raise Exception("Cannot call isin on different typed list")
def prod(self):
"""Summary
Returns:
TYPE: Description
"""
return LazyOpResult(
grizzly_impl.aggr(
self.expr,
"*",
1,
self.weld_type
),
self.weld_type,
0
)
def sum(self):
"""Summary
Returns:
TYPE: Description
"""
return LazyOpResult(
grizzly_impl.aggr(
self.expr,
"+",
0,
self.weld_type
),
self.weld_type,
0
)
def max(self):
"""Summary
Returns:
TYPE: Description
"""
pass
def min(self):
"""Summary
Returns:
TYPE: Description
"""
pass
def count(self):
"""Summary
Returns:
TYPE: Description
"""
return LazyOpResult(
grizzly_impl.count(
self.expr,
self.weld_type
),
WeldInt(),
0
)
def mask(self, predicates, new_value):
"""Summary
Args:
predicates (TYPE): Description
new_value (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(predicates, SeriesWeld):
predicates = predicates.expr
return SeriesWeld(
grizzly_impl.mask(
self.expr,
predicates,
new_value,
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def filter(self, predicates):
if isinstance(predicates, SeriesWeld):
predicates = predicates.expr
return SeriesWeld(
grizzly_impl.filter(
self.expr,
predicates,
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def add(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(other, SeriesWeld):
other = other.expr
return SeriesWeld(
grizzly_impl.element_wise_op(
self.expr,
other,
"+",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def __sub__(self, other):
# TODO subtraction without index variables
if self.index_type is not None:
index = grizzly_impl.get_field(self.expr, 0)
expr1 = grizzly_impl.get_field(self.expr, 1)
else:
expr1 = self.expr
if other.index_type is not None:
index2 = grizzly_impl.get_field(other.expr, 0)
expr2 = grizzly_impl.get_field(other.expr, 1)
else:
expr2 = other.expr
index_expr = LazyOpResult(index, self.index_type, 0)
sub_expr = SeriesWeld(
grizzly_impl.element_wise_op(
expr1,
expr2,
"-",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
index_sub_expr = utils.group([index_expr, sub_expr])
return SeriesWeld(
index_sub_expr.expr,
self.weld_type,
self.df,
self.column_name,
self.index_type,
self.index_name
)
# We also need to ensure that both indexes of the subtracted
# columns are compatible
def sub(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(other, SeriesWeld):
other = other.expr
return SeriesWeld(
grizzly_impl.element_wise_op(
self.expr,
other,
"-",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def mul(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(other, SeriesWeld):
other = other.expr
return SeriesWeld(
grizzly_impl.element_wise_op(
self.expr,
other,
"*",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def div(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(other, SeriesWeld):
other = other.expr
return SeriesWeld(
grizzly_impl.element_wise_op(
self.expr,
other,
"/",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def per_element_and(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(other, SeriesWeld):
other = other.expr
return SeriesWeld(
grizzly_impl.element_wise_op(
self.expr,
other,
"&&",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def mod(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(other, SeriesWeld):
other = other.expr
return SeriesWeld(
grizzly_impl.element_wise_op(
self.expr,
other,
"%",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def __eq__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.compare(
self.expr,
other,
"==",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
)
def __ne__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.compare(
self.expr,
other,
"!=",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
)
def __gt__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.compare(
self.expr,
other,
">",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
)
def __ge__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if self.index_type is not None:
expr = grizzly_impl.get_field(self.expr, 1)
else:
expr = self.expr
return SeriesWeld(
grizzly_impl.compare(
expr,
other,
">=",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
)
def __lt__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.compare(
self.expr,
other,
"<",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
)
def __le__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.compare(
self.expr,
other,
"<=",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
)
class StringSeriesWeld:
"""Summary
Attributes:
column_name (TYPE): Description
df (TYPE): Description
dim (int): Description
expr (TYPE): Description
weld_type (TYPE): Description
"""
def __init__(self, expr, weld_type, df=None, column_name=None):
"""Summary
Args:
expr (TYPE): Description
weld_type (TYPE): Description
df (None, optional): Description
column_name (None, optional): Description
"""
self.expr = expr
self.weld_type = weld_type
self.dim = 1
self.df = df
self.column_name = column_name
def slice(self, start, size):
"""Summary
Args:
start (TYPE): Description
size (TYPE): Description
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.slice(
self.expr,
start,
size,
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
class WeldLocIndexer:
"""
Label location based indexer for selection by label for Series objects.
Attributes:
grizzly_obj (TYPE): The Series being indexed into.
"""
def __init__(self, grizzly_obj):
# If index_type field of grizzly_obj is None
# then we assume normal 0 - 1 indexing
self.grizzly_obj = grizzly_obj
def __getitem__(self, key):
if isinstance(self.grizzly_obj, SeriesWeld):
series = self.grizzly_obj
if isinstance(key, SeriesWeld):
if series.index_type is not None:
index_expr = grizzly_impl.get_field(series.expr, 0)
column_expr = grizzly_impl.get_field(series.expr, 1)
zip_expr = grizzly_impl.zip_columns([index_expr, column_expr])
predicate_expr = grizzly_impl.isin(index_expr, key.expr, series.index_type)
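# Label-based selection strategy: zip the index and value columns together, keep the
# rows whose index label appears in `key`, then unzip back into (index, values).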
filtered_expr = grizzly_impl.filter(
zip_expr,
predicate_expr
)
unzip_expr = grizzly_impl.unzip_columns(
filtered_expr,
[series.index_type, series.weld_type]
)
return SeriesWeld(
unzip_expr,
series.weld_type,
series.df,
series.column_name,
series.index_type,
series.index_name
)
# TODO : Need to implement for non-pivot tables
raise Exception("Cannot invoke getitem on non SeriesWeld object")
| bsd-3-clause |
PMitura/smiles-neural-network | baselines/sicho_svm_uni_feature_sel.py | 1 | 8481 | #! /usr/bin/env python
import db
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from rdkit import Chem
from rdkit.Chem import Descriptors
from sklearn.feature_selection import VarianceThreshold
import pylab
data = db.getTarget_206_1977()
duplicates = {}
for datum in data:
if datum[0] in duplicates:
duplicates[datum[0]].append(datum[1])
else:
duplicates[datum[0]] = [datum[1]]
new_data = []
for smile, sval_arr in duplicates.iteritems():
lemin = np.amin(sval_arr)
lemax = np.amax(sval_arr)
if len(sval_arr) == 1:
new_data.append([smile,sval_arr[0]])
elif lemin != 0 and lemax != 0:
if not (len(sval_arr) < 20 and int(math.log(lemin, 10)) != int(math.log(lemax, 10))):
new_data.append([smile,np.median(sval_arr)])
data = new_data
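# The loop above collapses replicate measurements per SMILES: singletons are kept,
# replicate sets whose min or max is zero are dropped, small sets (< 20 values)
# spanning different orders of magnitude are dropped as inconsistent, and the
# remaining replicates are reduced to their median.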
df_data = {}
df_data['smiles'] = []
df_data['sval'] = []
df_reorder = ['smiles','sval']
for name, function in Descriptors.descList:
df_data[name] = []
df_reorder.append(name)
for i in range(len(data)):
smiles = data[i][0]
sval = data[i][1]
mol = Chem.MolFromSmiles(smiles)
for name, function in Descriptors.descList:
df_data[name].append(function(mol))
df_data['smiles'].append(smiles)
df_data['sval'].append(sval)
# create dataframe, reorder values so that smiles is first, sval is second
df = pd.DataFrame(df_data)
df = df[df_reorder]
df.set_index('smiles', inplace=True)
# we convert the IC50 values to pIC50
df.sval = df.sval.apply(lambda x : -1.0 * np.log10(x / 1.0e9))
# drop infinite values
df = df.drop(df[df.sval == np.inf].index)
def get_removed_feats(df, model):
return df.columns.values[1:][~model.get_support()]
def update_df(df, removed_descriptors, inplace=True):
if inplace:
df.drop(removed_descriptors, 1, inplace=True)
# print(df.shape)
return df
else:
new_df = df.drop(removed_descriptors, 1, inplace=False)
# print(new_df.shape)
return new_df
# find the names of the columns with zero variance
var_sel = VarianceThreshold()
var_sel.fit(df.iloc[:,1:])
removed_descriptors = get_removed_feats(df, var_sel)
# update the data frame
update_df(df, removed_descriptors)
# correlation filter
def find_correlated(data):
correlation_matrix = data.iloc[:,1:].corr(method='spearman')
removed_descs = set()
all_descs = correlation_matrix.columns.values
for label in all_descs:
if label not in removed_descs:
correlations_abs = correlation_matrix[label].abs()
mask = (correlations_abs > 0.7).values
to_remove = set(all_descs[mask])
to_remove.remove(label)
removed_descs.update(to_remove)
return removed_descs
update_df(df, find_correlated(df))
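# Greedy redundancy filter: for each surviving descriptor, every other descriptor with
# absolute Spearman correlation above 0.7 is marked for removal.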
# regression tests
from sklearn.feature_selection import SelectPercentile,SelectKBest
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
# keep only the descriptors that show significant
# correlation with the target variable (pIC50)
regre_sele = SelectPercentile(f_regression, percentile=50)
regre_sele.fit(df.iloc[:,1:], df.sval)
removed_descriptors = get_removed_feats(df, regre_sele)
# update the data frame
update_df(df, removed_descriptors)
# print selected features
print(df.columns.tolist())
from sklearn.metrics import mean_squared_error
from sklearn.cross_validation import train_test_split
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
def createModelAndMeasure(df):
scaler = StandardScaler(copy=False)
scaler.fit(df.iloc[:,1:])
scaled_features = pd.DataFrame(scaler.transform(df.iloc[:,1:]), columns=df.iloc[:,1:].columns)
# Next, we create the datasets for cross-validation and testing:
features_train, features_test, sval_train, sval_test = train_test_split(
scaled_features
, df.sval
, test_size=0.4
, random_state=42
)
# and build the model:
param_grid = [
# {'C': [1, 10, 100, 1000], 'epsilon': [0.0, 0.1, 0.2, 0.3, 0.4], 'kernel': ['linear']},
# {'C': [1, 10, 100, 1000], 'epsilon': [0.0, 0.1, 0.2, 0.3, 0.4], 'kernel': ['poly'], 'degree' : [2, 3, 4, 5]},
{'C': [1, 10, 100, 1000], 'epsilon': [0.0, 0.1, 0.2, 0.3, 0.4], 'gamma': [0.01, 0.001, 0.0001], 'kernel': ['rbf']},
]
model = GridSearchCV(SVR(), param_grid, n_jobs=2, cv=5)
model.fit(features_train, sval_train)
model = model.best_estimator_
# print('Model params:')
# print(model.get_params())
# print()
# cross validation results
from sklearn.cross_validation import cross_val_score
scores = cross_val_score(model, features_train, sval_train, cv=5)
scores_mse = cross_val_score(model, features_train, sval_train, cv=5, scoring='mean_squared_error')
results = {
'cv_mse': abs(scores_mse.mean()),
'cv_mse_d': scores_mse.std() * 2,
'v_mse': mean_squared_error(model.predict(features_test), sval_test)
}
return results
# print('Cross validation:')
# print("Mean R^2: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# print("Mean R: %0.2f (+/- %0.2f)" % (np.sqrt(scores).mean(), np.sqrt(scores).std() * 2))
# print("Mean MSE: %0.2f (+/- %0.2f)" % (abs(scores_mse.mean()), scores_mse.std() * 2))
# print()
# test validation results
# print('Test validation:')
# print("R^2: %0.2f" % model.score(features_test, sval_test))
# print("R: %0.2f" % np.sqrt(model.score(features_test, sval_test)))
# print("MSE: %0.2f" % mean_squared_error(model.predict(features_test), sval_test))
# print()
######################################################################
features_count = len(df.columns)-1
print(features_count)
results = []
# Further forward selection of k best
for fw_step in range(features_count,0,-1):
print('Selecting %d/%d features:' % (fw_step,features_count))
selector = SelectKBest(f_regression,k=fw_step)
selector.fit(df.iloc[:,1:], df.sval)
removed_descriptors = get_removed_feats(df, selector)
# update the data frame
new_df = update_df(df, removed_descriptors, inplace=False)
print(new_df.columns.tolist())
res = createModelAndMeasure(new_df)
res['k']=fw_step
results.append(res)
resultsdf = pd.DataFrame(results)
resultsdf.set_index('k',inplace=True)
print(resultsdf)
resultsdf.plot()
plt.show()
# print(features_mean.values)
# scaled_features = pd.DataFrame(((df.iloc[:,1:]-features_mean)/features_std).values, columns=df.iloc[:,1:].columns)
# print(scaled_features)
# error plot
# (sval_test - model.predict(features_test)).abs().hist(bins=30).plot()
# plt.show()
# PCA plot
print("Computing decomposition:")
from sklearn import decomposition
'''
n_components = 5
pca = decomposition.PCA(n_components=n_components)
pca.fit(df.iloc[:,1:])
pca_result = pca.transform(df.iloc[:,1:])
from mpl_toolkits.mplot3d import Axes3D
from itertools import combinations
plt.rcParams["figure.figsize"] = [15, 15]
fig = plt.figure()
ax = fig.add_subplot(1,1,1,projection='3d')
PCAcombo = [3,1,0]
ax.scatter(
pca_result[:,PCAcombo[0]]
, pca_result[:,PCAcombo[1]]
, pca_result[:,PCAcombo[2]]
, c=df.sval
, cmap='YlOrRd'
)
ax.view_init(elev=30, azim=45)
ax.set_xlabel('PC%s' % (PCAcombo[0] + 1))
ax.set_ylabel('PC%s' % (PCAcombo[1] + 1))
ax.set_zlabel('PC%s' % (PCAcombo[2] + 1))
plt.show()
'''
'''
combos = list(combinations(range(n_components), 3))
plt.rcParams["figure.figsize"] = [15, 30]
fig = plt.figure(len(combos) / 2)
for idx, combo in enumerate(combos):
ax = fig.add_subplot(len(combos) / 2, 2, idx + 1, projection='3d')
ax.scatter(
pca_result[:,combo[0]]
, pca_result[:,combo[1]]
, pca_result[:,combo[2]]
, c=df.sval
, s=20
, cmap='YlOrRd' # red are the compounds with higher values of pIC50
)
ax.view_init(elev=30, azim=45)
ax.set_xlabel('PC%s' % (combo[0] + 1))
ax.set_ylabel('PC%s' % (combo[1] + 1))
ax.set_zlabel('PC%s' % (combo[2] + 1))
plt.show()
'''
'''
from sklearn.manifold import TSNE
model = TSNE(n_components=2)
TSNEdata = model.fit_transform(df.iloc[:,1:])
TSNEdf = pd.DataFrame(TSNEdata, columns=('x','y'))
TSNEdf['c'] = pd.Series(df.sval.values,index=TSNEdf.index)
plot = TSNEdf.plot.scatter(x = 'x', y = 'y', c = 'c', cmap = 'plasma')
plt.show()
'''
| bsd-3-clause |
rs2/pandas | pandas/tests/indexes/multi/test_drop.py | 2 | 4428 | import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Index, MultiIndex
import pandas._testing as tm
def test_drop(idx):
dropped = idx.drop([("foo", "two"), ("qux", "one")])
index = MultiIndex.from_tuples([("foo", "two"), ("qux", "one")])
dropped2 = idx.drop(index)
expected = idx[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = idx.drop(["bar"])
expected = idx[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = idx.drop("foo")
expected = idx[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([("bar", "two")])
with pytest.raises(KeyError, match=r"^10$"):
idx.drop([("bar", "two")])
with pytest.raises(KeyError, match=r"^10$"):
idx.drop(index)
with pytest.raises(KeyError, match=r"^'two'$"):
idx.drop(["foo", "two"])
# partially correct argument
mixed_index = MultiIndex.from_tuples([("qux", "one"), ("bar", "two")])
with pytest.raises(KeyError, match=r"^10$"):
idx.drop(mixed_index)
# error='ignore'
dropped = idx.drop(index, errors="ignore")
expected = idx[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = idx.drop(mixed_index, errors="ignore")
expected = idx[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = idx.drop(["foo", "two"], errors="ignore")
expected = idx[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = idx.drop(["foo", ("qux", "one")])
expected = idx[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ["foo", ("qux", "one"), "two"]
with pytest.raises(KeyError, match=r"^'two'$"):
idx.drop(mixed_index)
dropped = idx.drop(mixed_index, errors="ignore")
expected = idx[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(idx):
index = idx[idx.get_loc("foo")]
dropped = index.droplevel(0)
assert dropped.name == "second"
index = MultiIndex(
levels=[Index(range(4)), Index(range(4)), Index(range(4))],
codes=[
np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0]),
],
names=["one", "two", "three"],
)
dropped = index.droplevel(0)
assert dropped.names == ("two", "three")
dropped = index.droplevel("two")
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list():
index = MultiIndex(
levels=[Index(range(4)), Index(range(4)), Index(range(4))],
codes=[
np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0]),
],
names=["one", "two", "three"],
)
dropped = index[:2].droplevel(["three", "one"])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
msg = (
"Cannot remove 3 levels from an index with 3 levels: "
"at least one level must be left"
)
with pytest.raises(ValueError, match=msg):
index[:2].droplevel(["one", "two", "three"])
with pytest.raises(KeyError, match="'Level four not found'"):
index[:2].droplevel(["one", "four"])
def test_drop_not_lexsorted():
# GH 12078
# define the lexsorted version of the multi-index
tuples = [("a", ""), ("b1", "c1"), ("b2", "c2")]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=["b", "c"])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(
columns=["a", "b", "c", "d"], data=[[1, "b1", "c1", 3], [1, "b2", "c2", 4]]
)
df = df.pivot_table(index="a", columns=["b", "c"], values="d")
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop("a"), not_lexsorted_mi.drop("a"))
| bsd-3-clause |
evgchz/scikit-learn | sklearn/neural_network/rbm.py | 15 | 11957 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
rng = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, rng)
v_ = self._sample_visibles(h_, rng)
return v_
def partial_fit(self, X):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
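# Positive-phase statistics (v_pos, h_pos) come from the data; the negative phase
# (v_neg, h_neg) is sampled from the persistent fantasy particles in self.h_samples_,
# which is what makes this PCD/SML rather than plain contrastive divergence.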
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
| bsd-3-clause |
KeithYue/StockTrading | utility.py | 1 | 4059 | # coding=utf-8
# utility function of stock
import os
import pandas as pd
import numpy as np
import logging
# config the logging system
logging.basicConfig(level=logging.DEBUG)
# define Stock class
class Stock():
'''
the stock class
'''
def __init__(self, code):
self.code=code
# search and load the stock data
try:
for f in os.listdir('./data'):
if f.startswith(self.code):
self.path = './data/{}'.format(f)
data = pd.read_csv(self.path, parse_dates=['Date'])
data.rename(columns=str.lower, inplace=True)
except Exception as e:
logging.error(e)
self.data=data # the raw dataframe data
# logging.info(self.data)
return
def __str__(self):
return self.code
# modify the DataFrame class to convert dict-like data for talib computation
def tt(self):
'''
convert DataFrame to talib computation data
tt means to talib
return dict-like data
'''
d = {}
for c in self.columns:
d[c] = np.asarray(self[c])
return d
pd.DataFrame.tt = tt
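# Hypothetical usage sketch (assumes TA-Lib is installed and the CSV provides a Close column):
# import talib
# sma = talib.SMA(Stock('0001').data.tt()['close'], timeperiod=20)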
def gold_cross_number(s):
'''
given a time series, return the count of crossing
'''
count = 0
for i in range(0, len(s)-1):
if s[i] < 0 and s[i+1]>0:
count = count + 1
return count
def send_email(body):
import smtplib
# Import the email modules we'll need
from email.mime.text import MIMEText
user = "[email protected]"
pwd = "keith5805880"
recipient = "[email protected]"
subject = "From Keith: Promising Stock Codes"
msg = MIMEText(body, 'html')
msg['Subject'] = subject
msg['From'] = user
msg['To'] = recipient
try:
# server = smtplib.SMTP_SSL('smtp.gmail.com:465')
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.login(user, pwd)
server.send_message(msg)
server.quit()
print('successfully sent the mail')
except Exception as e :
print(e)
# smooth the timeseries function
def smooth(x,window_len=11,window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the begining and end part of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
t=linspace(-2,2,0.1)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len<3:
return x
if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
#print(len(s))
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
w = getattr(np, window)(window_len)
y=np.convolve(w/w.sum(),s,mode='valid')
return y[(window_len//2-1):-(window_len//2)]
def test():
s = Stock('0001')
print(s.data)
if __name__ == '__main__':
test()
| apache-2.0 |
Sentient07/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as the weights of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
                    'RandomForestClassifier\nweight 1',
                    'GaussianNB\nweight 5',
                    'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
nkoukou/University_Projects_Year_3 | EDM_Assembly/base_class.py | 1 | 11366 | '''
Defines the base class of an electric potential grid.
'''
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
from numba import jit
# Global dimensions (used for plots)
sqc_x = (2., 'cm') # unit length for SquareCable
sqc_u = (10., 'V') # unit potential for SquareCable
edm_x = (10., 'mm') # unit length for Edm
edm_u = (2., 'kV') # unit potential for Edm
# Plot parameters
font = {'family' : 'normal',
'weight' : 'normal'}
mpl.rc('font', **font)
mpl.rcParams['lines.linewidth'] = 5.
# Functions compiled just-in-time
@jit
def gen_sc_grid(b, t, u):
'''
Generates SquareCable grid.
'''
grid = np.full((b,b), u)
fix = np.ones((b,b))
grid = np.pad(grid, ((t-b)/2,), 'constant', constant_values=(0,))
fix = np.pad(fix, ((t-b)/2,), 'constant', constant_values=(0,))
grid = np.pad(grid, 1, 'constant', constant_values=(0,))
fix = np.pad(fix, 1, 'constant', constant_values=(1,))
return grid, fix
@jit
def gen_edm_grid(tube_dist, scale=1):
'''
Generates Edm grid.
'''
small_plate = np.full(2,1, dtype='float64')
big_plate = np.full(20,4, dtype='float64')
gap = np.zeros(1, dtype='float64')
row_one = np.concatenate((small_plate, gap, big_plate, gap, small_plate))
row_two = np.zeros(row_one.size)
row_three = -row_one
grid = np.vstack((row_one, row_two, row_three))
grid = np.pad(grid, tube_dist, 'constant', constant_values=(0,))
fix = np.where(grid==0, 0, 1)
if scale != 1:
scale = np.ones((scale, scale))
grid = np.kron(grid, scale)
fix = np.kron(fix, scale)
grid = np.pad(grid, 1, 'constant', constant_values=(0,))
fix = np.pad(fix, 1, 'constant', constant_values=(1,))
return grid, fix
@jit
def update(grid, fix, scale, w=-1):
'''
Updates SquareCable or Edm grid.
Relaxation parameter w (0 < w < 2) affects the speed of convergence.
- w = 'j': solves with Jacobi method
- w = -1: solves with estimated optimal w
'''
if w=='j' or w=='J':
new_grid=np.copy(grid)
for index, fixed in np.ndenumerate(fix):
if fixed: continue
new_grid[index] = 0.25*( grid[index[0]-1, index[1]] +
grid[index[0]+1, index[1]] +
grid[index[0], index[1]-1] +
grid[index[0], index[1]+1] )
return new_grid
if w==-1:
coef = float(grid.shape[1])/grid.shape[0]
const = 2.0 if coef==1. else 5.5
w = 2./(1+const/(coef*scale))
for index, fixed in np.ndenumerate(fix):
if fixed: continue
grid[index] = ((1-w) * grid[index] + 0.25 * w *
( grid[index[0]-1, index[1]] +
grid[index[0]+1, index[1]] +
grid[index[0], index[1]-1] +
grid[index[0], index[1]+1] ))
return grid
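# Minimal sketch (not part of the original module): the SOR kernel above can be
# exercised on a toy Laplace problem -- boundary cells fixed at 0, one interior
# cell clamped at 1 -- to watch the potential relax.  Grid size, sweep count and
# the relaxation parameter below are illustrative assumptions.
def _sor_demo(n=20, sweeps=200, w=1.5):
    grid = np.zeros((n, n))
    fix = np.zeros((n, n))
    fix[0, :] = fix[-1, :] = fix[:, 0] = fix[:, -1] = 1  # fixed boundary cells
    grid[n // 2, n // 2] = 1.0                           # clamped interior cell
    fix[n // 2, n // 2] = 1
    for _ in range(sweeps):
        grid = update(grid, fix, n, w)
    return grid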
# Base class
class PotentialGrid(object):
def update_grid(self, w=-1):
'''
Updates grid once.
'''
self.grid = update(self.grid, self.fix, self.scale, w)
def converge_grid(self, w=-1, accuracy=0.05):
'''
Updates grid until convergence.
'''
temporal_spread = 1.
spatial_spread = 0.
updates = 0
while temporal_spread > accuracy*spatial_spread:
horizontal_spread = np.absolute(np.diff(self.grid, axis=-1)).max()
vertical_spread = np.absolute(np.diff(self.grid, axis=0)).max()
spatial_spread = max(horizontal_spread, vertical_spread)
old_grid = np.copy(self.grid)
self.update_grid(w)
temporal_spread = np.linalg.norm( (self.grid - old_grid) )
updates += 1
if updates%1000==0:
print '\nspatial spread = ', spatial_spread
print 'temporal spread = ', temporal_spread
print 'updates = ', updates
return temporal_spread, spatial_spread, updates
def plot_grid(self, title=None):
'''
Plots grid's potential field. Parameter title sets the title of the
plot.
'''
if self.grid.shape[0] == self.grid.shape[1]:
colour, shrink, aspect = 'YlOrRd', 1, (1, 10)
else:
colour, shrink, aspect = 'RdYlBu', 0.5, (1.2, 8)
grid = self.dim['u'][0]*self.grid
xedge = (grid.shape[1]-2.)*self.dim['x'][0]/self.scale/2.
yedge = (grid.shape[0]-2.)*self.dim['x'][0]/self.scale/2.
fig = plt.figure()
ax = fig.add_subplot(111)
if title=='intro':
ax.set_title(r'EDM experiment plate assembly', fontsize=45)
elif title=='results':
ax.set_title(r'Electric Potential Field', fontsize=45)
axx = ax.imshow(grid, extent= [-xedge, xedge, -yedge, yedge],
aspect=aspect[0], interpolation='None',
cmap=plt.cm.get_cmap(colour))
ax.set_xlabel(r'$system\ size\ ({0})$'.format(self.dim['x'][1]),
fontsize=45)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.tick_params(axis='both', labelsize=40)
cbar = fig.colorbar(axx, shrink=shrink, aspect=aspect[1])
cbar.ax.tick_params(labelsize=40)
cbar.set_label(r'$Potential\ \phi\ ({0})$'.format(self.dim['u'][1]),
size=50)
def analyse_scale(self, w=-1, datapoints=20, accuracy=0.05, plot=True):
'''
Plots number of updates against scale for given relaxation parameter w,
number of datapoints and accuracy of convergence. If plot=False,
returns computed updates and scales.
Plots also maximum spatial spread of potential against scale.
'''
scales = np.linspace(10, 10*datapoints, datapoints)
mesh, updates = [], []
for s in scales:
print s
self.set_scale(s, silent=True)
data = self.converge_grid(w, accuracy)
updates.append(data[2])
mesh.append(data[1]*self.dim['u'][0])
if not plot: return scales, updates
if w=='j':
xaxis = scales*scales
lab= r'$scale^2\ \left(\frac{1}{(%g%s)^2}\right)$'% (
self.dim['x'][0], self.dim['x'][1])
else:
xaxis = scales
lab= r'$scale\ \left(\frac{1}{%g%s}\right)$'% (self.dim['x'][0],
self.dim['x'][1])
slope = updates[-1]/xaxis[-1]
fit = slope*xaxis
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(r'Number of updates against Scale', fontsize=45)
ax.plot(xaxis, updates, label=r'Numerical data')
ax.plot(xaxis, fit, label=r'Linear fit ($slope=%.2f$)'% (slope))
ax.set_xlabel(lab, fontsize=35)
ax.set_ylabel(r'$temporal\ updates$', fontsize=35)
ax.tick_params(axis='both', labelsize=25)
ax.legend(loc='upper left', prop={'size':40})
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(r'Spatial spread against Scale', fontsize=45)
ax.plot(scales, mesh)
ax.set_xlabel(r'$scale\ \left(\frac{1}{%g%s}\right)$'%
(self.dim['x'][0], self.dim['x'][1]), fontsize=40)
ax.set_ylabel(r'$spatial\ spread\ (%s)$'% (self.dim['u'][1]),
fontsize=40)
ax.tick_params(axis='both', labelsize=25)
def analyse_spread(self, w=-1, datapoints=10):
'''
Plots spatial spread of potential against accuracy of convergence for
given relaxation parameter w and number of datapoints.
'''
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.set_title(r'Spatial spread against Accuracy of convergence',
# fontsize=75)
ax.set_xlabel(r'$fraction\ of\ spatial\ spread$', fontsize=40)
ax.invert_xaxis()
ax.set_ylabel(r'$spatial\ spread\ (%s)$'% (self.dim['u'][1]),
fontsize=40)
ax.tick_params(axis='both', labelsize=30)
accuracies = np.logspace(-1,-10,datapoints)
for scale in np.linspace(10,10*datapoints,datapoints):
self.set_scale(scale, silent=True)
spreads = []
for acc in accuracies:
t,s,u = self.converge_grid(w, acc)
spreads.append(s*self.dim['u'][0])
ax.plot(accuracies, spreads, label='Scale={0}'.format(scale))
return accuracies, spreads
def analyse_omega(self, guess, scales=-1, datapoints=20,
accuracy=0.05, plot=True):
'''
Plots number of updates against relaxation parameter for given initial
guess, system scales, number of datapoints and accuracy of convergence.
If plot=False, returns computed updates and relaxation parameters.
'''
if plot:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(r'Optimal omega search at different scales',
fontsize=55)
ax.set_xlabel('$relaxation\ parameter\ \omega$', fontsize=37)
ax.set_ylabel('$temporal\ updates$', fontsize=37)
ax.tick_params(axis='both', labelsize=30)
ws = np.pad(np.array([guess]), datapoints/2, 'linear_ramp',
end_values=(guess-0.05, 1.99))
if scales==-1: scales = [self.scale]
for scale in scales:
updates = []
for w in ws:
self.set_scale(scale, silent=True)
data = self.converge_grid(w, accuracy)
updates.append(data[-1])
if plot: ax.plot(ws, updates, label=r'Scale ${0}$'.format(scale))
else: return ws, updates
if plot: ax.legend(loc='upper center', prop={'size':40})
def plot_omega_vs_scale(self, const=2., datapoints=20):
'''
Plots relaxation parameter against scale along with approximate fit for
given number of datapoints.
The fitting is approximated by the user with the constant const which
        appears in the fit formula 2/(1 + const/(coef*scale)), where coef is the
ratio of x and y dimensions of the system.
'''
coef = float(self.grid.shape[1]-2)/(self.grid.shape[0]-2)
scales = np.linspace(10, 50, datapoints)
fit = 2./(1+const/(coef*scales))
ws = []
for scale in scales:
self.set_scale(scale, silent=True)
guess = 2./(1+const/(coef*self.scale))
w, update = self.analyse_omega(guess, plot=False)
w = w[update.index(min(update))]
ws.append(w)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(r'Relaxation parameter against scale', fontsize=55)
ax.set_xlabel(r'$scale\ \left(\frac{1}{%g%s}\right)$'%
(self.dim['x'][0], self.dim['x'][1]), fontsize=37)
ax.set_ylabel('$relaxation\ parameter\ \omega$', fontsize=37)
ax.tick_params(axis='both', labelsize=30)
ax.plot(scales, ws, label=r'Numerical data')
ax.plot(scales, fit, label=r'Approximate fit ($C=%.1f$)'% (const))
ax.legend(loc='upper left', prop={'size':40})
return scales, ws
| mit |
victor-prado/broker-manager | environment/lib/python3.5/site-packages/pandas/tests/frame/test_mutate_columns.py | 7 | 7831 | # -*- coding: utf-8 -*-
from __future__ import print_function
from pandas.compat import range, lrange
import numpy as np
from pandas import DataFrame, Series, Index
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
# Column add, remove, delete.
class TestDataFrameMutateColumns(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_assign(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
original = df.copy()
result = df.assign(C=df.B / df.A)
expected = df.copy()
expected['C'] = [4, 2.5, 2]
assert_frame_equal(result, expected)
# lambda syntax
result = df.assign(C=lambda x: x.B / x.A)
assert_frame_equal(result, expected)
# original is unmodified
assert_frame_equal(df, original)
# Non-Series array-like
result = df.assign(C=[4, 2.5, 2])
assert_frame_equal(result, expected)
# original is unmodified
assert_frame_equal(df, original)
result = df.assign(B=df.B / df.A)
expected = expected.drop('B', axis=1).rename(columns={'C': 'B'})
assert_frame_equal(result, expected)
# overwrite
result = df.assign(A=df.A + df.B)
expected = df.copy()
expected['A'] = [5, 7, 9]
assert_frame_equal(result, expected)
# lambda
result = df.assign(A=lambda x: x.A + x.B)
assert_frame_equal(result, expected)
def test_assign_multiple(self):
df = DataFrame([[1, 4], [2, 5], [3, 6]], columns=['A', 'B'])
result = df.assign(C=[7, 8, 9], D=df.A, E=lambda x: x.B)
expected = DataFrame([[1, 4, 7, 1, 4], [2, 5, 8, 2, 5],
[3, 6, 9, 3, 6]], columns=list('ABCDE'))
assert_frame_equal(result, expected)
def test_assign_alphabetical(self):
# GH 9818
df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
result = df.assign(D=df.A + df.B, C=df.A - df.B)
expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]],
columns=list('ABCD'))
assert_frame_equal(result, expected)
result = df.assign(C=df.A - df.B, D=df.A + df.B)
assert_frame_equal(result, expected)
def test_assign_bad(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
# non-keyword argument
with tm.assertRaises(TypeError):
df.assign(lambda x: x.A)
with tm.assertRaises(AttributeError):
df.assign(C=df.A, D=df.A + df.C)
with tm.assertRaises(KeyError):
df.assign(C=lambda df: df.A, D=lambda df: df['A'] + df['C'])
with tm.assertRaises(KeyError):
df.assign(C=df.A, D=lambda x: x['A'] + x['C'])
def test_insert_error_msmgs(self):
# GH 7432
df = DataFrame({'foo': ['a', 'b', 'c'], 'bar': [
1, 2, 3], 'baz': ['d', 'e', 'f']}).set_index('foo')
s = DataFrame({'foo': ['a', 'b', 'c', 'a'], 'fiz': [
'g', 'h', 'i', 'j']}).set_index('foo')
msg = 'cannot reindex from a duplicate axis'
with assertRaisesRegexp(ValueError, msg):
df['newcol'] = s
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)),
columns=['a', 'b', 'c', 'd'])
msg = 'incompatible index of inserted column with frame index'
with assertRaisesRegexp(TypeError, msg):
df['gr'] = df.groupby(['b', 'c']).count()
def test_insert_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=lrange(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K),
index=lrange(N))
assert_frame_equal(df, expected)
def test_insert(self):
df = DataFrame(np.random.randn(5, 3), index=np.arange(5),
columns=['c', 'b', 'a'])
df.insert(0, 'foo', df['a'])
self.assert_index_equal(df.columns, Index(['foo', 'c', 'b', 'a']))
tm.assert_series_equal(df['a'], df['foo'], check_names=False)
df.insert(2, 'bar', df['c'])
self.assert_index_equal(df.columns,
Index(['foo', 'c', 'bar', 'b', 'a']))
tm.assert_almost_equal(df['c'], df['bar'], check_names=False)
# diff dtype
# new item
df['x'] = df['a'].astype('float32')
result = Series(dict(float64=5, float32=1))
self.assertTrue((df.get_dtype_counts() == result).all())
# replacing current (in different block)
df['a'] = df['a'].astype('float32')
result = Series(dict(float64=4, float32=2))
self.assertTrue((df.get_dtype_counts() == result).all())
df['y'] = df['a'].astype('int32')
result = Series(dict(float64=4, float32=2, int32=1))
self.assertTrue((df.get_dtype_counts() == result).all())
with assertRaisesRegexp(ValueError, 'already exists'):
df.insert(1, 'a', df['b'])
self.assertRaises(ValueError, df.insert, 1, 'c', df['b'])
df.columns.name = 'some_name'
# preserve columns name field
df.insert(0, 'baz', df['c'])
self.assertEqual(df.columns.name, 'some_name')
# GH 13522
df = DataFrame(index=['A', 'B', 'C'])
df['X'] = df.index
df['X'] = ['x', 'y', 'z']
exp = DataFrame(data={'X': ['x', 'y', 'z']}, index=['A', 'B', 'C'])
assert_frame_equal(df, exp)
def test_delitem(self):
del self.frame['A']
self.assertNotIn('A', self.frame)
def test_pop(self):
self.frame.columns.name = 'baz'
self.frame.pop('A')
self.assertNotIn('A', self.frame)
self.frame['foo'] = 'bar'
self.frame.pop('foo')
self.assertNotIn('foo', self.frame)
# TODO self.assertEqual(self.frame.columns.name, 'baz')
# 10912
# inplace ops cause caching issue
a = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[
'A', 'B', 'C'], index=['X', 'Y'])
b = a.pop('B')
b += 1
# original frame
expected = DataFrame([[1, 3], [4, 6]], columns=[
'A', 'C'], index=['X', 'Y'])
assert_frame_equal(a, expected)
# result
expected = Series([2, 5], index=['X', 'Y'], name='B') + 1
assert_series_equal(b, expected)
def test_pop_non_unique_cols(self):
df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})
df.columns = ["a", "b", "a"]
res = df.pop("a")
self.assertEqual(type(res), DataFrame)
self.assertEqual(len(res), 2)
self.assertEqual(len(df.columns), 1)
self.assertTrue("b" in df.columns)
self.assertFalse("a" in df.columns)
self.assertEqual(len(df.index), 2)
def test_insert_column_bug_4032(self):
# GH4032, inserting a column and renaming causing errors
df = DataFrame({'b': [1.1, 2.2]})
df = df.rename(columns={})
df.insert(0, 'a', [1, 2])
result = df.rename(columns={})
str(result)
expected = DataFrame([[1, 1.1], [2, 2.2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
df.insert(0, 'c', [1.3, 2.3])
result = df.rename(columns={})
str(result)
expected = DataFrame([[1.3, 1, 1.1], [2.3, 2, 2.2]],
columns=['c', 'a', 'b'])
assert_frame_equal(result, expected)
| mit |
bikong2/scikit-learn | sklearn/tests/test_random_projection.py | 79 | 14035 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
    # All random matrices should produce a transformation matrix
    # with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
    # Check some statistical properties of Gaussian random matrix
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
    # Check some statistical properties of sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [[0, 1, 2]])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
evidation-health/pymc3 | setup.py | 1 | 2670 | #!/usr/bin/env python
from setuptools import setup
import sys
DISTNAME = 'pymc3'
DESCRIPTION = "PyMC3"
LONG_DESCRIPTION = """Bayesian estimation, particularly using Markov chain Monte Carlo (MCMC), is an increasingly relevant approach to statistical estimation. However, few statistical software packages implement MCMC samplers, and they are non-trivial to code by hand. ``pymc3`` is a python package that implements the Metropolis-Hastings algorithm as a python class, and is extremely flexible and applicable to a large suite of problems. ``pymc3`` includes methods for summarizing output, plotting, goodness-of-fit and convergence diagnostics."""
MAINTAINER = 'John Salvatier'
MAINTAINER_EMAIL = '[email protected]'
AUTHOR = 'John Salvatier and Christopher Fonnesbeck'
AUTHOR_EMAIL = '[email protected]'
URL = "http://github.com/pymc-devs/pymc"
LICENSE = "Apache License, Version 2.0"
VERSION = "3.0"
classifiers = ['Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Operating System :: OS Independent']
install_reqs = ['numpy>=1.7.1', 'scipy>=0.12.0', 'matplotlib>=1.2.1',
'Theano<=0.7.1dev', 'pandas>=0.15.0']
if sys.version_info < (3, 4):
install_reqs.append('enum34')
test_reqs = ['nose']
if sys.version_info[0] == 2: # py3 has mock in stdlib
test_reqs.append('mock')
dep_links = ['https://github.com/Theano/Theano/tarball/master#egg=Theano-0.7.1dev']
if __name__ == "__main__":
setup(name=DISTNAME,
version=VERSION,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
long_description=LONG_DESCRIPTION,
packages=['pymc3', 'pymc3.distributions',
'pymc3.step_methods', 'pymc3.tuning',
'pymc3.tests', 'pymc3.glm', 'pymc3.examples',
'pymc3.backends'],
package_data = {'pymc3.examples': ['data/*.*']},
classifiers=classifiers,
install_requires=install_reqs,
dependency_links=dep_links,
tests_require=test_reqs,
test_suite='nose.collector')
| apache-2.0 |
cgrohman/ponies | hypo1.py | 1 | 4172 | import numpy as np
import pandas as pd
from horse import Horse
from race import Race
import pdb
from sklearn.preprocessing import Imputer, StandardScaler, OneHotEncoder, LabelEncoder
from sklearn.model_selection import train_test_split
#------------------------------------------------------------------------------
def main():
dataset = pd.read_csv('results/ml_2017-04-17.csv')
#data clean up
# H Odds columns: impute and scale
horse_odds_labels = ['h0_odds','h1_odds', 'h2_odds', 'h3_odds', 'h4_odds', 'h5_odds', 'h6_odds', 'h7_odds', 'h8_odds', 'h9_odds', 'h10_odds', 'h11_odds', 'h12_odds', 'h13_odds', 'h14_odds', 'h15_odds', 'h16_odds', 'h17_odds','h18_odds']
dataset = impute_def_col(dataset, horse_odds_labels)
dataset = scale_def_col(dataset, horse_odds_labels)
# H Weights columns: impute and scale
horse_weight_labels = ['h0_weight','h1_weight', 'h2_weight', 'h3_weight', 'h4_weight', 'h5_weight', 'h6_weight', 'h7_weight', 'h8_weight', 'h9_weight', 'h10_weight', 'h11_weight', 'h12_weight', 'h13_weight', 'h14_weight', 'h15_weight', 'h16_weight', 'h17_weight','h18_weight']
dataset = impute_def_col(dataset, horse_weight_labels)
dataset = scale_def_col(dataset, horse_weight_labels)
# H Claim value columns: impute and scale
horse_claim_value_labels = ['h0_claim_value','h1_claim_value', 'h2_claim_value', 'h3_claim_value', 'h4_claim_value', 'h5_claim_value', 'h6_claim_value', 'h7_claim_value', 'h8_claim_value', 'h9_claim_value', 'h10_claim_value', 'h11_claim_value', 'h12_claim_value', 'h13_claim_value', 'h14_claim_value', 'h15_claim_value', 'h16_claim_value', 'h17_claim_value','h18_claim_value']
dataset = impute_def_col(dataset, horse_claim_value_labels)
dataset = scale_def_col(dataset, horse_claim_value_labels)
# R Purse value columns: impute and scale
dataset = impute_def_col(dataset, ['purse'])
dataset = scale_def_col(dataset, ['purse'])
# R Purse value columns: impute and scale
dataset = impute_def_col(dataset, ['distance'])
dataset = scale_def_col(dataset, ['distance'])
# R Class Rating value columns: impute and scale
dataset = impute_def_col(dataset, ['class_rating'])
dataset = scale_def_col(dataset, ['class_rating'])
# Data splitting
x = dataset.iloc[:,:-1]
y = dataset.iloc[:,-1]
from sklearn.cross_validation import train_test_split
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# SVC
from sklearn.svm import SVC
svc = SVC()
svc.fit(x_train, y_train)
y_pred_svm = svc.predict(x_test)
# Validation
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, y_pred_svm)
# predicted 0 1 Total:
# real 0 [7590, 1240] 8830
# 1 [2715, 2187] 4902
# Predict NOT wps accuracy: 85.95%
# Predict wps accuracy: 44.61%
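    # Sketch (illustrative, not in the original script): the two accuracies above
    # come straight from the confusion-matrix rows, e.g.
    #   cm = confusion_matrix(y_test, y_pred_svm)
    #   not_wps_acc = cm[0, 0] / float(cm[0].sum())   # 7590 / 8830 ~= 0.8595
    #   wps_acc     = cm[1, 1] / float(cm[1].sum())   # 2187 / 4902 ~= 0.4461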
# Removing race_number from data
x_nrn = x.iloc[:,1:]
x_nrn_train, x_nrn_test, y_nrn_train, y_nrn_test = train_test_split(x_nrn, y, test_size=0.2)
    svc_nrn = SVC()
    svc_nrn.fit(x_nrn_train, y_nrn_train)
y_pred_nrn = svc_nrn.predict(x_nrn_test)
# predicted 0 1 Total:
# real 0 [7609, 1221] 8830
# 1 [2812, 2090] 4902
# Predict NOT wps accuracy: 86.17%
    # Predict wps accuracy: 42.63%
#------------------------------------------------------------------------------
def impute_def_col(df, label):
for la in label:
imp = Imputer()
col = np.array(df[la]).T
try:
df[la] = imp.fit_transform(col.reshape(-1,1))
except:
pdb.set_trace()
return df
#------------------------------------------------------------------------------
def scale_def_col(df, label):
for la in label:
sc = StandardScaler()
col = np.array(df[la]).T
try:
df[la] = sc.fit_transform(col.reshape(-1,1))
except:
pdb.set_trace()
return df
#------------------------------------------------------------------------------
def ohe_col(df, label):
    # completed from the truncated original: label-encode, then one-hot encode the
    # column into one indicator column per category (the expansion is an assumed intent)
    for la in label:
        le = LabelEncoder()
        ohe = OneHotEncoder()
        col = np.array(df[la]).T
        try:
            col_le = le.fit_transform(col)
            col_ohe = ohe.fit_transform(col_le.reshape(-1, 1)).toarray()
            names = ['{0}_{1}'.format(la, c) for c in le.classes_]
            df = df.drop(la, axis=1).join(pd.DataFrame(col_ohe, columns=names, index=df.index))
        except:
            pdb.set_trace()
    return df
#------------------------------------------------------------------------------
if __name__ == '__main__':
main() | gpl-3.0 |
vberthiaume/vblandr | src/silenceTest.py | 1 | 3381 | import subprocess as sp
import scikits.audiolab
import numpy as np
from scipy.fftpack import fft, ifft
from scipy.io import wavfile
import bisect
import matplotlib.pyplot as plt
import time
plt.rcParams['agg.path.chunksize'] = 10000
#--CONVERT MP3 TO WAV------------------------------------------
#song_path = '/home/gris/Music/vblandr/test_small/punk/07 Alkaline Trio - Only Love.mp3'
#song_path = '/mnt/c/Users/barth/Documents/vblandr/train_small/punk/01 - True North.mp3'
#song_path = '/mnt/c/Users/barth/Documents/vblandr/train_small/audiobook/Blaise_Pascal_-_Discours_sur_les_passions_de_l_amour.mp3'
song_path = '/home/gris/Music/vblandr/train_small/audiobook/Blaise_Pascal_-_Discours_sur_les_passions_de_l_amour.mp3'
command = [ 'ffmpeg',
'-i', song_path,
'-f', 's16le',
'-acodec', 'pcm_s16le',
'-ar', '44100', # sms tools wavread can only read 44100 Hz
'-ac', '1', # mono file
'-loglevel', 'quiet',
            '-']    # '-' makes ffmpeg write the raw samples to stdout instead of a file, so Popen captures them via sp.PIPE
#run the command
pipe = sp.Popen(command, stdout=sp.PIPE)
#read the output into a numpy array
stdoutdata = pipe.stdout.read()
audio_array = np.fromstring(stdoutdata, dtype=np.int16)
#--------------------------------------------------------------
def removeInitialSilence(cur_song_pcm):
#start_time = time.clock()
#raw
#min = np.min(cur_song_pcm)
#max = np.max(cur_song_pcm)
#print ("raw min:", min, "max:", max)
#using absolute value
env = abs(cur_song_pcm)
#min = np.min(env)
#max = np.max(env)
#print ("abs min:", min, "max:", max)
#float
env = env.astype(np.float32) #cast the array into float32
#min = np.min(env)
#max = np.max(env)
#print ("float min:", min, "max:", max)
#norm1
max = np.max(env)
    env = np.multiply(env, 1.0 / max) # normalise the rectified envelope by its maximum, so values land in [0, 1]
min = np.min(env)
max = np.max(env)
print ("norm1 min:", min, "max:", max)
#end_time = time.clock()
#print ("time:", end_time - start_time)
plt.plot(env)
plt.show()
#convolving as a way to do a fast moving average
N = 100
env = np.convolve(env, np.ones((N,))/N)[(N-1):]
#first 44100 samples are silent. what is their max amplitude?
#print np.max(env[:44100])
#at 1.5s, we're clearly into audio, what is the max amplitude?
#print "before .5s, max: ", np.max(env[:.5*44100])
#here we're still in noise part
#print "in vocal part, max: ", np.max(env[.625*44100])
#detect first non-silent sample
threshold = .00004
endOfSilence = bisect.bisect(env,threshold)
print "end of silence: ", endOfSilence
#these don't work on hesse
plt.plot(env)
plt.show()
return cur_song_pcm[endOfSilence:]
#---- REMOVE SILENCE --------------------
ifft_output = removeInitialSilence(audio_array)
#truncate to 1 sec
ifft_output = ifft_output[:1*44100]
#--SAVE WAVE AS NEW FILE ----------------
ifft_output = np.round(ifft_output).astype('int16')
wavfile.write('/home/gris/Music/vblandr/silenceTest.wav', 44100, ifft_output)
#wavfile.write('/mnt/c/Users/barth/Documents/vblandr/silenceTest.wav', 44100, ifft_output)
| apache-2.0 |