from numpy import array, repeat, abs, minimum, floor, float_
from scipy.signal import lfilter_zi, lfilter
from skdh.utility.internal import apply_downsample
from skdh.utility import moving_mean
__all__ = ["get_activity_counts"]
input_coef = array(
[
-0.009341062898525,
-0.025470289659360,
-0.004235264826105,
0.044152415456420,
0.036493718347760,
-0.011893961934740,
-0.022917390623150,
-0.006788163862310,
0.000000000000000,
],
dtype=float_,
)
output_coef = array(
[
1.00000000000000000000,
-3.63367395910957000000,
5.03689812757486000000,
-3.09612247819666000000,
0.50620507633883000000,
0.32421701566682000000,
-0.15685485875559000000,
0.01949130205890000000,
0.00000000000000000000,
],
dtype=float_,
)
def get_activity_counts(fs, time, accel, epoch_seconds=60):
"""
Compute the activity counts from acceleration.
Parameters
----------
fs : float
Sampling frequency.
time : numpy.ndarray
Shape (N,) array of epoch timestamps (in seconds) for each sample.
accel : numpy.ndarray
Nx3 array of measured acceleration values, in units of g.
epoch_seconds : int, optional
Number of seconds in an epoch (time unit for counts). Default is 60 seconds.
Returns
-------
counts : numpy.ndarray
Array of activity counts
References
----------
.. [1] A. Neishabouri et al., “Quantification of acceleration as activity counts
in ActiGraph wearable,” Sci Rep, vol. 12, no. 1, Art. no. 1, Jul. 2022,
doi: 10.1038/s41598-022-16003-x.
Notes
-----
    This implementation is still slightly different from that provided in [1]_.
    Most notably, the down-sampling is handled differently in order to accommodate
    sensors with sampling frequencies other than those used by ActiGraph devices.
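    Examples
    --------
    A minimal usage sketch; the acceleration here is synthetic data standing in for
    a real recording, so the resulting count values are illustrative only:
    >>> import numpy as np
    >>> fs = 50.0
    >>> time = np.arange(0, 300, 1 / fs)  # 5 minutes of timestamps, in seconds
    >>> accel = np.random.normal([0.0, 0.0, 1.0], 0.05, (time.size, 3))
    >>> counts = get_activity_counts(fs, time, accel, epoch_seconds=60)
    >>> counts.shape[1]  # one column of counts per acceleration axis
    3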
"""
# 3. down-sample to 30hz
time_ds, (acc_ds,) = apply_downsample(
30.0,
time,
data=(accel,),
aa_filter=True,
fs=fs,
)
# 4. filter the data
# NOTE: this is the actigraph implementation - they specifically use
# a filter with a phase shift (ie not filtfilt), and TF representation
# instead of ZPK or SOS
zi = lfilter_zi(input_coef, output_coef).reshape((-1, 1))
acc_bpf, _ = lfilter(
input_coef,
output_coef,
acc_ds,
zi=repeat(zi, acc_ds.shape[1], axis=-1) * acc_ds[0],
axis=0,
)
# 5. scale the data
acc_bpf *= (3 / 4096) / (2.6 / 256) * 237.5
# 6. rectify
acc_trim = abs(acc_bpf)
# 7. trim
acc_trim[acc_trim < 4] = 0
acc_trim = floor(minimum(acc_trim, 128))
# 8. "downsample" to 10hz by taking moving mean
acc_10hz = moving_mean(acc_trim, 3, 3, trim=True, axis=0)
# 9. get the counts
    block_size = epoch_seconds * 10  # number of 10hz samples per epoch (600 = 1 minute)
    # this time, use a moving sum (moving mean scaled back up by the block size)
epoch_counts = moving_mean(acc_10hz, block_size, block_size, trim=True, axis=0)
epoch_counts *= block_size # remove the "mean" part to get back to sum
    return epoch_counts
from warnings import warn
from numpy import moveaxis, ascontiguousarray, full, nan, isnan
from skdh.utility import _extensions
from skdh.utility.windowing import get_windowed_view
__all__ = [
"moving_mean",
"moving_sd",
"moving_skewness",
"moving_kurtosis",
"moving_median",
"moving_max",
"moving_min",
]
def moving_mean(a, w_len, skip, trim=True, axis=-1):
r"""
Compute the moving mean.
Parameters
----------
a : array-like
Signal to compute moving mean for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
Axis to compute the moving mean along. Default is -1.
Returns
-------
mmean : numpy.ndarray
Moving mean. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous.
Notes
-----
On the moving axis if `trim=True`, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
    Most efficient computations are for `skip` values that are either factors of
    `wlen`, or greater than or equal to `wlen`.
Warnings
--------
    Catastrophic cancellation is a concern when `skip` is less than `wlen`, due to
    the cumulative sum-type algorithm being used, when input values are very
    large or very small. With typical IMU data values this should not be an
    issue, even for very long data series (multiple days' worth of data).
Examples
--------
    Compute with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)
>>> moving_mean(x, 3, 3)
array([1., 4., 7.])
Compute with overlapping windows:
>>> moving_mean(x, 3, 1)
array([1., 2., 3., 4., 5., 6., 7., 8.])
Compute without trimming the result
>>> moving_mean(x, 3, 1, trim=False)
array([1., 2., 3., 4., 5., 6., 7., 8., nan, nan])
Compute on a nd-array to see output shape. On the moving axis, the output
should be equal to :math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_mean(y, window_length, window_skip, axis=1)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_mean(z, 3, 3, axis=0).flags['C_CONTIGUOUS']
False
>>> moving_mean(z, 3, 3, axis=1).flags['C_CONTIGUOUS']
False
>>> moving_mean(z, 3, 3, axis=2).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError("Window length is larger than the computation axis.")
rmean = _extensions.moving_mean(x, w_len, skip, trim)
# move computation axis back to original place and return
return moveaxis(rmean, -1, axis)
def moving_sd(a, w_len, skip, trim=True, axis=-1, return_previous=True):
r"""
Compute the moving sample standard deviation.
Parameters
----------
a : array-like
Signal to compute moving sample standard deviation for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
Axis to compute the moving mean along. Default is -1.
return_previous : bool, optional
Return previous moments. These are computed either way, and are therefore optional returns.
Default is True.
Returns
-------
msd : numpy.ndarray
Moving sample standard deviation. Note that if the moving axis is not the last axis,
then the result will *not* be c-contiguous.
mmean : numpy.ndarray, optional.
Moving mean. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous. Only returned if `return_previous=True`.
Notes
-----
On the moving axis, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
    Most efficient computations are for `skip` values that are either factors of `wlen`, or
    greater than or equal to `wlen`.
Warnings
--------
    Catastrophic cancellation is a concern when `skip` is less than `wlen`, due to the
    cumulative sum-type algorithms being used, when input values are very large or very
    small. With typical IMU data values this should not be an issue, even for very long
    data series (multiple days' worth of data).
Examples
--------
    Compute with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)**2
>>> moving_sd(x, 3, 3, return_previous=True)
(array([ 2.081666 , 8.02080628, 14.0118997 ]),
array([ 1.66666667, 16.66666667, 49.66666667]))
Compute with overlapping windows:
    >>> moving_sd(x, 3, 1, return_previous=False)
array([ 2.081666 , 4.04145188, 6.02771377, 8.02080628, 10.0166528 ,
12.01388086, 14.0118997 , 16.01041328])
Compute without trimming:
    >>> moving_sd(x, 3, 1, trim=False, return_previous=False)
array([ 2.081666 , 4.04145188, 6.02771377, 8.02080628, 10.0166528 ,
12.01388086, 14.0118997 , 16.01041328, nan, nan])
Compute on a nd-array to see output shape. On the moving axis, the output should be equal to
:math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_sd(y, window_length, window_skip, axis=1, return_previous=False)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_sd(z, 3, 3, axis=0, return_previous=False).flags['C_CONTIGUOUS']
False
>>> moving_sd(z, 3, 3, axis=1, return_previous=False).flags['C_CONTIGUOUS']
False
>>> moving_sd(z, 3, 3, axis=2, return_previous=False).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError(
"Cannot have a window length larger than the computation axis."
)
res = _extensions.moving_sd(x, w_len, skip, trim, return_previous)
# move computation axis back to original place and return
if return_previous:
return moveaxis(res[0], -1, axis), moveaxis(res[1], -1, axis)
else:
return moveaxis(res, -1, axis)
def moving_skewness(a, w_len, skip, trim=True, axis=-1, return_previous=True):
r"""
Compute the moving sample skewness.
Parameters
----------
a : array-like
Signal to compute moving skewness for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
Axis to compute the moving mean along. Default is -1.
return_previous : bool, optional
Return previous moments. These are computed either way, and are therefore optional returns.
Default is True.
Returns
-------
mskew : numpy.ndarray
Moving skewness. Note that if the moving axis is not the last axis,
then the result will *not* be c-contiguous.
msd : numpy.ndarray, optional
Moving sample standard deviation. Note that if the moving axis is not the last axis,
then the result will *not* be c-contiguous. Only returned if `return_previous=True`.
mmean : numpy.ndarray, optional.
Moving mean. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous. Only returned if `return_previous=True`.
Notes
-----
On the moving axis, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Warnings
--------
    While this implementation is quite fast, it is also quite memory inefficient. Three arrays
of equal length to the computation axis are created during computation, which can easily
exceed system memory if already using a significant amount of memory.
Examples
--------
    Compute with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)**2
>>> moving_skewness(x, 3, 3, return_previous=True)
(array([0.52800497, 0.15164108, 0.08720961]),
array([ 2.081666 , 8.02080628, 14.0118997 ]),
array([ 1.66666667, 16.66666667, 49.66666667]))
Compute with overlapping windows:
>>> moving_skewness(x, 3, 1, return_previous=False)
array([0.52800497, 0.29479961, 0.20070018, 0.15164108, 0.12172925,
0.10163023, 0.08720961, 0.07636413])
Compute without trimming:
>>> moving_skewness(x, 3, 1, trim=False, return_previous=False)
array([0.52800497, 0.29479961, 0.20070018, 0.15164108, 0.12172925,
0.10163023, 0.08720961, 0.07636413, nan, nan])
Compute on a nd-array to see output shape. On the moving axis, the output should be equal to
:math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_skewness(y, window_length, window_skip, axis=1, return_previous=False)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_skewness(z, 3, 3, axis=0, return_previous=False).flags['C_CONTIGUOUS']
False
>>> moving_skewness(z, 3, 3, axis=1, return_previous=False).flags['C_CONTIGUOUS']
False
>>> moving_skewness(z, 3, 3, axis=2, return_previous=False).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError(
"Cannot have a window length larger than the computation axis."
)
res = _extensions.moving_skewness(x, w_len, skip, trim, return_previous)
if isnan(res).any():
warn("NaN values present in output, possibly due to catastrophic cancellation.")
# move computation axis back to original place and return
if return_previous:
return tuple(moveaxis(i, -1, axis) for i in res)
else:
return moveaxis(res, -1, axis)
def moving_kurtosis(a, w_len, skip, trim=True, axis=-1, return_previous=True):
r"""
Compute the moving sample kurtosis.
Parameters
----------
a : array-like
Signal to compute moving kurtosis for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
Axis to compute the moving mean along. Default is -1.
return_previous : bool, optional
Return previous moments. These are computed either way, and are therefore optional returns.
Default is True.
Returns
-------
mkurt : numpy.ndarray
Moving kurtosis. Note that if the moving axis is not the last axis,
then the result will *not* be c-contiguous.
mskew : numpy.ndarray, optional
Moving skewness. Note that if the moving axis is not the last axis,
then the result will *not* be c-contiguous. Only returned if `return_previous=True`.
msd : numpy.ndarray, optional
Moving sample standard deviation. Note that if the moving axis is not the last axis,
then the result will *not* be c-contiguous. Only returned if `return_previous=True`.
mmean : numpy.ndarray, optional.
Moving mean. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous. Only returned if `return_previous=True`.
Notes
-----
On the moving axis, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Warnings
--------
    While this implementation is quite fast, it is also quite memory inefficient. Four arrays
of equal length to the computation axis are created during computation, which can easily
exceed system memory if already using a significant amount of memory.
Examples
--------
    Compute with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)**2
>>> moving_kurtosis(x, 3, 3, return_previous=True)
(array([-1.5, -1.5, -1.5]), # kurtosis
array([0.52800497, 0.15164108, 0.08720961]), # skewness
array([ 2.081666 , 8.02080628, 14.0118997 ]), # standard deviation
array([ 1.66666667, 16.66666667, 49.66666667])) # mean
Compute with overlapping windows:
>>> moving_kurtosis(np.random.random(100), 50, 20, return_previous=False)
array([-1.10155074, -1.20785479, -1.24363625]) # random
Compute without trimming:
    >>> moving_kurtosis(np.random.random(100), 50, 20, trim=False, return_previous=False)
array([-1.10155074, -1.20785479, -1.24363625, nan, nan]) # random
Compute on a nd-array to see output shape. On the moving axis, the output should be equal to
:math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
    >>> res = moving_kurtosis(y, window_length, window_skip, axis=1, return_previous=False)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_kurtosis(z, 3, 3, axis=0, return_previous=False).flags['C_CONTIGUOUS']
False
>>> moving_kurtosis(z, 3, 3, axis=1, return_previous=False).flags['C_CONTIGUOUS']
False
>>> moving_kurtosis(z, 3, 3, axis=2, return_previous=False).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError(
"Cannot have a window length larger than the computation axis."
)
res = _extensions.moving_kurtosis(x, w_len, skip, trim, return_previous)
if isnan(res).any():
warn("NaN values present in output, possibly due to catastrophic cancellation.")
# move computation axis back to original place and return
if return_previous:
return tuple(moveaxis(i, -1, axis) for i in res)
else:
return moveaxis(res, -1, axis)
def moving_median(a, w_len, skip=1, trim=True, axis=-1):
r"""
    Compute the moving median.
Parameters
----------
a : array-like
        Signal to compute moving median for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples. Default is 1.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
        Axis to compute the moving median along. Default is -1.
Returns
-------
mmed : numpy.ndarray
Moving median. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous.
Notes
-----
On the moving axis, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Examples
--------
    Compute with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)
>>> moving_median(x, 3, 3)
array([1., 4., 7.])
Compute with overlapping windows:
>>> moving_median(x, 3, 1)
array([1., 2., 3., 4., 5., 6., 7., 8.])
Compute without trimming:
    >>> moving_median(x, 3, 1, trim=False)
array([1., 2., 3., 4., 5., 6., 7., 8., nan, nan])
Compute on a nd-array to see output shape. On the moving axis, the output should be equal to
:math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_median(y, window_length, window_skip, axis=1)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_median(z, 3, 3, axis=0).flags['C_CONTIGUOUS']
False
>>> moving_median(z, 3, 3, axis=1).flags['C_CONTIGUOUS']
False
>>> moving_median(z, 3, 3, axis=2).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError(
"Cannot have a window length larger than the computation axis."
)
rmed = _extensions.moving_median(x, w_len, skip, trim)
# move computation axis back to original place and return
return moveaxis(rmed, -1, axis)
def moving_max(a, w_len, skip, trim=True, axis=-1):
r"""
Compute the moving maximum value.
Parameters
----------
a : array-like
Signal to compute moving max for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
Axis to compute the moving max along. Default is -1.
Returns
-------
mmax : numpy.ndarray
Moving max. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous.
Notes
-----
On the moving axis, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Examples
--------
    Compute with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)
>>> moving_max(x, 3, 3)
array([2., 5., 8.])
Compute with overlapping windows:
>>> moving_max(x, 3, 1)
    array([2., 3., 4., 5., 6., 7., 8., 9.])
    Compute without trimming:
    >>> moving_max(x, 3, 1, trim=False)
    array([ 2.,  3.,  4.,  5.,  6.,  7.,  8.,  9., nan, nan])
Compute on a nd-array to see output shape. On the moving axis, the output should be equal to
:math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_max(y, window_length, window_skip, axis=1)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_max(z, 3, 3, axis=0).flags['C_CONTIGUOUS']
False
>>> moving_max(z, 3, 3, axis=1).flags['C_CONTIGUOUS']
False
>>> moving_max(z, 3, 3, axis=2).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# Numpy uses SIMD instructions for max/min, so it will likely be faster
# unless there is a lot of overlap
cond1 = a.ndim == 1 and (skip / w_len) < 0.005
cond2 = a.ndim > 1 and (skip / w_len) < 0.3 # due to c-contiguity?
    cond3 = a.ndim > 2  # windowing doesn't handle more than 2 dimensions currently
if any([cond1, cond2, cond3]):
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError("Window length is larger than the computation axis.")
rmax = _extensions.moving_max(x, w_len, skip, trim)
# move computation axis back to original place and return
return moveaxis(rmax, -1, axis)
else:
x = ascontiguousarray(
moveaxis(a, axis, 0)
) # need to move axis to the front for windowing
xw = get_windowed_view(x, w_len, skip)
if trim:
res = xw.max(axis=1) # computation axis is still the second axis
else:
nfill = (x.shape[0] - w_len) // skip + 1
rshape = list(x.shape)
rshape[0] = (x.shape[0] - 1) // skip + 1
res = full(rshape, nan)
res[:nfill] = xw.max(axis=1)
return moveaxis(res, 0, axis)
def moving_min(a, w_len, skip, trim=True, axis=-1):
r"""
    Compute the moving minimum value.
Parameters
----------
a : array-like
        Signal to compute moving min for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
        Axis to compute the moving min along. Default is -1.
Returns
-------
    mmin : numpy.ndarray
        Moving min. Note that if the moving axis is not the last axis, then the result
        will *not* be c-contiguous.
Notes
-----
On the moving axis, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Examples
--------
    Compute with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)
>>> moving_min(x, 3, 3)
    array([0., 3., 6.])
    Compute with overlapping windows:
    >>> moving_min(x, 3, 1)
    array([0., 1., 2., 3., 4., 5., 6., 7.])
    Compute without trimming:
    >>> moving_min(x, 3, 1, trim=False)
    array([ 0.,  1.,  2.,  3.,  4.,  5.,  6.,  7., nan, nan])
Compute on a nd-array to see output shape. On the moving axis, the output should be equal to
:math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_min(y, window_length, window_skip, axis=1)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_min(z, 3, 3, axis=0).flags['C_CONTIGUOUS']
False
>>> moving_min(z, 3, 3, axis=1).flags['C_CONTIGUOUS']
False
>>> moving_min(z, 3, 3, axis=2).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# Numpy uses SIMD instructions for max/min, so it will likely be faster
# unless there is a lot of overlap
cond1 = a.ndim == 1 and (skip / w_len) < 0.005
cond2 = a.ndim > 1 and (skip / w_len) < 0.3 # due to c-contiguity?
    cond3 = a.ndim > 2  # windowing doesn't handle more than 2 dimensions currently
if any([cond1, cond2, cond3]):
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError("Window length is larger than the computation axis.")
rmin = _extensions.moving_min(x, w_len, skip, trim)
# move computation axis back to original place and return
return moveaxis(rmin, -1, axis)
else:
x = ascontiguousarray(
moveaxis(a, axis, 0)
) # need to move axis to the front for windowing
xw = get_windowed_view(x, w_len, skip)
if trim:
res = xw.min(axis=1) # computation axis is still the second axis
else:
nfill = (x.shape[0] - w_len) // skip + 1
rshape = list(x.shape)
rshape[0] = (x.shape[0] - 1) // skip + 1
res = full(rshape, nan)
res[:nfill] = xw.min(axis=1)
        return moveaxis(res, 0, axis)
from numpy import require
from numpy.lib.stride_tricks import as_strided
__all__ = ["compute_window_samples", "get_windowed_view"]
class DimensionError(Exception):
"""
Custom error for if the input signal has too many dimensions
"""
pass
class ContiguityError(Exception):
"""
Custom error for if the input signal is not C-contiguous
"""
pass
def compute_window_samples(fs, window_length, window_step):
"""
Compute the number of samples for a window. Takes the sampling frequency, window length, and
window step in common representations and converts them into number of samples.
Parameters
----------
fs : float
Sampling frequency in Hz.
window_length : float
Window length in seconds. If not provided (None), will do no windowing. Default is None
window_step : {float, int}
Window step - the spacing between the start of windows. This can be specified several
different ways (see Notes). Default is 1.0
Returns
-------
length_n : int
Window length in samples
step_n : int
Window step in samples
Raises
------
ValueError
If `window_step` is negative, or if `window_step` is a float not in (0.0, 1.0]
Notes
-----
Computation of the window step depends on the type of input provided, and the range.
- `window_step` is a float in (0.0, 1.0]: specifies the fraction of a window to skip to get to
the start of the next window
- `window_step` is an integer > 1: specifies the number of samples to skip to get to the start
of the next window
Examples
--------
Compute the window length and step in samples for a 3s window with 50% overlap, with a
sampling rate of 50Hz
>>> compute_window_samples(50.0, 3.0, 0.5)
(150, 75)
Compute the window length for a 4.5s window with a step of 1 sample, and a sampling
rate of 100Hz
>>> compute_window_samples(100.0, 4.5, 1)
(450, 1)
"""
if window_step is None or window_length is None:
return None, None
length_n = int(round(fs * window_length))
if isinstance(window_step, int):
if window_step > 0:
step_n = window_step
else:
raise ValueError("window_step cannot be negative")
elif isinstance(window_step, float):
if 0.0 < window_step < 1.0:
step_n = int(round(length_n * window_step))
step_n = max(min(step_n, length_n), 1)
elif window_step == 1.0:
step_n = length_n
else:
raise ValueError("float values for window_step must be in (0.0, 1.0]")
return length_n, step_n
def get_windowed_view(x, window_length, step_size, ensure_c_contiguity=False):
"""
Return a moving window view over the data
Parameters
----------
x : numpy.ndarray
1- or 2-D array of signals to window. Windows occur along the 0 axis.
Must be C-contiguous.
window_length : int
Window length/size.
step_size : int
Step/stride size for windows - how many samples to step from window
center to window center.
ensure_c_contiguity : bool, optional
Create a new array with C-contiguity if the passed array is not C-contiguous.
This *may* result in the memory requirements significantly increasing. Default is False,
which will raise a ValueError if `x` is not C-contiguous
Returns
-------
x_win : numpy.ndarray
        2- or 3-D array of windows of the original data, of shape (nwindows, window_length)
        for 1D input, or (nwindows, window_length, ncolumns) for 2D input.
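    Examples
    --------
    A small sketch of windowing a 1D signal into windows of 4 samples that start
    every 2 samples:
    >>> import numpy as np
    >>> x = np.arange(10)
    >>> get_windowed_view(x, 4, 2)
    array([[0, 1, 2, 3],
           [2, 3, 4, 5],
           [4, 5, 6, 7],
           [6, 7, 8, 9]])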
"""
if not (x.ndim in [1, 2]):
raise DimensionError("Array cannot have more than 2 dimensions.")
if ensure_c_contiguity:
x = require(x, requirements=["C"])
else:
if not x.flags["C_CONTIGUOUS"]:
raise ContiguityError(
"Input array must be C-contiguous. See numpy.ascontiguousarray"
)
if x.ndim == 1:
nrows = ((x.size - window_length) // step_size) + 1
n = x.strides[0]
return as_strided(
x, shape=(nrows, window_length), strides=(step_size * n, n), writeable=False
)
else:
k = x.shape[1]
nrows = ((x.shape[0] - window_length) // step_size) + 1
n = x.strides[1]
new_shape = (nrows, window_length, k)
new_strides = (step_size * k * n, k * n, n)
        return as_strided(x, shape=new_shape, strides=new_strides, writeable=False)
from numpy import (
mean,
asarray,
cumsum,
minimum,
sort,
argsort,
unique,
insert,
sum,
log,
nan,
float_,
)
from skdh.utility.internal import rle
__all__ = [
"average_duration",
"state_transition_probability",
"gini_index",
"average_hazard",
"state_power_law_distribution",
]
def gini(x, w=None, corr=True):
"""
Compute the GINI Index.
Parameters
----------
x : numpy.ndarray
Array of bout lengths
w : {None, numpy.ndarray}, optional
Weights for x. Must be the same size. If None, weights are not used.
corr : bool, optional
Apply finite sample correction. Default is True.
Returns
-------
g : float
Gini index
References
----------
.. [1] https://stackoverflow.com/questions/48999542/more-efficient-weighted-gini-coefficient-in
-python/48999797#48999797
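    Examples
    --------
    A short illustration with synthetic bout lengths: equal bouts give an index of 0,
    while a single bout accounting for all of the time gives an index of 1:
    >>> import numpy as np
    >>> gini(np.array([10.0, 10.0, 10.0, 10.0]))
    0.0
    >>> gini(np.array([0.0, 0.0, 0.0, 100.0]))
    1.0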
"""
if x.size == 0:
return 0.0
elif x.size == 1:
return 1.0
# The rest of the code requires numpy arrays.
if w is not None:
sorted_indices = argsort(x)
sorted_x = x[sorted_indices]
sorted_w = w[sorted_indices]
# Force float dtype to avoid overflows
cumw = cumsum(sorted_w, dtype=float_)
cumxw = cumsum(sorted_x * sorted_w, dtype=float_)
g = sum(cumxw[1:] * cumw[:-1] - cumxw[:-1] * cumw[1:]) / (cumxw[-1] * cumw[-1])
if corr:
return g * x.size / (x.size - 1)
else:
return g
else:
sorted_x = sort(x)
n = x.size
cumx = cumsum(sorted_x, dtype=float_)
# The above formula, with all weights equal to 1 simplifies to:
g = (n + 1 - 2 * sum(cumx) / cumx[-1]) / n
if corr:
return minimum(g * n / (n - 1), 1)
else:
return g
def average_duration(a=None, *, lengths=None, values=None, voi=1):
"""
Compute the average duration in the desired state.
Parameters
----------
a : array-like, optional
        1D array of binary values. If not provided, `lengths` must be provided,
        and optionally `values`.
lengths : {numpy.ndarray, list}, optional
Lengths of runs of the binary values. If not provided, `a` must be. Must
be the same size as `values`.
values : {numpy.ndarray, list}, optional
Values of the runs. If not provided, all `lengths` will be assumed to be
for the `voi`.
voi : {int, bool}, optional
Value of interest, value for which to calculate the average run length.
Default is `1` (`True`).
Returns
-------
avg_dur : float
average duration, in samples, of the runs with value `voi`.
Examples
--------
>>> x = [0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1]
>>> average_duration(x, voi=1)
2.0
>>> average_duration(x, voi=0)
5.333333333
>>> lengths = [4, 2, 9, 1, 3, 3]
>>> values = [0, 1, 0, 1, 0, 1]
>>> average_duration(lengths=lengths, values=values, voi=1)
2.0
>>> average_duration(lengths=lengths)
2.0
"""
if a is not None:
l, _, v = rle(a)
lens = l[v == voi]
else:
if lengths is None:
raise ValueError("One of `a` or `lengths` must be provided.")
lens = asarray(lengths)
if values is not None:
lens = lens[values == voi]
if lens.size == 0:
return 0.0
return mean(lens)
def state_transition_probability(a=None, *, lengths=None, values=None, voi=1):
r"""
Compute the probability of transitioning from the desired state to the
second state.
Parameters
----------
a : array-like, optional
        1D array of binary values. If not provided, `lengths` must be provided,
        and optionally `values`.
    lengths : {numpy.ndarray, list}, optional
        Lengths of runs of the binary values. If not provided, `a` must be. Must
        be the same size as `values`.
    values : {numpy.ndarray, list}, optional
        Values of the runs. If not provided, all `lengths` will be assumed to be
        for the `voi`.
    voi : {int, bool}, optional
        Value of interest, value for which to calculate the state transition
        probability. Default is `1` (`True`).
    Returns
    -------
    satp : float
        State transition probability for runs of value `voi`.
Examples
--------
>>> x = [0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1]
>>> state_transition_probability(x, voi=1)
0.5
>>> state_transition_probability(x, voi=0)
0.1875
>>> lengths = [4, 2, 9, 1, 3, 3]
>>> values = [0, 1, 0, 1, 0, 1]
>>> state_transition_probability(lengths=lengths, values=values, voi=1)
0.5
>>> state_transition_probability(lengths=lengths)
0.5
References
----------
.. [1] J. Di et al., “Patterns of sedentary and active time accumulation are associated with
mortality in US adults: The NHANES study,” bioRxiv, p. 182337, Aug. 2017,
doi: 10.1101/182337.
Notes
-----
Higher values indicate more frequent switching between states, and as a result may indicate
greater fragmentation of sleep.
The implementation is straightforward [1]_, and is simply defined as
.. math:: satp = \frac{1}{\mu_{awake}}
where :math:`\mu_{awake}` is the mean awake bout time.
"""
if a is not None:
l, _, v = rle(a)
lens = l[v == voi]
else:
if lengths is None:
raise ValueError("One of `a` or `lengths` must be provided.")
lens = asarray(lengths)
if values is not None:
lens = lens[values == voi]
if lens.size == 0:
return nan
return 1 / mean(lens)
def gini_index(a=None, *, lengths=None, values=None, voi=1):
"""
Compute the normalized variability of the state bouts, also known as the GINI
index from economics.
Parameters
----------
a : array-like, optional
        1D array of binary values. If not provided, `lengths` must be provided,
        and optionally `values`.
    lengths : {numpy.ndarray, list}, optional
        Lengths of runs of the binary values. If not provided, `a` must be. Must
        be the same size as `values`.
    values : {numpy.ndarray, list}, optional
        Values of the runs. If not provided, all `lengths` will be assumed to be
        for the `voi`.
    voi : {int, bool}, optional
        Value of interest, value for which to calculate the Gini index.
        Default is `1` (`True`).
    Returns
    -------
    gini : float
        Gini index for runs of value `voi`, bounded between 0 and 1.
Examples
--------
>>> x = [0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1]
>>> gini_index(x, voi=1)
0.333333
>>> gini_index(x, voi=0)
0.375
>>> lengths = [4, 2, 9, 1, 3, 3]
>>> values = [0, 1, 0, 1, 0, 1]
>>> gini_index(lengths=lengths, values=values, voi=1)
0.333333
>>> gini_index(lengths=lengths)
0.333333
References
----------
.. [1] J. Di et al., “Patterns of sedentary and active time accumulation are associated with
mortality in US adults: The NHANES study,” bioRxiv, p. 182337, Aug. 2017,
doi: 10.1101/182337.
Notes
-----
Gini Index values are bounded between 0 and 1, with values near 1 indicating the total
time accumulating due to a small number of longer bouts, whereas values near 0 indicate all
bouts contribute more equally to the total time.
"""
if a is not None:
l, _, v = rle(a)
lens = l[v == voi]
else:
if lengths is None:
raise ValueError("One of `a` or `lengths` must be provided.")
lens = asarray(lengths)
if values is not None:
lens = lens[values == voi]
if lens.size == 0:
return 0.0
return gini(lens, w=None, corr=True)
def average_hazard(a=None, *, lengths=None, values=None, voi=1):
r"""
Compute the average hazard summary of the hazard function, as a function of the
state bout duration. The average hazard represents a summary of the frequency
of transitioning from one state to the other.
Parameters
----------
a : array-like, optional
        1D array of binary values. If not provided, `lengths` must be provided,
        and optionally `values`.
    lengths : {numpy.ndarray, list}, optional
        Lengths of runs of the binary values. If not provided, `a` must be. Must
        be the same size as `values`.
    values : {numpy.ndarray, list}, optional
        Values of the runs. If not provided, all `lengths` will be assumed to be
        for the `voi`.
    voi : {int, bool}, optional
        Value of interest, value for which to calculate the average hazard.
        Default is `1` (`True`).
    Returns
    -------
    avg_haz : float
        Average hazard for runs of value `voi`.
Examples
--------
>>> x = [0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1]
>>> average_hazard(x, voi=1)
0.61111111
>>> average_hazard(x, voi=0)
0.61111111
>>> lengths = [4, 2, 9, 1, 3, 3]
>>> values = [0, 1, 0, 1, 0, 1]
>>> average_hazard(lengths=lengths, values=values, voi=1)
0.61111111
>>> average_hazard(lengths=lengths)
0.61111111
References
----------
.. [1] J. Di et al., “Patterns of sedentary and active time accumulation are
associated with mortality in US adults: The NHANES study,” bioRxiv,
p. 182337, Aug. 2017, doi: 10.1101/182337.
Notes
-----
Higher values indicate higher frequency in switching from sleep to awake states.
The average hazard is computed per [1]_:
.. math::
        h(t_{n_i}) = \frac{n\left(t_{n_i}\right)}{n - n^c\left(t_{n_{i-1}}\right)}

        \bar{h} = \frac{1}{m}\sum_{t\in D}h(t)
    where :math:`h(t_{n_i})` is the hazard for the sleep bout of length :math:`t_{n_i}`,
    :math:`n(t_{n_i})` is the number of bouts of length :math:`t_{n_i}`, :math:`n` is
    the total number of sleep bouts, :math:`n^c(t_{n_i})` is the sum number of bouts
    less than or equal to length :math:`t_{n_i}`, and :math:`t\in D` indicates all
    bouts up to the maximum length (:math:`D`).
"""
if a is not None:
l, _, v = rle(a)
lens = l[v == voi]
else:
if lengths is None:
raise ValueError("One of `a` or `lengths` must be provided.")
lens = asarray(lengths)
if values is not None:
lens = lens[values == voi]
if lens.size == 0:
return nan
unq, cnts = unique(lens, return_counts=True)
sidx = argsort(unq)
cnts = cnts[sidx]
cumsum_cnts = insert(cumsum(cnts), 0, 0)
h = cnts / (cumsum_cnts[-1] - cumsum_cnts[:-1])
return sum(h) / unq.size
def state_power_law_distribution(a=None, *, lengths=None, values=None, voi=1):
r"""
Compute the scaling factor for the power law distribution over the desired
state bout lengths.
Parameters
----------
a : array-like, optional
        1D array of binary values. If not provided, `lengths` must be provided,
        and optionally `values`.
    lengths : {numpy.ndarray, list}, optional
        Lengths of runs of the binary values. If not provided, `a` must be. Must
        be the same size as `values`.
    values : {numpy.ndarray, list}, optional
        Values of the runs. If not provided, all `lengths` will be assumed to be
        for the `voi`.
    voi : {int, bool}, optional
        Value of interest, value for which to calculate the power law scaling
        factor. Default is `1` (`True`).
    Returns
    -------
    alpha : float
        Power law distribution scaling factor for runs of value `voi`.
Examples
--------
>>> x = [0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1]
>>> state_power_law_distribution(x, voi=1)
1.7749533004219864
>>> state_power_law_distribution(x, voi=0)
2.5517837760569524
>>> lengths = [4, 2, 9, 1, 3, 3]
>>> values = [0, 1, 0, 1, 0, 1]
>>> state_power_law_distribution(lengths=lengths, values=values, voi=1)
1.7749533004219864
>>> state_power_law_distribution(lengths=lengths)
1.7749533004219864
References
----------
.. [1] J. Di et al., “Patterns of sedentary and active time accumulation are
associated with mortality in US adults: The NHANES study,” bioRxiv,
p. 182337, Aug. 2017, doi: 10.1101/182337.
Notes
-----
Larger `alpha` values indicate that the total sleeping time is accumulated with
a larger portion of shorter sleep bouts.
    The power law scaling factor is computed per [1]_:
    .. math:: \alpha = 1 + \frac{n_{sleep}}{\sum_{i}\log{\left(t_i / \left(\min(t) - 0.5\right)\right)}}
where :math:`n_{sleep}` is the number of sleep bouts, :math:`t_i` is the duration
of the :math:`ith` sleep bout, and :math:`min(t)` is the length of the shortest
sleep bout.
"""
if a is not None:
l, _, v = rle(a)
lens = l[v == voi]
else:
if lengths is None:
raise ValueError("One of `a` or `lengths` must be provided.")
lens = asarray(lengths)
if values is not None:
lens = lens[values == voi]
if lens.size == 0:
return 1.0
    return 1 + lens.size / sum(log(lens / (lens.min() - 0.5)))
from warnings import warn
from numpy import argmax, abs, mean, cos, arcsin, sign, zeros_like
__all__ = ["correct_accelerometer_orientation"]
def correct_accelerometer_orientation(accel, v_axis=None, ap_axis=None):
r"""
    Applies the correction for acceleration from [1]_ to better align acceleration with
    the human body anatomical axes. This correction requires that the device measuring
    acceleration is already fairly closely aligned with the anatomical axes, due to the
    required assumptions. The quality of the correction degrades the further the input
    acceleration is from that alignment.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values, in units of "g".
v_axis : {None, int}, optional
Vertical axis for `accel`. If not provided (default of None), this will be guessed as the
axis with the largest mean value.
ap_axis : {None, int}, optional
Anterior-posterior axis for `accel`. If not provided (default of None), the ML and AP axes
will not be picked. This will have a slight effect on the correction.
Returns
-------
co_accel : numpy.ndarray
(N, 3) array of acceleration with best alignment to the human anatomical axes
Notes
-----
If `v_axis` is not provided (`None`), it is guessed as the largest mean valued axis (absolute
value). While this should work for most cases, it will fail if there is significant
acceleration in the non-vertical axes. As such, if there are very large accelerations present,
this value should be provided.
If `ap_axis` is not provided, it is guessed as the axis with the most similar autocovariance
to the vertical axes.
The correction algorithm from [1]_ starts by using simple trigonometric identities to correct
the measured acceleration per
.. math::
a_A = a_a\cos{\theta_a} - sign(a_v)a_v\sin{\theta_a}
a_V' = sign(a_v)a_a\sin{\theta_a} + a_v\cos{\theta_a}
a_M = a_m\cos{\theta_m} - sign(a_v)a_V'\sin{\theta_m}
a_V = sign(a_v)a_m\sin{\theta_m} + a_V'\cos{\theta_m}
where $a_i$ is the measured $i$ direction acceleration, $a_I$ is the corrected $I$ direction
acceleration ($i/I=[a/A, m/M, v/V]$, $a$ is anterior-posterior, $m$ is medial-lateral, and
$v$ is vertical), $a_V'$ is a provisional estimate of the corrected vertical acceleration.
$\theta_{a/m}$ are the angles between the measured AP and ML axes and the horizontal plane.
Through some manipulation, [1]_ arrives at the simplification that best estimates for these
angles per
.. math::
\sin{\theta_a} = \bar{a}_a
\sin{\theta_m} = \bar{a}_m
This is the part of the step that requires acceleration to be in "g", as well as mostly
already aligned. If significantly out of alignment, then this small-angle relationship
with sine starts to fall apart, and the correction will not be as appropriate.
References
----------
.. [1] R. Moe-Nilssen, “A new method for evaluating motor control in gait under real-life
environmental conditions. Part 1: The instrument,” Clinical Biomechanics, vol. 13, no.
4–5, pp. 320–327, Jun. 1998, doi: 10.1016/S0268-0033(98)00089-8.
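    Examples
    --------
    A minimal sketch using synthetic data from a device that is tilted 5 degrees
    about the medial-lateral axis but otherwise aligned (vertical = axis 2,
    anterior-posterior = axis 0):
    >>> import numpy as np
    >>> theta = np.deg2rad(5.0)
    >>> accel = np.zeros((500, 3))
    >>> accel[:, 0] = np.sin(theta)  # small AP component due to the tilt
    >>> accel[:, 2] = np.cos(theta)  # mostly vertical axis
    >>> co_accel = correct_accelerometer_orientation(accel, v_axis=2, ap_axis=0)
    >>> np.allclose(co_accel[:, 2], 1.0)  # vertical axis is restored to ~1g
    True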
"""
if v_axis is None:
v_axis = argmax(abs(mean(accel, axis=0)))
else:
if not (0 <= v_axis < 3):
raise ValueError("v_axis must be in {0, 1, 2}")
if ap_axis is None:
ap_axis, ml_axis = [i for i in range(3) if i != v_axis]
else:
if not (0 <= ap_axis < 3):
raise ValueError("ap_axis must be in {0, 1, 2}")
ml_axis = [i for i in range(3) if i not in [v_axis, ap_axis]][0]
s_theta_a = mean(accel[:, ap_axis])
s_theta_m = mean(accel[:, ml_axis])
# make sure the theta values are in range
if s_theta_a < -1 or s_theta_a > 1 or s_theta_m < -1 or s_theta_m > 1:
warn("Accel. correction angles outside possible range [-1, 1]. Not correcting.")
return accel
c_theta_a = cos(arcsin(s_theta_a))
c_theta_m = cos(arcsin(s_theta_m))
v_sign = sign(mean(accel[:, v_axis]))
co_accel = zeros_like(accel)
# correct ap axis acceleration
co_accel[:, ap_axis] = (
accel[:, ap_axis] * c_theta_a - v_sign * accel[:, v_axis] * s_theta_a
)
# provisional correction for vertical axis
co_accel[:, v_axis] = (
v_sign * accel[:, ap_axis] * s_theta_a + accel[:, v_axis] * c_theta_a
)
# correct ml axis acceleration
co_accel[:, ml_axis] = (
accel[:, ml_axis] * c_theta_m - v_sign * co_accel[:, v_axis] * s_theta_m
)
# final correction for vertical axis
co_accel[:, v_axis] = (
v_sign * accel[:, ml_axis] * s_theta_m + co_accel[:, v_axis] * c_theta_m
)
    return co_accel
from skdh.activity import metrics
def get_available_cutpoints(name=None):
"""
Print the available cutpoints for activity level segmentation, or the
thresholds for a specific set of cutpoints.
Parameters
----------
name : {None, str}, optional
        The name of the cutpoint values to print. If None, will print all
the available cutpoint options.
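    Examples
    --------
    List the available cutpoint sets, or print the thresholds for a single set
    (output abbreviated here):
    >>> get_available_cutpoints()
    esliger_lwrist_adult
    esliger_rwirst_adult
    ...
    migueles_wrist_adult
    >>> get_available_cutpoints(name="migueles_wrist_adult")
    migueles_wrist_adult
    ---------------
    Metric: metric_enmo
    ...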
"""
if name is None:
for k in _base_cutpoints:
print(k)
else:
cuts = _base_cutpoints[name]
print(f"{name}\n{'-' * 15}")
print(f"Metric: {cuts['metric']}")
for level in ["sedentary", "light", "moderate", "vigorous"]:
lthresh, uthresh = get_level_thresholds(level, cuts)
print(f"{level} range [g]: {lthresh:0.3f} -> {uthresh:0.3f}")
def get_level_thresholds(level, cutpoints):
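    """
    Get the lower and upper thresholds for an activity level from a set of cutpoints.
    Parameters
    ----------
    level : str
        Activity level label, e.g. "sedentary", "light", "moderate", "vigorous",
        "MVPA", or "SLPA".
    cutpoints : dict
        Cutpoints dictionary containing the level thresholds (see `_base_cutpoints`).
    Returns
    -------
    lower : float
        Lower threshold for the activity level.
    upper : float
        Upper threshold for the activity level.
    """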
if level.lower() in ["sed", "sedentary"]:
return -1e5, cutpoints["sedentary"]
elif level.lower() == "light":
return cutpoints["sedentary"], cutpoints["light"]
elif level.lower() in ["mod", "moderate"]:
return cutpoints["light"], cutpoints["moderate"]
elif level.lower() in ["vig", "vigorous"]:
return cutpoints["moderate"], 1e5
elif level.lower() == "mvpa":
return cutpoints["light"], 1e5
elif level.lower() == "slpa": # sedentary-light phys. act.
return -1e5, cutpoints["light"]
else:
raise ValueError(f"Activity level label [{level}] not recognized.")
def get_metric(name):
return getattr(metrics, name)
# ==========================================================
# Activity cutpoints
_base_cutpoints = {}
_base_cutpoints["esliger_lwrist_adult"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": True},
"sedentary": 217 / 80 / 60, # paper at 80hz, summed for each minute long window
"light": 644 / 80 / 60,
"moderate": 1810 / 80 / 60,
}
_base_cutpoints["esliger_rwirst_adult"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": True},
"sedentary": 386 / 80 / 60, # paper at 80hz, summed for each 1min window
"light": 439 / 80 / 60,
"moderate": 2098 / 80 / 60,
}
_base_cutpoints["esliger_lumbar_adult"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": True},
"sedentary": 77 / 80 / 60, # paper at 80hz, summed for each 1min window
"light": 219 / 80 / 60,
"moderate": 2056 / 80 / 60,
}
_base_cutpoints["schaefer_ndomwrist_child6-11"] = {
"metric": "metric_bfen",
"kwargs": {"low_cutoff": 0.2, "high_cutoff": 15, "trim_zero": False},
"sedentary": 0.190,
"light": 0.314,
"moderate": 0.998,
}
_base_cutpoints["phillips_rwrist_child8-14"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": True},
"sedentary": 6 / 80, # paper at 80hz, summed for each 1s window
"light": 21 / 80,
"moderate": 56 / 80,
}
_base_cutpoints["phillips_lwrist_child8-14"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": True},
"sedentary": 7 / 80,
"light": 19 / 80,
"moderate": 60 / 80,
}
_base_cutpoints["phillips_hip_child8-14"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": True},
"sedentary": 3 / 80,
"light": 16 / 80,
"moderate": 51 / 80,
}
_base_cutpoints["vaha-ypya_hip_adult"] = {
"metric": "metric_mad",
"kwargs": {},
"light": 0.091, # originally presented in mg
"moderate": 0.414,
}
_base_cutpoints["hildebrand_hip_adult_actigraph"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": False, "trim_zero": True},
"sedentary": 0.0474,
"light": 0.0691,
"moderate": 0.2587,
}
_base_cutpoints["hildebrand_hip_adult_geneactv"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": False, "trim_zero": True},
"sedentary": 0.0469,
"light": 0.0687,
"moderate": 0.2668,
}
_base_cutpoints["hildebrand_wrist_adult_actigraph"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": False, "trim_zero": True},
"sedentary": 0.0448,
"light": 0.1006,
"moderate": 0.4288,
}
_base_cutpoints["hildebrand_wrist_adult_geneactiv"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": False, "trim_zero": True},
"sedentary": 0.0458,
"light": 0.0932,
"moderate": 0.4183,
}
_base_cutpoints["hildebrand_hip_child7-11_actigraph"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": False, "trim_zero": True},
"sedentary": 0.0633,
"light": 0.1426,
"moderate": 0.4646,
}
_base_cutpoints["hildebrand_hip_child7-11_geneactiv"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": False, "trim_zero": True},
"sedentary": 0.0641,
"light": 0.1528,
"moderate": 0.5143,
}
_base_cutpoints["hildebrand_wrist_child7-11_actigraph"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": False, "trim_zero": True},
"sedentary": 0.0356,
"light": 0.2014,
"moderate": 0.707,
}
_base_cutpoints["hildebrand_wrist_child7-11_geneactiv"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": False, "trim_zero": True},
"sedentary": 0.0563,
"light": 0.1916,
"moderate": 0.6958,
}
_base_cutpoints["migueles_wrist_adult"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": False, "trim_zero": True},
"sedentary": 0.050,
"light": 0.110,
"moderate": 0.440,
}
from numpy import maximum, abs, repeat, arctan, sqrt, pi
from numpy.linalg import norm
from scipy.signal import butter, sosfiltfilt
from skdh.utility import moving_mean
__all__ = [
"metric_anglez",
"metric_en",
"metric_enmo",
"metric_bfen",
"metric_hfen",
"metric_hfenplus",
"metric_mad",
]
def metric_anglez(accel, wlen, *args, **kwargs):
"""
Compute the angle between the accelerometer z axis and the horizontal plane.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
Returns
-------
anglez : numpy.ndarray
        (N/wlen, ) array of angles between the accelerometer z axis and the horizontal
        plane, in degrees.
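    Examples
    --------
    A small synthetic sketch: with equal y and z components the z axis sits 45
    degrees from the horizontal plane (values shown are approximate):
    >>> import numpy as np
    >>> accel = np.tile([0.0, 1.0, 1.0], (100, 1))
    >>> metric_anglez(accel, 50)
    array([45., 45.])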
"""
anglez = arctan(accel[:, 2] / sqrt(accel[:, 0] ** 2 + accel[:, 1] ** 2)) * (
180 / pi
)
return moving_mean(anglez, wlen, wlen)
def metric_en(accel, wlen, *args, **kwargs):
"""
Compute the euclidean norm.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
Returns
-------
en : numpy.ndarray
        (N/wlen, ) array of euclidean norms.
"""
return moving_mean(norm(accel, axis=1), wlen, wlen)
def metric_enmo(accel, wlen, *args, take_abs=False, trim_zero=True, **kwargs):
"""
Compute the euclidean norm minus 1. Works best when the accelerometer data has been calibrated
so that devices at rest measure acceleration norms of 1g.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
take_abs : bool, optional
Use the absolute value of the difference between euclidean norm and 1g. Default is False.
trim_zero : bool, optional
Trim values to no less than 0. Default is True.
Returns
-------
enmo : numpy.ndarray
        (N/wlen, ) array of euclidean norms minus 1.
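    Examples
    --------
    A small synthetic sketch: at rest the norm is exactly 1g so ENMO is 0; the second
    half of the signal has an extra 0.3g on the first axis (values are approximate):
    >>> import numpy as np
    >>> accel = np.tile([0.0, 0.0, 1.0], (100, 1))
    >>> accel[50:, 0] = 0.3
    >>> metric_enmo(accel, 50)
    array([0.        , 0.04403065])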
"""
enmo = norm(accel, axis=1) - 1
if take_abs:
enmo = abs(enmo)
if trim_zero:
return moving_mean(maximum(enmo, 0), wlen, wlen)
else:
return moving_mean(enmo, wlen, wlen)
def metric_bfen(accel, wlen, fs, low_cutoff=0.2, high_cutoff=15, **kwargs):
"""
Compute the band-pass filtered euclidean norm.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
fs : float
Sampling frequency of `accel` in Hz.
low_cutoff : float, optional
Band-pass low cutoff in Hz. Default is 0.2Hz.
high_cutoff : float, optional
Band-pass high cutoff in Hz. Default is 15Hz
Returns
-------
bfen : numpy.ndarray
        (N/wlen, ) array of band-pass filtered and euclidean normed accelerations.
"""
sos = butter(
4, [2 * low_cutoff / fs, 2 * high_cutoff / fs], btype="bandpass", output="sos"
)
# no reason to for trimming zeros as the norm after the filter will always
# be positive
return moving_mean(norm(sosfiltfilt(sos, accel, axis=0), axis=1), wlen, wlen)
def metric_hfen(accel, wlen, fs, low_cutoff=0.2, trim_zero=True, **kwargs):
"""
Compute the high-pass filtered euclidean norm.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
fs : float
Sampling frequency of `accel` in Hz.
low_cutoff : float, optional
High-pass cutoff in Hz. Default is 0.2Hz.
trim_zero : bool, optional
Trim values to no less than 0. Default is True.
Returns
-------
hfen : numpy.ndarray
        (N/wlen, ) array of high-pass filtered and euclidean normed accelerations.
"""
sos = butter(4, 2 * low_cutoff / fs, btype="high", output="sos")
# no reason to for trimming zeros as the norm after the filter will always
# be positive
return moving_mean(norm(sosfiltfilt(sos, accel, axis=0), axis=1), wlen, wlen)
def metric_hfenplus(accel, wlen, fs, cutoff=0.2, trim_zero=True, **kwargs):
"""
Compute the high-pass filtered euclidean norm plus the low-pass filtered euclidean norm
minus 1g.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
fs : float
Sampling frequency of `accel` in Hz.
cutoff : float, optional
Cutoff in Hz for both high and low filters. Default is 0.2Hz.
trim_zero : bool, optional
Trim values to no less than 0. Default is True.
Returns
-------
hfenp : numpy.ndarray
        (N/wlen, ) array of high-pass filtered acceleration norm added to the low-pass filtered
norm minus 1g.
"""
sos_low = butter(4, 2 * cutoff / fs, btype="low", output="sos")
sos_high = butter(4, 2 * cutoff / fs, btype="high", output="sos")
acc_high = norm(sosfiltfilt(sos_high, accel, axis=0), axis=1)
acc_low = norm(sosfiltfilt(sos_low, accel, axis=0), axis=1)
if trim_zero:
return moving_mean(maximum(acc_high + acc_low - 1, 0), wlen, wlen)
else:
return moving_mean(acc_high + acc_low - 1, wlen, wlen)
def metric_mad(accel, wlen, *args, **kwargs):
"""
Compute the Mean Amplitude Deviation metric for acceleration.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of accelerationes measured in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
Returns
-------
mad : numpy.ndarray
        (N/wlen, ) array of computed MAD values.
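    Examples
    --------
    A small synthetic sketch: an acceleration norm alternating between 1.0g and 1.1g
    has a mean amplitude deviation of 0.05g in each window (values are approximate):
    >>> import numpy as np
    >>> accel = np.tile([0.0, 0.0, 1.0], (100, 1))
    >>> accel[::2, 2] = 1.1
    >>> metric_mad(accel, 50)
    array([0.05, 0.05])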
"""
acc_norm = norm(accel, axis=1)
r_avg = repeat(moving_mean(acc_norm, wlen, wlen), wlen)
mad = moving_mean(abs(acc_norm[: r_avg.size] - r_avg), wlen, wlen)
    return mad
from warnings import warn
from numpy import vstack, asarray, int_
from skdh.base import BaseProcess
from skdh.io.base import check_input_file
from skdh.io._extensions import read_geneactiv
class ReadBin(BaseProcess):
"""
Read a binary .bin file from a GeneActiv sensor into memory. Acceleration values are returned
    in units of `g`. If base and period values are provided, the output will also include
    the indices for windows starting at the `base` hour and spanning `period` hours.
Parameters
----------
bases : {None, int, list-like}, optional
Base hours [0, 23] in which to start a window of time. Default is None,
which will not do any windowing. Both `base` and `period` must be defined
in order to window. Can use multiple, but the number of `bases` must match
the number of `periods`.
periods : {None, int, list-like}, optional
Periods for each window, in [1, 24]. Defines the number of hours per window.
Default is None, which will do no windowing. Both `period` and `base` must
be defined to window. Can use multiple but the number of `periods` must
match the number of `bases`.
ext_error : {"warn", "raise", "skip"}, optional
What to do if the file extension does not match the expected extension (.bin).
Default is "warn". "raise" raises a ValueError. "skip" skips the file
reading altogether and attempts to continue with the pipeline.
Examples
========
Setup a reader with no windowing:
>>> reader = ReadBin()
>>> reader.predict('example.bin')
{'accel': ..., 'time': ...}
Setup a reader that does windowing between 8:00 AM and 8:00 PM (20:00):
>>> reader = ReadBin(bases=8, periods=12) # 8 + 12 = 20
>>> reader.predict('example.bin')
{'accel': ..., 'time': ..., 'day_ends': [130, 13951, ...]}
"""
def __init__(self, bases=None, periods=None, ext_error="warn"):
super().__init__(
# kwargs
bases=bases,
periods=periods,
ext_error=ext_error,
)
if ext_error.lower() in ["warn", "raise", "skip"]:
self.ext_error = ext_error.lower()
else:
raise ValueError("`ext_error` must be one of 'raise', 'warn', 'skip'.")
if (bases is None) and (periods is None):
self.window = False
self.bases = asarray([0]) # needs to be defined for passing to extensions
self.periods = asarray([12])
elif (bases is None) or (periods is None):
warn("One of base or period is None, not windowing", UserWarning)
self.window = False
self.bases = asarray([0])
self.periods = asarray([12])
else:
if isinstance(bases, int) and isinstance(periods, int):
bases = asarray([bases])
periods = asarray([periods])
else:
bases = asarray(bases, dtype=int_)
periods = asarray(periods, dtype=int_)
if ((0 <= bases) & (bases <= 23)).all() and (
(1 <= periods) & (periods <= 24)
).all():
self.window = True
self.bases = bases
self.periods = periods
else:
raise ValueError(
"Base must be in [0, 23] and period must be in [1, 23]"
)
@check_input_file(".bin")
def predict(self, file=None, **kwargs):
"""
predict(file)
Read the data from the GeneActiv file
Parameters
----------
file : {str, Path}
Path to the file to read. Must either be a string, or be able to be converted by
`str(file)`
Returns
-------
data : dict
Dictionary of the data contained in the file.
Raises
------
ValueError
If the file name is not provided
Notes
-----
The keys in `data` depend on which data the file contained. Potential keys are:
- `accel`: acceleration [g]
- `time`: timestamps [s]
- `light`: light values [unknown]
- `temperature`: temperature [deg C]
- `day_ends`: window indices
"""
super().predict(expect_days=False, expect_wear=False, file=file, **kwargs)
# read the file
n_max, fs, acc, time, light, temp, starts, stops = read_geneactiv(
file, self.bases, self.periods
)
results = {
self._time: time[:n_max],
self._acc: acc[:n_max, :],
self._temp: temp[:n_max],
"light": light[:n_max],
"fs": fs,
"file": file,
}
if self.window:
results[self._days] = {}
for i, data in enumerate(zip(self.bases, self.periods)):
strt = starts[stops[:, i] != 0, i]
stp = stops[stops[:, i] != 0, i]
results[self._days][(data[0], data[1])] = vstack((strt, stp)).T
kwargs.update(results)
return (kwargs, None) if self._in_pipeline else kwargs | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/io/geneactiv.py | 0.920652 | 0.650883 | geneactiv.py | pypi |
from pathlib import Path
import functools
from warnings import warn
from skdh.io.utility import FileSizeError
def check_input_file(
extension,
check_size=True,
ext_message="File extension [{}] does not match expected [{}]",
):
"""
Check the input file for existence and suffix.
Parameters
----------
extension : str
Expected file suffix, eg '.abc'.
check_size : bool, optional
Check file size is over 1kb. Default is True.
ext_message : str, optional
Message to print if the suffix does not match. Should take 2 format arguments
('{}'), the first for the actual file suffix, and the second for the
expected suffix.
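    Examples
    --------
    A minimal sketch of how the decorator is applied (the reader class below is
    illustrative, not part of this module; the wrapped object must provide
    `ext_error` and `_in_pipeline` attributes):
    >>> class MyReader:
    ...     ext_error = "warn"
    ...     _in_pipeline = False
    ...     @check_input_file(".csv")
    ...     def predict(self, file=None, **kwargs):
    ...         return kwargs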
"""
def decorator_check_input_file(func):
@functools.wraps(func)
def wrapper_check_input_file(self, file=None, **kwargs):
# check if the file is provided
if file is None:
raise ValueError("`file` must not be None.")
# make a path instance for ease of use
pfile = Path(file)
# make sure the file exists
if not pfile.exists():
raise FileNotFoundError(f"File {file} does not exist.")
# check that the file matches the expected extension
if pfile.suffix != extension:
if self.ext_error == "warn":
warn(ext_message.format(pfile.suffix, extension), UserWarning)
elif self.ext_error == "raise":
raise ValueError(ext_message.format(pfile.suffix, extension))
elif self.ext_error == "skip":
kwargs.update({"file": str(file)})
return (kwargs, None) if self._in_pipeline else kwargs
# check file size if desired
if check_size:
if pfile.stat().st_size < 1000:
raise FileSizeError("File is less than 1kb, nothing to read.")
# cast to a string
file = str(file)
return func(self, file=file, **kwargs)
return wrapper_check_input_file
return decorator_check_input_file | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/io/base.py | 0.78535 | 0.273797 | base.py | pypi |
from numpy import load as np_load
from skdh.base import BaseProcess
from skdh.io.base import check_input_file
class ReadNumpyFile(BaseProcess):
"""
Read a Numpy compressed file into memory. The file should have been
    created by `numpy.savez`. The data are read in unprocessed, i.e. acceleration
    is assumed to already be in units of 'g' and time in units of seconds.
    No day windowing is performed. Expected
keys are `time` and `accel`. If `fs` is present, it is used as well.
Parameters
----------
allow_pickle : bool, optional
Allow pickled objects in the NumPy file. Default is False, which is the safer option.
For more information see :py:meth:`numpy.load`.
ext_error : {"warn", "raise", "skip"}, optional
What to do if the file extension does not match the expected extension (.npz).
Default is "warn". "raise" raises a ValueError. "skip" skips the file
reading altogether and attempts to continue with the pipeline.
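    Examples
    --------
    A minimal usage sketch (the file name is illustrative; the archive is assumed
    to contain `time` and `accel` arrays saved with `numpy.savez`):
    >>> reader = ReadNumpyFile()
    >>> reader.predict('example.npz')
    {'time': ..., 'accel': ..., ...}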
"""
def __init__(self, allow_pickle=False, ext_error="warn"):
super(ReadNumpyFile, self).__init__(
allow_pickle=allow_pickle, ext_error=ext_error
)
self.allow_pickle = allow_pickle
if ext_error.lower() in ["warn", "raise", "skip"]:
self.ext_error = ext_error.lower()
else:
raise ValueError("`ext_error` must be one of 'raise', 'warn', 'skip'.")
@check_input_file(".npz", check_size=True)
def predict(self, file=None, **kwargs):
"""
predict(file)
Read the data from a numpy compressed file.
Parameters
----------
file : {str, Path}
Path to the file to read. Must either be a string, or be able to be
converted by `str(file)`.
Returns
-------
data : dict
Dictionary of the data contained in the file.
Raises
------
ValueError
If the file name is not provided
Notes
-----
The keys in `data` depend on which data the file contained. Potential keys are:
- `accel`: acceleration [g]
- `time`: timestamps [s]
- `fs`: sampling frequency in Hz.
"""
super().predict(expect_days=False, expect_wear=False, file=file, **kwargs)
with np_load(file, allow_pickle=self.allow_pickle) as data:
kwargs.update(data) # pull everything in
# make sure that fs is saved properly
if "fs" in data:
kwargs["fs"] = data["fs"][()]
# check that time and accel are in the correct names
if self._time not in kwargs or self._acc not in kwargs:
raise ValueError(
f"Missing `{self._time}` or `{self._acc}` arrays in the file"
)
# make sure we return the file
kwargs.update({"file": file})
return (kwargs, None) if self._in_pipeline else kwargs | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/io/numpy_compressed.py | 0.870542 | 0.500977 | numpy_compressed.py | pypi |
from warnings import warn
from numpy import vstack, asarray, ascontiguousarray, minimum, int_
from skdh.base import BaseProcess
from skdh.io.base import check_input_file
from skdh.io._extensions import read_axivity
class UnexpectedAxesError(Exception):
pass
class ReadCwa(BaseProcess):
"""
    Read a binary CWA file from an axivity sensor into memory. Acceleration is returned in units of
    'g' while angular velocity (if available) is returned in units of `deg/s`. If base and period
    values are provided, the output will also include the indices for windows starting at the
    `base` hour and spanning `period` hours.
Parameters
----------
bases : {None, int}, optional
Base hour [0, 23] in which to start a window of time. Default is None, which
will not do any windowing. Both `base` and `period` must be defined in order
to window.
periods : {None, int}, optional
Period for each window, in [1, 24]. Defines the number of hours per window.
Default is None, which will do no windowing. Both `period` and `base` must
be defined to window
ext_error : {"warn", "raise", "skip"}, optional
What to do if the file extension does not match the expected extension (.cwa).
Default is "warn". "raise" raises a ValueError. "skip" skips the file
reading altogether and attempts to continue with the pipeline.
Examples
--------
Setup a reader with no windowing:
>>> reader = ReadCwa()
>>> reader.predict('example.cwa')
{'accel': ..., 'time': ..., ...}
Setup a reader that does windowing between 8:00 AM and 8:00 PM (20:00):
>>> reader = ReadCwa(bases=8, periods=12) # 8 + 12 = 20
>>> reader.predict('example.cwa')
{'accel': ..., 'time': ..., 'day_ends': [130, 13951, ...], ...}
"""
def __init__(self, bases=None, periods=None, ext_error="warn"):
super().__init__(
# kwargs
bases=bases,
periods=periods,
ext_error=ext_error,
)
if ext_error.lower() in ["warn", "raise", "skip"]:
self.ext_error = ext_error.lower()
else:
raise ValueError("`ext_error` must be one of 'raise', 'warn', 'skip'.")
if (bases is None) and (periods is None):
self.window = False
self.bases = asarray([0]) # needs to be defined for passing to extensions
self.periods = asarray([12])
elif (bases is None) or (periods is None):
warn("One of base or period is None, not windowing", UserWarning)
self.window = False
self.bases = asarray([0])
self.periods = asarray([12])
else:
if isinstance(bases, int) and isinstance(periods, int):
bases = asarray([bases])
periods = asarray([periods])
else:
bases = asarray(bases, dtype=int_)
periods = asarray(periods, dtype=int_)
if ((0 <= bases) & (bases <= 23)).all() and (
(1 <= periods) & (periods <= 24)
).all():
self.window = True
self.bases = bases
self.periods = periods
else:
raise ValueError(
"Base must be in [0, 23] and period must be in [1, 23]"
)
@check_input_file(".cwa")
def predict(self, file=None, **kwargs):
"""
predict(file)
Read the data from the axivity file
Parameters
----------
file : {str, Path}
Path to the file to read. Must either be a string, or be able to be converted by
`str(file)`
Returns
-------
data : dict
Dictionary of the data contained in the file.
Raises
------
ValueError
If the file name is not provided
UnexpectedAxesError
If the number of axes returned is not 3, 6 or 9
Notes
-----
The keys in `data` depend on which data the file contained. Potential keys are:
- `accel`: acceleration [g]
- `gyro`: angular velocity [deg/s]
- `magnet`: magnetic field readings [uT]
- `time`: timestamps [s]
- `day_ends`: window indices
"""
super().predict(expect_days=False, expect_wear=False, file=file, **kwargs)
# read the file
fs, n_bad_samples, imudata, ts, temperature, starts, stops = read_axivity(
file, self.bases, self.periods
)
# end = None if n_bad_samples == 0 else -n_bad_samples
end = None
num_axes = imudata.shape[1]
gyr_axes = mag_axes = None
if num_axes == 3:
acc_axes = slice(None)
elif num_axes == 6:
gyr_axes = slice(3)
acc_axes = slice(3, 6)
elif num_axes == 9: # pragma: no cover :: don't have data to test this
gyr_axes = slice(3)
acc_axes = slice(3, 6)
mag_axes = slice(6, 9)
else: # pragma: no cover :: not expected to reach here only if file is corrupt
raise UnexpectedAxesError("Unexpected number of axes in the IMU data")
results = {
self._time: ts[:end],
"file": file,
"fs": fs,
self._temp: temperature[:end],
}
if acc_axes is not None:
results[self._acc] = ascontiguousarray(imudata[:end, acc_axes])
if gyr_axes is not None:
results[self._gyro] = ascontiguousarray(imudata[:end, gyr_axes])
if mag_axes is not None: # pragma: no cover :: don't have data to test this
results[self._mag] = ascontiguousarray(imudata[:end, mag_axes])
if self.window:
results[self._days] = {}
for i, data in enumerate(zip(self.bases, self.periods)):
strt = starts[stops[:, i] != 0, i]
stp = stops[stops[:, i] != 0, i]
results[self._days][(data[0], data[1])] = minimum(
vstack((strt, stp)).T, results[self._time].size - 1
)
kwargs.update(results)
return (kwargs, None) if self._in_pipeline else kwargs | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/io/axivity.py | 0.89796 | 0.60288 | axivity.py | pypi |
from sys import version_info
from numpy import isclose, where, diff, insert, append, ascontiguousarray, int_
from numpy.linalg import norm
from scipy.signal import butter, sosfiltfilt
import lightgbm as lgb
from skdh.utility import get_windowed_view
from skdh.utility.internal import rle
from skdh.features import Bank
if version_info >= (3, 7):
from importlib import resources
else: # pragma: no cover
import importlib_resources
def _resolve_path(mod, file):
if version_info >= (3, 7):
with resources.path(mod, file) as file_path:
path = file_path
else: # pragma: no cover
with importlib_resources.path(mod, file) as file_path:
path = file_path
return path
class DimensionMismatchError(Exception):
pass
def get_gait_classification_lgbm(gait_starts, gait_stops, accel, fs):
"""
Get classification of windows of accelerometer data using the LightGBM classifier
Parameters
----------
gait_starts : {None, numpy.ndarray}
Provided gait start indices.
gait_stops : {None, numpy.ndarray}
Provided gait stop indices.
accel : numpy.ndarray
(N, 3) array of acceleration values, in units of "g"
fs : float
Sampling frequency for the data
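    Returns
    -------
    bout_starts : numpy.ndarray
        Indices of gait bout starts (the provided `gait_starts` if given, otherwise predicted).
    bout_stops : numpy.ndarray
        Indices of gait bout stops (the provided `gait_stops` if given, otherwise predicted).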
"""
if gait_starts is not None and gait_stops is not None:
return gait_starts, gait_stops
else:
if not isclose(fs, 50.0) and not isclose(fs, 20.0):
raise ValueError("fs must be either 50hz or 20hz.")
suffix = "50hz" if fs == 50.0 else "20hz"
wlen = int(fs * 3) # window length, 3 seconds
wstep = wlen # non-overlapping windows
thresh = 0.7 # mean + 1 stdev of best threshold for maximizing F1 score.
        # used to try to minimize false positives
# band-pass filter
sos = butter(1, [2 * 0.25 / fs, 2 * 5 / fs], btype="band", output="sos")
accel_filt = ascontiguousarray(sosfiltfilt(sos, norm(accel, axis=1)))
# window, data will already be in c-contiguous layout
accel_w = get_windowed_view(accel_filt, wlen, wstep, ensure_c_contiguity=False)
# get the feature bank
feat_bank = Bank() # data is already windowed
feat_bank.load(_resolve_path("skdh.gait.model", "final_features.json"))
# compute the features
accel_feats = feat_bank.compute(accel_w, fs=fs, axis=1, index_axis=None)
# output shape is (18, 99), need to transpose when passing to classifier
# load the classification model
lgb_file = str(
_resolve_path(
"skdh.gait.model", f"lgbm_gait_classifier_no-stairs_{suffix}.lgbm"
)
)
bst = lgb.Booster(model_file=lgb_file)
# predict
gait_predictions = (
bst.predict(accel_feats.T, raw_score=False) > thresh
).astype(int_)
lengths, starts, vals = rle(gait_predictions)
bout_starts = starts[vals == 1]
bout_stops = bout_starts + lengths[vals == 1]
# convert to actual values that match up with data
bout_starts *= wstep
bout_stops = bout_stops * wstep + (
wlen - wstep
) # account for edges, if windows overlap
return bout_starts.astype("int"), bout_stops.astype("int") | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/gait/get_gait_classification.py | 0.551574 | 0.30767 | get_gait_classification.py | pypi |
from numpy import fft, argmax, std, abs, argsort, corrcoef, mean, sign
from scipy.signal import detrend, butter, sosfiltfilt, find_peaks
from scipy.integrate import cumtrapz
from pywt import cwt
from skdh.utility import correct_accelerometer_orientation
from skdh.gait.gait_endpoints import gait_endpoints
def get_cwt_scales(use_optimal_scale, vertical_velocity, original_scale, fs):
"""
Get the CWT scales for the IC and FC events.
Parameters
----------
use_optimal_scale : bool
Use the optimal scale based on step frequency.
vertical_velocity : numpy.ndarray
Vertical velocity, in units of "g".
original_scale : int
The original/default scale for the CWT.
fs : float
Sampling frequency, in Hz.
Returns
-------
scale1 : int
First scale for the CWT. For initial contact events.
scale2 : int
Second scale for the CWT. For final contact events.
"""
if use_optimal_scale:
coef_scale_original, _ = cwt(vertical_velocity, original_scale, "gaus1")
F = abs(fft.rfft(coef_scale_original[0]))
# compute an estimate of the step frequency
step_freq = argmax(F) / vertical_velocity.size * fs
# IC scale: -10 * sf + 56
# FC scale: -52 * sf + 131
        # TODO verify the FC scale equation. This is not in the paper but is a
# guess from the graph
# original fs was 250hz, hence the conversion
scale1 = min(max(round((-10 * step_freq + 56) * (fs / 250)), 1), 90)
scale2 = min(max(round((-52 * step_freq + 131) * (fs / 250)), 1), 90)
# scale range is between 1 and 90
else:
scale1 = scale2 = original_scale
return scale1, scale2
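# Worked example of the scale equations above (illustrative numbers): with
# fs = 50.0 Hz and an estimated step frequency of 2.0 Hz, the unscaled values
# are -10 * 2 + 56 = 36 (IC) and -52 * 2 + 131 = 27 (FC); after the 250 Hz to
# `fs` conversion (fs / 250 = 0.2) they become scale1 = round(7.2) = 7 and
# scale2 = round(5.4) = 5, both inside the allowed [1, 90] range.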
def get_gait_events(
accel,
fs,
ts,
orig_scale,
filter_order,
filter_cutoff,
corr_accel_orient,
use_optimal_scale,
):
"""
Get the bouts of gait from the acceleration during a gait bout
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration during the gait bout.
fs : float
Sampling frequency for the acceleration.
ts : numpy.ndarray
        Array of timestamps (in seconds) corresponding to acceleration sampling times.
orig_scale : int
Original scale for the CWT.
filter_order : int
Low-pass filter order.
filter_cutoff : float
Low-pass filter cutoff in Hz.
corr_accel_orient : bool
Correct the accelerometer orientation.
use_optimal_scale : bool
Use the optimal scale based on step frequency.
Returns
-------
init_contact : numpy.ndarray
Indices of initial contacts
final_contact : numpy.ndarray
Indices of final contacts
vert_accel : numpy.ndarray
Filtered vertical acceleration
v_axis : int
The axis corresponding to the vertical acceleration
"""
    assert accel.shape[0] == ts.size, "`accel` and `ts` size must match"
# figure out vertical axis on a per-bout basis
acc_mean = mean(accel, axis=0)
v_axis = argmax(abs(acc_mean))
va_sign = sign(acc_mean[v_axis]) # sign of the vertical acceleration
# correct acceleration orientation if set
if corr_accel_orient:
# determine AP axis
ac = gait_endpoints._autocovariancefn(
accel, min(accel.shape[0] - 1, 1000), biased=True, axis=0
)
ap_axis = argsort(corrcoef(ac.T)[v_axis])[-2] # last is autocorrelation
accel = correct_accelerometer_orientation(accel, v_axis=v_axis, ap_axis=ap_axis)
vert_accel = detrend(accel[:, v_axis]) # detrend data just in case
# low-pass filter if we can
if 0 < (2 * filter_cutoff / fs) < 1:
sos = butter(filter_order, 2 * filter_cutoff / fs, btype="low", output="sos")
# multiply by 1 to ensure a copy and not a view
filt_vert_accel = sosfiltfilt(sos, vert_accel)
else:
filt_vert_accel = vert_accel * 1
# first integrate the vertical accel to get velocity
vert_velocity = cumtrapz(filt_vert_accel, x=ts - ts[0], initial=0)
# get the CWT scales
scale1, scale2 = get_cwt_scales(use_optimal_scale, vert_velocity, orig_scale, fs)
coef1, _ = cwt(vert_velocity, [scale1, scale2], "gaus1")
"""
Find the local minima in the signal. This should technically always require using
the negative signal in "find_peaks", however the way PyWavelets computes the
CWT results in the opposite signal that we want.
Therefore, if the sign of the acceleration was negative, we need to use the
    positive coefficient signal, and the opposite for a positive acceleration reading.
"""
init_contact, *_ = find_peaks(-va_sign * coef1[0], height=0.5 * std(coef1[0]))
coef2, _ = cwt(coef1[1], scale2, "gaus1")
"""
Peaks are the final contact points
Same issue as above
"""
final_contact, *_ = find_peaks(-va_sign * coef2[0], height=0.5 * std(coef2[0]))
return init_contact, final_contact, filt_vert_accel, v_axis | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/gait/get_gait_events.py | 0.870625 | 0.564399 | get_gait_events.py | pypi |
from numpy import (
max,
min,
mean,
arccos,
sum,
array,
sin,
cos,
full,
nan,
arctan2,
unwrap,
pi,
sign,
diff,
abs,
zeros,
cross,
)
from numpy.linalg import norm
from skdh.utility.internal import rle
def get_turns(gait, accel, gyro, fs, n_strides):
"""
Get the location of turns, to indicate if steps occur during a turn.
Parameters
----------
gait : dictionary
Dictionary of gait values needed for computation or the results.
accel : numpy.ndarray
Acceleration in units of 'g', for the current gait bout.
gyro : numpy.ndarray
Angular velocity in units of 'rad/s', for the current gait bout.
fs : float
Sampling frequency, in Hz.
n_strides : int
Number of strides in the current gait bout.
Notes
-----
Values indicate turns as follows:
- -1: Turns not detected (lacking angular velocity data)
- 0: No turn found
- 1: Turn overlaps with either Initial or Final contact
- 2: Turn overlaps with both Initial and Final contact
References
----------
.. [1] M. H. Pham et al., “Algorithm for Turning Detection and Analysis
Validated under Home-Like Conditions in Patients with Parkinson’s Disease
and Older Adults using a 6 Degree-of-Freedom Inertial Measurement Unit at
the Lower Back,” Front. Neurol., vol. 8, Apr. 2017,
doi: 10.3389/fneur.2017.00135.
"""
# first check if we can detect turns
if gyro is None or n_strides < 1:
gait["Turn"].extend([-1] * n_strides)
return
# get the first available still period to start the yaw tracking
n = int(0.05 * fs) # number of samples to use for still period
min_slice = None
for i in range(int(2 * fs)):
tmp = norm(accel[i : i + n], axis=1)
acc_range = max(tmp) - min(tmp)
if acc_range < (0.2 / 9.81): # range defined by the Pham paper
min_slice = accel[i : i + n]
break
if min_slice is None:
min_slice = accel[:n]
# compute the mean value over that time frame
acc_init = mean(min_slice, axis=0)
# compute the initial angle between this vector and global frame
phi = arccos(sum(acc_init * array([0, 0, 1])) / norm(acc_init))
# create the rotation matrix/rotations from sensor frame to global frame
gsZ = array([sin(phi), cos(phi), 0.0])
gsX = array([1.0, 0.0, 0.0])
gsY = cross(gsZ, gsX)
gsY /= norm(gsY)
gsX = cross(gsY, gsZ)
gsX /= norm(gsX)
gsR = array([gsX, gsY, gsZ])
# iterate over the gait bout
alpha = full(gyro.shape[0], nan) # allocate the yaw angle around vertical axis
alpha[0] = arctan2(gsR[2, 0], gsR[1, 0])
for i in range(1, gyro.shape[0]):
theta = norm(gyro[i]) / fs
c = cos(theta)
s = sin(theta)
t = 1 - c
wx = gyro[i, 0]
wy = gyro[i, 1]
wz = gyro[i, 2]
update_R = array(
[
[t * wx**2 + c, t * wx * wy + s * wz, t * wx * wz - s * wy],
[t * wx * wy - s * wz, t * wy**2 + c, t * wy * wz + s * wx],
[t * wx * wz + s * wy, t * wy * wz - s * wx, t * wz**2 + c],
]
)
gsR = update_R @ gsR
alpha[i] = arctan2(gsR[2, 0], gsR[1, 0])
# unwrap the angle so there are no discontinuities
alpha = unwrap(alpha, period=pi)
# get the sign of the difference as initial turn indication
turns = sign(diff(alpha))
# get the angles of the turns
lengths, starts, values = rle(turns == 1)
turn_angles = abs(alpha[starts + lengths] - alpha[starts])
# find hesitations in turns
mask = (lengths / fs) < 0.5 # less than half a second
mask[1:-1] &= turn_angles[:-2] >= (pi / 180 * 10) # adjacent turns > 10 degrees
mask[1:-1] &= turn_angles[2:] >= (pi / 180 * 10)
# one adjacent turn greater than 45 degrees
mask[1:-1] &= (turn_angles[:-2] > pi / 4) | (turn_angles[2:] >= pi / 4)
# magnitude of hesitation less than 10% of turn angle
    mask[1:-1] &= turn_angles[1:-1] < (0.1 * (turn_angles[:-2] + turn_angles[2:]))
# set hesitation turns to match surrounding
for l, s in zip(lengths[mask], starts[mask]):
turns[s : s + l] = turns[s - 1]
# enforce the time limit (0.1 - 10s) and angle limit (90 deg)
lengths, starts, values = rle(turns == 1)
mask = abs(alpha[starts + lengths] - alpha[starts]) < (pi / 2) # exclusion mask
    mask |= ((lengths / fs) < 0.1) | ((lengths / fs) > 10)
for l, s in zip(lengths[mask], starts[mask]):
turns[s : s + l] = 0
# final list of turns
lengths, starts, values = rle(turns != 0)
# mask for strides in turn
in_turn = zeros(n_strides, dtype="int")
for d, s in zip(lengths[values == 1], starts[values == 1]):
in_turn += (gait["IC"][-n_strides:] > s) & (gait["IC"][-n_strides:] < (s + d))
in_turn += (gait["FC"][-n_strides:] > s) & (gait["FC"][-n_strides:] < (s + d))
gait["Turn"].extend(in_turn) | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/gait/get_turns.py | 0.835651 | 0.590455 | get_turns.py | pypi |
import functools
import logging
from numpy import zeros, roll, full, nan, bool_, float_
def basic_asymmetry(f):
@functools.wraps(f)
def run_basic_asymmetry(self, *args, **kwargs):
f(self, *args, **kwargs)
self._predict_asymmetry(*args, **kwargs)
return run_basic_asymmetry
class GaitBoutEndpoint:
def __str__(self):
return self.name
def __repr__(self):
return self.name
def __init__(self, name, logname, depends=None):
"""
Bout level endpoint base class
Parameters
----------
name : str
Name of the endpoint
depends : Iterable
Any other endpoints that are required to be computed beforehand
"""
self.name = name
self.logger = logging.getLogger(logname)
self.k_ = f"BOUTPARAM:{self.name}"
self._depends = depends
def predict(self, fs, leg_length, gait, gait_aux):
"""
Predict the bout level gait endpoint
Parameters
----------
fs : float
Sampling frequency in Hz
leg_length : {None, float}
Leg length in meters
gait : dict
Dictionary of gait items and results. Modified in place to add the endpoint being
calculated
gait_aux : dict
Dictionary of acceleration, velocity, and position data for bouts, and the mapping
from step to bout and inertial data
"""
if self.k_ in gait:
return
if self._depends is not None:
for param in self._depends:
param().predict(fs, leg_length, gait, gait_aux)
self._predict(fs, leg_length, gait, gait_aux)
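# A hedged sketch of how a concrete bout-level endpoint could subclass the base
# class above (the endpoint name and computation are hypothetical):
#
#     class BoutDuration(GaitBoutEndpoint):
#         def __init__(self):
#             super().__init__("bout duration", __name__)
#         def _predict(self, fs, leg_length, gait, gait_aux):
#             # store one value per bout under the key 'BOUTPARAM:bout duration'
#             gait[self.k_] = ...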
class GaitEventEndpoint:
def __str__(self):
return self.name
def __repr__(self):
return self.name
def __init__(self, name, logname, depends=None):
"""
Gait endpoint base class
Parameters
----------
name : str
            Name of the endpoint
        depends : Iterable
            Any other endpoints that are required to be computed beforehand
"""
self.name = name
self.logger = logging.getLogger(logname)
self.k_ = f"PARAM:{self.name}"
self._depends = depends
@staticmethod
def _get_mask(gait, offset):
if offset not in [1, 2]:
raise ValueError("invalid offset")
mask = zeros(gait["IC"].size, dtype=bool_)
mask[:-offset] = (gait["Bout N"][offset:] - gait["Bout N"][:-offset]) == 0
# account for non-continuous gait bouts
mask &= gait["forward cycles"] >= offset
return mask
def predict(self, fs, leg_length, gait, gait_aux):
"""
Predict the gait event-level endpoint
Parameters
----------
fs : float
Sampling frequency in Hz
leg_length : {None, float}
Leg length in meters
gait : dict
Dictionary of gait items and results. Modified in place to add the endpoint being
calculated
gait_aux : dict
Dictionary of acceleration, velocity, and position data for bouts, and the mapping
from step to bout and inertial data
"""
if self.k_ in gait:
return
if self._depends is not None:
for param in self._depends:
param().predict(fs, leg_length, gait, gait_aux)
self._predict(fs, leg_length, gait, gait_aux)
def _predict_asymmetry(self, dt, leg_length, gait, gait_aux):
asy_name = f"{self.k_} asymmetry"
gait[asy_name] = full(gait["IC"].size, nan, dtype=float_)
mask = self._get_mask(gait, 1)
mask_ofst = roll(mask, 1)
gait[asy_name][mask] = gait[self.k_][mask_ofst] - gait[self.k_][mask]
def _predict_init(self, gait, init=True, offset=None):
if init:
gait[self.k_] = full(gait["IC"].size, nan, dtype=float_)
if offset is not None:
mask = self._get_mask(gait, offset)
mask_ofst = roll(mask, offset)
return mask, mask_ofst | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/gait/gait_endpoints/base.py | 0.899953 | 0.286144 | base.py | pypi |
import numpy as np
from .._commonfuncs import LocalEstimator
from scipy.spatial.distance import pdist, squareform
class TLE(LocalEstimator):
"""Intrinsic dimension estimation using the Tight Local intrinsic dimensionality Estimator algorithm. [Amsaleg2019]_ [IDRadovanović]_
Parameters
----------
    epsilon: float
        Small positive threshold; distances and derived measurements smaller
        than `epsilon` are dropped from the estimate to avoid numerical issues.
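    Examples
    --------
    A hedged usage sketch (assumes the `LocalEstimator` base class provides the
    standard `fit` interface of this package; data are random and illustrative):
    >>> import numpy as np
    >>> X = np.random.normal(size=(100, 5))
    >>> tle = TLE().fit(X)  # point-wise estimates in tle.dimension_pw_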
"""
_N_NEIGHBORS = 20
def __init__(
self, epsilon=1e-4,
):
self.epsilon = epsilon
def _fit(self, X, dists, knnidx):
self.dimension_pw_ = np.zeros(len(X))
for i in range(len(X)):
self.dimension_pw_[i] = self._idtle(X[knnidx[i, :]], dists[[i], :])
def _idtle(self, nn, dists):
# nn - matrix of nearest neighbors (n_neighbors x d), sorted by distance
# dists - nearest-neighbor distances (1 x n_neighbors), sorted
r = dists[0, -1] # distance to n_neighbors-th neighbor
# Boundary case 1: If $r = 0$, this is fatal, since the neighborhood would be degenerate.
if r == 0:
raise ValueError("All k-NN distances are zero!")
# Main computation
n_neighbors = dists.shape[1]
V = squareform(pdist(nn))
Di = np.tile(dists.T, (1, n_neighbors))
Dj = Di.T
Z2 = 2 * Di ** 2 + 2 * Dj ** 2 - V ** 2
S = (
r
* (
((Di ** 2 + V ** 2 - Dj ** 2) ** 2 + 4 * V ** 2 * (r ** 2 - Di ** 2))
** 0.5
- (Di ** 2 + V ** 2 - Dj ** 2)
)
/ (2 * (r ** 2 - Di ** 2))
)
T = (
r
* (
((Di ** 2 + Z2 - Dj ** 2) ** 2 + 4 * Z2 * (r ** 2 - Di ** 2)) ** 0.5
- (Di ** 2 + Z2 - Dj ** 2)
)
/ (2 * (r ** 2 - Di ** 2))
)
# handle case of repeating k-NN distances
Dr = (dists == r).squeeze()
S[Dr, :] = r * V[Dr, :] ** 2 / (r ** 2 + V[Dr, :] ** 2 - Dj[Dr, :] ** 2)
T[Dr, :] = r * Z2[Dr, :] / (r ** 2 + Z2[Dr, :] - Dj[Dr, :] ** 2)
# Boundary case 2: If $u_i = 0$, then for all $1\leq j\leq n_neighbors$ the measurements $s_{ij}$ and $t_{ij}$ reduce to $u_j$.
Di0 = (Di == 0).squeeze()
T[Di0] = Dj[Di0]
S[Di0] = Dj[Di0]
# Boundary case 3: If $u_j = 0$, then for all $1\leq j\leq n_neighbors$ the measurements $s_{ij}$ and $t_{ij}$ reduce to $\frac{r v_{ij}}{r + v_{ij}}$.
Dj0 = (Dj == 0).squeeze()
T[Dj0] = r * V[Dj0] / (r + V[Dj0])
S[Dj0] = r * V[Dj0] / (r + V[Dj0])
# Boundary case 4: If $v_{ij} = 0$, then the measurement $s_{ij}$ is zero and must be dropped. The measurement $t_{ij}$ should be dropped as well.
V0 = (V == 0).squeeze()
np.fill_diagonal(V0, False)
# by setting to r, $t_{ij}$ will not contribute to the sum s1t
T[V0] = r
# by setting to r, $s_{ij}$ will not contribute to the sum s1s
S[V0] = r
# will subtract twice this number during ID computation below
nV0 = np.sum(V0)
# Drop T & S measurements below epsilon (V4: If $s_{ij}$ is thrown out, then for the sake of balance, $t_{ij}$ should be thrown out as well (or vice versa).)
TSeps = (T < self.epsilon) | (S < self.epsilon)
np.fill_diagonal(TSeps, 0)
nTSeps = np.sum(TSeps)
T[TSeps] = r
T = np.log(T / r)
S[TSeps] = r
S = np.log(S / r)
np.fill_diagonal(T, 0) # delete diagonal elements
np.fill_diagonal(S, 0)
# Sum over the whole matrices
s1t = np.sum(T)
s1s = np.sum(S)
# Drop distances below epsilon and compute sum
Deps = dists < self.epsilon
nDeps = np.sum(Deps, dtype=int)
dists = dists[nDeps:]
s2 = np.sum(np.log(dists / r))
# Compute ID, subtracting numbers of dropped measurements
ID = -2 * (n_neighbors ** 2 - nTSeps - nDeps - nV0) / (s1t + s1s + 2 * s2)
return ID | /scikit-dimension-0.3.3.tar.gz/scikit-dimension-0.3.3/skdim/id/_TLE.py | 0.898555 | 0.875734 | _TLE.py | pypi |
import numpy as np
import warnings
from .._commonfuncs import get_nn, GlobalEstimator
from scipy.optimize import minimize
from sklearn.utils.validation import check_array
class MiND_ML(GlobalEstimator):
# SPDX-License-Identifier: MIT, 2017 Kerstin Johnsson [IDJohnsson]_
"""Intrinsic dimension estimation using the MiND_MLk and MiND_MLi algorithms. [Rozza2012]_ [IDJohnsson]_
Parameters
----------
k: int, default=20
Neighborhood parameter for ver='MLk' or ver='MLi'.
    D: int, default=10
        Maximum dimension candidate considered in the maximum-likelihood search.
    ver: str
        'MLk' or 'MLi'. See the reference paper.
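    Examples
    --------
    A minimal usage sketch (random data, purely illustrative):
    >>> import numpy as np
    >>> X = np.random.normal(size=(200, 5))
    >>> mind = MiND_ML(k=20, D=10, ver="MLk").fit(X)  # estimate in mind.dimension_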
"""
def __init__(self, k=20, D=10, ver="MLk"):
self.k = k
self.D = D
self.ver = ver
def fit(self, X, y=None):
"""A reference implementation of a fitting function.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
The training input samples.
y : dummy parameter to respect the sklearn API
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, ensure_min_features=2)
if self.k + 1 >= len(X):
warnings.warn("k+1 >= len(X), using k+1 = len(X)-1")
self.dimension_ = self._MiND_MLx(X)
self.is_fitted_ = True
# `fit` should always return `self`
return self
def _MiND_MLx(self, X):
nbh_data, idx = get_nn(X, min(self.k + 1, len(X) - 1))
# if (self.ver == 'ML1'):
# return self._MiND_ML1(nbh_data)
rhos = nbh_data[:, 0] / nbh_data[:, -1]
d_MIND_MLi = self._MiND_MLi(rhos)
if self.ver == "MLi":
return d_MIND_MLi
d_MIND_MLk = self._MiND_MLk(rhos, d_MIND_MLi)
if self.ver == "MLk":
return d_MIND_MLk
else:
raise ValueError("Unknown version: ", self.ver)
# @staticmethod
# def _MiND_ML1(nbh_data):
# n = len(nbh_data)
# #need only squared dists to first 2 neighbors
# dists2 = nbh_data[:, :2]**2
# s = np.sum(np.log(dists2[:, 0]/dists2[:, 1]))
# ID = -2/(s/n)
# return ID
def _MiND_MLi(self, rhos):
# MiND MLi MLk REVERSED COMPARED TO R TO CORRESPOND TO PAPER
N = len(rhos)
d_lik = np.array([np.nan] * self.D)
for d in range(self.D):
d_lik[d] = self._lld(d + 1, rhos, N)
return np.argmax(d_lik) + 1
def _MiND_MLk(self, rhos, dinit):
# MiND MLi MLk REVERSED COMPARED TO R TO CORRESPOND TO PAPER
res = minimize(
fun=self._nlld,
x0=np.array([dinit]),
jac=self._nlld_gr,
args=(rhos, len(rhos)),
method="L-BFGS-B",
bounds=[(0, self.D)],
)
return res["x"][0]
def _nlld(self, d, rhos, N):
return -self._lld(d, rhos, N)
def _lld(self, d, rhos, N):
if d == 0:
return np.array([-1e30])
else:
return (
N * np.log(self.k * d)
+ (d - 1) * np.sum(np.log(rhos))
+ (self.k - 1) * np.sum(np.log(1 - rhos ** d))
)
def _nlld_gr(self, d, rhos, N):
if d == 0:
return np.array([-1e30])
else:
return -(
N / d
+ np.sum(
np.log(rhos)
- (self.k - 1) * (rhos ** d) * np.log(rhos) / (1 - rhos ** d)
)
) | /scikit-dimension-0.3.3.tar.gz/scikit-dimension-0.3.3/skdim/id/_MiND_ML.py | 0.824638 | 0.49939 | _MiND_ML.py | pypi |
import numpy as np
from scipy.spatial.distance import pdist, squareform
from sklearn.utils.validation import check_array
from .._commonfuncs import GlobalEstimator
class KNN(GlobalEstimator):
# SPDX-License-Identifier: MIT, 2017 Kerstin Johnsson [IDJohnsson]_
"""Intrinsic dimension estimation using the kNN algorithm. [Carter2010]_ [IDJohnsson]_
This is a simplified version of the kNN dimension estimation method described by Carter et al. (2010),
the difference being that block bootstrapping is not used.
Parameters
----------
X: 2D numeric array
A 2D data set with each row describing a data point.
k: int
Number of distances to neighbors used at a time.
ps: 1D numeric array
        Vector with sample sizes; each sample size has to be larger than k and smaller than the number of samples in the data.
M: int, default=1
Number of bootstrap samples for each sample size.
gamma: int, default=2
Weighting constant.
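    Examples
    --------
    A minimal usage sketch (random data, purely illustrative; when `ps` is not
    given it defaults to the sample sizes k+1, ..., k+4):
    >>> import numpy as np
    >>> X = np.random.normal(size=(100, 5))
    >>> knn = KNN(k=3).fit(X)  # estimate in knn.dimension_, residuals in knn.residual_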
"""
def __init__(self, k=None, ps=None, M=1, gamma=2):
self.k = k
self.ps = ps
self.M = M
self.gamma = gamma
def fit(self, X, y=None):
"""A reference implementation of a fitting function.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
The training input samples.
y : dummy parameter to respect the sklearn API
Returns
-------
self: object
Returns self.
self.dimension_: float
The estimated intrinsic dimension
self.residual_: float
Residuals
"""
self._k = 2 if self.k is None else self.k
self._ps = np.arange(self._k + 1, self._k + 5) if self.ps is None else self.ps
X = check_array(X, ensure_min_samples=self._k + 1, ensure_min_features=2)
self.dimension_, self.residual_ = self._knnDimEst(X)
self.is_fitted_ = True
# `fit` should always return `self`
return self
def _knnDimEst(self, X):
n = len(X)
Q = len(self._ps)
if min(self._ps) <= self._k or max(self._ps) > n:
raise ValueError("ps must satisfy k<ps<len(X)")
# Compute the distance between any two points in the X set
dist = squareform(pdist(X))
# Compute weighted graph length for each sample
L = np.zeros((Q, self.M))
for i in range(Q):
for j in range(self.M):
samp_ind = np.random.randint(0, n, self._ps[i])
for l in samp_ind:
L[i, j] += np.sum(
np.sort(dist[l, samp_ind])[1 : (self._k + 1)] ** self.gamma
)
# Add the weighted sum of the distances to the k nearest neighbors.
# We should not include the sample itself, to which the distance is
# zero.
# Least squares solution for m
d = X.shape[1]
epsilon = np.repeat(np.nan, d)
for m0, m in enumerate(np.arange(1, d + 1)):
alpha = (m - self.gamma) / m
ps_alpha = self._ps ** alpha
hat_c = np.sum(ps_alpha * np.sum(L, axis=1)) / (
np.sum(ps_alpha ** 2) * self.M
)
epsilon[m0] = np.sum(
(L - np.tile((hat_c * ps_alpha)[:, None], self.M)) ** 2
)
# matrix(vec, nrow = length(vec), ncol = b) is a matrix with b
# identical columns equal to vec
# sum(matr) is the sum of all elements in the matrix matr
de = np.argmin(epsilon) + 1 # Missing values are discarded
return de, epsilon[de - 1] | /scikit-dimension-0.3.3.tar.gz/scikit-dimension-0.3.3/skdim/id/_KNN.py | 0.923846 | 0.760384 | _KNN.py | pypi |
from sklearn.utils.validation import check_array
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances_chunked
from sklearn.linear_model import LinearRegression
from .._commonfuncs import get_nn, GlobalEstimator
class TwoNN(GlobalEstimator):
# SPDX-License-Identifier: MIT, 2019 Francesco Mottes [IDMottes]_
"""Intrinsic dimension estimation using the TwoNN algorithm. [Facco2019]_ [IDFacco]_ [IDMottes]_
Parameters
----------
discard_fraction: float
Fraction (between 0 and 1) of largest distances to discard (heuristic from the paper)
dist: bool
Whether data is a precomputed distance matrix
Attributes
----------
x_: 1d array
np.array with the -log(mu) values.
y_: 1d array
np.array with the -log(F(mu_{sigma(i)})) values.
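    Examples
    --------
    A minimal usage sketch (random data, purely illustrative):
    >>> import numpy as np
    >>> X = np.random.normal(size=(1000, 3))
    >>> tn = TwoNN().fit(X)  # estimate in tn.dimension_, fit coordinates in tn.x_, tn.y_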
"""
def __init__(self, discard_fraction: float = 0.1, dist: bool = False):
self.discard_fraction = discard_fraction
self.dist = dist
def fit(self, X, y=None):
"""A reference implementation of a fitting function.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
A data set for which the intrinsic dimension is estimated.
y : dummy parameter to respect the sklearn API
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, ensure_min_features=2)
self.dimension_, self.x_, self.y_ = self._twonn(X)
self.is_fitted_ = True
# `fit` should always return `self`
return self
def _twonn(self, X):
"""
Calculates intrinsic dimension of the provided data points with the TWO-NN algorithm.
-----------
Parameters:
X : 2d array-like
2d data matrix. Samples on rows and features on columns.
return_xy : bool (default=False)
Whether to return also the coordinate vectors used for the linear fit.
discard_fraction : float between 0 and 1
Fraction of largest distances to discard (heuristic from the paper)
dist : bool (default=False)
Whether data is a precomputed distance matrix
-----------
Returns:
d : float
Intrinsic dimension of the dataset according to TWO-NN.
x : 1d np.array (optional)
Array with the -log(mu) values.
y : 1d np.array (optional)
Array with the -log(F(mu_{sigma(i)})) values.
-----------
References:
E. Facco, M. d’Errico, A. Rodriguez & A. Laio
Estimating the intrinsic dimension of datasets by a minimal neighborhood information (https://doi.org/10.1038/s41598-017-11873-y)
"""
N = len(X)
if self.dist:
r1, r2 = X[:, 0], X[:, 1]
_mu = r2 / r1
# discard the largest distances
mu = _mu[np.argsort(_mu)[: int(N * (1 - self.discard_fraction))]]
else:
# mu = r2/r1 for each data point
# relatively high dimensional data, use distance matrix generator
if X.shape[1] > 25:
distmat_chunks = pairwise_distances_chunked(X)
_mu = np.zeros((len(X)))
i = 0
for x in distmat_chunks:
x = np.sort(x, axis=1)
r1, r2 = x[:, 1], x[:, 2]
_mu[i : i + len(x)] = r2 / r1
i += len(x)
# discard the largest distances
mu = _mu[np.argsort(_mu)[: int(N * (1 - self.discard_fraction))]]
else: # relatively low dimensional data, search nearest neighbors directly
dists, _ = get_nn(X, k=2)
r1, r2 = dists[:, 0], dists[:, 1]
_mu = r2 / r1
# discard the largest distances
mu = _mu[np.argsort(_mu)[: int(N * (1 - self.discard_fraction))]]
# Empirical cumulate
Femp = np.arange(int(N * (1 - self.discard_fraction))) / N
# Fit line
lr = LinearRegression(fit_intercept=False)
lr.fit(np.log(mu).reshape(-1, 1), -np.log(1 - Femp).reshape(-1, 1))
d = lr.coef_[0][0] # extract slope
return (
d,
np.log(mu).reshape(-1, 1),
-np.log(1 - Femp).reshape(-1, 1),
) | /scikit-dimension-0.3.3.tar.gz/scikit-dimension-0.3.3/skdim/id/_TwoNN.py | 0.963857 | 0.798344 | _TwoNN.py | pypi |
import warnings
import numpy as np
from sklearn.metrics import pairwise_distances_chunked
from .._commonfuncs import get_nn, GlobalEstimator
from sklearn.utils.validation import check_array
class CorrInt(GlobalEstimator):
"""Intrinsic dimension estimation using the Correlation Dimension. [Grassberger1983]_ [IDHino]_
Parameters
----------
k1: int
First neighborhood size considered
k2: int
Last neighborhood size considered
DM: bool, default=False
Is the input a precomputed distance matrix (dense)
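    Examples
    --------
    A minimal usage sketch (random data, purely illustrative):
    >>> import numpy as np
    >>> X = np.random.normal(size=(500, 5))
    >>> ci = CorrInt(k1=10, k2=20).fit(X)  # estimate in ci.dimension_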
"""
def __init__(self, k1=10, k2=20, DM=False):
self.k1 = k1
self.k2 = k2
self.DM = DM
def fit(self, X, y=None):
"""A reference implementation of a fitting function.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
The training input samples.
y : dummy parameter to respect the sklearn API
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, ensure_min_features=2)
if self.k2 >= len(X):
warnings.warn("k2 larger or equal to len(X), using len(X)-1")
self.k2 = len(X) - 1
if self.k1 >= len(X) or self.k1 > self.k2:
warnings.warn("k1 larger than k2 or len(X), using k2-1")
self.k1 = self.k2 - 1
self.dimension_ = self._corrint(X)
self.is_fitted_ = True
# `fit` should always return `self`
return self
def _corrint(self, X):
n_elements = len(X) ** 2 # number of elements
dists, _ = get_nn(X, self.k2)
if self.DM is False:
chunked_distmat = pairwise_distances_chunked(X)
else:
chunked_distmat = X
r1 = np.median(dists[:, self.k1 - 1])
r2 = np.median(dists[:, self.k2 - 1])
n_diagonal_entries = len(X) # remove diagonal from sum count
s1 = -n_diagonal_entries
s2 = -n_diagonal_entries
for chunk in chunked_distmat:
s1 += (chunk < r1).sum()
s2 += (chunk < r2).sum()
Cr = np.array([s1 / n_elements, s2 / n_elements])
estq = np.diff(np.log(Cr)) / np.log(r2 / r1)
return estq[0] | /scikit-dimension-0.3.3.tar.gz/scikit-dimension-0.3.3/skdim/id/_CorrInt.py | 0.882453 | 0.662309 | _CorrInt.py | pypi |
The MIT License (MIT)<br>
Copyright (c) 2017 Massachusetts Institute of Technology<br>
Author: Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
Framework - Offloading Pipeline Processing to Amazon Demo
=====================
This notebook demonstrates offloading work to an amazon server
Initial imports
```
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (14.0, 3.0)
import re
```
skdaccess imports
```
from skdaccess.framework.param_class import *
from skdaccess.geo.groundwater import DataFetcher as GWDF
```
skdiscovery imports
```
from skdiscovery.data_structure.framework import DiscoveryPipeline
from skdiscovery.data_structure.framework.stagecontainers import *
from skdiscovery.data_structure.table.filters import MedianFilter
from skdiscovery.data_structure.generic.accumulators import DataAccumulator
```
Configure groundwater data fetcher
```
# Setup time range
start_date = '2000-01-01'
end_date = '2015-12-31'
# Select station
station_id = 340503117104104
# Create Data Fetcher
gwdf = GWDF([AutoList([station_id])],start_date,end_date)
```
Create Pipeline
```
ap_window = AutoParamListCycle([1,
15,
40,
70,
150,
300])
fl_median = MedianFilter('Median Filter',[ap_window],interpolate=False)
sc_median = StageContainer(fl_median)
acc_data = DataAccumulator('Data Accumulator',[])
sc_data = StageContainer(acc_data)
pipeline = DiscoveryPipeline(gwdf,[sc_median, sc_data])
```
Display pipeline
```
pipeline.plotPipelineInstance()
```
Run the pipeline, offloading the processing to a node on Amazon.
While running, the amazon node can display the jobs:
![Amazon Node](images/amazon_run.png)
```
pipeline.run(num_runs=6,amazon=True)
```
Plot the results
```
# Get the results
results = pipeline.getResults()
metadata = pipeline.getMetadataHistory()
# Loop over each pipeline run
for index,(run,info) in enumerate(zip(results,metadata)):
# Plot the depth to water level
plt.plot(run['Data Accumulator'][340503117104104].loc[:,'Median Depth to Water']);
# Set xlabel
plt.xlabel('Date');
# Set ylabel
plt.ylabel("Depth to Water Level");
# Set title
plt.title('Median Filter Window: ' + re.search("\[\'(.*)\'\]",info[1]).group(1) + ' Days');
#Create new figure
plt.figure();
```
| /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/examples/Amazon_Offload.ipynb | 0.555918 | 0.699036 | Amazon_Offload.ipynb | pypi |
from skdiscovery.data_structure.framework import PipelineItem
import numpy as np
from sklearn.decomposition import PCA
from sklearn.decomposition import FastICA
class General_Component_Analysis(PipelineItem):
'''
Performs either ICA or PCA analysis on series data
'''
def __init__(self, str_description, ap_paramList):
'''
Initialize Analysis object.
@param str_description: String description of analysis
@param ap_paramList[num_components]: Number of components
@param ap_paramList[component_type]: Type of component analysis (CA); either PCA or ICA
@param ap_paramList[start_time]: Starting time for CA
@param ap_paramList[end_time]: ending time for CA
@param ap_paramList[labels]: Optional list of label names
'''
self.str_description = str_description
self.ap_paramList = ap_paramList
self.ap_paramNames = ['n_components','component_type','start_time','end_time','label_names']
self.results = dict()
def process(self, obj_data):
'''
Perform component analysis on data:
Results are added to the data wrapper as a dictionary with
        results['CA'] = the fitted component analysis object (PCA or ICA)
        results['Projection'] = projection of the data onto the components
@param obj_data: Data wrapper containing the data
'''
num_components = self.ap_paramList[0]()
component_type = self.ap_paramList[1]()
start_time = self.ap_paramList[2]()
end_time = self.ap_paramList[3]()
results = dict()
results['start_date'] = start_time
results['end_date'] = end_time
if len(self.ap_paramList) >= 5:
label_names = self.ap_paramList[4]()
else:
label_names = None
cut_data = []
for label, data, err in obj_data.getIterator():
if label_names == None or label in label_names:
cut_data.append(data[start_time:end_time])
cut_data = np.array(cut_data)
if len(cut_data) > 0:
if component_type == 'ICA' :
ca = FastICA(n_components = num_components)
else:
ca = PCA(n_components = num_components)
time_projection = ca.fit_transform(cut_data.T)
results['CA'] = ca
results['Projection'] = time_projection
else:
results['CA'] = None
results['Projection'] = None
obj_data.addResult(self.str_description, results) | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/series/analysis/gca.py | 0.705278 | 0.32142 | gca.py | pypi |
import collections
import numpy as np
import scipy.optimize as optimize
import skdaccess.utilities.pbo_util as pbo_utils
from skdiscovery.data_structure.framework import PipelineItem
from skdiscovery.utilities.patterns import pbo_tools
from skdiscovery.utilities.patterns.pbo_tools import SourceWrapper, MogiVectors
class Mogi_Inversion(PipelineItem):
'''
Perform a Mogi source inversion on a set of gps series data
The source is assumed to be a Mogi source (point source), but other source models can be selected.
Assumes directions are named ('dN', 'dE', 'dU').
'''
def __init__(self, str_description, ap_paramList):
'''
Initialize Mogi analysis item
@param str_description: Description of the item
@param ap_paramList[h_pca_name]: Name of the pca computed by General_Component_Analysis. Gets start and end date from the PCA fit.
@param ap_paramList[source_type]: Type of magma chamber source model to use (mogi [default],finite_sphere,closed_pipe,constant_open_pipe,rising_open_pipe,sill)
'''
super(Mogi_Inversion, self).__init__(str_description, ap_paramList)
self.ap_paramNames = ['pca_name','source_type']
def FitPCA(self, hPCA_Proj):
'''
Determine the timing of the inflation event.
        Fits A * arctan( (t - t0) / c ) + B to the first component of the PCA projection.
        @param hPCA_Proj: The sklearn PCA projection
        @return ([t0, c], A)
'''
fitfunc = lambda p,t: p[0]*np.arctan((t-p[1])/p[2])+p[3]
errfunc = lambda p,x,y: fitfunc(p,x) - y
dLen = len(hPCA_Proj[:,0])
pA, success = optimize.leastsq(errfunc,[1.,dLen/2.,1.,0.],args=(np.arange(dLen),hPCA_Proj[:,0]))
ct = pA[1:3]
return ct, pA[0]
def FitTimeSeries(self, pd_series, ct):
'''
Fits the amplitude and offset of an inflation event given the time and length of the event.
Fits A and B in A * arctan( (t - t0) / c) + B
@param pd_series: Time series to be fit
@param ct: [t0, c]
        @return Amplitude of the fit and its estimated standard error
'''
fitfunc2 = lambda p,c,t: p[0]*np.arctan((t-c[0])/c[1])+p[1]
errfunc2 = lambda p,c,x,y: fitfunc2(p,c,x) - y
dLen = len(pd_series)
pA, pcov = optimize.leastsq(errfunc2,[1.,0.],args=(ct,np.arange(dLen),pd_series))
# res = fitfunc2(pA,ct,np.arange(dLen))[-1]-fitfunc2(pA,ct,np.arange(dLen))[0]
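        # The total change of A*arctan((t - t0)/c) + B as t goes from -inf to
        # +inf is A*pi, so A*pi is reported as the full amplitude of the offset.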
res = pA[0]*np.pi
s_sq = (errfunc2(pA,ct,np.arange(dLen),pd_series)**2).sum()/(len(pd_series)-2)
pcov = pcov * s_sq
error = []
for i in range(len(pA)):
try:
error.append(np.absolute(pcov[i][i])**0.5)
except:
error.append( 0.00 )
perr_leastsq = np.array(error)
return res, perr_leastsq
def process(self, obj_data):
'''
Finds the magma source (default-mogi) from PBO GPS data.
Assumes time series columns are named ('dN', 'dE', 'dU'). Predicts location of the
magma source using scipy.optimize.curve_fit
        The fitted source is stored in the data wrapper as an ordered dictionary with
        res['lat'] = latitude
        res['lon'] = longitude
        res['depth'] = source depth (km)
        res['amplitude'] = volume change (meters^3)
        res['ex_params'] = extra parameters (depends on the source model)
@param obj_data: Data object containing the results from the PCA stage
'''
h_pca_name = self.ap_paramList[0]()
if len(self.ap_paramList)>=2:
exN = {'mogi':0,'finite_sphere':1,'closed_pipe':1,'constant_open_pipe':1,'rising_open_pipe':2,'sill':0}
try:
mag_source = getattr(pbo_tools,self.ap_paramList[1]().lower())
ExScParams = tuple(np.ones((exN[self.ap_paramList[1]().lower()],)))
except:
mag_source = pbo_tools.mogi
ExScParams = ()
print('No source type called '+self.ap_paramList[1]()+', defaulting to a Mogi source.')
else:
mag_source = pbo_tools.mogi
ExScParams = ()
mag_source = SourceWrapper(mag_source)
projection = obj_data.getResults()[h_pca_name]['Projection']
start_date = obj_data.getResults()[h_pca_name]['start_date']
end_date = obj_data.getResults()[h_pca_name]['end_date']
ct, pca_amp = self.FitPCA(projection)
pca_amp *= np.pi
tp_directions = ('dN', 'dE', 'dU')
xvs = []
yvs = []
zvs = []
for label, data, err in obj_data.getIterator():
if label in tp_directions:
distance,f_error = self.FitTimeSeries(data.loc[start_date:end_date], ct)
if label == tp_directions[1]:
xvs.append(distance)
elif label == tp_directions[0]:
yvs.append(distance)
elif label == tp_directions[2]:
zvs.append(distance)
else:
print('Ignoring column: ', label)
xvs = np.array(xvs)*1e-6
yvs = np.array(yvs)*1e-6
zvs = np.array(zvs)*1e-6
ydata = np.hstack((xvs, yvs,zvs)).T
station_list = obj_data.get().minor_axis
meta_data = obj_data.info()
station_coords = pbo_utils.getStationCoords(meta_data, station_list)
dimensions = ('x','y','z')
xdata = []
for dim in dimensions:
for coord in station_coords:
xdata.append((dim, coord[0], coord[1]))
coord_range = np.array(pbo_utils.getLatLonRange(meta_data, station_list))
lat_guess = np.mean(coord_range[0,:])
lon_guess = np.mean(coord_range[1,:])
fit = optimize.curve_fit(mag_source, xdata, ydata, (lat_guess, lon_guess, 5, 1e-4)+ExScParams)
res = collections.OrderedDict()
res['lat'] = fit[0][0]
res['lon'] = fit[0][1]
res['depth'] = fit[0][2]
res['amplitude'] = fit[0][3]
if len(fit[0])>4:
res['ex_params'] = fit[0][4:]
else:
res['ex_params'] = np.nan
res['pca_amplitude'] = pca_amp
if len(self.ap_paramList)>=2:
res['source_type'] = self.ap_paramList[1]().lower()
else:
res['source_type'] = 'mogi'
obj_data.addResult(self.str_description, res)
# lat_fit_range = (np.min(lat_list)-0.15, np.max(lat_list)+0.15)
# lon_fit_range = (np.min(lon_list)-0.15, np.max(lon_list)+0.15)
# res = optimize.brute(self.mogi, (lat_fit_range, lon_fit_range,
# (1,10), (1e-5, 1e-3)),
# args = (xvs*1e-6, yvs*1e-6, zvs*1e-6,
# station_list, meta_data)) | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/series/analysis/mogi.py | 0.677154 | 0.442215 | mogi.py | pypi |
from skdiscovery.data_structure.framework import PipelineItem
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
class OffsetDetrend(PipelineItem):
'''
Trend filter that fits a stepwise function to linearly detrended series data
On detrended data this filter fits a stepwise function (number of
steps provided by the user) to correct the linear fit by
accounting for discontinuous offsets, such as due to a change in
the antenna or from an earthquake. The final linear fit handles
each portion of the offset independently. If the number of
discontinuities is not provided as an autoparam, the filter
assumes a single discontinuity.
'''
def __init__(self, str_description, ap_paramList = [], labels=None, column_names=None, time_point=None, time_interval=None):
'''
Initialize OffsetDetrend filter
@param str_description: String describing filter
@param ap_paramList[step_count]: Number of steps to remove from data (Default: 1)
@param labels: List of labels used to select data to be removed (using None will apply to all labels)
@param column_names: List of column names to select data to be removed (using None will apply to all columns)
@param time_point: Time of offset
@param time_interval: Interval within which the offset occurs
'''
self.labels = labels
self.column_names = column_names
self.time_point = time_point
if time_interval == None:
self.time_interval = [-500,500]
else:
if type(time_interval) == int:
self.time_interval = [-time_interval,time_interval]
else:
self.time_interval = time_interval
self.ap_paramNames = ['step_count']
super(OffsetDetrend, self).__init__(str_description, ap_paramList)
def process(self, obj_data):
'''
Apply offset estimation and detrending filter to data set.
@param obj_data: Input data. Changes are made in place.
'''
labels = self.labels
column_names = self.column_names
# user provided number of steps/offsets in the data
step_count = 1
if len(self.ap_paramList) != 0:
step_count = self.ap_paramList[0]()
for label, data, err in obj_data.getIterator():
if (labels is None or label in labels) and \
(column_names is None or data.name in column_names):
# keep track of the time index and the location of nan's
tindex = data.index
reind = np.array(np.isnan(data))
# a temporary time index and data array without nan's
nts = np.arange(len(data))
nts = np.delete(nts,nts[reind])
nys = data[reind==False]
# Decision Tree Regressor for finding the discontinuities
regr_1 = DecisionTreeRegressor(max_depth=step_count)
if self.time_point == None:
regr_1.fit(nts[:,np.newaxis], nys)
else:
# make time_point (a string) into an index
time_point = np.where(tindex==self.time_point)[0][0]
regr_1.fit(nts[(time_point+self.time_interval[0]):(time_point+self.time_interval[1]),np.newaxis],
nys[(time_point+self.time_interval[0]):(time_point+self.time_interval[1])])
r1 = regr_1.predict(nts[:,np.newaxis])
                # offset the discontinuity to be continuous and fit a single line
# (using median of 5 points on either side of discontinuity)
nys[r1==r1[-1]] += np.median(nys[r1==r1[0]][-5:-1]) - np.median(nys[r1==r1[-1]][0:5])
z3 = np.polyfit(nts, nys, 1)
# make the data into a pd series and correctly index
x3 = pd.Series(data=nys-(z3[0]*nts+z3[1]),index=tindex[reind==False])
x3 = x3.reindex(tindex)
# and then use that to update in place
data.update(x3) | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/series/filters/offset_detrend.py | 0.736211 | 0.637905 | offset_detrend.py | pypi |
from collections import OrderedDict
from skdiscovery.data_structure.framework.base import PipelineItem
from skdiscovery.utilities.patterns.image_tools import divideIntoSquares
import numpy as np
class TileImage(PipelineItem):
'''
Create several smaller images from a larger image
'''
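    # Usage sketch (illustrative values; assumes an image data wrapper `image_wrapper`
    # and that AutoParam comes from skdaccess.framework.param_class; the first
    # auto-param is the stride between tiles):
    #
    #     tiler = TileImage('Tile', [AutoParam(64)], size=128,
    #                       min_deviation=0.1, min_fraction=0.25)
    #     tiler.process(image_wrapper)   # each image is replaced by its kept tiles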
def __init__(self, str_description, ap_paramList, size, min_deviation=None, min_fraction=None, deviation_as_percent=False):
'''
Initialize TileImage item
@param str_description: String description of item
@param ap_paramList[stride]: Distance between neighboring tiles
@param size: Size of tile (length of one side of a square)
        @param min_deviation: Minimum deviation used when deciding whether to keep a tile
@param min_fraction: Minimum fraction of pixels above min_deviation needed to keep tile
@param deviation_as_percent: Treat min_deviation as a percentage of the max value of the original image
'''
if deviation_as_percent and min_deviation is None:
raise RuntimeError('Must supply min_deviation when deviation_as_percent is True')
self.size = size
self._min_deviation = min_deviation
self._min_fraction = min_fraction
self._deviation_as_percent = deviation_as_percent
super(TileImage, self).__init__(str_description, ap_paramList)
def process(self, obj_data):
'''
        Generate new images by tiling the input images
@param obj_data: Input image wrapper
'''
stride = self.ap_paramList[0]()
if len(self.ap_paramList) > 1:
threshold_function = self.ap_paramList[1]()
else:
threshold_function = None
if threshold_function is not None and self._min_fraction is None:
raise RuntimeError('Must supply min_fraction with threshold function')
if threshold_function is not None and self._min_deviation is not None:
raise RuntimeError('Cannot supply both min_deviation and threshold function')
results = OrderedDict()
metadata = OrderedDict()
for label, data in obj_data.getIterator():
extents, patches = divideIntoSquares(data, self.size, stride)
if self._deviation_as_percent:
min_deviation = self._min_deviation * np.max(data)
else:
min_deviation = self._min_deviation
if self._min_fraction is not None:
if min_deviation is not None:
                    valid_index = np.count_nonzero(np.abs(patches) >= min_deviation, axis=(1,2)) / np.prod(patches.shape[1:]) > self._min_fraction
else:
threshold = threshold_function(np.abs(data))
threshold_data = np.abs(patches.copy())
threshold_data[threshold_data < threshold] = np.nan
valid_index = np.count_nonzero(~np.isnan(threshold_data), axis=(1,2)) / np.prod(patches.shape[1:]) > self._min_fraction
patches = patches[valid_index]
extents = extents[valid_index]
try:
metadata[label] = obj_data.info(label)
except TypeError:
pass
for index in range(0,patches.shape[0]):
new_label = label + ', Square: ' + str(index)
results[new_label] = patches[index, ...]
metadata[new_label] = OrderedDict()
metadata[new_label]['extent'] = extents[index,...]
obj_data.update(results)
obj_data.updateMetadata(metadata) | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/image/generate/tile_image.py | 0.77223 | 0.371507 | tile_image.py | pypi |
from skdiscovery.data_structure.framework.base import PipelineItem
from skdiscovery.data_structure.framework import DiscoveryPipeline
from skdiscovery.data_structure.generic.accumulators import DataAccumulator
from skdiscovery.data_structure.table.filters import CalibrateGRACE, Resample, CalibrateGRACEMascon
from skdiscovery.data_structure.framework.stagecontainers import *
from skdaccess.framework.param_class import *
from skdaccess.geo.grace import DataFetcher as GDF
from skdaccess.geo.grace.mascon.cache import DataFetcher as MasconDF
from skdaccess.geo.gldas import DataFetcher as GLDASDF
import numpy as np
class GraceFusion(PipelineItem):
'''
    Fuses GRACE equivalent water depth time series
Works on table data (original data from http://grace.jpl.nasa.gov/data/get-data/monthly-mass-grids-land/)
'''
def __init__(self, str_description, ap_paramList, metadata, column_data_name = 'Grace', column_error_name = 'Grace_Uncertainty'):
'''
Initialize Grace Fusion item
@param str_description: String describing item
        @param ap_paramList[gldas]: How to use the Global Land Data Assimilation System (GLDAS) water model ('off', 'remove', or 'only')
@param ap_paramList[mascons]: Boolean indicating if the mascon solution should be used
        @param ap_paramList[apply_scale_factor]: Boolean indicating if the scaling factors should be applied
@param metadata: Metadata that contains lat,lon coordinates based on data labels
@param column_data_name: Name of column for GRACE data
@param column_error_name: Grace Uncertainty column name
'''
super(GraceFusion, self).__init__(str_description, ap_paramList)
self.metadata = metadata.copy()
self.column_data_name = column_data_name
self.column_error_name = column_error_name
# remove_sm_and_snow
self._tileCache = None
def process(self, obj_data):
'''
Adds columns for GRACE data and uncertainties
@param obj_data: Input DataWrapper, will be modified in place
'''
# Only perform fusion if data exists
gldas = self.ap_paramList[0]()
use_mascons = self.ap_paramList[1]()
apply_scale_factor = self.ap_paramList[2]()
if obj_data.getLength() > 0:
start_date = None
end_date = None
for label, data in obj_data.getIterator():
try:
lat = self.metadata[label]['Lat']
lon = self.metadata[label]['Lon']
except:
lat = self.metadata.loc[label,'Lat']
lon = self.metadata.loc[label,'Lon']
locations = [(lat,lon)]
if start_date == None:
start_date = data.index[0]
end_date = data.index[-1]
else:
if start_date != data.index[0] \
or end_date != data.index[-1]:
raise RuntimeError("Varying starting and ending dates not supported")
al_locations = AutoList(locations)
al_locations_gldas = AutoList(locations)
if use_mascons == False:
graceDF = GDF([al_locations], start_date, end_date)
else:
graceDF = MasconDF([al_locations], start_date, end_date)
gldasDF = GLDASDF([al_locations_gldas], start_date, end_date)
def getData(datafetcher, pipe_type):
ac_data = DataAccumulator('Data',[])
sc_data = StageContainer(ac_data)
fl_grace = CalibrateGRACE('Calibrate', apply_scale_factor = apply_scale_factor)
sc_grace = StageContainer(fl_grace)
fl_mascon = CalibrateGRACEMascon('CalibrateMascon', apply_scale_factor = apply_scale_factor)
sc_mascon = StageContainer(fl_mascon)
fl_resample = Resample('Resample',start_date, end_date)
sc_resample = StageContainer(fl_resample)
if pipe_type == 'grace':
pipeline = DiscoveryPipeline(datafetcher, [sc_grace, sc_resample, sc_data])
elif pipe_type == 'mascon':
pipeline = DiscoveryPipeline(datafetcher, [sc_mascon, sc_resample, sc_data])
elif pipe_type == 'gldas':
pipeline = DiscoveryPipeline(datafetcher, [sc_resample, sc_data])
else:
raise RuntimeError('pipe_type: ' + str(pipe_type) + ' not understood')
pipeline.run(num_cores=1)
key = list(pipeline.getResults(0)['Data'].keys())[0]
return pipeline.getResults(0)['Data'][key]
# Load GRACE data
if use_mascons == False:
grace_data = getData(graceDF, 'grace')
else:
grace_data = getData(graceDF, 'mascon')
if gldas.lower() == 'off':
# We are not removing sm and snow
obj_data.addColumn(label, self.column_data_name, grace_data['EWD'])
obj_data.addColumn(label, self.column_error_name, grace_data['EWD_Error'])
elif gldas.lower() == 'remove':
# If we are removing sm and snow
gldas_data = getData(gldasDF, 'gldas')
grace = grace_data['Data']
gldas = gldas_data['Data']
grace_index = grace.index
grace.dropna(inplace=True)
# If no grace data available, no need to remove gldas
if len(grace) == 0:
continue
# Get matching start and end
start_grace = grace.index[0]
end_grace = grace.index[-1]
start_gldas = gldas.index[0]
end_gldas = gldas.index[-1]
start_month = np.max([start_gldas,start_grace]).strftime('%Y-%m')
end_month = np.min([end_gldas,end_grace]).strftime('%Y-%m')
# Convert gldas to a data frame
# and save index
# gldas = gldas.loc[:,:,'GLDAS'].copy()
gldas.loc[:,'Date'] = gldas.index
# Index GLDAS data by month
new_index = [date.strftime('%Y-%m') for date in gldas.index]
gldas.loc[:,'Month'] = new_index
gldas.set_index('Month',inplace=True)
# select only months that are also in GRACE
cut_gldas = gldas.loc[[date.strftime('%Y-%m') for date in grace.loc[start_month:end_month,:].index],:]
# index GLDAS data to GRACE dates
cut_gldas.loc[:,'Grace Index'] = grace.loc[start_month:end_month,:].index
cut_gldas.set_index('Grace Index', inplace=True)
# Calculate distance between days
offset_days = cut_gldas.index - cut_gldas.loc[:,'Date']
offset_days = offset_days.apply(lambda t: t.days)
cut_gldas.loc[:,'Offset'] = offset_days
# Remove any data where the difference between gldas and grace are > 10 days
cut_gldas = cut_gldas[np.abs(cut_gldas.loc[:,'Offset']) < 10].copy()
# Select appropriate Grace Data
cut_grace = grace.loc[cut_gldas.index,:]
# Remove contribution of snow + sm to GRACE
cut_grace.loc[:,'Grace'] = cut_grace.loc[:,'Grace'] - cut_gldas['GLDAS']
# Now restore to original index, filling in with NaN's
grace = cut_grace.reindex(grace_index)
                    # Place the corrected series back into the GRACE data container
                    grace_data['Data'] = grace
# All the snow and sm contribution has been removed,
# so the dictionary can now be returned
obj_data.addColumn(label, self.column_data_name, grace_data['EWD'])
obj_data.addColumn(label, self.column_error_name, grace_data['EWD_Error'])
elif gldas.lower() == 'only':
obj_data.addColumn(label, self.column_data_name, ['EWD'])
else:
raise ValueError('Did not understand gldas option: ' + gldas.lower()) | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/fusion/grace.py | 0.675122 | 0.286063 | grace.py | pypi |
from skdiscovery.data_structure.framework import PipelineItem
import numpy as np
from sklearn.decomposition import PCA
from sklearn.decomposition import FastICA
class General_Component_Analysis(PipelineItem):
'''
Performs a general component analysis on table data.
Currently, the two built-in types of analysis are either ICA or PCA.
'''
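    # Usage sketch (illustrative dates and columns; assumes a table data wrapper
    # `table_wrapper` and that AutoParam comes from skdaccess.framework.param_class):
    #
    #     gca = General_Component_Analysis('CA',
    #                                      [AutoParam('PCA'),
    #                                       AutoParam('2010-01-01'),
    #                                       AutoParam('2012-01-01')],
    #                                      n_components=3, column_names=['dU'])
    #     gca.process(table_wrapper)   # results hold the fitted CA and its projection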
def __init__(self, str_description, ap_paramList, n_components, column_names, **kwargs):
'''
Initialize Analysis object
@param str_description: String description of analysis
@param ap_paramList[component_type]: Type of CA; either PCA or ICA
@param ap_paramList[start_time]: Starting time for CA
@param ap_paramList[end_time]: ending time for CA
@param n_components: Number of components to compute
@param column_names: Columns names to use
@param kwargs: Extra keyword arguments to pass on to ICA (ignored for PCA)
'''
self.str_description = str_description
self.ap_paramList = ap_paramList
self.ap_paramNames = ['component_type','start_time','end_time']
self.n_components = n_components
self.column_names = column_names
self.kwargs = kwargs
self.results = dict()
def process(self, obj_data):
'''
Perform component analysis on data
Results are added to the data wrapper as a dictionary with
        results['CA'] = Fitted component analysis object (eigenvectors)
results['Projection'] = Projection on to the eigenvectors
@param obj_data: Data wrapper
'''
component_type = self.ap_paramList[0]()
start_time = self.ap_paramList[1]()
end_time = self.ap_paramList[2]()
num_components = self.n_components
results = dict()
results['start_date'] = start_time
results['end_date'] = end_time
cut_data = []
label_list = []
for label, data in obj_data.getIterator():
for column in self.column_names:
cut_data.append(data.loc[start_time:end_time, column])
label_list.append(label)
cut_data = np.array(cut_data)
if len(cut_data) > 0:
if component_type == 'ICA' :
ca = FastICA(n_components = num_components, **self.kwargs)
else:
ca = PCA(n_components = num_components)
time_projection = ca.fit_transform(cut_data.T)
results['CA'] = ca
results['Projection'] = time_projection
else:
results['CA'] = None
results['Projection'] = None
results['labels'] = label_list
obj_data.addResult(self.str_description, results) | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/analysis/gca.py | 0.847021 | 0.349089 | gca.py | pypi |
# 3rd part imports
import numpy as np
import pandas as pd
from scipy.optimize import brute
from fastdtw import fastdtw
# scikit discovery imports
from skdiscovery.data_structure.framework import PipelineItem
from skdiscovery.utilities.patterns import trend_tools as tt
# Standard library imports
from collections import OrderedDict
class RotatePCA(PipelineItem):
"""
    *** In Development *** Class for rotating PCA to separate superimposed signals
"""
def __init__(self, str_description, ap_paramList, pca_name, model, norm=None, num_components=3):
'''
@param str_description: String description of this item
@param ap_paramList[fit_type]: Fitness test to use (either 'dtw' or 'remove')
@param ap_paramList[resolution]: Fitting resolution when using brute force
@param pca_name: Name of pca results
@param model: Model to compare to (used in dtw)
@param norm: Normalization to use when comparing data and model (if None, absolute differences are used)
@param num_components: Number of pca components to use
'''
self._pca_name = pca_name
self._model = tt.normalize(model)
self.norm = norm
if num_components not in (3,4):
raise NotImplementedError('Only 3 or 4 components implemented')
self.num_components = num_components
super(RotatePCA, self).__init__(str_description, ap_paramList)
def _rotate(self, col_vector, az, ay, ax):
'''
Rotate column vectors in three dimensions
Rx * Ry * Rz * col_vectors
@param col_vector: Data as a column vector
@param az: Z angle
@param ay: Y angle
@param ax: X angle
@return rotated column vectors
'''
rz = np.array([[np.cos(az), -np.sin(az), 0], [np.sin(az), np.cos(az), 0], [0, 0, 1]])
ry = np.array([[np.cos(ay), 0, np.sin(ay)], [0, 1, 0], [-np.sin(ay), 0, np.cos(ay)]])
rx = np.array([[ 1, 0, 0], [0, np.cos(ax), -np.sin(ax)], [0, np.sin(ax), np.cos(ax)]])
rot = rx @ ry @ rz
return rot @ col_vector
def _rotate4d(self, col_vector, rot_angles):
'''
Rotate column vectors in four dimensions
@param col_vector: Data as a column vector
@param rot_angles: Rotation angles ('xy', 'yz', 'zx', 'xw', 'yw', 'zw')
@return rotated column vectors
'''
index_list = []
index_list.append([0,1])
index_list.append([1,2])
index_list.append([0,2])
index_list.append([0,3])
index_list.append([1,3])
index_list.append([2,3])
# Two different types:
# left sine is negative: Type 0
# right sine is negative: Type 1
type_list = [0, 0, 1, 0, 1, 1]
rotation_dict = OrderedDict()
# The order of the rotation matrix is as follows:
# (see https://hollasch.github.io/ray4/Four-Space_Visualization_of_4D_Objects.html#s2.2)
label_list = ['xy', 'yz', 'zx', 'xw', 'yw', 'zw']
for angle, label, index, negative_type in zip(rot_angles, label_list, index_list, type_list):
ct = np.cos(angle)
st = np.sin(angle)
rotation_matrix = np.eye(4)
rotation_matrix[index[0], index[0]] = ct
rotation_matrix[index[1], index[1]] = ct
rotation_matrix[index[0], index[1]] = st
rotation_matrix[index[1], index[0]] = st
if negative_type == 0:
rotation_matrix[index[1], index[0]] *= -1
elif negative_type == 1:
rotation_matrix[index[0], index[1]] *= -1
else:
raise RuntimeError('Invalid value of negative_type')
rotation_dict[label]=rotation_matrix
rot_matrix = np.eye(4)
for label, matrix in rotation_dict.items():
rot_matrix = rot_matrix @ matrix
return rot_matrix @ col_vector
def _rollFastDTW(self, data, centered_tiled_model, model_size):
'''
Compute minimum fastdtw distance for a model to match length of real data at all possible phases
@param data: Real input data
        @param centered_tiled_model: Model after being tiled to the appropriate length and normalized (mean removed and scaled by standard deviation)
@param model_size: Size of the original model (before tiling)
@return Index of minimum distance, minimum distance
'''
centered_data = tt.normalize(data)
fitness_values = [fastdtw(centered_data, np.roll(centered_tiled_model, i), dist=self.norm)[0] for i in range(model_size)]
min_index = np.argmin(fitness_values)
return min_index, fitness_values[min_index]
def _tileModel(self, in_model, new_size):
'''
Tile a model to increase its length
@param in_model: Input model
@param new_size: Size of tiled model
@return Tiled model
'''
num_models = int(np.ceil(new_size / len(in_model)))
return np.tile(in_model, num_models)[:new_size]
def _fitness(self, z, data, model, fit_type = 'dtw', num_components=3):
'''
Compute fitness of data given a model and rotation
@param z: Rotation angles
@param data: Input data
@param model: Input model
@param fit_type: Choose fitness computation between dynamic time warping ('dtw') or
by comparing to an seasonal and linear signal ('remove')
@param num_components: Number of pca components to use. Can be 3 or 4 for fit_type='dtw'
or 3 for fit_type='remove'
@return fitness value
'''
        if num_components == 3:
            new_data = self._rotate(data.values.T, *z)
        elif num_components == 4:
            new_data = self._rotate4d(data.values.T, z)
if fit_type == 'dtw':
return self._fitnessDTW(new_data, model, num_components)
elif fit_type == 'remove' and num_components == 3:
return self._fitnessRemove(pd.DataFrame(new_data.T, columns=['PC1','PC2','PC3'],
index=data.index))
elif fit_type == 'remove':
raise NotImplementedError("The 'remove' fitness type only works with 3 components")
else:
raise NotImplementedError('Only "dtw" and "remove" fitness types implemented')
def _fitnessDTW(self, new_data, model, num_components=3):
'''
Compute fitness value using dynamic time warping
@param new_data: Input data
@param model: Input model
@param: Number of pca components to use (3 or 4)
@return fitness value using dynamic time warping
'''
tiled_model = tt.normalize(self._tileModel(model, new_data.shape[1]))
roll, primary_results = self._rollFastDTW(new_data[num_components-1,:], tiled_model, len(model))
# pc1_results = np.min([fastdtw(tt.normalize(new_data[0,:]), np.roll(tiled_model, roll))[0],
# fastdtw(-tt.normalize(-new_data[0,:]), np.roll(tiled_model, roll))[0]])
# pc2_results = np.min([fastdtw(tt.normalize(new_data[1,:]), np.roll(tiled_model, roll))[0],
# fastdtw(tt.normalize(-new_data[1,:]), np.roll(tiled_model, roll))[0]])
other_pc_results = 0
for i in range(num_components-1):
other_pc_results += self._rollFastDTW(new_data[i,:], tiled_model, len(model))[1]
return primary_results - other_pc_results
def _fitnessRemove(self, new_data):
'''
        Fitness value determined by how well seasonal and linear signals can be removed from the first two components
@param new_data: Input data
@return fitness value determined by comparison of first two components to seasonal and linear signals
'''
linear_removed = tt.getTrend(new_data['PC1'].asfreq('D'))[0]
annual_removed = tt.sinuFits(new_data['PC2'].asfreq('D'), 1, 1)
return linear_removed.var() + annual_removed.var()
def process(self, obj_data):
'''
Compute rotation angles for PCA
@param obj_data: Input table data wrapper
'''
fit_type = self.ap_paramList[0]()
resolution = self.ap_paramList[1]()
pca_results = obj_data.getResults()[self._pca_name]
date_range = pd.date_range(pca_results['start_date'], pca_results['end_date'])
column_names = ['PC' + str(i+1) for i in range(pca_results['CA'].n_components)]
pca = pd.DataFrame(data = pca_results['Projection'], index = date_range, columns=column_names)
pca.index.name='Date'
pca = pca.loc[:,['PC' + str(i+1) for i in range(self.num_components)]]
end_point = 360 - (360/resolution)
if self.num_components == 3:
num_ranges = 3
elif self.num_components == 4:
num_ranges = 4
else:
raise ValueError('Wrong number of components')
ranges = []
for i in range(num_ranges):
ranges.append((0, np.deg2rad(end_point)))
new_angles = brute(func=self._fitness,
ranges=ranges,
Ns=resolution,
args=(pca, self._model, fit_type, self.num_components))
final_score = self._fitness(new_angles, pca, self._model, fit_type, self.num_components)
rotated_pcs = pd.DataFrame(self._rotate(pca.T, *new_angles).T, index=pca.index, columns = pca.columns)
results = OrderedDict()
results['rotation_angles'] = new_angles
results['rotated_pcs'] = rotated_pcs
results['final_score'] = final_score
results['rotated_components'] = self._rotate(pca_results['CA'].components_, *new_angles)
obj_data.addResult(self.str_description, results) | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/analysis/rotate_pca.py | 0.761006 | 0.525125 | rotate_pca.py | pypi |
from skdiscovery.data_structure.framework.base import PipelineItem
import numpy as np
import pandas as pd
from statsmodels.robust import mad
class MIDAS(PipelineItem):
'''
*In Development* A basic MIDAS trend estimator
See http://onlinelibrary.wiley.com/doi/10.1002/2015JB012552/full
'''
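    # How the estimate works: for every sample pair separated by exactly one year the
    # displacement offset is computed; offsets more than two MADs from their median are
    # discarded, and the velocity is the median of what remains (in data units per year).
    # Usage sketch (assumes a table data wrapper `table_wrapper` with daily samples):
    #
    #     midas = MIDAS('MIDAS')
    #     midas.process(table_wrapper)
    #     velocity_panel = table_wrapper.getResults()['MIDAS']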
def __init__(self, str_description,column_names = None):
'''
        Initialize the MIDAS filtering item
@param str_description: String description of filter
@param column_names: List of column names to analyze
'''
super(MIDAS, self).__init__(str_description, [])
self.column_names = column_names
def process(self, obj_data):
'''
Apply the MIDAS estimator to generate velocity estimates
Adds the result to the data wrapper
@param obj_data: Data wrapper
'''
if self.column_names == None:
column_names = obj_data.getDefaultColumns()
else:
column_names = self.column_names
time_diff = pd.to_timedelta('365d')
results = dict()
for label, data in obj_data.getIterator():
start_date = data.index[0]
end_date = data.index[-1]
for column in column_names:
start_data = data.loc[start_date:(end_date-time_diff), column]
end_data = data.loc[start_date+time_diff:end_date, column]
offsets = end_data.values - start_data.values
offsets = offsets[~np.isnan(offsets)]
med_off = np.median(offsets)
mad_off = mad(offsets)
cut_offsets = offsets[np.logical_and(offsets < med_off + 2*mad_off,
offsets > med_off - 2*mad_off)]
final_vel = np.median(cut_offsets)
final_unc = np.sqrt(np.pi/2) * mad(cut_offsets) / np.sqrt(len(cut_offsets))
results[label] = pd.DataFrame([final_vel,final_unc], ['velocity', 'uncertainty'] ,[column])
obj_data.addResult(self.str_description, pd.Panel.fromDict(results,orient='minor')) | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/analysis/midas.py | 0.841109 | 0.362743 | midas.py | pypi |
from skdiscovery.data_structure.framework import PipelineItem
import pandas as pd
import numpy as np
class Correlate(PipelineItem):
'''
Computes the correlation for table data and stores the result as a matrix.
'''
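    # Usage sketch (illustrative column names; assumes a table data wrapper
    # `table_wrapper`):
    #
    #     corr = Correlate('Correlation', column_names=['dN', 'dE', 'dU'],
    #                      correlation_type='spearman')
    #     corr.process(table_wrapper)   # result is a labeled correlation matrix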
def __init__(self, str_description, column_names = None, local_match = False, correlation_type = 'pearson'):
'''
Initialize Correlate analysis item for use on tables
@param str_description: String describing analysis item
@param column_names: List of column names to correlate
@param local_match: Only correlate data on the same frames
@param correlation_type: Type of correlation to be passed to pandas ('pearson', 'kendall', 'spearman')
'''
super(Correlate, self).__init__(str_description,[])
self.column_names = column_names
self.local_match = local_match
self.corr_type = correlation_type
def process(self, obj_data):
'''
Computes the correlation between columns and stores the results in obj_data
@param obj_data: Data wrapper
'''
if self.column_names == None:
column_names = obj_data.getDefaultColumns()
else:
column_names = self.column_names
if self.local_match == False:
data = []
index = []
for label, data_in in obj_data.getIterator():
for column in column_names:
data.append(data_in[column])
index.append(label + '.' + column)
index = np.array(index)
result = []
for s1 in data:
row = []
for s2 in data:
row.append(s1.corr(s2, method=self.corr_type))
result.append(row)
obj_data.addResult(self.str_description, pd.DataFrame(result, index=index, columns=index))
else:
full_results = dict()
for label, data_in in obj_data.getIterator():
data = []
index = []
for column in column_names:
data.append(data_in[column])
index.append(column)
result = []
for s1 in data:
row = []
for s2 in data:
row.append(s1.corr(s2, method=self.corr_type))
result.append(row)
full_results[label] = pd.DataFrame(result,index=index,columns=index)
obj_data.addResult(self.str_description, pd.Panel.from_dict(full_results)) | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/analysis/correlate.py | 0.708414 | 0.39158 | correlate.py | pypi |
import collections
import numpy as np
import scipy.optimize as optimize
import skdaccess.utilities.pbo_util as pbo_utils
from skdiscovery.data_structure.framework import PipelineItem
import skdiscovery.utilities.patterns.pbo_tools as pbo_tools
from skdiscovery.utilities.patterns.pbo_tools import SourceWrapper, MogiVectors
class Mogi_Inversion(PipelineItem):
'''
Perform a mogi source inversion on a set of gps table data
The source is assumed to be a mogi source (point source), but other source models can be selected.
Assumes directions are named ('dN', 'dE', 'dU').
'''
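    # Usage sketch (assumes a component-analysis item named 'CA' already ran on the
    # same wrapper and that AutoParam comes from skdaccess.framework.param_class):
    #
    #     mogi = Mogi_Inversion('Mogi', [AutoParam('mogi')], pca_name='CA')
    #     mogi.process(table_wrapper)   # adds lat, lon, depth and amplitude to results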
def __init__(self, str_description, ap_paramList, pca_name, column_names=['dN', 'dE', 'dU']):
'''
Initialize Mogi analysis item
@param str_description: Description of item
@param ap_paramList[source_type]: Type of magma chamber source model to use (default-mogi,finite_sphere,closed_pipe,constant_open_pipe,rising_open_pipe,sill)
@param pca_name: Name of pca result
@param column_names: The data direction column names
'''
self.pca_name = pca_name
self.column_names = column_names
super(Mogi_Inversion, self).__init__(str_description, ap_paramList)
self.ap_paramNames = ['source_type']
def FitPCA(self, hPCA_Proj):
'''
Determine the timing of the inflation event from the first component of the pca projection
fits A * arctan( (t - t0) / c ) + B to the first pca projection, in order to estimate
source amplitude parameters
@param hPCA_Proj: The sklearn PCA
@return ct: the t0, c, and B parameters from the fit
@return pA[0]: the fitted amplitude parameter
'''
fitfunc = lambda p,t: p[0]*np.arctan((t-p[1])/p[2])+p[3]
errfunc = lambda p,x,y: fitfunc(p,x) - y
dLen = len(hPCA_Proj[:,0])
pA, success = optimize.leastsq(errfunc,[1.,dLen/2.,1.,0.],args=(np.arange(dLen),hPCA_Proj[:,0]))
ct = pA[1:3]
return ct, pA[0]
def FitTimeSeries(self, pd_series, ct):
'''
Fits the amplitude and offset of an inflation event given the time and length of the event
Fits A and B in A * arctan( (t - t0) / c) + B
@param pd_series: Time series to be fit
@param ct: the time constants for the arctan
@return res: Amplitude of the fit
@return perr_leastsq: Error of the fit
'''
fitfunc2 = lambda p,c,t: p[0]*np.arctan((t-c[0])/c[1])+p[1]
errfunc2 = lambda p,c,x,y: fitfunc2(p,c,x) - y
dLen = len(pd_series)
pA, pcov = optimize.leastsq(errfunc2,[1.,0.],args=(ct,np.arange(dLen),pd_series))
# res = fitfunc2(pA,ct,np.arange(dLen))[-1]-fitfunc2(pA,ct,np.arange(dLen))[0]
res = pA[0]*np.pi
s_sq = (errfunc2(pA,ct,np.arange(dLen),pd_series)**2).sum()/(len(pd_series)-2)
pcov = pcov * s_sq
error = []
for i in range(len(pA)):
try:
error.append(np.absolute(pcov[i][i])**0.5)
except:
error.append( 0.00 )
perr_leastsq = np.array(error)
return res, perr_leastsq
def process(self, obj_data):
'''
Finds the magma source (default-mogi) from PBO GPS data.
Assumes time series columns are named ('dN', 'dE', 'dU'). Predicts the
location of the magma source using scipy.optimize.curve_fit
The result is added to the data wrapper as a list, with the four
elements describing the location of the magma source:
res[0] = latitude
res[1] = longitude
res[2] = source depth (km)
res[3] = volume change (meters^3)
@param obj_data:
'''
h_pca_name = self.pca_name
exN = {'mogi':0,'finite_sphere':1,'closed_pipe':1,'constant_open_pipe':1,'rising_open_pipe':1,'sill':0}
try:
mag_source = getattr(pbo_tools,self.ap_paramList[0]().lower())
ExScParams = tuple(np.ones((exN[self.ap_paramList[0]().lower()],)))
except:
mag_source = pbo_tools.mogi
ExScParams = ()
print('No source type called '+self.ap_paramList[0]()+', defaulting to a Mogi source.')
wrapped_mag_source = SourceWrapper(mag_source)
projection = obj_data.getResults()[h_pca_name]['Projection']
start_date = obj_data.getResults()[h_pca_name]['start_date']
end_date = obj_data.getResults()[h_pca_name]['end_date']
ct, pca_amp = self.FitPCA(projection)
pca_amp *= np.pi
xvs = []
yvs = []
zvs = []
label_list = []
for label, data in obj_data.getIterator():
label_list.append(label)
for column in self.column_names:
distance,f_error = self.FitTimeSeries(data.loc[start_date:end_date,column], ct)
if column == self.column_names[1]:
xvs.append(distance)
elif column == self.column_names[0]:
yvs.append(distance)
elif column == self.column_names[2]:
zvs.append(distance)
else:
                    print('Ignoring column: ', column)
xvs = np.array(xvs)*1e-6
yvs = np.array(yvs)*1e-6
zvs = np.array(zvs)*1e-6
ydata = np.hstack((xvs, yvs,zvs)).T
station_list = obj_data.get().keys()
meta_data = obj_data.info()
station_coords = pbo_utils.getStationCoords(meta_data, station_list)
dimensions = ('x','y','z')
xdata = []
for dim in dimensions:
for coord in station_coords:
xdata.append((dim, coord[0], coord[1]))
coord_range = np.array(pbo_utils.getLatLonRange(meta_data, station_list))
lat_guess = np.mean(coord_range[0,:])
lon_guess = np.mean(coord_range[1,:])
fit = optimize.curve_fit(wrapped_mag_source, xdata, ydata, (lat_guess, lon_guess, 5, 1e-4)+ExScParams)
res = collections.OrderedDict()
res['lat'] = fit[0][0]
res['lon'] = fit[0][1]
res['depth'] = fit[0][2]
res['amplitude'] = fit[0][3]
res['labels'] = label_list
if len(fit[0])>4:
res['ex_params'] = fit[0][4:]
else:
res['ex_params'] = np.nan
res['pca_amplitude'] = pca_amp
res['source_type'] = self.ap_paramList[0]().lower()
obj_data.addResult(self.str_description, res)
# lat_fit_range = (np.min(lat_list)-0.15, np.max(lat_list)+0.15)
# lon_fit_range = (np.min(lon_list)-0.15, np.max(lon_list)+0.15)
# res = optimize.brute(self.mogi, (lat_fit_range, lon_fit_range,
# (1,10), (1e-5, 1e-3)),
# args = (xvs*1e-6, yvs*1e-6, zvs*1e-6,
# station_list, meta_data)) | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/analysis/mogi.py | 0.612657 | 0.453927 | mogi.py | pypi |
from skdiscovery.data_structure.framework import PipelineItem
import numpy as np
import matplotlib.pyplot as plt
import math
class Plotter(PipelineItem):
'''
Make a plot of table data
'''
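    # Usage sketch (illustrative column names; assumes a table data wrapper
    # `table_wrapper` whose tables contain 'dU' and 'Sig_U' columns):
    #
    #     plotter = Plotter('Plot', column_names=['dU'], error_column_names=['Sig_U'],
    #                       num_columns=2)
    #     plotter.process(table_wrapper)   # one panel per (label, column) pair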
def __init__(self, str_description, column_names=None, error_column_names = None, num_columns = 3, width=13, height=4, columns_together=False,
annotate_column = None, annotate_data = None, xlim = None, ylim = None, **kwargs):
'''
Initialize Plotter
@param str_description: String describing accumulator
        @param column_names: Columns to be plotted
        @param error_column_names: Columns containing uncertainties to be plotted; no error bars if None
@param num_columns: Number of columns to use when plotting data
@param width: Total width of all columns combined
@param height: Height of single row of plots
@param columns_together: If true, plot the columns on the same graph
@param annotate_column: Column of annotation data to use for annotation
@param annotate_data: Annotation data
@param xlim: The x limit
@param ylim: The y limit
@param **kwargs: Any additional keyword arguments are passed on to matplotlib
'''
self.xlim = xlim
self.ylim = ylim
self.kwargs = kwargs
self.num_columns = num_columns
self.height = height
self.width = width
self.column_names = column_names
self.annotate_column = annotate_column
self.annotate_data = annotate_data
self.error_column_names = error_column_names
self.columns_together = columns_together
super(Plotter, self).__init__(str_description, [])
def process(self, obj_data):
'''
Plot each column in obj_data
@param obj_data: Data Wrapper
'''
if self.column_names == None:
column_names = obj_data.getDefaultColumns()
else:
column_names = self.column_names
width = self.width
height = self.height
# Determine total number of figures needed
if self.columns_together == False:
num_figures = obj_data.getLength() * len(column_names)
else:
num_figures = obj_data.getLength()
if num_figures > 0:
# Determine number of rows and height needed to plot all figures
rows = math.ceil( num_figures / self.num_columns)
height *= rows
figure = plt.figure()
figure.set_size_inches(width, height, True)
if self.xlim != None:
plt.xlim(*self.xlim)
if self.ylim != None:
plt.ylim(*self.ylim)
num = 0
# Main loop that iterates over all data
for label, data in obj_data.getIterator():
if self.columns_together == True:
num += 1
# Plotting with errorbars
if self.error_column_names != None:
for column, err_column in zip(column_names, self.error_column_names):
if self.columns_together == False:
num += 1
plt.subplot(rows, self.num_columns, num)
plt.title(label)
plt.ylabel(column)
plt.xticks(rotation=45)
plt.errorbar(np.array(data.index),np.array(data[column]), yerr=np.array(data[err_column]), **self.kwargs)
if self.annotate_column is not None:
try:
for vline in self.annotate_data[label][self.annotate_column]:
plt.axvline(vline,color='black',linewidth=3,alpha=0.5)
except KeyError:
pass
# print('cannot find info')
elif self.annotate_data is not None:
try:
for vline in self.annotate_data[label]:
plt.axvline(vline,color='black',linewidth=3,alpha=0.5)
except KeyError:
pass
# print('cannot find info')
# Plotting without errorbars
else:
for column in column_names:
if self.columns_together == False:
num += 1
plt.subplot(rows, self.num_columns, num)
plt.title(label)
plt.ylabel(column)
plt.xticks(rotation=45)
plt.plot(data[column], **self.kwargs)
if self.annotate_column is not None:
try:
for vline in self.annotate_data[label][self.annotate_column]:
plt.axvline(vline,color='black',linewidth=3,alpha=0.5)
except KeyError:
pass
elif self.annotate_data is not None:
try:
for vline in self.annotate_data[label]:
plt.axvline(vline,color='black',linewidth=3,alpha=0.5)
except KeyError:
pass
# Tight layout usually dispalys nicer
plt.tight_layout()
# If run_id is > -1, display run number on figure
if(obj_data.run_id > -1):
figure.suptitle( "Run: " + str(obj_data.run_id), y=1.02) | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/accumulators/plotter.py | 0.842669 | 0.441312 | plotter.py | pypi |
# Framework import
from skdiscovery.data_structure.framework.base import PipelineItem
# 3rd party libraries import
import pandas as pd
class CombineColumns(PipelineItem):
'''
Create a new column by selecting data from a column
Fills in any missing values using a second column
'''
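    # Usage sketch (illustrative column names; assumes a table data wrapper
    # `table_wrapper`):
    #
    #     combine = CombineColumns('Combine', column_1='Median', column_2='Mean',
    #                              new_column_name='Combined')
    #     combine.process(table_wrapper)   # 'Combined' takes 'Median' where available
    #                                      # and falls back to 'Mean' elsewhere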
def __init__(self, str_description, column_1, column_2, new_column_name):
'''
Initialize a CombineColumns object
@param str_description: String describing filter
@param column_1: Name of primary column
@param column_2: Name of secondary column to be used
            when data from the primary column is not available
@param new_column_name: Name of resulting column
'''
self.column_1 = column_1
self.column_2 = column_2
self.new_column_name = new_column_name
super(CombineColumns,self).__init__(str_description)
def process(self, obj_data):
'''
Apply combine column filter to data set, operating on the data_obj
@param obj_data: Table data wrapper.
'''
for label, data in obj_data.getIterator():
if self.column_1 in data.columns and self.column_2 in data.columns:
                # fill missing values in the primary column with values from the secondary column
col1_null_index = pd.isnull(data.loc[:,self.column_1])
data.loc[:,self.new_column_name] = data.loc[:,self.column_1]
# Check if there is any replacement data available
if (~pd.isnull(data.loc[col1_null_index, self.column_2])).sum() > 0:
data.loc[col1_null_index, self.new_column_name] = data.loc[col1_null_index, self.column_2]
elif self.column_2 in data.columns and self.column_1 not in data.columns:
data.loc[:,self.new_column_name] = data.loc[:,self.column_2]
elif self.column_2 not in data.columns and self.column_1 in data.columns:
data.loc[:,self.new_column_name] = data.loc[:,self.column_1]
else:
raise KeyError('data needs either "' + self.column_2 + '" or "' + self.column_1 + '" or both') | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/filters/combine_columns.py | 0.641198 | 0.498047 | combine_columns.py | pypi |
import numpy as np
import pandas as pd
from skdiscovery.data_structure.framework import PipelineItem
from skdiscovery.utilities.patterns import kalman_smoother
class KalmanFilter(PipelineItem):
'''
Runs a forward and backward Kalman Smoother with a FOGM state on table data
For more information see: Ji, K. H. 2011, PhD thesis, MIT, and
Fraser, D. C., and Potter, J. E. 1969, IEEE Trans. Automat. Contr., Acl4, 4, 387-390
'''
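    # Usage sketch (illustrative FOGM parameter values; assumes AutoParam comes from
    # skdaccess.framework.param_class and a table data wrapper `table_wrapper`):
    #
    #     kalman = KalmanFilter('Kalman', [AutoParam(60),        # tau
    #                                      AutoParam(4),         # sigma squared
    #                                      AutoParam('formal')]) # R from formal errors
    #     kalman.process(table_wrapper)   # smooths the default columns in place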
def __init__(self, str_description, ap_paramList, uncertainty_clip=5, column_names=None,
error_column_names = None, fillna=True):
'''
Initialize Kalman Smoother
@param str_description: String describing filter
@param ap_paramList[ap_tau]: the correlation time
@param ap_paramList[ap_sigmaSq]: the data noise
@param ap_paramList[ap_R]: the process noise
@param uncertainty_clip: Clip data with uncertainties greater than uncertainty_clip * median uncertainty
@param column_names: List of column names to smooth (using None will apply to all columns)
@param error_column_names: List of error column names to smooth (using None will use default error columns)
@param fillna: Fill in missing values
'''
super(KalmanFilter, self).__init__(str_description, ap_paramList)
self.uncertainty_clip = uncertainty_clip
self.ap_paramNames = ['Tau','SigmaSq','R']
self.column_names = column_names
self.error_column_names = error_column_names
self.fillna = fillna
def process(self, obj_data):
'''
Apply kalman smoother to data set
@param obj_data: Input data. Changes are made in place.
'''
uncertainty_clip = self.uncertainty_clip
ap_tau = self.ap_paramList[0]()
ap_sigmaSq = self.ap_paramList[1]()
ap_R = self.ap_paramList[2]()
if self.column_names is None:
column_names = obj_data.getDefaultColumns()
else:
column_names = self.column_names
if self.error_column_names is None:
error_column_names = obj_data.getDefaultErrorColumns()
else:
error_column_names = self.error_column_names
for label, dataframe in obj_data.getIterator():
for column, error_column in zip(column_names, error_column_names):
data = dataframe.loc[:,column].copy()
err = dataframe.loc[:,error_column].copy()
# Clip data with high uncertainties
data.loc[np.logical_and(~pd.isnull(err), err > np.nanmedian(err) * uncertainty_clip)] = np.nan
# clip = np.nanmedian(err) * uncertainty_clip
err.loc[np.logical_and(~pd.isnull(err), err > np.nanmedian(err) * uncertainty_clip)] = np.nan
# If the beginning is missing data, the smoother will diverge
if np.sum(~np.isnan(data.iloc[:20])) == 0:
data.iloc[:2] = np.nanmedian(data)
if ap_R == 'formal':
R = err
else:
R = ap_R
# Smooth the data
smoothed, variance, t, sigma_sq, R = kalman_smoother.KalmanSmoother(data,
t = ap_tau,
sigma_sq = ap_sigmaSq,
R = R)
# Set uncertainties for missing data to those estimated from
# the filter.
err.loc[pd.isnull(err)] = variance[pd.isnull(err)]
# Calculate the sample variance
T = len(data)
r = np.exp(-1 / t)
sample_var = sigma_sq * (T / (T - 1)) * ( 1 - ((1+r) / (T * (1-r))) + ((2*r*(1-r**T)) / (T**2 * (1-r)**2)))
if self.fillna == True:
obj_data.updateData(label, data.index, error_column, np.sqrt(err**2 + sample_var))
obj_data.updateData(label, data.index, column, smoothed)
else:
obj_data.updateData(label, dataframe.loc[:,error_column].dropna().index, error_column, np.sqrt(err**2 + sample_var))
obj_data.updateData(label, dataframe.loc[:,column].dropna().index, column, smoothed)
def _applyKalman(self,label_dataframe,obj_data,run_Params):
column_names = run_Params[0]
error_column_names = run_Params[1]
uncertainty_clip = run_Params[2]
ap_tau = run_Params[3]
ap_sigmaSq = run_Params[4]
ap_R = run_Params[5]
label = label_dataframe[0]
dataframe = label_dataframe[1]
result = {label:dict()}
for column, error_column in zip(column_names, error_column_names):
data = dataframe.loc[:,column].copy()
err = dataframe.loc[:,error_column].copy()
# Clip data with high uncertainties
data.loc[np.logical_and(~pd.isnull(err), err > np.nanmedian(err) * uncertainty_clip)] = np.nan
# clip = np.nanmedian(err) * uncertainty_clip
err.loc[np.logical_and(~pd.isnull(err), err > np.nanmedian(err) * uncertainty_clip)] = np.nan
# If the beginning is missing data, the smoother will diverge
if np.sum(~np.isnan(data.iloc[:20])) == 0:
data.iloc[:2] = np.nanmedian(data)
if ap_R == 'formal':
R = err
else:
R = ap_R
# Smooth the data
smoothed, variance, t, sigma_sq, R = kalman_smoother.KalmanSmoother(data,
t = ap_tau,
sigma_sq = ap_sigmaSq,
R = R)
# Set uncertainties for missing data to those estimated from
# the filter.
err.loc[pd.isnull(err)] = variance[pd.isnull(err)]
# Calculate the sample variance
T = len(data)
r = np.exp(-1 / t)
sample_var = sigma_sq * (T / (T - 1)) * ( 1 - ((1+r) / (T * (1-r))) + ((2*r*(1-r**T)) / (T**2 * (1-r)**2)))
if obj_data != None:
obj_data.updateData(label, data.index, error_column, np.sqrt(err**2 + sample_var))
obj_data.updateData(label, data.index, column, smoothed)
if obj_data == None:
result[label]['index'] = data.index
result[label][error_column] = np.sqrt(err**2 + sample_var)
result[label][column] = smoothed
if obj_data == None:
return result, label | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/filters/kalman.py | 0.688678 | 0.500244 | kalman.py | pypi |
from skdiscovery.data_structure.framework import PipelineItem
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
class OffsetDetrend(PipelineItem):
'''
Trend filter that fits a stepwise function to linearly detrended table data
On detrended data this filter fits a stepwise function (number of
steps provided by the user) to correct the linear fit by
accounting for discontinuous offsets, such as due to a change in
the antenna or from an earthquake. The final linear fit handles
each portion of the offset independently. If the number of
discontinuities is not provided as an autoparam, the filter
assumes a single discontinuity.
'''
def __init__(self, str_description, column_names, ap_paramList = [], labels=None, time_point=None, time_interval=None):
'''
Initialize OffsetDetrend filter for use on table data
@param str_description: String describing filter
@param column_names: List of column names to select data to be removed (using None will apply to all columns)
@param ap_paramList[step_count]: Number of steps to remove from data (Default: 1)
@param labels: List of labels used to select data to be removed (using None will apply to all labels)
@param time_point: Time of offset
@param time_interval: Interval within which the offset occurs
'''
self.labels = labels
self.column_names = column_names
self.time_point = time_point
if time_interval == None:
self.time_interval = [-500,500]
else:
if type(time_interval) == int:
self.time_interval = [-time_interval,time_interval]
else:
self.time_interval = time_interval
self.ap_paramNames = ['step_count']
super(OffsetDetrend, self).__init__(str_description, ap_paramList)
def process(self, obj_data):
'''
Apply offset estimation and detrending filter to data set.
@param obj_data: Input data. Changes are made in place.
'''
labels = self.labels
column_names = self.column_names
# user provided number of steps/offsets in the data
step_count = 1
if len(self.ap_paramList) != 0:
step_count = self.ap_paramList[0]()
for label, data in obj_data.getIterator():
for column in column_names:
if (labels is None or label in labels):
# keep track of the time index and the location of nan's
tindex = data.index
reind = np.array(np.isnan(data))
# a temporary time index and data array without nan's
nts = np.arange(len(data))
nts = np.delete(nts,nts[reind])
nys = data[reind==False]
# Decision Tree Regressor for finding the discontinuities
regr_1 = DecisionTreeRegressor(max_depth=step_count)
if self.time_point == None:
regr_1.fit(nts[:,np.newaxis], nys)
else:
# make time_point (a string) into an index
time_point = np.where(tindex==self.time_point)[0][0]
regr_1.fit(nts[(time_point+self.time_interval[0]):(time_point+self.time_interval[1]),np.newaxis],
nys[(time_point+self.time_interval[0]):(time_point+self.time_interval[1])])
r1 = regr_1.predict(nts[:,np.newaxis])
# offset the discontinuity to be continous and fit a single line
# (using median of 5 points on either side of discontinuity)
nys[r1==r1[-1]] += np.median(nys[r1==r1[0]][-5:-1]) - np.median(nys[r1==r1[-1]][0:5])
z3 = np.polyfit(nts, nys, 1)
# make the data into a pd series and correctly index
x3 = pd.Series(data=nys-(z3[0]*nts+z3[1]),index=tindex[reind==False])
x3 = x3.reindex(tindex)
# and then use that to update in place
obj_data.updateData(label, x3.index, column, x3) | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/filters/offset_detrend.py | 0.742422 | 0.626153 | offset_detrend.py | pypi |
from skdiscovery.data_structure.framework import PipelineItem
from skdiscovery.utilities.patterns import trend_tools
class MedianFilter(PipelineItem):
'''
A Median filter for table data
'''
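    # Usage sketch (illustrative window size; assumes AutoParam comes from
    # skdaccess.framework.param_class and a table data wrapper `table_wrapper`):
    #
    #     median = MedianFilter('Median', [AutoParam(45)], interpolate=False)
    #     median.process(table_wrapper)   # 45-sample rolling median, applied in place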
def __init__(self, str_description, ap_paramList, interpolate=True,
subtract = False,regular_period=True, min_periods=1):
'''
Initialize MedianFilter
@param str_description: String describing filter
@param ap_paramList[ap_window]: median filter window width
@param interpolate: Interpolate data points before filtering
@param subtract: Subtract filtered result from original
@param regular_period: Assume the data is regularly sampled
@param min_periods: Minimum required number of data points in window
'''
self.interpolate = interpolate
self.subtract = subtract
self.ap_paramNames = ['windowSize']
self.regular_period = regular_period
self.min_periods = min_periods
super(MedianFilter, self).__init__(str_description, ap_paramList)
def process(self, obj_data):
'''
Apply median filter to data set
        @param obj_data: Input table data wrapper. Changes are made in place.
'''
ap_window = self.ap_paramList[0]()
column_names = obj_data.getDefaultColumns()
for label, data in obj_data.getIterator():
for column in column_names:
if self.interpolate == True or self.regular_period == False:
result = trend_tools.medianFilter(data[column], ap_window, self.interpolate)
else:
result = data[column].rolling(ap_window,min_periods=self.min_periods, center=True).median()
if self.subtract == True:
obj_data.updateData(label, data.index, column, data[column] - result)
else:
obj_data.updateData(label, data.index, column, result) | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/filters/median.py | 0.897395 | 0.328812 | median.py | pypi |
from skdiscovery.data_structure.framework.base import PipelineItem
import numpy as np
class WeightedAverage(PipelineItem):
''' This filter performs a rolling weighted average using standard deviations as weight '''
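    # The window estimate is an inverse-variance weighted mean: with weights
    # w_i = 1 / sigma_i**2 the average is sum(w_i * x_i) / sum(w_i), and, assuming
    # uncorrelated errors, the propagated uncertainty is 1 / sqrt(sum(w_i)).
    # Usage sketch (illustrative column names; assumes AutoParam comes from
    # skdaccess.framework.param_class and a table data wrapper `table_wrapper`):
    #
    #     wavg = WeightedAverage('WAvg', [AutoParam(30)], column_names=['dU'],
    #                            std_dev_column_names=['Sig_U'],
    #                            propagate_uncertainties=True)
    #     wavg.process(table_wrapper)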
def __init__(self, str_description, ap_paramList, column_names, std_dev_column_names=None, propagate_uncertainties=False):
'''
Initializes a WeightedAverage object
@param str_description: String describing filter
@param ap_paramList[window]: Window to use for computing rolling weighted average
@param column_names: Names of columns to apply the weighted average
        @param std_dev_column_names: Names of the standard deviation columns. If None, a regular mean is computed.
@param propagate_uncertainties: Propagate uncertainties assuming uncorrelated errors
'''
super(WeightedAverage,self).__init__(str_description, ap_paramList)
self.column_names = column_names
self.std_dev_column_names = std_dev_column_names
self.propagate_uncertainties = propagate_uncertainties
def process(self, obj_data):
'''
        Apply the moving (weighted) average filter to a table data wrapper.
Changes are made in place.
@param obj_data: Input table data wrapper
'''
window = self.ap_paramList[0]()
for label, data in obj_data.getIterator():
if self.std_dev_column_names != None:
for column, std_dev_column in zip(self.column_names,
self.std_dev_column_names):
weights = 1 / data[std_dev_column]**2
weighted_data = data[column] * weights
scale = weights.rolling(window=window,center=True, min_periods=1).sum()
weighted_average = weighted_data.rolling(window=window, center=True, min_periods=1).sum() / scale
obj_data.updateData(label, weighted_average.index, column,weighted_average)
if self.propagate_uncertainties:
# Uncertainty determined using the standard error propagation technique
# Assumes data is uncorrelated
uncertainty = 1 / np.sqrt(scale)
obj_data.updateData(label, uncertainty.index, std_dev_column, uncertainty)
else:
for column in self.column_names:
weighted_average = data[column].rolling(window=window, center=True, min_periods=1).mean()
obj_data.updateData(label,weighted_average.index,column,weighted_average) | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/filters/weighted_average.py | 0.907093 | 0.369941 | weighted_average.py | pypi |
import pandas as pd
from skdiscovery.data_structure.framework import PipelineItem
from skdiscovery.utilities.patterns import trend_tools
class TrendFilter(PipelineItem):
'''
    Trend filter that removes linear and sinusoidal (annual, semi-annual) trends from time series.
    Works on table data.
'''
def __init__(self, str_description, ap_paramList, columns = None):
'''
Initialize Trend Filter
@param str_description: String describing filter
@param ap_paramList[list_trendTypes]: List of trend types. List can contain "linear", "annual", or "semiannual"
@param columns: List of column names to filter
'''
super(TrendFilter, self).__init__(str_description, ap_paramList)
self.columns = columns
self.ap_paramNames = ['trend_list']
def process(self, obj_data):
'''
Apply trend filter to data set.
@param obj_data: Input data. Changes are made in place.
'''
if self.columns == None:
column_names = obj_data.getDefaultColumns()
else:
column_names = self.columns
filter_list = None
if len(self.ap_paramList) != 0:
filter_list = self.ap_paramList[0].val()
for label, dataframe in obj_data.getIterator():
for column in column_names:
data = dataframe.loc[:,column]
good_index = pd.notnull(data)
if good_index.sum() == 0:
continue
if filter_list == None or 'linear' in filter_list:
obj_data.updateData(label, data.index[good_index], column, pd.Series(trend_tools.getTrend(data)[0], index=data.index)[good_index])
if filter_list == None or 'semiannual' in filter_list:
obj_data.updateData(label, data.index[good_index], column, pd.Series(trend_tools.sinuFits(data), index=data.index)[good_index])
elif 'annual' in filter_list:
obj_data.updateData(label, data.index[good_index], column, pd.Series(
trend_tools.sinuFits(data, fitN=1), index=data.index)[good_index]) | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/filters/trend.py | 0.639286 | 0.458652 | trend.py | pypi |
class PipelineItem(object):
'''
The general class used to create pipeline items.
'''
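    # Subclass sketch (illustrative item, not part of the framework; assumes a table
    # data wrapper): a custom stage only needs to override process() and modify the
    # data wrapper in place.
    #
    #     class DemeanFilter(PipelineItem):
    #         def process(self, obj_data):
    #             for label, data in obj_data.getIterator():
    #                 for column in obj_data.getDefaultColumns():
    #                     obj_data.updateData(label, data.index, column,
    #                                         data[column] - data[column].mean())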
def __init__(self, str_description, ap_paramList=[]):
'''
Initialize an object
@param str_description: String description of filter
@param ap_paramList: List of AutoParam parameters.
'''
self.str_description = str_description
self.ap_paramList = ap_paramList
self.ap_paramNames = []
def perturbParams(self):
        '''Choose another random value for all parameters'''
for param in self.ap_paramList:
param.perturb()
def resetParams(self):
        '''Set all parameters back to their initial values'''
for param in self.ap_paramList:
param.reset()
def process(self, obj_data):
'''
The actual filter processing. Empty in this generic filter.
@param obj_data: Data wrapper that will be processed
'''
pass
def __str__(self):
'''
        String representation of object.
        @return String listing all current parameters
'''
return str([str(p) for p in self.ap_paramList])
def getMetadata(self):
'''
Retrieve metadata about filter
@return String containing the item description and current parameters for filter.
'''
return self.str_description + str([str(p) for p in self.ap_paramList])
class TablePipelineItem(PipelineItem):
"""
Pipeline item for Table data
"""
def __init__(self, str_description, ap_paramList, column_list=None, error_column_list=None):
"""
Initialize Table Pipeline item
@param str_description: String describing filter
@param ap_paramList: List of AutoParams and AutoLists
@param column_list: List of columns to process
@param error_column_list: List of the associated error columns
"""
super(TablePipelineItem, self).__init__(str_description, ap_paramList)
self._column_list = column_list
self._error_column_list = error_column_list
def _getColumns(self, obj_data):
"""
Get the columns that need to be processed
Returns the columns set in this item, otherwise returns
        the default columns defined in the data wrapper
@param obj_data: Table data wrapper
@return Columns to process
"""
if self._column_list is None:
return obj_data.getDefaultColumns()
else:
return self._column_list
def _getErrorColumns(self, obj_data):
"""
        Get the error columns that need to be processed
        Returns the error columns set in this item, otherwise returns
        the default error columns defined in the data wrapper
        @param obj_data: Table data wrapper
        @return Error columns to process
"""
        if self._error_column_list is None:
return obj_data.getDefaultErrorColumns()
else:
return self._error_column_list | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/framework/base.py | 0.749729 | 0.30243 | base.py | pypi |
import numpy as np
from shapely.geometry import Polygon, Point
from collections import OrderedDict
def shoelaceArea(in_vertices):
"""
Determine the area of a polygon using the shoelace method
https://en.wikipedia.org/wiki/Shoelace_formula
@param in_vertices: The vertices of a polygon. 2d Array where the first column is the
x coordinates and the second column is the y coordinates
@return: Area of the polygon
"""
x = in_vertices[:,0]
y = in_vertices[:,1]
return 0.5 *(np.sum(x * np.roll(y,shift=-1)) - np.sum(np.roll(x,shift=-1) * y))
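# Worked example (sketch): for the unit square [(0,0), (1,0), (1,1), (0,1)] listed
# counter-clockwise the formula gives 0.5 * ((0 + 1 + 1 + 0) - (0 + 0 + 0 + 0)) = 1.0;
# reversing the vertex order (clockwise) gives -1.0. parseBasemapShape below uses this
# sign to tell outer shells (negative area) apart from holes.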
def parseBasemapShape(aquifers, aquifers_info):
"""
Create shapely polygons from shapefile read in with basemap
@param aquifers: Data read in shapefile from basemap
@param aquifers_info: Metadata read from shapefile from basemap
@return: Dictionary containing information about shapes and shapely polygon of shapefile data
"""
polygon_data = []
test_list = []
for index,(aquifer,info) in enumerate(zip(aquifers,aquifers_info)):
if shoelaceArea(np.array(aquifer)) < 0:
new_data = OrderedDict()
new_data['shell'] = aquifer
new_data['info'] = info
new_data['holes'] = []
polygon_data.append(new_data)
else:
polygon_data[-1]['holes'].append(aquifer)
for data in polygon_data:
data['polygon'] = Polygon(shell=data['shell'],holes=data['holes'])
return polygon_data
def nearestEdgeDistance(x,y,poly):
"""
Determine the distance to the closest edge of a polygon
@param x: x coordinate
@param y: y coordinate
@param poly: Shapely polygon
@return distance from x,y to nearest edge of the polygon
"""
point = Point(x,y)
ext_dist = poly.exterior.distance(point)
if len(poly.interiors) > 0:
int_dist = np.min([interior.distance(point) for interior in poly.interiors])
return np.min([ext_dist, int_dist])
else:
return ext_dist
def findPolygon(in_data, in_point):
"""
Find the polygon that a point resides in
@param in_data: Input data containing polygons as read in by parseBasemapShape
@param in_point: Shapely point
@return: Index of shape in in_data that contains in_point
"""
result_num = None
for index, data in enumerate(in_data):
if data['polygon'].contains(in_point):
if result_num == None:
result_num = index
else:
raise RuntimeError("Multiple polygons contains point")
if result_num == None:
return -1
return result_num
def getInfo(row, key, fill, polygon_data):
"""
Retrieve information from polygon data:
@param row: Container with key 'ShapeIndex'
@param key: Key of data to retrieve from polygon_data element
@param fill: Value to return if key does not exist in polygon_data element
@param polygon_data: Polygon data as read in by parseBasemapShape
"""
try:
return polygon_data[int(row['ShapeIndex'])]['info'][key]
except KeyError:
return fill
def findClosestPolygonDistance(x,y,polygon_data):
"""
Find the distance to the closest polygon
@param x: x coordinate
@param y: y coordinate
@param polygon_data: Polygon data as read in by parseBasemapShape
@return Distance from x, y to the closest polygon polygon_data
"""
min_dist = np.inf
shape_index = -1
point = Point(x,y)
for index, data in enumerate(polygon_data):
if not data['polygon'].contains(point) and data['info']['AQ_CODE'] != 999:
new_distance = data['polygon'].distance(point)
if new_distance < min_dist:
min_dist = new_distance
shape_index = index
return min_dist, shape_index | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/utilities/patterns/polygon_utils.py | 0.758555 | 0.852445 | polygon_utils.py | pypi |
import statsmodels.api as sm
import numpy as np
import imreg_dft as ird
import shapely
import scipy as sp
def buildMatchedPoints(in_matches, query_kp, train_kp):
'''
Get postions of matched points
@param in_matches: Input matches
@param query_kp: Query key points
@param train_kp: Training key points
@return Tuple containing the matched query and training positions
'''
query_index = [match.queryIdx for match in in_matches]
train_index = [match.trainIdx for match in in_matches]
sorted_query_kp = [query_kp[i] for i in query_index]
sorted_train_kp = [train_kp[i] for i in train_index]
query_positions = [[kp.pt[0], kp.pt[1]] for kp in sorted_query_kp]
train_positions = [[kp.pt[0], kp.pt[1]] for kp in sorted_train_kp]
return query_positions, train_positions
def scaleImage(input_data, vmin=None, vmax=None):
'''
Scale image values to be within 0 and 255
@param input_data: Input data
@param vmin: Minimum value for scaled data, where smaller values are clipped, defaults to Median - stddev as determined by mad
@param vmax: Maximum value for scaled data, where larger values are clipped, defaults to Median - stddev as determined by mad)
@return input_data scaled to be within 0 and 255 as an 8 bit integer
'''
if vmin==None or vmax==None:
stddev = sm.robust.mad(input_data.ravel())
middle = np.median(input_data.ravel())
if vmin == None:
vmin = middle - 1*stddev
if vmax == None:
vmax = middle + 1*stddev
input_data = input_data.astype(np.float)
input_data[input_data<vmin] = vmin
input_data[input_data>vmax] = vmax
input_data = np.round((input_data - vmin) * 255 / (vmax-vmin)).astype(np.uint8)
return input_data
def divideIntoSquares(image, size, stride):
"""
Create many patches from an image
Will drop any patches that contain NaN's
@param image: Source image
@param size: Size of one side of the square patch
@param stride: Spacing between patches (must be an integer greater than 0)
@return Array containing the extent [x_start, x_end, y_start, y_end] of each patch and an array of the patches
"""
def compute_len(size, stride):
return (size-1) // stride + 1
num_x = compute_len(image.shape[-1]-size, stride)
num_y = compute_len(image.shape[-2]-size, stride)
if image.ndim == 2:
array_data = np.zeros((num_x * num_y, size, size), dtype = image.dtype)
elif image.ndim == 3:
array_data = np.zeros((num_x * num_y, image.shape[0], size, size), dtype = image.dtype)
extent_data = np.zeros((num_x * num_y, 4), dtype = np.int)
index = 0
for x in range(0, image.shape[-1]-size, stride):
for y in range(0, image.shape[-2]-size, stride):
if image.ndim == 2:
cut_box = image[y:y+size, x:x+size]
elif image.ndim == 3:
cut_box = image[:, y:y+size, x:x+size]
array_data[index, ...] = cut_box
extent_data[index, :] = np.array([x, x+size, y, y+size])
index += 1
if image.ndim==2:
valid_index = ~np.any(np.isnan(array_data), axis=(1,2))
else:
valid_index = ~np.any(np.isnan(array_data), axis=(1,2,3))
return extent_data[valid_index], array_data[valid_index]
def generateSquaresAroundPoly(poly, size=100, stride=20):
'''
Generate that may touch a shapely polygon
@param poly: Shapely polygon
@param size: Size of boxes to create
@param stride: Distance between squares
@return list of Shapely squares that may touch input polygon
'''
x_start, x_end = np.min(poly.bounds[0]-size).astype(np.int), np.max(poly.bounds[2]+size).astype(np.int)
y_start, y_end = np.min(poly.bounds[1]-size).astype(np.int), np.max(poly.bounds[3]+size).astype(np.int)
x_coords = np.arange(x_start, x_end+1, stride)
y_coords = np.arange(y_start, y_end+1, stride)
x_mesh, y_mesh = np.meshgrid(x_coords, y_coords)
return [shapely.geometry.box(x, y, x+size, y+size) for x, y in zip(x_mesh.ravel(), y_mesh.ravel())] | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/utilities/patterns/image_tools.py | 0.589598 | 0.684679 | image_tools.py | pypi |
# 3rd party imports
import numpy as np
import pandas as pd
def getPCAComponents(pca_results):
'''
Retrieve PCA components from PCA results
@param pca_results: PCA results from a pipeline run
@return Pandas DataFrame containing the pca components
'''
date_range = pd.date_range(pca_results['start_date'], pca_results['end_date'])
column_names = ['PC' + str(i+1) for i in range(pca_results['CA'].n_components)]
pca = pd.DataFrame(data = pca_results['Projection'], index = date_range, columns=column_names)
pca.index.name='Date'
return pca
def rotate(col_vectors, az, ay, ax):
'''
Rotate col vectors in three dimensions
Rx * Ry * Rz * row_vectors
@param col_vectors: Three dimensional Column vectors
@param az: Z angle
@param ay: Y angle
@param ax: X angle
@return rotated col vectors
'''
rz = np.array([[np.cos(az), -np.sin(az), 0], [np.sin(az), np.cos(az), 0], [0, 0, 1]])
ry = np.array([[np.cos(ay), 0, np.sin(ay)], [0, 1, 0], [-np.sin(ay), 0, np.cos(ay)]])
rx = np.array([[ 1, 0, 0], [0, np.cos(ax), -np.sin(ax)], [0, np.sin(ax), np.cos(ax)]])
rot = rx @ ry @ rz
return rot @ col_vector
def translate(col_vectors, delta_x, delta_y, delta_z):
'''
Translate col vectors by x, y, and z
@param col_vectors: Row vectors of positions
@param delta_x: Amount to translate in the x direction
@param delta_y: Amount to translate in the y direction
@param delta_z: Amount to translate in the y direction
'''
col_vectors = col_vectors.copy()
col_vectors[0,:] += delta_x
col_vectors[1,:] += delta_y
col_vectors[2,:] += delta_z
return col_vectors
def formatColorbarLabels(colorbar, pad=29):
"""
Adjust the labels on a colorbar so they are right aligned
@param colorbar: Input matplotlib colorbar
@param pad: Amount of padding to use
"""
for t in colorbar.ax.get_yticklabels():
t.set_horizontalalignment('right')
colorbar.ax.yaxis.set_tick_params(pad=pad) | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/utilities/patterns/general_tools.py | 0.882326 | 0.52975 | general_tools.py | pypi |
import skdaccess.utilities.pbo_util as pbo_tools
import skdiscovery.data_structure.series.analysis.mogi as mogi
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
def multiCaPlot(pipeline, mogiFlag=False, offset=.15, direction='H',pca_comp=0,scaleFactor=2.5,map_res='i'):
'''
The multiCaPlot function generates a geographic eigenvector plot of several pipeline runs
This function plots multiple pipeline runs over perturbed pipeline
parameters. The various perturbations are plotted more
transparently (alpha=.5), while the median eigen_vector and Mogi
inversion are plotted in solid blue and red
@param pipeline: The pipeline object with multiple runs
@param mogiFlag: Flag to indicate plotting the Mogi source as well as the PCA
@param offset: Offset for padding the corners of the generated map
@param direction: Indicates the eigenvectors to plot. Only Horizontal component is currently supported ('H')
@param pca_comp: Choose the PCA component to use (integer)
@param scaleFactor: Size of the arrow scaling factor
@param map_res: Map data resolution for Basemap ('c', 'i', 'h', 'f', or None)
'''
# as this is a multi_ca_plot function, assumes GPCA
plt.figure();
meta_data = pipeline.data_generator.meta_data
station_list = pipeline.data_generator.station_list
lat_range, lon_range = pbo_tools.getLatLonRange(meta_data, station_list)
coord_list = pbo_tools.getStationCoords(meta_data, station_list)
# Create a map projection of area
bmap = Basemap(llcrnrlat=lat_range[0] - offset, urcrnrlat=lat_range[1] + offset, llcrnrlon=lon_range[0] - offset, urcrnrlon=lon_range[1] + offset,
projection='gnom', lon_0=np.mean(lon_range), lat_0=np.mean(lat_range), resolution=map_res)
# bmap.fillcontinents(color='white')
# bmap.drawmapboundary(fill_color='white')
bmap.drawmapboundary(fill_color='#41BBEC');
bmap.fillcontinents(color='white')
# Draw just coastlines, no lakes
for i,cp in enumerate(bmap.coastpolygons):
if bmap.coastpolygontypes[i]<2:
bmap.plot(cp[0],cp[1],'k-')
parallels = np.arange(np.round(lat_range[0]-offset,decimals=1),np.round(lat_range[1]+offset,decimals=1),.1)
meridians = np.arange(np.round(lon_range[0]-offset,decimals=1),np.round(lon_range[1]+offset,decimals=1),.1)
bmap.drawmeridians(meridians, labels=[0,0,0,1])
bmap.drawparallels(parallels, labels=[1,0,0,0])
# Plot station coords
for coord in coord_list:
bmap.plot(coord[1], coord[0], 'ko', markersize=6, latlon=True,zorder=12)
x,y = bmap(coord[1], coord[0])
plt.text(x+250,y-450,station_list[coord_list.index(coord)],zorder=12)
# loop over each pipeline run
elatmean = np.zeros(len(station_list))
elonmean = np.zeros_like(elatmean)
# check if want to plot Mogi as well
if mogiFlag:
avg_mogi = np.array([0.,0.])
mlatmean = np.zeros_like(elatmean)
mlonmean = np.zeros_like(elatmean)
for nrun in range(len(pipeline.RA_results)):
pca = pipeline.RA_results[nrun]['GPCA']['CA']
station_lat_list, station_lon_list, ev_lat_list, ev_lon_list, dir_sign = pbo_tools.dirEigenvectors(coord_list, pca.components_[pca_comp])
elatmean += ev_lat_list
elonmean += ev_lon_list
# plot each run in light blue
bmap.quiver(station_lon_list, station_lat_list, ev_lon_list, ev_lat_list, latlon=True,
scale = scaleFactor, alpha = .25, color = 'blue',zorder=11)
if mogiFlag:
mogi_res = pipeline.RA_results[nrun]['Mogi']
avg_mogi += np.array([mogi_res['lon'], mogi_res['lat']])
mogi_x_disp, mogi_y_disp = mogi.MogiVectors(mogi_res,station_lat_list,station_lon_list)
mlatmean += mogi_y_disp
mlonmean += mogi_x_disp
bmap.plot(mogi_res['lon'], mogi_res['lat'], "g^", markersize = 10, latlon=True, alpha = .25,zorder=12)
bmap.quiver(station_lon_list, station_lat_list, mogi_x_disp*dir_sign, mogi_y_disp*dir_sign,
latlon=True, scale=scaleFactor,color='red', alpha = .25,zorder=11)
#plot the mean ev in blue
elatmean = elatmean/len(pipeline.RA_results)
elonmean = elonmean/len(pipeline.RA_results)
bmap.quiver(station_lon_list, station_lat_list, elonmean, elatmean,
latlon=True, scale = scaleFactor, color = 'blue', alpha = 1,zorder=11)
if mogiFlag:
# plot mean mogi results
avg_mogi = avg_mogi/len(pipeline.RA_results)
mlatmean = mlatmean/len(pipeline.RA_results)
mlonmean = mlonmean/len(pipeline.RA_results)
bmap.plot(avg_mogi[0], avg_mogi[1], "g^", markersize = 10, latlon=True, alpha = 1,zorder=12)
bmap.quiver(station_lon_list, station_lat_list, mlonmean*dir_sign, mlatmean*dir_sign,
latlon=True, scale=scaleFactor,color='red', alpha = 1,zorder=11)
ax_x = plt.gca().get_xlim()
ax_y = plt.gca().get_ylim()
x,y = bmap(ax_x[0]+.1*(ax_x[1]-ax_x[0]), ax_y[0]+.1*(ax_y[1]-ax_y[0]),inverse=True)
bmap.quiver(x, y, 0, .2, latlon=True, scale = scaleFactor, headwidth=3,headlength=3,zorder=11)
plt.text(ax_x[0]+.1*(ax_x[1]-ax_x[0])-650, ax_y[0]+.1*(ax_y[1]-ax_y[0])-1000,'20%',zorder=11) | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/visualization/multi_ca_plot.py | 0.582372 | 0.563078 | multi_ca_plot.py | pypi |
import numpy as np
import pandas as pd
import matplotlib
from matplotlib.patches import Polygon
from scipy.spatial import SphericalVoronoi
import pyproj
# utility functions for generating the spherical voronoi tesselation.
def sphericalToXYZ(lat,lon,radius=1):
'''
Convert spherical coordinates to x,y,z
@param lat: Latitude, scalar or array
@param lon: Longitude, scalar or array
@param radius: Sphere's radius
@return Numpy array of x,y,z coordinates
'''
phi = np.deg2rad(90.0 - lat)
theta = np.deg2rad(lon % 360)
x = radius * np.cos(theta)*np.sin(phi)
y = radius * np.sin(theta)*np.sin(phi)
z = radius * np.cos(phi)
if np.isscalar(x) == False:
return np.vstack([x,y,z]).T
else:
return np.array([x,y,z])
def xyzToSpherical(x,y,z):
'''
Convert x,y,z to spherical coordinates
@param x: Cartesian coordinate x
@param y: Cartesian coordinate y
@param z: Cartesian coordinate z
@return numpy array of latitude,longitude, and radius
'''
radius = np.sqrt(x**2 + y**2 + z**2)
theta = np.rad2deg(np.arctan2(y,x))
phi = np.rad2deg(np.arccos(z/radius))
# lon = (theta + 180) % 360 - 180
# lon = (theta + 360) % 360
lon = theta
lat = 90 - phi
return np.array([lat,lon,radius]).T
def find_match(region_index, region_list):
'''
Find neighboring regions
@param region_index: Numeric index of region to find matches for (number between 0 and len(vertices))
@param region_list: list of lists of vertices that define regions
@return Numeric indices of regions that border the region specified by region_index
'''
regions = region_list[region_index]
matches = []
num_matched_list=[]
for i in range(len(region_list)):
test_regions = region_list[i]
num_matches = 0
found = False
for region in regions:
if region in test_regions:
num_matches += 1
found = True
if found is True:
matches.append(i)
num_matched_list.append(num_matches)
return matches
def getVoronoiCollection(data, lat_name, lon_name, bmap = None, v_name = None, full_sphere = False,
max_v=.3, min_v=-0.3, cmap = matplotlib.cm.get_cmap('jet'), test_point = None,
proj1=None, proj2=None, **kwargs):
'''
Perform a Spherical Voronoi Tessellation on the input data.
In the case where the data is restricted to one part of the globe, a polygon will not be returned
for all objects, as matplotlib polygons won't be able to stretch over half the globe.
@param data: Input pandas data frame
@param lat_name: Name of latitude column
@param lon_name: Name of longitude column
@param bmap: Basemap instance used to convert from lat, lon coordinates to projection coordinates
@param v_name: Name of value column. Use this to color each cell according to a value.
@param full_sphere: Set to true if the data spans the entire globe.
If false, a fictional point is created during tessellation and
removed later to work around issues when polygons are suppose to
span the over half the globe.
@param max_v: Specify a maximum value to use when assigning values to the tessellation
@param min_v: Specify a minimum value to use when assigning values to the tessellation
@param cmap: Matplotlib color map to use
@param test_point: Tuple containing the latitude and longitude of the ficitonal point to used to remove polygons that
wrap around the earth. If none, a point is automatically chosen
@param proj1: PyProj projection of input coordinates
@param proj2: PyProj projection of sphere
@param kwargs: Extra keyword arguments are passed to SphericalVoronoi class in scipy
@return Matplotlib patch collection of tessellation, scipy.spatial.SphericalVoronoi object, integer index of objects in patch collection.
'''
data = data.copy()
if full_sphere == False:
if test_point == None:
test_lat = -1*np.mean(data[lat_name])
test_lon = np.mean(data[lon_name]) + 180
else:
test_lat = test_point[0]
test_lon = test_point[1]
full_data = data
full_data = pd.concat([full_data, pd.DataFrame({lat_name: test_lat, lon_name: test_lon},
index=[full_data.index[0]])])
full_data.set_index(np.arange(len(full_data)), inplace=True)
else:
full_data = data
# print(full_data.tail())
if proj1 != None and proj2 != None:
results = pyproj.transform(proj1, proj2, full_data[lon_name].as_matrix(), full_data[lat_name].as_matrix())
full_data[lon_name] = results[0]
full_data[lat_name] = results[1]
xyz = pd.DataFrame(sphericalToXYZ(full_data[lat_name], full_data[lon_name]),columns=['x','y','z'],index=full_data.index)
if v_name != None:
full_data = pd.concat([full_data.loc[:,[lat_name,lon_name, v_name]],xyz],axis=1)
else:
full_data = pd.concat([full_data.loc[:,[lat_name,lon_name, v_name]],xyz],axis=1)
unique_index = np.unique(full_data.loc[:,lat_name] + 1j*full_data.loc[:,lon_name],return_index=True)[1]
full_data = full_data.iloc[np.sort(unique_index)]
voronoi = SphericalVoronoi(full_data.loc[:,['x','y','z']].as_matrix(), **kwargs)
voronoi.sort_vertices_of_regions()
latlon_verts = xyzToSpherical(voronoi.vertices[:,0],voronoi.vertices[:,1], voronoi.vertices[:,2])
if proj1 != None and proj2 != None:
results = pyproj.transform(proj2, proj1, latlon_verts[:,1], latlon_verts[:,0])
latlon_verts[:, 1] = results[0]
latlon_verts[:, 0] = results[1]
matches = list(map(lambda x: find_match(x, voronoi.regions), range(len(voronoi.regions))))
patch_list = []
patch_index = []
for i, (region,match,(station,row)) in enumerate(zip(voronoi.regions,matches,
full_data.iterrows())):
if full_sphere or (len(matches)-1) not in match:
# Keep an index of regions in patchcollection
patch_index.append(i)
if bmap != None:
xy = np.array(bmap(latlon_verts[region,1],latlon_verts[region,0])).T
else:
xy = np.array([latlon_verts[region,1],latlon_verts[region,0]]).T
if v_name != None:
value = row[v_name]
scaled_value = (value - min_v) / (max_v - min_v)
if scaled_value > 1:
scaled_value = 1.0
elif scaled_value < 0:
scaled_value = 0.0
poly = Polygon(xy, fill=True,facecolor = cmap(scaled_value),edgecolor=cmap(scaled_value))
else:
poly = Polygon(xy, fill=False)
patch_list.append(poly)
return matplotlib.collections.PatchCollection(patch_list,match_original=True), voronoi, patch_index | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/visualization/spherical_voronoi.py | 0.735831 | 0.734465 | spherical_voronoi.py | pypi |
.. raw:: html
<img alt="scikit-diveMove" src="docs/source/.static/skdiveMove_logo.png"
width=10% align=left>
<h1>scikit-diveMove</h1>
.. image:: https://img.shields.io/pypi/v/scikit-diveMove
:target: https://pypi.python.org/pypi/scikit-diveMove
:alt: PyPI
.. image:: https://github.com/spluque/scikit-diveMove/workflows/TestPyPI/badge.svg
:target: https://github.com/spluque/scikit-diveMove/actions?query=workflow%3ATestPyPI
:alt: TestPyPI
.. image:: https://github.com/spluque/scikit-diveMove/workflows/Python%20build/badge.svg
:target: https://github.com/spluque/scikit-diveMove/actions?query=workflow%3A%22Python+build%22
:alt: Python Build
.. image:: https://codecov.io/gh/spluque/scikit-diveMove/branch/master/graph/badge.svg
:target: https://codecov.io/gh/spluque/scikit-diveMove
.. image:: https://img.shields.io/pypi/dm/scikit-diveMove
:target: https://pypi.python.org/pypi/scikit-diveMove
:alt: PyPI - Downloads
`scikit-diveMove` is a Python interface to R package `diveMove`_ for
scientific data analysis, with a focus on diving behaviour analysis. It
has utilities to represent, visualize, filter, analyse, and summarize
time-depth recorder (TDR) data. Miscellaneous functions for handling
position and 3D kinematics data are also provided. `scikit-diveMove`
communicates with a single `R` instance for access to low-level tools of
package `diveMove`.
.. _diveMove: https://github.com/spluque/diveMove
The table below shows which features of `diveMove` are accessible from
`scikit-diveMove`:
+----------------------------------+--------------------------+--------------------------------+
| `diveMove` |`scikit-diveMove` |Notes |
+---------------+------------------+ | |
|Functionality |Functions/Methods | | |
+===============+==================+==========================+================================+
|Movement |``austFilter`` | |Under consideration. |
| |``rmsDistFilter`` | | |
| |``grpSpeedFilter``| | |
| |``distSpeed`` | | |
| |``readLocs`` | | |
+---------------+------------------+--------------------------+--------------------------------+
|Bout analysis |``boutfreqs`` |``BoutsNLS`` ``BoutsMLE`` |Fully implemented in Python. |
| |``boutinit`` | | |
| |``bouts2.nlsFUN`` | | |
| |``bouts2.nls`` | | |
| |``bouts3.nlsFUN`` | | |
| |``bouts3.nls`` | | |
| |``bouts2.mleFUN`` | | |
| |``bouts2.ll`` | | |
| |``bouts2.LL`` | | |
| |``bouts.mle`` | | |
| |``labelBouts`` | | |
| |``plotBouts`` | | |
| |``plotBouts2.cdf``| | |
| |``bec2`` | | |
| |``bec3`` | | |
+---------------+------------------+--------------------------+--------------------------------+
|Dive analysis |``readTDR`` |``TDR.__init__`` |Fully implemented. Single |
| |``createTDR`` |``TDRSource.__init__`` |``TDR`` class for data with or |
| | | |without speed measurements. |
+---------------+------------------+--------------------------+--------------------------------+
| |``calibrateDepth``|``TDR.calibrate`` |Fully implemented |
| | |``TDR.zoc`` | |
| | |``TDR.detect_wet`` | |
| | |``TDR.detect_dives`` | |
| | |``TDR.detect_dive_phases``| |
+---------------+------------------+--------------------------+--------------------------------+
| |``calibrateSpeed``|``TDR.calibrate_speed`` |New implementation of the |
| |``rqPlot`` | |algorithm entirely in Python. |
| | | |The procedure generates the plot|
| | | |concurrently. |
+---------------+------------------+--------------------------+--------------------------------+
| |``diveStats`` |``TDR.dive_stats`` |Fully implemented |
| |``stampDive`` |``TDR.time_budget`` | |
| |``timeBudget`` |``TDR.stamp_dives`` | |
+---------------+------------------+--------------------------+--------------------------------+
| |``plotTDR`` |``TDR.plot`` |Fully implemented. |
| |``plotDiveModel`` |``TDR.plot_zoc_filters`` |Interactivity is the default, as|
| |``plotZOC`` |``TDR.plot_phases`` |standard `matplotlib`. |
| | |``TDR.plot_dive_model`` | |
+---------------+------------------+--------------------------+--------------------------------+
| |``getTDR`` |``TDR.tdr`` |Fully implemented. |
| |``getDepth`` |``TDR.get_depth`` |``getCCData`` deemed redundant, |
| |``getSpeed`` |``TDR.get_speed`` |as the columns can be accessed |
| |``getTime`` |``TDR.tdr.index`` |directly from the ``TDR.tdr`` |
| |``getCCData`` |``TDR.src_file`` |attribute. |
| |``getDtime`` |``TDR.dtime`` | |
| |``getFileName`` | | |
+---------------+------------------+--------------------------+--------------------------------+
| |``getDAct`` |``TDR.get_wet_activity`` |Fully implemented |
| |``getDPhaseLab`` |``TDR.get_dives_details`` | |
| |``getDiveDeriv`` |``TDR.get_dive_deriv`` | |
| |``getDiveModel`` | | |
| |``getGAct`` | | |
+---------------+------------------+--------------------------+--------------------------------+
| |``extractDive`` | |Fully implemented |
+---------------+------------------+--------------------------+--------------------------------+
`scikit-diveMove` also provides useful tools for processing signals from
tri-axial Inertial Measurement Units (`IMU`_), such as thermal calibration,
corrections for shifts in coordinate frames, as well as computation of
orientation using a variety of current methods. Analyses are fully
tractable by encouraging the use of `xarray`_ data structures that can be
read from and written to NetCDF file format. Using these data structures,
meta-data attributes can be easily appended at all layers as analyses
progress.
.. _xarray: https://xarray.pydata.org
.. _IMU: https://en.wikipedia.org/wiki/Inertial_measurement_unit
Installation
============
Type the following at a terminal command line:
.. code-block:: sh
pip install scikit-diveMove
Or install from source tree by typing the following at the command line:
.. code-block:: sh
python setup.py install
The documentation can also be installed as described in `Documentation`_.
Once installed, `skdiveMove` can be easily imported as: ::
import skdiveMove as skdive
Dependencies
------------
`skdiveMove` depends primarily on ``R`` package `diveMove`, which must be
installed and available to the user running Python. If needed, install
`diveMove` at the ``R`` prompt:
.. code-block:: R
install.packages("diveMove")
Required Python packages are listed in the `requirements
<requirements.txt>`_ file.
Documentation
=============
Available at: https://spluque.github.io/scikit-diveMove
Alternatively, installing the package as follows:
.. code-block:: sh
pip install -e .["docs"]
allows the documentation to be built locally (choosing the desired target
{"html", "pdf", etc.}):
.. code-block:: sh
make -C docs/ html
The `html` tree is at `docs/build/html`.
| /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/README.rst | 0.876172 | 0.744006 | README.rst | pypi |
.. _demo_ellipsoid-label:
==============================================
Ellipsoid modelling for calibration purposes
==============================================
Magnetometers are highly sensitive to local deviations of the magnetic
field, affecting the desired measurement of the Earth geomagnetic field.
Triaxial accelerometers, however, can have slight offsets in and
misalignments of their axes which need to be corrected to properly
interpret their output. One commonly used method for performing these
corrections is done by fitting an ellipsoid model to data collected while
the sensor's axes are exposed to the forces of the fields they measure.
.. jupyter-execute::
# Set up
import pkg_resources as pkg_rsrc
import os.path as osp
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import skdiveMove.imutools as imutools
from mpl_toolkits.mplot3d import Axes3D
.. jupyter-execute::
:hide-code:
# boiler plate stuff to help out
_FIG1X1 = (11, 5)
def gen_sphere(radius=1):
"""Generate coordinates on a sphere"""
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = radius * np.outer(np.cos(u), np.sin(v))
y = radius * np.outer(np.sin(u), np.sin(v))
z = radius * np.outer(np.ones(np.size(u)), np.cos(v))
return (x, y, z)
np.set_printoptions(precision=3, sign="+")
%matplotlib inline
To demonstrate this procedure with utilities from the `allan` submodule,
measurements from a triaxial accelerometer and magnetometer were recorded
at 100 Hz sampling frequency with an `IMU` that was rotated around the main
axes to cover a large surface of the sphere.
.. jupyter-execute::
:linenos:
icdf = (pkg_rsrc
.resource_filename("skdiveMove",
osp.join("tests", "data", "gertrude",
"magnt_accel_calib.nc")))
magnt_accel = xr.load_dataset(icdf)
magnt = magnt_accel["magnetic_density"].to_numpy()
accel = magnt_accel["acceleration"].to_numpy()
The function `fit_ellipsoid` returns the offset, gain, and rotation matrix
(if requested) necessary to correct the sensor's data. There are six types
of constraint to impose on the result, including which radii should be
equal, and whether the data should be rotated.
.. jupyter-execute::
:linenos:
# Here, a symmetrical constraint whereby any plane passing through the
# origin is used, with all radii equal to each other
magnt_off, magnt_gain, _ = imutools.fit_ellipsoid(magnt, f="sxyz")
accel_off, accel_gain, _ = imutools.fit_ellipsoid(accel, f="sxyz")
Inspect the offsets and gains in the uncorrected data:
.. jupyter-execute::
:hide-code:
print("Magnetometer offsets [uT]: x={:.2f}, y={:.2f}, z={:.2f};"
.format(*magnt_off),
"gains [uT]: x={:.2f}, y={:.2f}, z={:.2f}".format(*magnt_gain))
print("Accelerometer offsets [g]: x={:.3f}, y={:.3f}, z={:.3f};"
.format(*accel_off),
"gains [g]: x={:.3f}, y={:.3f}, z={:.3f}".format(*accel_gain))
Calibrate the sensors using these estimates:
.. jupyter-execute::
:linenos:
magnt_refr = 56.9
magnt_corr = imutools.apply_ellipsoid(magnt, offset=magnt_off,
gain=magnt_gain,
rotM=np.diag(np.ones(3)),
ref_r=magnt_refr)
accel_corr = imutools.apply_ellipsoid(accel, offset=accel_off,
gain=accel_gain,
rotM=np.diag(np.ones(3)),
ref_r=1.0)
An appreciation of the effect of the calibration can be observed by
comparing the difference between maxima/minima and the reference value for
the magnetic field at the geographic location and time of the
measurements, or 1 $g$ in the case of the accelerometers.
.. jupyter-execute::
:linenos:
magnt_refr_diff = [np.abs(magnt.max(axis=0)) - magnt_refr,
np.abs(magnt.min(axis=0)) - magnt_refr]
magnt_corr_refr_diff = [np.abs(magnt_corr.max(axis=0)) - magnt_refr,
np.abs(magnt_corr.min(axis=0)) - magnt_refr]
accel_refr_diff = [np.abs(accel.max(axis=0)) - 1.0,
np.abs(accel.min(axis=0)) - 1.0]
accel_corr_refr_diff = [np.abs(accel_corr.max(axis=0)) - 1.0,
np.abs(accel_corr.min(axis=0)) - 1.0]
.. jupyter-execute::
:hide-code:
print("Uncorrected magnetometer difference to reference [uT]:")
print("maxima: x={:.2f}, y={:.2f}, z={:.2f};"
.format(*magnt_refr_diff[0]),
"minima: x={:.2f}, y={:.2f}, z={:.2f}"
.format(*magnt_refr_diff[1]))
print("Corrected magnetometer difference to reference [uT]:")
print("maxima: x={:.2f}, y={:.2f}, z={:.2f};"
.format(*magnt_corr_refr_diff[0]),
"minima: x={:.2f}, y={:.2f}, z={:.2f}"
.format(*magnt_corr_refr_diff[1]))
print("Uncorrected accelerometer difference to reference [g]:")
print("maxima: x={:.2f}, y={:.2f}, z={:.2f};"
.format(*accel_refr_diff[0]),
"minima: x={:.2f}, y={:.2f}, z={:.2f}"
.format(*accel_refr_diff[1]))
print("Corrected accelerometer difference to reference [g]:")
print("maxima: x={:.2f}, y={:.2f}, z={:.2f};"
.format(*accel_corr_refr_diff[0]),
"minima: x={:.2f}, y={:.2f}, z={:.2f}"
.format(*accel_corr_refr_diff[1]))
Or compare visually on a 3D plot:
.. jupyter-execute::
:hide-code:
_FIG1X2 = [13, 7]
fig = plt.figure(figsize=_FIG1X2)
ax0 = fig.add_subplot(121, projection="3d")
ax1 = fig.add_subplot(122, projection="3d")
ax0.set_xlabel(r"x [$\mu T$]")
ax0.set_ylabel(r"y [$\mu T$]")
ax0.set_zlabel(r"z [$\mu T$]")
ax1.set_xlabel(r"x [$g$]")
ax1.set_ylabel(r"y [$g$]")
ax1.set_zlabel(r"z [$g$]")
ax0.plot_surface(*gen_sphere(magnt_refr), rstride=4, cstride=4, color="c",
linewidth=0, alpha=0.3)
ax1.plot_surface(*gen_sphere(), rstride=4, cstride=4, color="c",
linewidth=0, alpha=0.3)
ax0.plot(magnt[:, 0], magnt[:, 1], magnt[:, 2],
marker=".", linestyle="none", markersize=0.5,
label="uncorrected")
ax0.plot(magnt_corr[:, 0], magnt_corr[:, 1], magnt_corr[:, 2],
marker=".", linestyle="none", markersize=0.5,
label="corrected")
ax1.plot(accel[:, 0], accel[:, 1], accel[:, 2],
marker=".", linestyle="none", markersize=0.5,
label="uncorrected")
ax1.plot(accel_corr[:, 0], accel_corr[:, 1], accel_corr[:, 2],
marker=".", linestyle="none", markersize=0.5,
label="corrected")
l1, lbl1 = fig.axes[-1].get_legend_handles_labels()
fig.legend(l1, lbl1, loc="lower center", borderaxespad=0, frameon=False,
markerscale=12)
ax0.view_init(22, azim=-142)
ax1.view_init(22, azim=-142)
plt.tight_layout()
Feel free to download a copy of this demo
(:jupyter-download:script:`demo_ellipsoid`).
| /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/docs/source/demo_ellipsoid.rst | 0.842118 | 0.715797 | demo_ellipsoid.rst | pypi |
import logging
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
import statsmodels.formula.api as smf
logger = logging.getLogger(__name__)
# Add the null handler if importing as library; whatever using this library
# should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
def calibrate_speed(x, tau, contour_level, z=0, bad=[0, 0],
plot=True, ax=None):
"""Calibration based on kernel density estimation
Parameters
----------
x : pandas.DataFrame
DataFrame with depth rate and speed
tau : float
contour_level : float
z : float, optional
bad : array_like, optional
plot : bool, optional
Whether to plot calibration results.
ax : matplotlib.Axes, optional
An Axes instance to use as target. Default is to create one.
Returns
-------
out : 2-tuple
The quantile regression fit object, and `matplotlib.pyplot` `Axes`
instance (if plot=True, otherwise None).
Notes
-----
See `skdiveMove.TDR.calibrate_speed` for details.
"""
# `gaussian_kde` expects variables in rows
n_eval = 51
# Numpy for some operations
xnpy = x.to_numpy()
kde = stats.gaussian_kde(xnpy.T)
# Build the grid for evaluation, mimicking bkde2D
mins = x.min()
maxs = x.max()
x_flat = np.linspace(mins[0], maxs[0], n_eval)
y_flat = np.linspace(mins[1], maxs[1], n_eval)
xx, yy = np.meshgrid(x_flat, y_flat)
grid_coords = np.append(xx.reshape(-1, 1), yy.reshape(-1, 1), axis=1)
# Evaluate kde on the grid
z = kde(grid_coords.T)
z = np.flipud(z.reshape(n_eval, n_eval))
# Fit quantile regression
# -----------------------
# Bin depth rate
drbinned = pd.cut(x.iloc[:, 0], n_eval)
drbin_mids = drbinned.apply(lambda x: x.mid) # mid points
# Use bin mid points as x
binned = np.column_stack((drbin_mids, xnpy[:, 1]))
qdata = pd.DataFrame(binned, columns=list("xy"))
qmod = smf.quantreg("y ~ x", qdata)
qfit = qmod.fit(q=tau)
coefs = qfit.params
logger.info("a={}, b={}".format(*coefs))
if plot:
fig = plt.gcf()
if ax is None:
ax = plt.gca()
ax.set_xlabel("Rate of depth change")
ax.set_ylabel("Speed")
zimg = ax.imshow(z, aspect="auto",
extent=[mins[0], maxs[0], mins[1], maxs[1]],
cmap="gist_earth_r")
fig.colorbar(zimg, fraction=0.1, aspect=30, pad=0.02,
label="Kernel density probability")
cntr = ax.contour(z, extent=[mins[0], maxs[0], mins[1], maxs[1]],
origin="image", levels=[contour_level])
ax.clabel(cntr, fmt="%1.2f")
# Plot the binned data, adding some noise for clarity
xjit_binned = np.random.normal(binned[:, 0],
xnpy[:, 0].ptp() / (2 * n_eval))
ax.scatter(xjit_binned, binned[:, 1], s=6, alpha=0.3)
# Plot line
xnew = np.linspace(mins[0], maxs[0])
yhat = coefs[0] + coefs[1] * xnew
ax.plot(xnew, yhat, "--k",
label=(r"$y = {:.3f} {:+.3f} x$"
.format(coefs[0], coefs[1])))
ax.legend(loc="lower right")
# Adjust limits to compensate for the noise in x
ax.set_xlim([mins[0], maxs[0]])
return (qfit, ax)
if __name__ == '__main__':
from skdiveMove.tests import diveMove2skd
tdrX = diveMove2skd()
print(tdrX) | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/calibspeed.py | 0.876898 | 0.591222 | calibspeed.py | pypi |
import logging
import numpy as np
import pandas as pd
from skdiveMove.zoc import ZOC
from skdiveMove.core import diveMove, robjs, cv, pandas2ri
from skdiveMove.helpers import (get_var_sampling_interval, _cut_dive,
rle_key, _append_xr_attr)
logger = logging.getLogger(__name__)
# Add the null handler if importing as library; whatever using this library
# should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
class TDRPhases(ZOC):
"""Core TDR phase identification routines
See help(TDRSource) for inherited attributes.
Attributes
----------
wet_dry
dives : dict
Dictionary of dive activity data {'row_ids': pandas.DataFrame,
'model': str, 'splines': dict, 'spline_derivs': pandas.DataFrame,
'crit_vals': pandas.DataFrame}.
params : dict
Dictionary with parameters used for detection of wet/dry and dive
phases. {'wet_dry': {'dry_thr': float, 'wet_thr': float}, 'dives':
{'dive_thr': float, 'dive_model': str, 'smooth_par': float,
'knot_factor': int, 'descent_crit_q': float, 'ascent_crit_q':
float}}
"""
def __init__(self, *args, **kwargs):
"""Initialize TDRPhases instance
Parameters
----------
*args : positional arguments
Passed to :meth:`ZOC.__init__`
**kwargs : keyword arguments
Passed to :meth:`ZOC.__init__`
"""
ZOC.__init__(self, *args, **kwargs)
self._wet_dry = None
self.dives = dict(row_ids=None, model=None, splines=None,
spline_derivs=None, crit_vals=None)
self.params = dict(wet_dry={}, dives={})
def __str__(self):
base = ZOC.__str__(self)
wetdry_params = self.get_phases_params("wet_dry")
dives_params = self.get_phases_params("dives")
return (base +
("\n{0:<20} {1}\n{2:<20} {3}"
.format("Wet/Dry parameters:", wetdry_params,
"Dives parameters:", dives_params)))
def detect_wet(self, dry_thr=70, wet_cond=None, wet_thr=3610,
interp_wet=False):
"""Detect wet/dry activity phases
Parameters
----------
dry_thr : float, optional
wet_cond : bool mask, optional
A Pandas.Series bool mask indexed as `depth`. Default is
generated from testing for non-missing `depth`.
wet_thr : float, optional
interp_wet : bool, optional
Notes
-----
See details for arguments in diveMove's ``calibrateDepth``. Unlike
`diveMove`, the beginning/ending times for each phase are not
stored with the class instance, as this information can be
retrieved via the :meth:`~TDR.time_budget` method.
Examples
--------
ZOC using the "offset" method for convenience
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd("TDRPhases")
>>> tdrX.zoc("offset", offset=3)
Detect wet/dry phases
>>> tdrX.detect_wet()
Access the "phases" and "dry_thr" attributes
>>> tdrX.wet_dry # doctest: +ELLIPSIS
phase_id phase_label
date_time
2002-01-05 ... 1 L
...
"""
# Retrieve copy of depth from our own property
depth = self.depth_zoc
depth_py = depth.to_series()
time_py = depth_py.index
dtime = get_var_sampling_interval(depth).total_seconds()
if wet_cond is None:
wet_cond = ~depth_py.isna()
phases_l = (diveMove
._detPhase(robjs.vectors.POSIXct(time_py),
robjs.vectors.FloatVector(depth_py),
dry_thr=dry_thr,
wet_thr=wet_thr,
wet_cond=(robjs.vectors
.BoolVector(~depth_py.isna())),
interval=dtime))
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
phases = pd.DataFrame({'phase_id': phases_l.rx2("phase.id"),
'phase_label': phases_l.rx2("activity")},
index=time_py)
phases.loc[:, "phase_id"] = phases.loc[:, "phase_id"].astype(int)
self._wet_dry = phases
wet_dry_params = dict(dry_thr=dry_thr, wet_thr=wet_thr)
self.params["wet_dry"].update(wet_dry_params)
if interp_wet:
zdepth = depth.to_series()
iswet = phases["phase_label"] == "W"
iswetna = iswet & zdepth.isna()
if any(iswetna):
depth_intp = zdepth[iswet].interpolate(method="cubic")
zdepth[iswetna] = np.maximum(np.zeros_like(depth_intp),
depth_intp)
zdepth = zdepth.to_xarray()
zdepth.attrs = depth.attrs
_append_xr_attr(zdepth, "history", "interp_wet")
self._depth_zoc = zdepth
self._zoc_params.update(dict(interp_wet=interp_wet))
logger.info("Finished detecting wet/dry periods")
def detect_dives(self, dive_thr):
"""Identify dive events
Set the ``dives`` attribute's "row_ids" dictionary element, and
update the ``wet_act`` attribute's "phases" dictionary element.
Parameters
----------
dive_thr : float
Notes
-----
See details for arguments in diveMove's ``calibrateDepth``.
Examples
--------
ZOC using the "offset" method for convenience
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd("TDRPhases")
>>> tdrX.zoc("offset", offset=3)
Detect wet/dry phases and dives with 3 m threshold
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
"""
# Retrieve copy of depth from our own property
depth = self.depth_zoc
depth_py = depth.to_series()
act_phases = self.wet_dry["phase_label"]
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
phases_df = diveMove._detDive(pd.Series(depth_py),
pd.Series(act_phases),
dive_thr=dive_thr)
# Replace dots with underscore
phases_df.columns = (phases_df.columns.str
.replace(".", "_", regex=False))
phases_df.set_index(depth_py.index, inplace=True)
dive_activity = phases_df.pop("dive_activity")
# Dive and post-dive ID should be integer
phases_df = phases_df.astype(int)
self.dives["row_ids"] = phases_df
self._wet_dry["phase_label"] = dive_activity
self.params["dives"].update({'dive_thr': dive_thr})
logger.info("Finished detecting dives")
def detect_dive_phases(self, dive_model, smooth_par=0.1,
knot_factor=3, descent_crit_q=0,
ascent_crit_q=0):
"""Detect dive phases
Complete filling the ``dives`` attribute.
Parameters
----------
dive_model : {"unimodal", "smooth.spline"}
smooth_par : float, optional
knot_factor : int, optional
descent_crit_q : float, optional
ascent_crit_q : float, optional
Notes
-----
See details for arguments in diveMove's ``calibrateDepth``.
Examples
--------
ZOC using the "offset" method for convenience
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd("TDRPhases")
>>> tdrX.zoc("offset", offset=3)
Detect wet/dry phases and dives with 3 m threshold
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
Detect dive phases using the "unimodal" method and selected
parameters
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
"""
# Retrieve copy of depth from our own property
depth = self.depth_zoc
depth_py = depth.to_series()
phases_df = self.get_dives_details("row_ids")
dive_ids = self.get_dives_details("row_ids", columns="dive_id")
ok = (dive_ids > 0) & ~depth_py.isna()
xx = pd.Categorical(np.repeat(["X"], phases_df.shape[0]),
categories=["D", "DB", "B", "BA",
"DA", "A", "X"])
dive_phases = pd.Series(xx, index=phases_df.index)
if any(ok):
ddepths = depth_py[ok] # diving depths
dtimes = ddepths.index
dids = dive_ids[ok]
idx = np.squeeze(np.argwhere(ok.to_numpy()))
time_num = (dtimes - dtimes[0]).total_seconds().to_numpy()
divedf = pd.DataFrame({'dive_id': dids.to_numpy(),
'idx': idx,
'depth': ddepths.to_numpy(),
'time_num': time_num},
index=ddepths.index)
grouped = divedf.groupby("dive_id")
cval_list = []
spl_der_list = []
spl_list = []
for name, grp in grouped:
res = _cut_dive(grp, dive_model=dive_model,
smooth_par=smooth_par,
knot_factor=knot_factor,
descent_crit_q=descent_crit_q,
ascent_crit_q=ascent_crit_q)
dive_phases.loc[grp.index] = (res.pop("label_matrix")[:, 1])
# Splines
spl = res.pop("dive_spline")
# Convert directly into a dict, with each element turned
# into a list of R objects. Access each via
# `_get_dive_spline_slot`
spl_dict = dict(zip(spl.names, list(spl)))
spl_list.append(spl_dict)
# Spline derivatives
spl_der = res.pop("spline_deriv")
spl_der_idx = pd.TimedeltaIndex(spl_der[:, 0], unit="s")
spl_der = pd.DataFrame({'y': spl_der[:, 1]},
index=spl_der_idx)
spl_der_list.append(spl_der)
# Critical values (all that's left in res)
cvals = pd.DataFrame(res, index=[name])
cvals.index.rename("dive_id", inplace=True)
# Adjust critical indices for Python convention and ensure
# integers
cvals.iloc[:, :2] = cvals.iloc[:, :2].astype(int) - 1
cval_list.append(cvals)
self.dives["model"] = dive_model
# Splines
self.dives["splines"] = dict(zip(grouped.groups.keys(),
spl_list))
self.dives["spline_derivs"] = pd.concat(spl_der_list,
keys=(grouped
.groups.keys()))
self.dives["crit_vals"] = pd.concat(cval_list)
else:
logger.warning("No dives found")
# Update the `dives` attribute
self.dives["row_ids"]["dive_phase"] = dive_phases
(self.params["dives"]
.update(dict(dive_model=dive_model, smooth_par=smooth_par,
knot_factor=knot_factor,
descent_crit_q=descent_crit_q,
ascent_crit_q=ascent_crit_q)))
logger.info("Finished detecting dive phases")
def get_dives_details(self, key, columns=None):
"""Accessor for the `dives` attribute
Parameters
----------
key : {"row_ids", "model", "splines", "spline_derivs", crit_vals}
Name of the key to retrieve.
columns : array_like, optional
Names of the columns of the dataframe in `key`, when applicable.
"""
try:
okey = self.dives[key]
except KeyError:
msg = ("\'{}\' is not found.\nAvailable keys: {}"
.format(key, self.dives.keys()))
logger.error(msg)
raise KeyError(msg)
else:
if okey is None:
raise KeyError("\'{}\' not available.".format(key))
if columns:
try:
odata = okey[columns]
except KeyError:
msg = ("At least one of the requested columns does not "
"exist.\nAvailable columns: {}").format(okey.columns)
logger.error(msg)
raise KeyError(msg)
else:
odata = okey
return odata
def _get_wet_activity(self):
return self._wet_dry
wet_dry = property(_get_wet_activity)
"""Wet/dry activity labels
Returns
-------
pandas.DataFrame
DataFrame with columns: `phase_id` and `phase_label` for each
measurement.
"""
def get_phases_params(self, key):
"""Return parameters used for identifying wet/dry or diving phases.
Parameters
----------
key: {'wet_dry', 'dives'}
Returns
-------
out : dict
"""
try:
params = self.params[key]
except KeyError:
msg = "key must be one of: {}".format(self.params.keys())
logger.error(msg)
raise KeyError(msg)
return params
def _get_dive_spline_slot(self, diveNo, name):
"""Accessor for the R objects in `dives`["splines"]
Private method to retrieve elements easily. Elements can be
accessed individually as is, but some elements are handled
specially.
Parameters
----------
diveNo : int or float
Which dive number to retrieve spline details for.
name : str
Element to retrieve. {"data", "xy", "knots", "coefficients",
"order", "lambda.opt", "sigmasq", "degree", "g", "a", "b",
"variter"}
"""
# Safe to assume these are all scalars, based on the current
# default settings in diveMove's `.cutDive`
scalars = ["order", "lambda.opt", "sigmasq", "degree",
"g", "a", "b", "variter"]
idata = self.get_dives_details("splines")[diveNo]
if name == "data":
x = pd.TimedeltaIndex(np.array(idata[name][0]), unit="s")
odata = pd.Series(np.array(idata[name][1]), index=x)
elif name == "xy":
x = pd.TimedeltaIndex(np.array(idata["x"]), unit="s")
odata = pd.Series(np.array(idata["y"]), index=x)
elif name in scalars:
odata = np.float(idata[name][0])
else:
odata = np.array(idata[name])
return odata
def get_dive_deriv(self, diveNo, phase=None):
"""Retrieve depth spline derivative for a given dive
Parameters
----------
diveNo : int
Dive number to retrieve derivative for.
phase : {"descent", "bottom", "ascent"}
If provided, the dive phase to retrieve data for.
Returns
-------
out : pandas.DataFrame
"""
der = self.get_dives_details("spline_derivs").loc[diveNo]
crit_vals = self.get_dives_details("crit_vals").loc[diveNo]
spl_data = self.get_dives_details("splines")[diveNo]["data"]
spl_times = np.array(spl_data[0]) # x row is time steps in (s)
if phase == "descent":
descent_crit = int(crit_vals["descent_crit"])
deltat_crit = pd.Timedelta(spl_times[descent_crit], unit="s")
oder = der.loc[:deltat_crit]
elif phase == "bottom":
descent_crit = int(crit_vals["descent_crit"])
deltat1 = pd.Timedelta(spl_times[descent_crit], unit="s")
ascent_crit = int(crit_vals["ascent_crit"])
deltat2 = pd.Timedelta(spl_times[ascent_crit], unit="s")
oder = der[(der.index >= deltat1) & (der.index <= deltat2)]
elif phase == "ascent":
ascent_crit = int(crit_vals["ascent_crit"])
deltat_crit = pd.Timedelta(spl_times[ascent_crit], unit="s")
oder = der.loc[deltat_crit:]
elif phase is None:
oder = der
else:
msg = "`phase` must be 'descent', 'bottom' or 'ascent'"
logger.error(msg)
raise KeyError(msg)
return oder
def _get_dive_deriv_stats(self, diveNo):
"""Calculate stats for the depth derivative of a given dive
"""
desc = self.get_dive_deriv(diveNo, "descent")
bott = self.get_dive_deriv(diveNo, "bottom")
asc = self.get_dive_deriv(diveNo, "ascent")
# Rename DataFrame to match diveNo
desc_sts = (pd.DataFrame(desc.describe().iloc[1:]).transpose()
.add_prefix("descD_").rename({"y": diveNo}))
bott_sts = (pd.DataFrame(bott.describe().iloc[1:]).transpose()
.add_prefix("bottD_").rename({"y": diveNo}))
asc_sts = (pd.DataFrame(asc.describe().iloc[1:]).transpose()
.add_prefix("ascD_").rename({"y": diveNo}))
sts = pd.merge(desc_sts, bott_sts, left_index=True,
right_index=True)
sts = pd.merge(sts, asc_sts, left_index=True, right_index=True)
return sts
def time_budget(self, ignore_z=True, ignore_du=True):
"""Summary of wet/dry activities at the broadest time scale
Parameters
----------
ignore_z : bool, optional
Whether to ignore trivial aquatic periods.
ignore_du : bool, optional
Whether to ignore diving and underwater periods.
Returns
-------
out : pandas.DataFrame
DataFrame indexed by phase id, with categorical activity label
for each phase, and beginning and ending times.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd("TDRPhases")
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.time_budget(ignore_z=True,
... ignore_du=True) # doctest: +ELLIPSIS
beg phase_label end
phase_id
1 2002-01-05 ... L 2002-01-05 ...
...
"""
phase_lab = self.wet_dry["phase_label"]
idx_name = phase_lab.index.name
labels = phase_lab.reset_index()
if ignore_z:
labels = labels.mask(labels == "Z", "L")
if ignore_du:
labels = labels.mask((labels == "U") | (labels == "D"), "W")
grp_key = rle_key(labels["phase_label"]).rename("phase_id")
labels_grp = labels.groupby(grp_key)
begs = labels_grp.first().rename(columns={idx_name: "beg"})
ends = labels_grp.last()[idx_name].rename("end")
return pd.concat((begs, ends), axis=1)
def stamp_dives(self, ignore_z=True):
"""Identify the wet activity phase corresponding to each dive
Parameters
----------
ignore_z : bool, optional
Whether to ignore trivial aquatic periods.
Returns
-------
out : pandas.DataFrame
DataFrame indexed by dive ID, and three columns identifying
which phase thy are in, and the beginning and ending time
stamps.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd("TDRPhases")
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.stamp_dives(ignore_z=True) # doctest: +ELLIPSIS
phase_id beg end
dive_id
1 2 2002-01-05 ... 2002-01-06 ...
"""
phase_lab = self.wet_dry["phase_label"]
idx_name = phase_lab.index.name
# "U" and "D" considered as "W" here
phase_lab = phase_lab.mask(phase_lab.isin(["U", "D"]), "W")
if ignore_z:
phase_lab = phase_lab.mask(phase_lab == "Z", "L")
dive_ids = self.get_dives_details("row_ids", columns="dive_id")
grp_key = rle_key(phase_lab).rename("phase_id")
isdive = dive_ids > 0
merged = (pd.concat((grp_key, dive_ids, phase_lab), axis=1)
.loc[isdive, :].reset_index())
# Rest index to use in first() and last()
merged_grp = merged.groupby("phase_id")
dives_ll = []
for name, group in merged_grp:
dives_uniq = pd.Series(group["dive_id"].unique(),
name="dive_id")
beg = [group[idx_name].iloc[0]] * dives_uniq.size
end = [group[idx_name].iloc[-1]] * dives_uniq.size
dive_df = pd.DataFrame({'phase_id': [name] * dives_uniq.size,
'beg': beg,
'end': end}, index=dives_uniq)
dives_ll.append(dive_df)
dives_all = pd.concat(dives_ll)
return dives_all | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/tdrphases.py | 0.862988 | 0.258595 | tdrphases.py | pypi |
import logging
import pandas as pd
from skdiveMove.tdrsource import TDRSource
from skdiveMove.core import robjs, cv, pandas2ri, diveMove
from skdiveMove.helpers import _append_xr_attr
logger = logging.getLogger(__name__)
# Add the null handler if importing as library; whatever using this library
# should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
class ZOC(TDRSource):
"""Perform zero offset correction
See help(ZOC) for inherited attributes.
Attributes
----------
zoc_params
depth_zoc
zoc_method : str
Name of the ZOC method used.
zoc_filters : pandas.DataFrame
DataFrame with output filters for method="filter"
"""
def __init__(self, *args, **kwargs):
"""Initialize ZOC instance
Parameters
----------
*args : positional arguments
Passed to :meth:`TDRSource.__init__`
**kwargs : keyword arguments
Passed to :meth:`TDRSource.__init__`
"""
TDRSource.__init__(self, *args, **kwargs)
self.zoc_method = None
self._zoc_params = None
self._depth_zoc = None
self.zoc_filters = None
def __str__(self):
base = TDRSource.__str__(self)
meth, params = self.zoc_params
return (base +
("\n{0:<20} {1}\n{2:<20} {3}"
.format("ZOC method:", meth, "ZOC parameters:", params)))
def _offset_depth(self, offset=0):
"""Perform ZOC with "offset" method
Parameters
----------
offset : float, optional
Value to subtract from measured depth.
Notes
-----
More details in diveMove's ``calibrateDepth`` function.
"""
# Retrieve copy of depth from our own property
depth = self.depth
self.zoc_method = "offset"
self._zoc_params = dict(offset=offset)
depth_zoc = depth - offset
depth_zoc[depth_zoc < 0] = 0
_append_xr_attr(depth_zoc, "history", "ZOC")
self._depth_zoc = depth_zoc
def _filter_depth(self, k, probs, depth_bounds=None, na_rm=True):
"""Perform ZOC with "filter" method
Parameters
----------
k : array_like
probs : array_like
**kwargs : optional keyword arguments
For this method: ('depth_bounds' (defaults to range), 'na_rm'
(defaults to True)).
Notes
-----
More details in diveMove's ``calibrateDepth`` function.
"""
self.zoc_method = "filter"
# Retrieve copy of depth from our own property
depth = self.depth
depth_ser = depth.to_series()
self._zoc_params = dict(k=k, probs=probs, depth_bounds=depth_bounds,
na_rm=na_rm)
depthmtx = self._depth_filter_r(depth_ser, **self._zoc_params)
depth_zoc = depthmtx.pop("depth_adj")
depth_zoc[depth_zoc < 0] = 0
depth_zoc = depth_zoc.rename("depth").to_xarray()
depth_zoc.attrs = depth.attrs
_append_xr_attr(depth_zoc, "history", "ZOC")
self._depth_zoc = depth_zoc
self.zoc_filters = depthmtx
def zoc(self, method="filter", **kwargs):
"""Apply zero offset correction to depth measurements
Parameters
----------
method : {"filter", "offset"}
Name of method to use for zero offset correction.
**kwargs : optional keyword arguments
Passed to the chosen method (:meth:`offset_depth`,
:meth:`filter_depth`)
Notes
-----
More details in diveMove's ``calibrateDepth`` function.
Examples
--------
ZOC using the "offset" method
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> tdrX.zoc("offset", offset=3)
Using the "filter" method
>>> # Window lengths and probabilities
>>> DB = [-2, 5]
>>> K = [3, 5760]
>>> P = [0.5, 0.02]
>>> tdrX.zoc(k=K, probs=P, depth_bounds=DB)
Plot the filters that were applied
>>> tdrX.plot_zoc(ylim=[-1, 10]) # doctest: +ELLIPSIS
(<Figure ... with 3 Axes>, array([<AxesSubplot:...>,
<AxesSubplot:...>, <AxesSubplot:...>], dtype=object))
"""
if method == "offset":
offset = kwargs.pop("offset", 0)
self._offset_depth(offset)
elif method == "filter":
k = kwargs.pop("k") # must exist
P = kwargs.pop("probs") # must exist
# Default depth bounds equal measured depth range
DB = kwargs.pop("depth_bounds",
[self.depth.min(),
self.depth.max()])
# default as in `_depth_filter`
na_rm = kwargs.pop("na_rm", True)
self._filter_depth(k=k, probs=P, depth_bounds=DB, na_rm=na_rm)
else:
logger.warning("Method {} is not implemented"
.format(method))
logger.info("Finished ZOC")
def _depth_filter_r(self, depth, k, probs, depth_bounds, na_rm=True):
"""Filter method for zero offset correction via `diveMove`
Parameters
----------
depth : pandas.Series
k : array_like
probs : array_like
depth_bounds : array_like
na_rm : bool, optional
Returns
-------
out : pandas.DataFrame
Time-indexed DataFrame with a column for each filter applied, and a
column `depth_adj` for corrected depth.
"""
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
depthmtx = diveMove._depthFilter(depth,
pd.Series(k), pd.Series(probs),
pd.Series(depth_bounds),
na_rm)
colnames = ["k{0}_p{1}".format(k, p) for k, p in zip(k, probs)]
colnames.append("depth_adj")
return pd.DataFrame(depthmtx, index=depth.index, columns=colnames)
def _get_depth(self):
return self._depth_zoc
depth_zoc = property(_get_depth)
"""Depth array accessor
Returns
-------
xarray.DataArray
"""
def _get_params(self):
return (self.zoc_method, self._zoc_params)
zoc_params = property(_get_params)
"""Parameters used with method for zero-offset correction
Returns
-------
method : str
Method used for ZOC.
params : dict
Dictionary with parameters and values used for ZOC.
""" | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/zoc.py | 0.844088 | 0.329109 | zoc.py | pypi |
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
def _night(times, sunrise_time, sunset_time):
"""Construct Series with sunset and sunrise times for given dates
Parameters
----------
times : pandas.Series
(N,) array with depth measurements.
sunrise_time : str
sunset_time : str
Returns
-------
tuple
Two pandas.Series (sunsets, sunrises)
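    Examples
    --------
    A minimal sketch with made-up times (illustrative only):
    >>> import pandas as pd
    >>> times = pd.date_range("2002-01-05", periods=10, freq="h")
    >>> sunsets, sunrises = _night(times, sunrise_time="06:00:00",
    ...                            sunset_time="18:00:00")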
"""
tmin = times.min().strftime("%Y-%m-%d ")
tmax = times.max().strftime("%Y-%m-%d ")
sunsets = pd.date_range(start=tmin + sunset_time,
end=tmax + sunset_time,
freq="1D")
tmin1 = (times.min() + pd.Timedelta(1, unit="d")).strftime("%Y-%m-%d ")
tmax1 = (times.max() + pd.Timedelta(1, unit="d")).strftime("%Y-%m-%d ")
sunrises = pd.date_range(start=tmin1 + sunrise_time,
end=tmax1 + sunrise_time,
freq="1D")
return (sunsets, sunrises)
def _plot_dry_time(times_dataframe, ax):
"""Fill a vertical span between beginning/ending times in DataFrame
Parameters
----------
    times_dataframe : pandas.DataFrame
        Two-column DataFrame with beginning and ending times, one dry
        period per row.
    ax : matplotlib.axes.Axes
        Axes instance to draw the spans on.
    """
    for idx, row in times_dataframe.iterrows():
        # Positional access with `iloc` avoids relying on the column labels
        # of the two-column (begin, end) DataFrame
        ax.axvspan(row.iloc[0], row.iloc[1], ymin=0.99, facecolor="tan",
                   edgecolor=None, alpha=0.6)
def plot_tdr(depth, concur_vars=None, xlim=None, depth_lim=None,
xlab="time [dd-mmm hh:mm]", ylab_depth="depth [m]",
concur_var_titles=None, xlab_format="%d-%b %H:%M",
sunrise_time="06:00:00", sunset_time="18:00:00",
night_col="gray", dry_time=None, phase_cat=None,
key=True, **kwargs):
"""Plot time, depth, and other concurrent data
Parameters
----------
    depth : pandas.Series
        (N,) Series with depth measurements, indexed by ``datetime``.
    concur_vars : pandas.Series or pandas.DataFrame, optional
        (N,) Series or DataFrame with additional data to plot in separate
        subplots, sharing the time axis with `depth`.
    xlim : 2-tuple/list, optional
        Minimum and maximum limits for ``x`` axis.
depth_lim : 2-tuple/list, optional
Minimum and maximum limits for depth to plot.
xlab : str, optional
Label for ``x`` axis.
ylab_depth : str, optional
Label for ``y`` axis for depth.
concur_var_titles : str or list, optional
String or list of strings with y-axis labels for `concur_vars`.
xlab_format : str, optional
Format string for formatting the x axis.
sunrise_time : str, optional
Time of sunrise, in 24 hr format. This is used for shading night
time.
sunset_time : str, optional
Time of sunset, in 24 hr format. This is used for shading night
time.
night_col : str, optional
Color for shading night time.
dry_time : pandas.DataFrame, optional
Two-column DataFrame with beginning and ending times corresponding
to periods considered to be dry.
phase_cat : pandas.Series, optional
Categorical series dividing rows into sections.
    **kwargs : optional keyword arguments
        Passed to the underlying pandas plotting calls.
Returns
-------
tuple
Pyplot Figure and Axes instances.
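    Examples
    --------
    A minimal, hypothetical sketch; ``depth`` stands for any
    time-indexed :class:`pandas.Series` of depth readings:

    >>> fig, ax = plot_tdr(depth, sunset_time="20:00:00",
    ...                    sunrise_time="05:00:00")  # doctest: +SKIP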
"""
sunsets, sunrises = _night(depth.index,
sunset_time=sunset_time,
sunrise_time=sunrise_time)
def _plot_phase_cat(ser, ax, legend=True):
"""Scatter plot and legend of series coloured by categories"""
cats = phase_cat.cat.categories
cat_codes = phase_cat.cat.codes
isna_ser = ser.isna()
ser_nona = ser.dropna()
scatter = ax.scatter(ser_nona.index, ser_nona, s=12, marker="o",
c=cat_codes[~isna_ser])
if legend:
handles, _ = scatter.legend_elements()
ax.legend(handles, cats, loc="lower right",
ncol=len(cat_codes))
if concur_vars is None:
fig, axs = plt.subplots(1, 1)
axs.set_ylabel(ylab_depth)
depth.plot(ax=axs, color="k", **kwargs)
axs.set_xlabel("")
axs.axhline(0, linestyle="--", linewidth=0.75, color="k")
for beg, end in zip(sunsets, sunrises):
axs.axvspan(beg, end, facecolor=night_col,
edgecolor=None, alpha=0.3)
if (phase_cat is not None):
_plot_phase_cat(depth, axs)
if (dry_time is not None):
_plot_dry_time(dry_time, axs)
if (xlim is not None):
axs.set_xlim(xlim)
if (depth_lim is not None):
axs.set_ylim(depth_lim)
axs.invert_yaxis()
else:
full_df = pd.concat((depth, concur_vars), axis=1)
nplots = full_df.shape[1]
depth_ser = full_df.iloc[:, 0]
concur_df = full_df.iloc[:, 1:]
fig, axs = plt.subplots(nplots, 1, sharex=True)
axs[0].set_ylabel(ylab_depth)
depth_ser.plot(ax=axs[0], color="k", **kwargs)
axs[0].set_xlabel("")
axs[0].axhline(0, linestyle="--", linewidth=0.75, color="k")
concur_df.plot(ax=axs[1:], subplots=True, legend=False, **kwargs)
for i, col in enumerate(concur_df.columns):
if (concur_var_titles is not None):
axs[i + 1].set_ylabel(concur_var_titles[i])
else:
axs[i + 1].set_ylabel(col)
axs[i + 1].axhline(0, linestyle="--",
linewidth=0.75, color="k")
if (xlim is not None):
axs[i + 1].set_xlim(xlim)
for i, ax in enumerate(axs):
for beg, end in zip(sunsets, sunrises):
ax.axvspan(beg, end, facecolor=night_col,
edgecolor=None, alpha=0.3)
if (dry_time is not None):
_plot_dry_time(dry_time, ax)
if (phase_cat is not None):
_plot_phase_cat(depth_ser, axs[0])
for i, col in enumerate(concur_df.columns):
_plot_phase_cat(concur_df.loc[:, col], axs[i + 1], False)
if (depth_lim is not None):
axs[0].set_ylim(depth_lim)
axs[0].invert_yaxis()
fig.tight_layout()
return (fig, axs)
def _plot_zoc_filters(depth, zoc_filters, xlim=None, ylim=None,
ylab="Depth [m]", **kwargs):
"""Plot zero offset correction filters
Parameters
----------
depth : pandas.Series
Measured depth time series, indexed by datetime.
zoc_filters : pandas.DataFrame
DataFrame with ZOC filters in columns. Must have the same number
of records as `depth`.
xlim : 2-tuple/list
ylim : 2-tuple/list
ylab : str
Label for `y` axis.
**kwargs : optional keyword arguments
Passed to `matplotlib.pyplot.subplots`. It can be any keyword,
except for `sharex` or `sharey`.
Returns
-------
tuple
Pyplot Figure and Axes instances.
"""
nfilters = zoc_filters.shape[1]
npanels = 3
    lastflts = [1]              # col idx of second filter
if nfilters > 2: # append col idx of last filter
lastflts.append(nfilters - 1)
fig, axs = plt.subplots(npanels, 1, sharex=True, sharey=True, **kwargs)
if xlim:
axs[0].set_xlim(xlim)
else:
depth_nona = depth.dropna()
axs[0].set_xlim((depth_nona.index.min(),
depth_nona.index.max()))
if ylim:
axs[0].set_ylim(ylim)
else:
axs[0].set_ylim((depth.min(), depth.max()))
for ax in axs:
ax.set_ylabel(ylab)
ax.invert_yaxis()
ax.axhline(0, linestyle="--", linewidth=0.75, color="k")
depth.plot(ax=axs[0], color="lightgray", label="input")
axs[0].legend(loc="lower left")
# Need to plot legend for input depth here
filter_names = zoc_filters.columns
(zoc_filters.iloc[:, 0]
.plot(ax=axs[1], label=filter_names[0])) # first filter
for i in lastflts:
zoc_filters.iloc[:, i].plot(ax=axs[1], label=filter_names[i])
axs[1].legend(loc="lower left")
# ZOC depth
depth_zoc = depth - zoc_filters.iloc[:, -1]
depth_zoc_label = ("input - {}"
.format(zoc_filters.columns[-1]))
(depth_zoc
.plot(ax=axs[2], color="k", rot=0, label=depth_zoc_label))
axs[2].legend(loc="lower left")
axs[2].set_xlabel("")
fig.tight_layout()
return (fig, axs)
def plot_dive_model(x, depth_s, depth_deriv, d_crit, a_crit,
d_crit_rate, a_crit_rate, leg_title=None, **kwargs):
"""Plot dive model
Parameters
----------
x : pandas.Series
Time-indexed depth measurements.
depth_s : pandas.Series
Time-indexed smoothed depth.
depth_deriv : pandas.Series
Time-indexed derivative of depth smoothing spline.
d_crit : int
Integer denoting the index where the descent ends in the observed
time series.
a_crit : int
Integer denoting the index where the ascent begins in the observed
time series.
d_crit_rate : float
Vertical rate of descent corresponding to the quantile used.
    a_crit_rate : float
Vertical rate of ascent corresponding to the quantile used.
leg_title : str, optional
Title for the plot legend (e.g. dive number being plotted).
**kwargs : optional keyword arguments
Passed to `matplotlib.pyplot.subplots`. It can be any keyword,
except `sharex`.
Returns
-------
tuple
Pyplot Figure and Axes instances.
Notes
-----
The function is homologous to diveMove's `plotDiveModel`.
"""
d_crit_time = x.index[d_crit]
a_crit_time = x.index[a_crit]
fig, axs = plt.subplots(2, 1, sharex=True, **kwargs)
ax1, ax2 = axs
ax1.invert_yaxis()
ax1.set_ylabel("Depth")
ax2.set_ylabel("First derivative")
ax1.plot(x, marker="o", linewidth=0.7, color="k", label="input")
ax1.plot(depth_s, "--", label="smooth")
ax1.plot(x.iloc[:d_crit + 1], color="C1", label="descent")
ax1.plot(x.iloc[a_crit:], color="C2", label="ascent")
ax1.legend(loc="upper center", title=leg_title, ncol=2)
ax2.plot(depth_deriv, linewidth=0.5, color="k") # derivative
dstyle = dict(marker=".", linestyle="None")
ax2.plot(depth_deriv[depth_deriv > d_crit_rate].loc[:d_crit_time],
color="C1", **dstyle) # descent
ax2.plot(depth_deriv[depth_deriv < a_crit_rate].loc[a_crit_time:],
color="C2", **dstyle) # ascent
qstyle = dict(linestyle="--", linewidth=0.5, color="k")
ax2.axhline(d_crit_rate, **qstyle)
ax2.axhline(a_crit_rate, **qstyle)
ax2.axvline(d_crit_time, **qstyle)
ax2.axvline(a_crit_time, **qstyle)
# Text annotation
qiter = zip(x.index[[0, 0]],
[d_crit_rate, a_crit_rate],
[r"descent $\hat{q}$", r"ascent $\hat{q}$"],
["bottom", "top"])
for xpos, qval, txt, valign in qiter:
ax2.text(xpos, qval, txt, va=valign)
titer = zip([d_crit_time, a_crit_time], [0, 0],
["descent", "ascent"],
["right", "left"])
for ttime, ypos, txt, halign in titer:
ax2.text(ttime, ypos, txt, ha=halign)
return (fig, (ax1, ax2))
if __name__ == '__main__':
from .tdr import get_diveMove_sample_data
tdrX = get_diveMove_sample_data()
    print(tdrX)
import pandas as pd
from skdiveMove.helpers import (get_var_sampling_interval,
_append_xr_attr, _load_dataset)
_SPEED_NAMES = ["velocity", "speed"]
class TDRSource:
"""Define TDR data source
Use xarray.Dataset to ensure pseudo-standard metadata
Attributes
----------
tdr_file : str
String indicating the file where the data comes from.
tdr : xarray.Dataset
Dataset with input data.
depth_name : str
Name of data variable with depth measurements.
time_name : str
Name of the time dimension in the dataset.
has_speed : bool
Whether input data include speed measurements.
speed_name : str
Name of data variable with the speed measurements.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> print(tdrX) # doctest: +ELLIPSIS
Time-Depth Recorder -- Class TDR object ...
"""
def __init__(self, dataset, depth_name="depth", time_name="timestamp",
subsample=None, has_speed=False, tdr_filename=None):
"""Set up attributes for TDRSource objects
Parameters
----------
dataset : xarray.Dataset
Dataset containing depth, and optionally other DataArrays.
depth_name : str, optional
Name of data variable with depth measurements.
time_name : str, optional
Name of the time dimension in the dataset.
subsample : str, optional
Subsample dataset at given frequency specification. See pandas
offset aliases.
has_speed : bool, optional
            Whether the data include speed measurements. Column name must
            be one of ["velocity", "speed"].
        tdr_filename : str, optional
Name of the file from which `dataset` originated.
"""
self.time_name = time_name
if subsample is not None:
self.tdr = (dataset.resample({time_name: subsample})
.interpolate("linear"))
for vname, da in self.tdr.data_vars.items():
da.attrs["sampling_rate"] = (1.0 /
pd.to_timedelta(subsample)
.seconds)
da.attrs["sampling_rate_units"] = "Hz"
_append_xr_attr(da, "history",
"Resampled to {}\n".format(subsample))
else:
self.tdr = dataset
self.depth_name = depth_name
speed_var = [x for x in list(self.tdr.data_vars.keys())
if x in _SPEED_NAMES]
if speed_var and has_speed:
self.has_speed = True
self.speed_name = speed_var[0]
else:
self.has_speed = False
self.speed_name = None
self.tdr_file = tdr_filename
@classmethod
def read_netcdf(cls, tdr_file, depth_name="depth", time_name="timestamp",
subsample=None, has_speed=False, **kwargs):
"""Instantiate object by loading Dataset from NetCDF file
Parameters
----------
tdr_file : str
As first argument for :func:`xarray.load_dataset`.
depth_name : str, optional
Name of data variable with depth measurements. Default: "depth".
time_name : str, optional
Name of the time dimension in the dataset.
subsample : str, optional
Subsample dataset at given frequency specification. See pandas
offset aliases.
has_speed : bool, optional
            Whether the data include speed measurements. Column name must be
one of ["velocity", "speed"]. Default: False.
**kwargs : optional keyword arguments
Arguments passed to :func:`xarray.load_dataset`.
Returns
-------
obj : TDRSource, ZOC, TDRPhases, or TDR
Class matches the caller.
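        Examples
        --------
        A hypothetical sketch; the NetCDF file name is a placeholder:

        >>> tdrX = TDRSource.read_netcdf("dives.nc",
        ...                              has_speed=True)  # doctest: +SKIP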
"""
dataset = _load_dataset(tdr_file, **kwargs)
return cls(dataset, depth_name=depth_name, time_name=time_name,
subsample=subsample, has_speed=has_speed,
tdr_filename=tdr_file)
def __str__(self):
x = self.tdr
depth_xr = x[self.depth_name]
depth_ser = depth_xr.to_series()
objcls = ("Time-Depth Recorder -- Class {} object\n"
.format(self.__class__.__name__))
src = "{0:<20} {1}\n".format("Source File", self.tdr_file)
itv = ("{0:<20} {1}\n"
.format("Sampling interval",
get_var_sampling_interval(depth_xr)))
nsamples = "{0:<20} {1}\n".format("Number of Samples",
depth_xr.shape[0])
beg = "{0:<20} {1}\n".format("Sampling Begins",
depth_ser.index[0])
end = "{0:<20} {1}\n".format("Sampling Ends",
depth_ser.index[-1])
dur = "{0:<20} {1}\n".format("Total duration",
depth_ser.index[-1] -
depth_ser.index[0])
drange = "{0:<20} [{1},{2}]\n".format("Measured depth range",
depth_ser.min(),
depth_ser.max())
others = "{0:<20} {1}\n".format("Other variables",
[x for x in list(x.keys())
if x != self.depth_name])
attr_list = "Attributes:\n"
for key, val in sorted(x.attrs.items()):
attr_list += "{0:>35}: {1}\n".format(key, val)
attr_list = attr_list.rstrip("\n")
return (objcls + src + itv + nsamples + beg + end + dur + drange +
others + attr_list)
def _get_depth(self):
return self.tdr[self.depth_name]
depth = property(_get_depth)
"""Return depth array
Returns
-------
xarray.DataArray
"""
def _get_speed(self):
return self.tdr[self.speed_name]
speed = property(_get_speed)
"""Return speed array
Returns
-------
xarray.DataArray
""" | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/tdrsource.py | 0.884083 | 0.545467 | tdrsource.py | pypi |
import json
__all__ = ["dump_config_template", "assign_xr_attrs"]
_SENSOR_DATA_CONFIG = {
'sampling': "regular",
'sampling_rate': "1",
'sampling_rate_units': "Hz",
'history': "",
'name': "",
'full_name': "",
'description': "",
'units': "",
'units_name': "",
'units_label': "",
'column_name': "",
'frame': "",
'axes': "",
'files': ""
}
_DATASET_CONFIG = {
'dep_id': "",
'dep_device_tzone': "",
'dep_device_regional_settings': "YYYY-mm-dd HH:MM:SS",
'dep_device_time_beg': "",
'deploy': {
'locality': "",
'lon': "",
'lat': "",
'device_time_on': "",
'method': ""
},
'project': {
'name': "",
'date_beg': "",
'date_end': ""
},
'provider': {
'name': "",
'affiliation': "",
'email': "",
'license': "",
'cite': "",
'doi': ""
},
'data': {
'source': "",
'format': "",
'creation_date': "",
'nfiles': ""
},
'device': {
'serial': "",
'make': "",
'type': "",
'model': "",
'url': ""
},
'sensors': {
'firmware': "",
'software': "",
'list': ""
},
'animal': {
'id': "",
'species_common': "",
'species_science': "",
'dbase_url': ""
}
}
def dump_config_template(fname, config_type):
"""Dump configuration file
Dump a json configuration template file to build metadata for a Dataset
or DataArray.
Parameters
----------
fname : str
A valid string path for output file.
config_type : {"dataset", "sensor"}
The type of config to dump.
Examples
--------
>>> import skdiveMove.metadata as metadata
>>> metadata.dump_config_template("mydataset.json",
... "dataset") # doctest: +SKIP
>>> metadata.dump_config_template("mysensor.json",
... "sensor") # doctest: +SKIP
    Then edit the files to your specifications.
"""
with open(fname, "w") as ofile:
if config_type == "dataset":
json.dump(_DATASET_CONFIG, ofile, indent=2)
elif config_type == "sensor":
json.dump(_SENSOR_DATA_CONFIG, ofile, indent=2)
def assign_xr_attrs(obj, config_file):
"""Assign attributes to xarray.Dataset or xarray.DataArray
The `config_file` should have only one-level of nesting.
Parameters
----------
obj : {xarray.Dataset, xarray.DataArray}
Object to assign attributes to.
config_file : str
A valid string path for input json file with metadata attributes.
    Returns
    -------
    None
        The attributes of `obj` are updated in place.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> import xarray as xr
>>> import skdiveMove.metadata as metadata
Synthetic dataset with depth and speed
>>> nsamples = 60 * 60 * 24
>>> times = pd.date_range("2000-01-01", freq="1s", periods=nsamples,
... name="time")
>>> cycles = np.sin(2 * np.pi * np.arange(nsamples) / (60 * 20))
>>> ds = xr.Dataset({"depth": (("time"), 1 + cycles),
... "speed": (("time"), 3 + cycles)},
... {"time": times})
Dump dataset and sensor templates
>>> metadata.dump_config_template("mydataset.json",
... "dataset") # doctest: +SKIP
>>> metadata.dump_config_template("P_sensor.json",
... "sensor") # doctest: +SKIP
>>> metadata.dump_config_template("S_sensor.json",
... "sensor") # doctest: +SKIP
Edit the templates as appropriate, load and assign to objects
>>> assign_xr_attrs(ds, "mydataset.json") # doctest: +SKIP
>>> assign_xr_attrs(ds.depth, "P_sensor.json") # doctest: +SKIP
>>> assign_xr_attrs(ds.speed, "S_sensor.json") # doctest: +SKIP
"""
with open(config_file) as ifile:
config = json.load(ifile)
# Parse the dict
for key, val in config.items():
top_kname = "{}".format(key)
if not val:
continue
if type(val) is dict:
for key_n, val_n in val.items():
if not val_n:
continue
lower_kname = "{0}_{1}".format(top_kname, key_n)
obj.attrs[lower_kname] = val_n
else:
            obj.attrs[top_kname] = val
import numpy as np
import pandas as pd
import xarray as xr
from skdiveMove.core import robjs, cv, pandas2ri, diveMove
__all__ = ["_load_dataset", "_get_dive_indices", "_append_xr_attr",
"get_var_sampling_interval", "_cut_dive",
"_one_dive_stats", "_speed_stats", "rle_key"]
def _load_dataset(filename_or_obj, **kwargs):
"""Private function to load Dataset object from file name or object
Parameters
----------
filename_or_obj : str, Path or xarray.backends.*DataStore
String indicating the file where the data comes from.
**kwargs :
Arguments passed to `xarray.load_dataset`.
Returns
-------
dataset : Dataset
The output Dataset.
"""
return xr.load_dataset(filename_or_obj, **kwargs)
def _get_dive_indices(indices, diveNo):
"""Mapping to diveMove's `.diveIndices`"""
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
# Subtract 1 for zero-based python
idx_ok = diveMove._diveIndices(indices, diveNo) - 1
return idx_ok
def _append_xr_attr(x, attr, val):
"""Append to attribute to xarray.DataArray or xarray.Dataset
If attribute does not exist, create it. Attribute is assumed to be a
string.
Parameters
----------
x : xarray.DataArray or xarray.Dataset
attr : str
Attribute name to update or add
val : str
Attribute value
"""
if attr in x.attrs:
x.attrs[attr] += "{}".format(val)
else:
x.attrs[attr] = "{}".format(val)
def get_var_sampling_interval(x):
"""Retrieve sampling interval from DataArray attributes
Parameters
----------
x : xarray.DataArray
Returns
-------
pandas.Timedelta
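    Examples
    --------
    A small synthetic illustration, assuming the attribute convention
    used throughout this package:

    >>> import xarray as xr
    >>> da = xr.DataArray([0.0, 1.0, 2.0],
    ...                   attrs={"sampling_rate": 5,
    ...                          "sampling_rate_units": "Hz"})
    >>> get_var_sampling_interval(da)
    Timedelta('0 days 00:00:00.200000')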
"""
attrs = x.attrs
sampling_rate = attrs["sampling_rate"]
sampling_rate_units = attrs["sampling_rate_units"]
if sampling_rate_units.lower() == "hz":
sampling_rate = 1 / sampling_rate
sampling_rate_units = "s"
intvl = pd.Timedelta("{}{}"
.format(sampling_rate, sampling_rate_units))
return intvl
def _cut_dive(x, dive_model, smooth_par, knot_factor,
descent_crit_q, ascent_crit_q):
"""Private function to retrieve results from `diveModel` object in R
Parameters
----------
x : pandas.DataFrame
Subset with a single dive's data, with first column expected to be
dive ID.
dive_model : str
smooth_par : float
knot_factor : int
descent_crit_q : float
ascent_crit_q : float
Notes
-----
See details for arguments in diveMove's ``calibrateDepth``. This
function maps to ``diveMove:::.cutDive``, and only sets some of the
parameters from the `R` function.
Returns
-------
out : dict
Dictionary with the following keys and corresponding component:
{'label_matrix', 'dive_spline', 'spline_deriv', 'descent_crit',
'ascent_crit', 'descent_crit_rate', 'ascent_crit_rate'}
"""
xx = x.iloc[:, 1:]
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
dmodel = diveMove._cutDive(cv.py2rpy(xx), dive_model=dive_model,
smooth_par=smooth_par,
knot_factor=knot_factor,
descent_crit_q=descent_crit_q,
ascent_crit_q=ascent_crit_q)
dmodel_slots = ["label.matrix", "dive.spline", "spline.deriv",
"descent.crit", "ascent.crit",
"descent.crit.rate", "ascent.crit.rate"]
lmtx = (np.array(robjs.r.slot(dmodel, dmodel_slots[0]))
.reshape((xx.shape[0], 2), order="F"))
spl = robjs.r.slot(dmodel, dmodel_slots[1])
spl_der = robjs.r.slot(dmodel, dmodel_slots[2])
spl_der = np.column_stack((spl_der[0], spl_der[1]))
desc_crit = robjs.r.slot(dmodel, dmodel_slots[3])[0]
asc_crit = robjs.r.slot(dmodel, dmodel_slots[4])[0]
desc_crit_r = robjs.r.slot(dmodel, dmodel_slots[5])[0]
asc_crit_r = robjs.r.slot(dmodel, dmodel_slots[6])[0]
# Replace dots with underscore for the output
dmodel_slots = [x.replace(".", "_") for x in dmodel_slots]
res = dict(zip(dmodel_slots,
[lmtx, spl, spl_der, desc_crit, asc_crit,
desc_crit_r, asc_crit_r]))
return res
def _one_dive_stats(x, interval, has_speed=False):
"""Calculate dive statistics for a single dive's DataFrame
Parameters
----------
x : pandas.DataFrame
First column expected to be dive ID, the rest as in `diveMove`.
    interval : float
        Sampling interval, in seconds.
    has_speed : bool
        Whether `x` includes a speed column.
Returns
-------
out : pandas.DataFrame
"""
xx = x.iloc[:, 1:]
onames_speed = ["begdesc", "enddesc", "begasc", "desctim", "botttim",
"asctim", "divetim", "descdist", "bottdist", "ascdist",
"bottdep_mean", "bottdep_median", "bottdep_sd",
"maxdep", "desc_tdist", "desc_mean_speed",
"desc_angle", "bott_tdist", "bott_mean_speed",
"asc_tdist", "asc_mean_speed", "asc_angle"]
onames_nospeed = onames_speed[:14]
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
res = diveMove.oneDiveStats(xx, interval, has_speed)
if has_speed:
onames = onames_speed
else:
onames = onames_nospeed
res_df = pd.DataFrame(res, columns=onames)
for tcol in range(3):
# This is per POSIXct convention in R
res_df.iloc[:, tcol] = pd.to_datetime(res_df.iloc[:, tcol],
unit="s")
return res_df
def _speed_stats(x, vdist=None):
"""Calculate total travel distance, mean speed, and angle from speed
Dive stats for a single segment of a dive.
Parameters
----------
x : pandas.Series
Series with speed measurements.
vdist : float, optional
Vertical distance corresponding to `x`.
Returns
-------
    out : numpy.ndarray
        Array with total travel distance, mean speed, and angle for the
        segment.
"""
kwargs = dict(x=x)
if vdist is not None:
kwargs.update(vdist=vdist)
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
res = diveMove._speedStats(**kwargs)
return res
def rle_key(x):
"""Emulate a run length encoder
Assigns a numerical sequence identifying run lengths in input Series.
Parameters
----------
x : pandas.Series
Series with data to encode.
Returns
-------
out : pandas.Series
Examples
--------
>>> N = 18
>>> color = np.repeat(list("ABCABC"), 3)
>>> ss = pd.Series(color,
... index=pd.date_range("2020-01-01", periods=N,
... freq="10s", tz="UTC"),
... dtype="category")
>>> rle_key(ss)
2020-01-01 00:00:00+00:00 1
2020-01-01 00:00:10+00:00 1
2020-01-01 00:00:20+00:00 1
2020-01-01 00:00:30+00:00 2
2020-01-01 00:00:40+00:00 2
2020-01-01 00:00:50+00:00 2
2020-01-01 00:01:00+00:00 3
2020-01-01 00:01:10+00:00 3
2020-01-01 00:01:20+00:00 3
2020-01-01 00:01:30+00:00 4
2020-01-01 00:01:40+00:00 4
2020-01-01 00:01:50+00:00 4
2020-01-01 00:02:00+00:00 5
2020-01-01 00:02:10+00:00 5
2020-01-01 00:02:20+00:00 5
2020-01-01 00:02:30+00:00 6
2020-01-01 00:02:40+00:00 6
2020-01-01 00:02:50+00:00 6
Freq: 10S, dtype: int64
"""
xout = x.ne(x.shift()).cumsum()
return xout
if __name__ == '__main__':
N = 18
color = np.repeat(list("ABCABC"), 3)
ss = pd.Series(color,
index=pd.date_range("2020-01-01", periods=N,
freq="10s", tz="UTC"),
dtype="category")
xx = pd.Series(np.random.standard_normal(10))
    rle_key(xx > 0)
import logging
import numpy as np
import pandas as pd
from skdiveMove.tdrphases import TDRPhases
import skdiveMove.plotting as plotting
import skdiveMove.calibspeed as speedcal
from skdiveMove.helpers import (get_var_sampling_interval,
_get_dive_indices, _append_xr_attr,
_one_dive_stats, _speed_stats)
import skdiveMove.calibconfig as calibconfig
import xarray as xr
logger = logging.getLogger(__name__)
# Add the null handler if importing as library; whatever using this library
# should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
# Keep attributes in xarray operations
xr.set_options(keep_attrs=True)
class TDR(TDRPhases):
"""Base class encapsulating TDR objects and processing
TDR subclasses `TDRPhases` to provide comprehensive TDR processing
capabilities.
See help(TDR) for inherited attributes.
Attributes
----------
speed_calib_fit : quantreg model fit
Model object fit by quantile regression for speed calibration.
Examples
--------
Construct an instance from diveMove example dataset
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
Plot the `TDR` object
>>> tdrX.plot() # doctest: +ELLIPSIS
(<Figure ... 1 Axes>, <AxesSubplot:...>)
"""
def __init__(self, *args, **kwargs):
"""Set up attributes for TDR objects
Parameters
----------
*args : positional arguments
Passed to :meth:`TDRPhases.__init__`
**kwargs : keyword arguments
Passed to :meth:`TDRPhases.__init__`
"""
TDRPhases.__init__(self, *args, **kwargs)
# Speed calibration fit
self.speed_calib_fit = None
def __str__(self):
base = TDRPhases.__str__(self)
speed_fmt_pref = "Speed calibration coefficients:"
if self.speed_calib_fit is not None:
speed_ccoef_a, speed_ccoef_b = self.speed_calib_fit.params
speed_coefs_fmt = ("\n{0:<20} (a={1:.4f}, b={2:.4f})"
.format(speed_fmt_pref,
speed_ccoef_a, speed_ccoef_b))
else:
speed_ccoef_a, speed_ccoef_b = (None, None)
speed_coefs_fmt = ("\n{0:<20} (a=None, b=None)"
.format(speed_fmt_pref))
return base + speed_coefs_fmt
def calibrate_speed(self, tau=0.1, contour_level=0.1, z=0, bad=[0, 0],
**kwargs):
"""Calibrate speed measurements
Set the `speed_calib_fit` attribute
Parameters
----------
tau : float, optional
Quantile on which to regress speed on rate of depth change.
contour_level : float, optional
The mesh obtained from the bivariate kernel density estimation
corresponding to this contour will be used for the quantile
regression to define the calibration line.
z : float, optional
Only changes in depth larger than this value will be used for
calibration.
bad : array_like, optional
Two-element `array_like` indicating that only rates of depth
change and speed greater than the given value should be used
for calibration, respectively.
**kwargs : optional keyword arguments
Passed to :func:`~speedcal.calibrate_speed`
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.calibrate_speed(z=2)
"""
depth = self.get_depth("zoc").to_series()
ddiffs = depth.reset_index().diff().set_index(depth.index)
ddepth = ddiffs["depth"].abs()
rddepth = ddepth / ddiffs[depth.index.name].dt.total_seconds()
curspeed = self.get_speed("measured").to_series()
ok = (ddepth > z) & (rddepth > bad[0]) & (curspeed > bad[1])
rddepth = rddepth[ok]
curspeed = curspeed[ok]
kde_data = pd.concat((rddepth.rename("depth_rate"),
curspeed), axis=1)
qfit, ax = speedcal.calibrate_speed(kde_data, tau=tau,
contour_level=contour_level,
z=z, bad=bad, **kwargs)
self.speed_calib_fit = qfit
logger.info("Finished calibrating speed")
def dive_stats(self, depth_deriv=True):
"""Calculate dive statistics in `TDR` records
Parameters
----------
depth_deriv : bool, optional
Whether to compute depth derivative statistics.
Returns
-------
pandas.DataFrame
Notes
-----
        This method is homologous to diveMove's `diveStats` function.
Examples
--------
ZOC using the "filter" method
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> # Window lengths and probabilities
>>> DB = [-2, 5]
>>> K = [3, 5760]
>>> P = [0.5, 0.02]
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.dive_stats() # doctest: +ELLIPSIS
begdesc ... postdive_mean_speed
1 2002-01-05 ... 1.398859
2 ...
"""
phases_df = self.get_dives_details("row_ids")
idx_name = phases_df.index.name
# calib_speed=False if no fit object
if self.has_speed:
tdr = (self.get_tdr(calib_depth=True,
calib_speed=bool(self.speed_calib_fit))
[[self.depth_name, self.speed_name]])
else:
tdr = (self.get_tdr(calib_depth=True,
calib_speed=bool(self.speed_calib_fit))
[[self.depth_name]])
intvl = (get_var_sampling_interval(tdr[self.depth_name])
.total_seconds())
tdr = tdr.to_dataframe()
dive_ids = phases_df.loc[:, "dive_id"]
postdive_ids = phases_df.loc[:, "postdive_id"]
ok = (dive_ids > 0) & dive_ids.isin(postdive_ids)
okpd = (postdive_ids > 0) & postdive_ids.isin(dive_ids)
postdive_ids = postdive_ids[okpd]
postdive_dur = (postdive_ids.reset_index()
.groupby("postdive_id")
.apply(lambda x: x.iloc[-1] - x.iloc[0]))
# Enforce UTC, as otherwise rpy2 uses our locale in the output of
# OneDiveStats
tdrf = (pd.concat((phases_df[["dive_id", "dive_phase"]][ok],
tdr.loc[ok.index[ok]]), axis=1)
.tz_localize("UTC").reset_index())
# Ugly hack to re-order columns for `diveMove` convention
names0 = ["dive_id", "dive_phase", idx_name, self.depth_name]
colnames = tdrf.columns.to_list()
if self.has_speed:
names0.append(self.speed_name)
colnames = names0 + list(set(colnames) - set(names0))
tdrf = tdrf.reindex(columns=colnames)
tdrf_grp = tdrf.groupby("dive_id")
ones_list = []
for name, grp in tdrf_grp:
res = _one_dive_stats(grp.loc[:, names0], interval=intvl,
has_speed=self.has_speed)
# Rename to match dive number
res = res.rename({0: name})
if depth_deriv:
deriv_stats = self._get_dive_deriv_stats(name)
res = pd.concat((res, deriv_stats), axis=1)
ones_list.append(res)
ones_df = pd.concat(ones_list, ignore_index=True)
ones_df.set_index(dive_ids[ok].unique(), inplace=True)
ones_df.index.rename("dive_id", inplace=True)
ones_df["postdive_dur"] = postdive_dur[idx_name]
# For postdive total distance and mean speed (if available)
if self.has_speed:
speed_postd = (tdr[self.speed_name][okpd]
.groupby(postdive_ids))
pd_speed_ll = []
for name, grp in speed_postd:
res = _speed_stats(grp.reset_index())
onames = ["postdive_tdist", "postdive_mean_speed"]
res_df = pd.DataFrame(res[:, :-1], columns=onames,
index=[name])
pd_speed_ll.append(res_df)
pd_speed_stats = pd.concat(pd_speed_ll)
ones_df = pd.concat((ones_df, pd_speed_stats), axis=1)
return ones_df
def plot(self, concur_vars=None, concur_var_titles=None, **kwargs):
"""Plot TDR object
Parameters
----------
concur_vars : str or list, optional
String or list of strings with names of columns in input to
select additional data to plot.
concur_var_titles : str or list, optional
String or list of strings with y-axis labels for `concur_vars`.
**kwargs : optional keyword arguments
Arguments passed to plotting function.
Returns
-------
tuple
:class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes` instances.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> tdrX.plot(xlim=["2002-01-05 21:00:00", "2002-01-06 04:10:00"],
... depth_lim=[95, -1]) # doctest: +ELLIPSIS
(<Figure ... with 1 Axes>, <AxesSubplot:...'>)
"""
try:
depth = self.get_depth("zoc")
except LookupError:
depth = self.get_depth("measured")
if "ylab_depth" not in kwargs:
ylab_depth = ("{0} [{1}]"
.format(depth.attrs["full_name"],
depth.attrs["units"]))
kwargs.update(ylab_depth=ylab_depth)
depth = depth.to_series()
if concur_vars is None:
fig, ax = plotting.plot_tdr(depth, **kwargs)
elif concur_var_titles is None:
ccvars = self.tdr[concur_vars].to_dataframe()
fig, ax = plotting.plot_tdr(depth, concur_vars=ccvars, **kwargs)
else:
ccvars = self.tdr[concur_vars].to_dataframe()
ccvars_title = concur_var_titles # just to shorten
fig, ax = plotting.plot_tdr(depth,
concur_vars=ccvars,
concur_var_titles=ccvars_title,
**kwargs)
return (fig, ax)
def plot_zoc(self, xlim=None, ylim=None, **kwargs):
"""Plot zero offset correction filters
Parameters
----------
xlim, ylim : 2-tuple/list, optional
Minimum and maximum limits for ``x``- and ``y``-axis,
respectively.
**kwargs : optional keyword arguments
Passed to :func:`~matplotlib.pyplot.subplots`.
Returns
-------
tuple
:class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes` instances.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> # Window lengths and probabilities
>>> DB = [-2, 5]
>>> K = [3, 5760]
>>> P = [0.5, 0.02]
>>> tdrX.zoc("filter", k=K, probs=P, depth_bounds=DB)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.plot_zoc() # doctest: +ELLIPSIS
(<Figure ... with 3 Axes>, array([<AxesSubplot:...'>,
<AxesSubplot:...'>, <AxesSubplot:...>], dtype=object))
"""
zoc_method = self.zoc_method
depth_msrd = self.get_depth("measured")
ylab = ("{0} [{1}]"
.format(depth_msrd.attrs["full_name"],
depth_msrd.attrs["units"]))
if zoc_method == "filter":
zoc_filters = self.zoc_filters
depth = depth_msrd.to_series()
if "ylab" not in kwargs:
kwargs.update(ylab=ylab)
fig, ax = (plotting
._plot_zoc_filters(depth, zoc_filters, xlim, ylim,
**kwargs))
elif zoc_method == "offset":
depth_msrd = depth_msrd.to_series()
depth_zoc = self.get_depth("zoc").to_series()
fig, ax = plotting.plt.subplots(1, 1, **kwargs)
ax = depth_msrd.plot(ax=ax, rot=0, label="measured")
depth_zoc.plot(ax=ax, label="zoc")
ax.axhline(0, linestyle="--", linewidth=0.75, color="k")
ax.set_xlabel("")
ax.set_ylabel(ylab)
ax.legend(loc="lower right")
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.invert_yaxis()
return (fig, ax)
def plot_phases(self, diveNo=None, concur_vars=None,
concur_var_titles=None, surface=False, **kwargs):
"""Plot major phases found on the object
Parameters
----------
diveNo : array_like, optional
List of dive numbers (1-based) to plot.
concur_vars : str or list, optional
String or list of strings with names of columns in input to
select additional data to plot.
concur_var_titles : str or list, optional
String or list of strings with y-axis labels for `concur_vars`.
surface : bool, optional
Whether to plot surface readings.
**kwargs : optional keyword arguments
Arguments passed to plotting function.
Returns
-------
tuple
:class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes` instances.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.plot_phases(list(range(250, 300)),
... surface=True) # doctest: +ELLIPSIS
(<Figure ... with 1 Axes>, <AxesSubplot:...>)
"""
row_ids = self.get_dives_details("row_ids")
dive_ids = row_ids["dive_id"]
dive_ids_uniq = dive_ids.unique()
postdive_ids = row_ids["postdive_id"]
if diveNo is None:
diveNo = np.arange(1, row_ids["dive_id"].max() + 1).tolist()
else:
diveNo = [x for x in sorted(diveNo) if x in dive_ids_uniq]
depth_all = self.get_depth("zoc").to_dataframe() # DataFrame
if concur_vars is None:
dives_all = depth_all
else:
concur_df = self.tdr.to_dataframe().loc[:, concur_vars]
dives_all = pd.concat((depth_all, concur_df), axis=1)
isin_dive_ids = dive_ids.isin(diveNo)
isin_postdive_ids = postdive_ids.isin(diveNo)
if surface:
isin = isin_dive_ids | isin_postdive_ids
dives_in = dives_all[isin]
sfce0_idx = (postdive_ids[postdive_ids == diveNo[0] - 1]
.last_valid_index())
dives_df = pd.concat((dives_all.loc[[sfce0_idx]], dives_in),
axis=0)
details_df = pd.concat((row_ids.loc[[sfce0_idx]], row_ids[isin]),
axis=0)
else:
idx_ok = _get_dive_indices(dive_ids, diveNo)
dives_df = dives_all.iloc[idx_ok, :]
details_df = row_ids.iloc[idx_ok, :]
wet_dry = self.time_budget(ignore_z=True, ignore_du=True)
drys = wet_dry[wet_dry["phase_label"] == "L"][["beg", "end"]]
if (drys.shape[0] > 0):
dry_time = drys
else:
dry_time = None
if concur_vars is None:
fig, ax = (plotting
.plot_tdr(dives_df.iloc[:, 0],
phase_cat=details_df["dive_phase"],
dry_time=dry_time, **kwargs))
else:
fig, ax = (plotting
.plot_tdr(dives_df.iloc[:, 0],
concur_vars=dives_df.iloc[:, 1:],
concur_var_titles=concur_var_titles,
phase_cat=details_df["dive_phase"],
dry_time=dry_time, **kwargs))
return (fig, ax)
def plot_dive_model(self, diveNo=None, **kwargs):
"""Plot dive model for selected dive
Parameters
----------
diveNo : array_like, optional
List of dive numbers (1-based) to plot.
**kwargs : optional keyword arguments
Arguments passed to plotting function.
Returns
-------
tuple
:class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes` instances.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.plot_dive_model(diveNo=20,
... figsize=(10, 10)) # doctest: +ELLIPSIS
(<Figure ... with 2 Axes>, (<AxesSubplot:...>, <AxesSubplot:...>))
"""
dive_ids = self.get_dives_details("row_ids", "dive_id")
crit_vals = self.get_dives_details("crit_vals").loc[diveNo]
idxs = _get_dive_indices(dive_ids, diveNo)
depth = self.get_depth("zoc").to_dataframe().iloc[idxs]
depth_s = self._get_dive_spline_slot(diveNo, "xy")
depth_deriv = (self.get_dives_details("spline_derivs").loc[diveNo])
# Index with time stamp
if depth.shape[0] < 4:
depth_s_idx = pd.date_range(depth.index[0], depth.index[-1],
periods=depth_s.shape[0],
tz=depth.index.tz)
depth_s = pd.Series(depth_s.to_numpy(), index=depth_s_idx)
dderiv_idx = pd.date_range(depth.index[0], depth.index[-1],
periods=depth_deriv.shape[0],
tz=depth.index.tz)
# Extract only the series and index with time stamp
depth_deriv = pd.Series(depth_deriv["y"].to_numpy(),
index=dderiv_idx)
else:
depth_s = pd.Series(depth_s.to_numpy(),
index=depth.index[0] + depth_s.index)
# Extract only the series and index with time stamp
depth_deriv = pd.Series(depth_deriv["y"].to_numpy(),
index=depth.index[0] + depth_deriv.index)
# Force integer again as `loc` coerced to float above
d_crit = crit_vals["descent_crit"].astype(int)
a_crit = crit_vals["ascent_crit"].astype(int)
d_crit_rate = crit_vals["descent_crit_rate"]
a_crit_rate = crit_vals["ascent_crit_rate"]
title = "Dive: {:d}".format(diveNo)
fig, axs = plotting.plot_dive_model(depth, depth_s=depth_s,
depth_deriv=depth_deriv,
d_crit=d_crit, a_crit=a_crit,
d_crit_rate=d_crit_rate,
a_crit_rate=a_crit_rate,
leg_title=title, **kwargs)
return (fig, axs)
def get_depth(self, kind="measured"):
"""Retrieve depth records
Parameters
----------
kind : {"measured", "zoc"}
Which depth to retrieve.
Returns
-------
xarray.DataArray
"""
kinds = ["measured", "zoc"]
if kind == kinds[0]:
odepth = self.depth
elif kind == kinds[1]:
odepth = self.depth_zoc
if odepth is None:
msg = "ZOC depth not available."
logger.error(msg)
raise LookupError(msg)
else:
msg = "kind must be one of: {}".format(kinds)
logger.error(msg)
raise LookupError(msg)
return odepth
def get_speed(self, kind="measured"):
"""Retrieve speed records
Parameters
----------
kind : {"measured", "calibrated"}
Which speed to retrieve.
Returns
-------
xarray.DataArray
"""
kinds = ["measured", "calibrated"]
ispeed = self.speed
if kind == kinds[0]:
ospeed = ispeed
elif kind == kinds[1]:
qfit = self.speed_calib_fit
if qfit is None:
msg = "Calibrated speed not available."
logger.error(msg)
raise LookupError(msg)
else:
coefs = qfit.params
coef_a = coefs[0]
coef_b = coefs[1]
ospeed = (ispeed - coef_a) / coef_b
_append_xr_attr(ospeed, "history", "speed_calib_fit")
else:
msg = "kind must be one of: {}".format(kinds)
logger.error(msg)
raise LookupError(msg)
return ospeed
def get_tdr(self, calib_depth=True, calib_speed=True):
"""Return a copy of tdr Dataset
Parameters
----------
calib_depth : bool, optional
Whether to return calibrated depth measurements.
calib_speed : bool, optional
Whether to return calibrated speed measurements.
Returns
-------
xarray.Dataset
"""
tdr = self.tdr.copy()
if calib_depth:
depth_name = self.depth_name
depth_cal = self.get_depth("zoc")
tdr[depth_name] = depth_cal
if self.has_speed and calib_speed:
speed_name = self.speed_name
speed_cal = self.get_speed("calibrated")
tdr[speed_name] = speed_cal
return tdr
def extract_dives(self, diveNo, **kwargs):
"""Extract TDR data corresponding to a particular set of dives
Parameters
----------
diveNo : array_like, optional
List of dive numbers (1-based) to plot.
**kwargs : optional keyword arguments
Passed to :meth:`get_tdr`
Returns
-------
xarray.Dataset
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd(has_speed=False)
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.extract_dives(diveNo=20) # doctest: +ELLIPSIS
<xarray.Dataset>
Dimensions: ...
"""
dive_ids = self.get_dives_details("row_ids", "dive_id")
idx_name = dive_ids.index.name
idxs = _get_dive_indices(dive_ids, diveNo)
tdr = self.get_tdr(**kwargs)
tdr_i = tdr[{idx_name: idxs.astype(int)}]
return tdr_i
def calibrate(tdr_file, config_file=None):
"""Perform all major TDR calibration operations
Detect periods of major activities in a `TDR` object, calibrate depth
readings, and speed if appropriate, in preparation for subsequent
summaries of diving behaviour.
This function is a convenience wrapper around :meth:`~TDR.detect_wet`,
:meth:`~TDR.detect_dives`, :meth:`~TDR.detect_dive_phases`,
:meth:`~TDR.zoc`, and :meth:`~TDR.calibrate_speed`. It performs
wet/dry phase detection, zero-offset correction of depth, detection of
dives, as well as proper labelling of the latter, and calibrates speed
data if appropriate.
Due to the complexity of this procedure, and the number of settings
required for it, a calibration configuration file (JSON) is used to
guide the operations.
Parameters
----------
tdr_file : str, Path or xarray.backends.*DataStore
As first argument for :func:`xarray.load_dataset`.
config_file : str
A valid string path for TDR calibration configuration file.
Returns
-------
out : TDR
See Also
--------
dump_config_template : configuration template
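    Examples
    --------
    A hypothetical sketch; both file names are placeholders for a NetCDF
    data set and a JSON configuration file built from
    `dump_config_template`:

    >>> tdr_calib = calibrate("dives.nc", "tdr_calib.json")  # doctest: +SKIP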
"""
if config_file is None:
config = calibconfig._DEFAULT_CONFIG
else:
config = calibconfig.read_config(config_file)
logger = logging.getLogger(__name__)
logger.setLevel(config["log_level"])
load_dataset_kwargs = config["read"].pop("load_dataset_kwargs")
logger.info("Reading config: {}, {}"
.format(config["read"], load_dataset_kwargs))
tdr = TDR.read_netcdf(tdr_file, **config["read"], **load_dataset_kwargs)
do_zoc = config["zoc"].pop("required")
if do_zoc:
logger.info("ZOC config: {}".format(config["zoc"]))
tdr.zoc(config["zoc"]["method"], **config["zoc"]["parameters"])
logger.info("Wet/Dry config: {}".format(config["wet_dry"]))
tdr.detect_wet(**config["wet_dry"])
logger.info("Dives config: {}".format(config["dives"]))
tdr.detect_dives(config["dives"].pop("dive_thr"))
tdr.detect_dive_phases(**config["dives"])
do_speed_calib = bool(config["speed_calib"].pop("required"))
if tdr.has_speed and do_speed_calib:
logger.info("Speed calibration config: {}"
.format(config["speed_calib"]))
tdr.calibrate_speed(**config["speed_calib"], plot=False)
return tdr
if __name__ == '__main__':
# Set up info level logging
logging.basicConfig(level=logging.INFO)
ifile = r"tests/data/ag_mk7_2002_022.nc"
tdrX = TDR.read_netcdf(ifile, has_speed=True)
# tdrX = TDRSource(ifile, has_speed=True)
    # print(tdrX)
import numpy as np
from scipy.optimize import curve_fit
# Mapping of error type with corresponding tau and slope
_ERROR_DEFS = {"Q": [np.sqrt(3), -1], "ARW": [1.0, -0.5],
"BI": [np.nan, 0], "RRW": [3.0, 0.5],
"RR": [np.sqrt(2), 1]}
def _armav_nls_fun(x, *args):
coefs = np.array(args).reshape(len(args), 1)
return np.log10(np.dot(x, coefs ** 2)).flatten()
def _armav(taus, adevs):
nsize = taus.size
# Linear regressor matrix
x0 = np.sqrt(np.column_stack([3 / (taus ** 2), 1 / taus,
np.ones(nsize), taus / 3,
taus ** 2 / 2]))
# Ridge regression bias constant
lambda0 = 5e-3
id0 = np.eye(5)
sigma0 = np.linalg.solve((np.dot(x0.T, x0) + lambda0 * id0),
np.dot(x0.T, adevs))
# TODO: need to be able to set bounds
popt, pcov = curve_fit(_armav_nls_fun, x0 ** 2,
np.log10(adevs ** 2), p0=sigma0)
# Compute the bias instability
sigma_hat = np.abs(popt)
adev_reg = np.sqrt(np.dot(x0 ** 2, sigma_hat ** 2))
sigma_hat[2] = np.min(adev_reg) / np.sqrt((2 * np.log(2) / np.pi))
return (sigma_hat, popt, adev_reg)
def _line_fun(t, alpha, tau_crit, adev_crit):
"""Find Allan sigma coefficient from line and point
Log-log parameterization of the point-slope line equation.
Parameters
----------
t : {float, array_like}
Averaging time
alpha : float
Slope of Allan deviation line
tau_crit : float
Observed averaging time
adev_crit : float
Observed Allan deviation at `tau_crit`
"""
return (10 ** (alpha * (np.log10(t) - np.log10(tau_crit)) +
np.log10(adev_crit)))
def allan_coefs(taus, adevs):
"""Compute Allan deviation coefficients for each error type
Given averaging intervals ``taus`` and corresponding Allan deviation
``adevs``, compute the Allan deviation coefficient for each error type:
- Quantization
- (Angle, Velocity) Random Walk
- Bias Instability
- Rate Random Walk
- Rate Ramp
Parameters
----------
taus : array_like
Averaging times
adevs : array_like
Allan deviation
Returns
-------
    sigmas_hat : dict
        Dictionary with the estimated Allan deviation coefficient for
        each error type (Q, ARW, BI, RRW, RR).
adev_reg : numpy.ndarray
The array of Allan deviations fitted to `taus`.
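    Examples
    --------
    A hedged sketch, assuming `taus` and `adevs` come from an Allan
    deviation analysis (e.g. :func:`allantools.mdev`):

    >>> sigmas, adevs_fitted = allan_coefs(taus, adevs)  # doctest: +SKIP
    >>> sorted(sigmas.keys())  # doctest: +SKIP
    ['ARW', 'BI', 'Q', 'RR', 'RRW']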
"""
# Fit ARMAV model
sigmas_hat, popt, adev_reg = _armav(taus, adevs)
sigmas_d = dict(zip(_ERROR_DEFS.keys(), sigmas_hat))
    return (sigmas_d, adev_reg)
import numpy as np
from scipy.spatial.transform import Rotation as R
def normalize(v):
"""Normalize vector
Parameters
----------
v : array_like (N,) or (M,N)
input vector
Returns
-------
numpy.ndarray
Normalized vector having magnitude 1.
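    Examples
    --------
    A single vector is scaled to unit magnitude:

    >>> normalize(np.array([3.0, 4.0, 0.0]))
    array([0.6, 0.8, 0. ])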
"""
return v / np.linalg.norm(v, axis=-1, keepdims=True)
def vangle(v1, v2):
"""Angle between one or more vectors
Parameters
----------
v1 : array_like (N,) or (M,N)
vector 1
v2 : array_like (N,) or (M,N)
vector 2
Returns
-------
angle : double or numpy.ndarray(M,)
angle between v1 and v2
Example
-------
>>> v1 = np.array([[1,2,3],
... [4,5,6]])
>>> v2 = np.array([[1,0,0],
... [0,1,0]])
>>> vangle(v1,v2)
array([1.30024656, 0.96453036])
Notes
-----
.. image:: .static/images/vector_angle.png
:scale: 75%
.. math::
\\alpha =arccos(\\frac{\\vec{v_1} \\cdot \\vec{v_2}}{| \\vec{v_1} |
\\cdot | \\vec{v_2}|})
"""
v1_norm = v1 / np.linalg.norm(v1, axis=-1, keepdims=True)
v2_norm = v2 / np.linalg.norm(v2, axis=-1, keepdims=True)
v1v2 = np.einsum("ij,ij->i", *np.atleast_2d(v1_norm, v2_norm))
angle = np.arccos(v1v2)
if len(angle) == 1:
angle = angle.item()
return angle
def rotate_vector(vector, q, inverse=False):
"""Apply rotations to vector or array of vectors given quaternions
Parameters
----------
vector : array_like
One (1D) or more (2D) array with vectors to rotate.
q : array_like
        One (1D) or more (2D) array with quaternion vectors. The scalar
        component must be last to match `scipy`'s convention.
    inverse : bool, optional
        If True, apply the inverse of the rotation(s) in `q`.
Returns
-------
numpy.ndarray
The rotated input vector array.
Notes
-----
.. image:: .static/images/vector_rotate.png
:scale: 75%
.. math::
q \\circ \\left( {\\vec x \\cdot \\vec I} \\right) \\circ {q^{ - 1}} =
\\left( {{\\bf{R}} \\cdot \\vec x} \\right) \\cdot \\vec I
More info under
http://en.wikipedia.org/wiki/Quaternion
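    Examples
    --------
    Rotate the x unit vector by 90 degrees about the z axis, with the
    quaternion given in scalar-last form:

    >>> q_z90 = np.array([0, 0, np.sin(np.pi / 4), np.cos(np.pi / 4)])
    >>> np.round(rotate_vector(np.array([1.0, 0.0, 0.0]), q_z90), 6)
    array([0., 1., 0.])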
"""
rotator = R.from_quat(q)
    return rotator.apply(vector, inverse=inverse)
import numpy as np
import pandas as pd
import allantools as allan
import ahrs.filters as filters
from scipy import constants, signal, integrate
from sklearn import preprocessing
from skdiveMove.tdrsource import _load_dataset
from .allan import allan_coefs
from .vector import rotate_vector
_TIME_NAME = "timestamp"
_DEPTH_NAME = "depth"
_ACCEL_NAME = "acceleration"
_OMEGA_NAME = "angular_velocity"
_MAGNT_NAME = "magnetic_density"
class IMUBase:
"""Define IMU data source
Use :class:`xarray.Dataset` to ensure pseudo-standard metadata.
Attributes
----------
imu_file : str
String indicating the file where the data comes from.
imu : xarray.Dataset
Dataset with input data.
imu_var_names : list
Names of the data variables with accelerometer, angular velocity,
and magnetic density measurements.
has_depth : bool
Whether input data include depth measurements.
depth_name : str
Name of the data variable with depth measurements.
time_name : str
Name of the time dimension in the dataset.
quats : numpy.ndarray
Array of quaternions representing the orientation relative to the
frame of the IMU object data. Note that the scalar component is
last, following `scipy`'s convention.
Examples
--------
This example illustrates some of the issues encountered while reading
data files in a real-world scenario. ``scikit-diveMove`` includes a
NetCDF file with IMU signals collected using a Samsung Galaxy S5 mobile
phone. Set up instance from NetCDF example data:
>>> import pkg_resources as pkg_rsrc
>>> import os.path as osp
>>> import xarray as xr
>>> import skdiveMove.imutools as imutools
>>> icdf = (pkg_rsrc
... .resource_filename("skdiveMove",
... osp.join("tests", "data",
... "samsung_galaxy_s5.nc")))
The angular velocity and magnetic density arrays have two sets of
measurements: output and measured, which, along with the sensor axis
designation, constitutes a multi-index. These multi-indices can be
rebuilt prior to instantiating IMUBase, as they provide significant
advantages for indexing later:
>>> s5ds = (xr.load_dataset(icdf)
... .set_index(gyroscope=["gyroscope_type", "gyroscope_axis"],
... magnetometer=["magnetometer_type",
... "magnetometer_axis"]))
>>> imu = imutools.IMUBase(s5ds.sel(gyroscope="output",
... magnetometer="output"),
... imu_filename=icdf)
See :doc:`demo_allan` demo for an extended example of typical usage of
the methods in this class.
"""
def __init__(self, dataset,
acceleration_name=_ACCEL_NAME,
angular_velocity_name=_OMEGA_NAME,
magnetic_density_name=_MAGNT_NAME,
time_name=_TIME_NAME,
has_depth=False, depth_name=_DEPTH_NAME,
imu_filename=None):
"""Set up attributes for IMU objects
Parameters
----------
dataset : xarray.Dataset
Dataset containing IMU sensor DataArrays, and optionally other
DataArrays.
acceleration_name : str, optional
Name of the acceleration ``DataArray`` in the ``Dataset``.
angular_velocity_name : str, optional
Name of the angular velocity ``DataArray`` in the ``Dataset``.
magnetic_density_name : str, optional
Name of the magnetic density ``DataArray`` in the ``Dataset``.
time_name : str, optional
Name of the time dimension in the dataset.
has_depth : bool, optional
Whether input data include depth measurements.
depth_name : str, optional
Name of the depth ``DataArray`` in the ``Dataset``.
imu_filename : str, optional
Name of the file from which ``dataset`` originated.
"""
self.time_name = time_name
self.imu = dataset
self.imu_var_names = [acceleration_name,
angular_velocity_name,
magnetic_density_name]
if has_depth:
self.has_depth = True
self.depth_name = depth_name
else:
self.has_depth = False
self.depth_name = None
self.imu_file = imu_filename
self.quats = None
@classmethod
def read_netcdf(cls, imu_file,
acceleration_name=_ACCEL_NAME,
angular_velocity_name=_OMEGA_NAME,
magnetic_density_name=_MAGNT_NAME,
time_name=_TIME_NAME,
has_depth=False, depth_name=_DEPTH_NAME,
**kwargs):
"""Instantiate object by loading Dataset from NetCDF file
Provided all ``DataArray`` in the NetCDF file have the same
dimensions (N, 3), this is an efficient way to instantiate.
Parameters
----------
imu_file : str
As first argument for :func:`xarray.load_dataset`.
acceleration_name : str, optional
Name of the acceleration ``DataArray`` in the ``Dataset``.
angular_velocity_name : str, optional
Name of the angular velocity ``DataArray`` in the ``Dataset``.
magnetic_density_name : str, optional
Name of the magnetic density ``DataArray`` in the ``Dataset``.
        time_name : str, optional
            Name of the time dimension in the dataset.
has_depth : bool, optional
Whether input data include depth measurements.
depth_name : str, optional
Name of the depth ``DataArray`` in the ``Dataset``.
**kwargs : optional keyword arguments
Arguments passed to :func:`xarray.load_dataset`.
Returns
-------
obj : IMUBase
Class matches the caller.
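        Examples
        --------
        A hypothetical sketch for a file where all sensor arrays already
        share (N, 3) dimensions; the file name is a placeholder:

        >>> imu = IMUBase.read_netcdf("imu_data.nc")  # doctest: +SKIP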
"""
dataset = _load_dataset(imu_file, **kwargs)
return cls(dataset, acceleration_name=acceleration_name,
angular_velocity_name=angular_velocity_name,
magnetic_density_name=magnetic_density_name,
time_name=time_name, has_depth=has_depth,
depth_name=depth_name, imu_filename=imu_file)
def __str__(self):
x = self.imu
objcls = ("IMU -- Class {} object\n"
.format(self.__class__.__name__))
src = "{0:<20} {1}\n".format("Source File", self.imu_file)
imu_desc = "IMU: {}".format(x.__str__())
return objcls + src + imu_desc
def _allan_deviation(self, sensor, taus):
"""Compute Allan deviation for all axes of a given sensor
Currently uses the modified Allan deviation in package
`allantools`.
Parameters
----------
sensor : str
Attribute name of the sensor of interest
taus : float, str
Tau value, in seconds, for which to compute statistic. Can be
one of "octave" or "decade" for automatic generation of the
value.
Returns
-------
pandas.DataFrame
Allan deviation and error for each sensor axis. DataFrame
index is the averaging time `tau` for each estimate.
"""
sensor_obj = getattr(self, sensor)
sampling_rate = sensor_obj.attrs["sampling_rate"]
sensor_std = preprocessing.scale(sensor_obj, with_std=False)
allan_l = []
for axis in sensor_std.T:
taus, adevs, errs, ns = allan.mdev(axis, rate=sampling_rate,
data_type="freq",
taus=taus)
# taus is common to all sensor axes
adevs_df = pd.DataFrame(np.column_stack((adevs, errs)),
columns=["allan_dev", "error"],
index=taus)
allan_l.append(adevs_df)
keys = [sensor + "_" + i for i in list("xyz")]
devs = pd.concat(allan_l, axis=1, keys=keys)
return devs
def allan_coefs(self, sensor, taus):
"""Estimate Allan deviation coefficients for each error type
This procedure implements the autonomous regression method for
Allan variance described in [1]_.
Given averaging intervals ``taus`` and corresponding Allan
deviation ``adevs``, compute the Allan deviation coefficient for
each error type:
- Quantization
- (Angle, Velocity) Random Walk
- Bias Instability
- Rate Random Walk
- Rate Ramp
Parameters
----------
sensor : str
Attribute name of the sensor of interest
taus : float, str
Tau value, in seconds, for which to compute statistic. Can be
one of "octave" or "decade" for automatic generation of the
value.
Returns
-------
coefs_all : pandas.DataFrame
Allan deviation coefficient and corresponding averaging time
for each sensor axis and error type.
adevs : pandas.DataFrame
`MultiIndex` DataFrame with Allan deviation, corresponding
averaging time, and fitted ARMAV model estimates of the
coefficients for each sensor axis and error type.
Notes
-----
Currently uses a modified Allan deviation formula.
.. [1] Jurado, J, Schubert Kabban, CM, Raquet, J (2019). A
regression-based methodology to improve estimation of
inertial sensor errors using Allan variance data. Navigation
66:251-263.
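        Examples
        --------
        A hedged sketch, assuming an instance ``imu`` built as in the
        class example and octave-spaced averaging times:

        >>> coefs, adevs = imu.allan_coefs("angular_velocity",
        ...                                taus="octave")  # doctest: +SKIP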
"""
adevs_errs = self._allan_deviation(sensor, taus)
taus = adevs_errs.index.to_numpy()
adevs = adevs_errs.xs("allan_dev", level=1, axis=1).to_numpy()
coefs_l = []
fitted_l = []
for adevs_i in adevs.T:
coefs_i, adevs_fitted = allan_coefs(taus, adevs_i)
# Parse output for dataframe
coefs_l.append(pd.Series(coefs_i))
fitted_l.append(adevs_fitted)
keys = [sensor + "_" + i for i in list("xyz")]
coefs_all = pd.concat(coefs_l, keys=keys, axis=1)
fitted_all = pd.DataFrame(np.column_stack(fitted_l), columns=keys,
index=taus)
fitted_all.columns = (pd.MultiIndex
.from_tuples([(c, "fitted")
for c in fitted_all]))
adevs = (pd.concat([adevs_errs, fitted_all], axis=1)
.sort_index(axis=1))
return (coefs_all, adevs)
def compute_orientation(self, method="Madgwick", **kwargs):
"""Compute the orientation of IMU tri-axial signals
The method must be one of the following estimators implemented in
Python module :mod:`ahrs.filters`:
- ``AngularRate``: Attitude from angular rate
- ``AQUA``: Algebraic quaternion algorithm
- ``Complementary``: Complementary filter
- ``Davenport``: Davenport's q-method
- ``EKF``: Extended Kalman filter
- ``FAAM``: Fast accelerometer-magnetometer combination
- ``FLAE``: Fast linear attitude estimator
- ``Fourati``: Fourati's nonlinear attitude estimation
- ``FQA``: Factored quaternion algorithm
- ``Madgwick``: Madgwick orientation filter
- ``Mahony``: Mahony orientation filter
- ``OLEQ``: Optimal linear estimator quaternion
        - ``QUEST``: QUaternion ESTimator
- ``ROLEQ``: Recursive optimal linear estimator of quaternion
- ``SAAM``: Super-fast attitude from accelerometer and magnetometer
- ``Tilt``: Attitude from gravity
The estimated quaternions are stored in the ``quats`` attribute.
Parameters
----------
method : str, optional
Name of the filtering method to use.
**kwargs : optional keyword arguments
Arguments passed to filtering method.
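        Examples
        --------
        A hedged sketch, assuming an instance ``imu`` built as in the
        class example; ``gain`` is a Madgwick-specific parameter passed
        through to :mod:`ahrs.filters`:

        >>> imu.compute_orientation(method="Madgwick",
        ...                         gain=0.033)  # doctest: +SKIP
        >>> imu.quats.shape  # doctest: +SKIP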
"""
orienter_cls = getattr(filters, method)
orienter = orienter_cls(acc=self.acceleration,
gyr=self.angular_velocity,
mag=self.magnetic_density,
Dt=self.sampling_interval,
**kwargs)
self.quats = orienter.Q
def dead_reckon(self, g=constants.g, Wn=1.0, k=1.0):
"""Calculate position assuming orientation is already known
Integrate dynamic acceleration in the body frame to calculate
velocity and position. If the IMU instance has a depth signal, it
is used in the integration instead of acceleration in the vertical
dimension.
Parameters
----------
g : float, optional
Assume gravity (:math:`m / s^2`) is equal to this value.
Default to standard gravity.
Wn : float, optional
Cutoff frequency for second-order Butterworth lowpass filter.
k : float, optional
Scalar to apply to scale lowpass-filtered dynamic acceleration.
This scaling has the effect of making position estimates
realistic for dead-reckoning tracking purposes.
Returns
-------
vel, pos : numpy.ndarray
Velocity and position 2D arrays.
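Examples
--------
Illustrative sketch; the ``imu`` instance is hypothetical and assumed
to have orientation quaternions already computed:
>>> imu.compute_orientation(method="Madgwick")  # doctest: +SKIP
>>> vel, pos = imu.dead_reckon()  # doctest: +SKIP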
"""
# Use the estimated quaternions and the measured acceleration to
# obtain dynamic acceleration (the second derivative of position).
# The sampling frequency retrieved below is assumed to be common to
# all signals.
fs = self.acceleration.attrs["sampling_rate"]
# Shift quaternions to scalar last to match convention
quats = np.roll(self.quats, -1, axis=1)
g_v = rotate_vector(np.array([0, 0, g]), quats, inverse=True)
acc_sensor = self.acceleration - g_v
acc_space = rotate_vector(acc_sensor, quats, inverse=False)
# Low-pass Butterworth filter design
b, a = signal.butter(2, Wn, btype="lowpass", output="ba", fs=fs)
acc_space_f = signal.filtfilt(b, a, acc_space, axis=0)
# Position and Velocity through integration, assuming 0-velocity at t=0
vel = integrate.cumulative_trapezoid(acc_space_f / k, dx=1.0 / fs,
initial=0, axis=0)
# Use depth derivative (on FLU) for the vertical dimension
if self.has_depth:
pos_z = self.depth
zdiff = np.append([0], np.diff(pos_z))
vel[:, -1] = -zdiff
pos = np.nan * np.ones_like(acc_space)
pos[:, -1] = pos_z
pos[:, :2] = (integrate
.cumulative_trapezoid(vel[:, :2], dx=1.0 / fs,
axis=0, initial=0))
else:
pos = integrate.cumulative_trapezoid(vel, dx=1.0 / fs,
axis=0, initial=0)
return vel, pos
def _get_acceleration(self):
# Acceleration name is the first
return self.imu[self.imu_var_names[0]]
acceleration = property(_get_acceleration)
"""Return acceleration array
Returns
-------
xarray.DataArray
"""
def _get_angular_velocity(self):
# Angular velocity name is the second
return self.imu[self.imu_var_names[1]]
angular_velocity = property(_get_angular_velocity)
"""Return angular velocity array
Returns
-------
xarray.DataArray
"""
def _get_magnetic_density(self):
# Magnetic density name is the last one
return self.imu[self.imu_var_names[-1]]
magnetic_density = property(_get_magnetic_density)
"""Return magnetic_density array
Returns
-------
xarray.DataArray
"""
def _get_depth(self):
return getattr(self.imu, self.depth_name)
depth = property(_get_depth)
"""Return depth array
Returns
-------
xarray.DataArray
"""
def _get_sampling_interval(self):
# Retrieve sampling rate from one DataArray
sampling_rate = self.acceleration.attrs["sampling_rate"]
sampling_rate_units = (self.acceleration
.attrs["sampling_rate_units"])
if sampling_rate_units.lower() == "hz":
itvl = 1.0 / sampling_rate
else:
itvl = sampling_rate
return itvl
sampling_interval = property(_get_sampling_interval)
"""Return sampling interval
Assuming all `DataArray`s have the same interval, the sampling interval
is retrieved from the acceleration `DataArray`.
Returns
-------
float
Warnings
--------
The sampling rate is retrieved from the `sampling_rate` attribute of the
acceleration `DataArray`; it is interpreted as a frequency in Hz when the
`sampling_rate_units` attribute is "hz", and used directly as the
sampling interval otherwise.
""" | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/imutools/imu.py | 0.91055 | 0.622746 | imu.py | pypi |
import logging
import re
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import scipy.signal as signal
import xarray as xr
from skdiveMove.tdrsource import _load_dataset
from .imu import (IMUBase,
_ACCEL_NAME, _OMEGA_NAME, _MAGNT_NAME, _DEPTH_NAME)
_TRIAXIAL_VARS = [_ACCEL_NAME, _OMEGA_NAME, _MAGNT_NAME]
_MONOAXIAL_VARS = [_DEPTH_NAME, "light_levels"]
_AXIS_NAMES = list("xyz")
logger = logging.getLogger(__name__)
# Add the null handler if importing as a library; whoever uses this
# library should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
class IMUcalibrate(IMUBase):
r"""Calibration framework for IMU measurements
Measurements from most IMU sensors are influenced by temperature, among
other artifacts. The IMUcalibrate class implements the following
procedure to remove the effects of temperature from IMU signals:
- For each axis, fit a piecewise or simple linear regression of
measured (lowpass-filtered) data against temperature.
- Compute predicted signal from the model.
- Select a reference temperature :math:`T_{\alpha}` to standardize all
measurements at.
- The standardized measurement (:math:`x_\sigma`) at :math:`T_{\alpha}`
is calculated as:
.. math::
:label: 5
x_\sigma = x - (\hat{x} - \hat{x}_{\alpha})
where :math:`\hat{x}` is the value predicted from the model at the
measured temperature, and :math:`\hat{x}_{\alpha}` is the predicted
value at :math:`T_{\alpha}`.
The models fit to signals from a *motionless* (i.e. experimental) IMU
device in the first step can subsequently be used to remove or minimize
temperature effects from an IMU device measuring motions of interest,
provided the temperature is within the range observed in experiments.
In addition to attributes in :class:`IMUBase`, ``IMUcalibrate`` adds
the attributes listed below.
Attributes
----------
periods : list
List of slices with the beginning and ending timestamps defining
periods in ``x_calib`` where valid calibration data are available.
Periods are assumed to be ordered chronologically.
models_l : list
List of dictionaries as long as there are periods, with each
element corresponding to a sensor, in turn containing another
dictionary with each element corresponding to each sensor axis.
axis_order : list
List of characters specifying which axis ``x``, ``y``, or ``z`` was
pointing in the same direction as gravity in each period in
``periods``.
Examples
--------
Construct IMUcalibrate from NetCDF file with samples of IMU signals and
a list with beginning and ending timestamps for experimental periods:
>>> import pkg_resources as pkg_rsrc
>>> import os.path as osp
>>> import skdiveMove.imutools as imutools
>>> icdf = (pkg_rsrc
... .resource_filename("skdiveMove",
... osp.join("tests", "data",
... "cats_temperature_calib.nc")))
>>> pers = [slice("2021-09-20T09:00:00", "2021-09-21T10:33:00"),
... slice("2021-09-21T10:40:00", "2021-09-22T11:55:00"),
... slice("2021-09-22T12:14:00", "2021-09-23T11:19:00")]
>>> imucal = (imutools.IMUcalibrate
... .read_netcdf(icdf, periods=pers,
... axis_order=list("zxy"),
... time_name="timestamp_utc"))
>>> print(imucal) # doctest: +ELLIPSIS
IMU -- Class IMUcalibrate object
Source File None
IMU: <xarray.Dataset>
Dimensions: (timestamp_utc: 268081, axis: 3)
Coordinates:
* axis (axis) object 'x' 'y' 'z'
* timestamp_utc (timestamp_utc) datetime64[ns] ...
Data variables:
acceleration (timestamp_utc, axis) float64 ...
angular_velocity (timestamp_utc, axis) float64 ...
magnetic_density (timestamp_utc, axis) float64 ...
depth (timestamp_utc) float64 ...
temperature (timestamp_utc) float64 ...
Attributes:...
history: Resampled from 20 Hz to 1 Hz
Periods:
0:['2021-09-20T09:00:00', '2021-09-21T10:33:00']
1:['2021-09-21T10:40:00', '2021-09-22T11:55:00']
2:['2021-09-22T12:14:00', '2021-09-23T11:19:00']
Plot signals from a given period:
>>> fig, axs, axs_temp = imucal.plot_experiment(0, var="acceleration")
Build temperature models for a given variable and chosen
:math:`T_{\alpha}`, without low-pass filtering the input signals:
>>> fs = 1.0
>>> acc_cal = imucal.build_tmodels("acceleration", T_alpha=8,
... use_axis_order=True,
... win_len=int(2 * 60 * fs) - 1)
Plot model of IMU variable against temperature:
>>> fig, axs = imucal.plot_var_model("acceleration",
... use_axis_order=True)
Notes
-----
This class redefines :meth:`IMUBase.read_netcdf`.
"""
def __init__(self, x_calib, periods, axis_order=list("xyz"),
**kwargs):
"""Set up attributes required for calibration
Parameters
----------
x_calib : xarray.Dataset
Dataset with temperature and tri-axial data from *motionless*
IMU experiments. Data are assumed to be in FLU coordinate
frame.
periods : list
List of slices with the beginning and ending timestamps
defining periods in `x_calib` where valid calibration data are
available. Periods are assumed to be ordered chronologically.
axis_order : list
List of characters specifying which axis ``x``, ``y``, or ``z``
was pointing in the same direction as gravity in each period in
``periods``.
**kwargs : optional keyword arguments
Arguments passed to the `IMUBase.__init__` for instantiation.
"""
super(IMUcalibrate, self).__init__(x_calib, **kwargs)
self.periods = periods
models_l = []
for period in periods:
models_1d = {i: dict() for i in _MONOAXIAL_VARS}
models_2d = dict.fromkeys(_TRIAXIAL_VARS)
for k in models_2d:
models_2d[k] = dict.fromkeys(axis_order)
models_l.append(dict(**models_1d, **models_2d))
self.models_l = models_l
self.axis_order = axis_order
# Private attribute collecting DataArrays with standardized data
self._stdda_l = []
@classmethod
def read_netcdf(cls, imu_nc, load_dataset_kwargs=dict(), **kwargs):
"""Create IMUcalibrate from NetCDF file and list of slices
This method redefines :meth:`IMUBase.read_netcdf`.
Parameters
----------
imu_nc : str
Path to NetCDF file.
load_dataset_kwargs : dict, optional
Dictionary of optional keyword arguments passed to
:func:`xarray.load_dataset`.
**kwargs : optional keyword arguments
Additional arguments passed to :meth:`IMUcalibrate.__init__`
method, except ``has_depth`` or ``imu_filename``. The input
``Dataset`` is assumed to have a depth ``DataArray``.
Returns
-------
out : IMUcalibrate
Instance with the loaded dataset and the given calibration periods.
"""
imu = _load_dataset(imu_nc, **load_dataset_kwargs)
ocls = cls(imu, **kwargs)
return ocls
def __str__(self):
super_str = super(IMUcalibrate, self).__str__()
pers_ends = []
for per in self.periods:
pers_ends.append([per.start, per.stop])
msg = ("\n".join("{}:{}".format(i, per)
for i, per in enumerate(pers_ends)))
return super_str + "\nPeriods:\n{}".format(msg)
def savgol_filter(self, var, period_idx, win_len, polyorder=1):
"""Apply Savitzky-Golay filter on tri-axial IMU signals
Parameters
----------
var : str
Name of the variable in ``x`` with tri-axial signals.
period_idx : int
Index of period to plot (zero-based).
win_len : int
Window length for the low pass filter.
polyorder : int, optional
Polynomial order to use.
Returns
-------
xarray.DataArray
Array with filtered signals, with the same coordinates,
dimensions, and updated attributes.
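Examples
--------
Illustrative call, assuming the ``imucal`` instance and the 1 Hz
sampling rate from the class docstring example (window length is an
arbitrary choice):
>>> acc_sg = imucal.savgol_filter("acceleration", 0, 119)  # doctest: +SKIP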
"""
darray = self.subset_imu(period_idx)[var]
var_df = darray.to_dataframe().unstack()
var_sg = signal.savgol_filter(var_df, window_length=win_len,
polyorder=polyorder, axis=0)
new_history = (("{}: Savitzky-Golay filter: win_len={}, "
"polyorder={}\n")
.format(pd.to_datetime("today")
.strftime("%Y-%m-%d"), win_len, polyorder))
darray_new = xr.DataArray(var_sg, coords=darray.coords,
dims=darray.dims, name=darray.name,
attrs=darray.attrs)
darray_new.attrs["history"] = (darray_new.attrs["history"] +
new_history)
return darray_new
def build_tmodels(self, var, T_alpha=None, T_brk=None,
use_axis_order=False, filter_sig=True, **kwargs):
r"""Build temperature models for experimental tri-axial IMU sensor signals
Perform thermal compensation on *motionless* tri-axial IMU sensor
data. A simple approach is used for the compensation:
- For each axis, build a piecewise or simple linear regression of
measured data against temperature. If a breakpoint is known,
as per manufacturer specifications or experimentation, use
piecewise regression.
- Compute predicted signal from the model.
- Select a reference temperature :math:`T_{\alpha}` to
standardize all measurements at.
- The standardized measurement at :math:`T_{\alpha}` is
calculated as :math:`x - (\hat{x} - x_{T_{\alpha}})`, where
:math:`\hat{x}` is the value predicted from the model at the
measured temperature, and :math:`x_{T_{\alpha}}` is the
predicted value at :math:`T_{\alpha}`.
Parameters
----------
var : str
Name of the variable in `x` with tri-axial data.
T_alpha : float, optional
Reference temperature at which all measurements will be
adjusted to. Defaults to the mean temperature for each period,
rounded to the nearest integer.
T_brk : float, optional
Temperature change point separating data to be fit differently.
If provided, a piecewise regression model is fit. By default, a
simple linear model is fit.
use_axis_order : bool, optional
Whether to use axis order from the instance. If True, only one
sensor axis per period is considered to have valid calibration
data for the correction. Otherwise, all three axes for each
period are used in the correction.
filter_sig : bool, optional
Whether to low-pass filter the measured signal before thermal
correction. By default, a Savitzky-Golay filter is applied to the
signal both for characterizing the temperature relationship and for
calculating the standardized signal.
**kwargs : optional keyword arguments
Arguments passed to `savgol_filter` (e.g. ``win_len`` and
``polyorder``).
Returns
-------
list
List of tuples as long as there are periods, with tuple elements:
- Dictionary with regression model objects for each sensor
axis.
- DataFrame with hierarchical column index with sensor axis
label at the first level. The following columns are in the
second level:
- temperature
- `var`
- `var`_pred
- `var`_<T_alpha>C
- `var`_std
Notes
-----
A new DataArray with signal standardized at :math:`T_{\alpha}` is
added to the instance Dataset. These signals correspond to the
lowpass-filtered form of the input used to build the models.
See Also
--------
apply_model
"""
# Iterate through periods
per_l = [] # output list as long as periods
for idx in range(len(self.periods)):
per = self.subset_imu(idx)
# Subset the requested variable, smoothing if necessary
if filter_sig:
per_var = self.savgol_filter(var, idx, **kwargs)
else:
per_var = per[var]
per_temp = per["temperature"]
var_df = xr.merge([per_var, per_temp]).to_dataframe()
if T_alpha is None:
t_alpha = np.rint(per_temp.mean().to_numpy().item())
logger.info("Period {} T_alpha set to {:.2f}"
.format(idx, t_alpha))
else:
t_alpha = T_alpha
odata_l = []
models_d = self.models_l[idx]
if use_axis_order:
axis_names = [self.axis_order[idx]]
elif len(per_var.dims) > 1:
axis_names = per_var[per_var.dims[-1]].to_numpy()
else:
axis_names = [per_var.dims[0]]
std_colname = "{}_std".format(var)
pred_colname = "{}_pred".format(var)
for i, axis in enumerate(axis_names): # do all axes
if isinstance(var_df.index, pd.MultiIndex):
data_axis = var_df.xs(axis, level="axis").copy()
else:
data_axis = var_df.copy()
if T_brk is not None:
temp0 = (data_axis["temperature"]
.where(data_axis["temperature"] < T_brk, 0))
data_axis.loc[:, "temp0"] = temp0
temp1 = (data_axis["temperature"]
.where(data_axis["temperature"] > T_brk, 0))
data_axis.loc[:, "temp1"] = temp1
fmla = "{} ~ temperature + temp0 + temp1".format(var)
else:
fmla = "{} ~ temperature".format(var)
model_fit = smf.ols(formula=fmla, data=data_axis).fit()
models_d[var][axis] = model_fit
data_axis.loc[:, pred_colname] = model_fit.fittedvalues
# Data at reference temperature
ref_colname = "{}_{}C".format(var, t_alpha)
if T_brk is not None:
if t_alpha < T_brk:
pred = model_fit.predict(exog=dict(
temperature=t_alpha,
temp0=t_alpha, temp1=0)).to_numpy().item()
data_axis[ref_colname] = pred
else:
pred = model_fit.predict(exog=dict(
temperature=t_alpha,
temp0=0, temp1=t_alpha)).to_numpy().item()
data_axis[ref_colname] = pred
data_axis.drop(["temp0", "temp1"], axis=1, inplace=True)
else:
pred = model_fit.predict(exog=dict(
temperature=t_alpha)).to_numpy().item()
data_axis.loc[:, ref_colname] = pred
logger.info("Predicted {} ({}, rounded) at {:.2f}: {:.3f}"
.format(var, axis, t_alpha, pred))
data_axis[std_colname] = (data_axis[var] -
(data_axis[pred_colname] -
data_axis[ref_colname]))
odata_l.append(data_axis)
# Update instance models_l attribute
self.models_l[idx][var][axis] = model_fit
if var in _MONOAXIAL_VARS:
odata = pd.concat(odata_l)
std_data = xr.DataArray(odata.loc[:, std_colname],
name=std_colname)
else:
odata = pd.concat(odata_l, axis=1,
keys=axis_names[:i + 1],
names=["axis", "variable"])
std_data = xr.DataArray(odata.xs(std_colname,
axis=1, level=1),
name=std_colname)
per_l.append((models_d, odata))
std_data.attrs = per_var.attrs
new_description = ("{} standardized at {}C"
.format(std_data.attrs["description"],
t_alpha))
std_data.attrs["description"] = new_description
new_history = ("{}: temperature_model: temperature models\n"
.format(pd.to_datetime("today")
.strftime("%Y-%m-%d")))
std_data.attrs["history"] = (std_data.attrs["history"] +
new_history)
# Update instance _std_da_l attribute with DataArray having an
# additional dimension for the period index
std_data = std_data.expand_dims(period=[idx])
self._stdda_l.append(std_data)
return per_l
def plot_experiment(self, period_idx, var, units_label=None, **kwargs):
"""Plot experimental IMU
Parameters
----------
period_idx : int
Index of period to plot (zero-based).
var : str
Name of the variable in the dataset with tri-axial data.
units_label : str, optional
Label for the units of the chosen variable. Defaults to the
"units_label" attribute available in the DataArray.
**kwargs : optional keyword arguments
Arguments passed to :func:`~matplotlib.pyplot.subplots`
(e.g. ``figsize``).
Returns
-------
fig : matplotlib.Figure
axs : array_like
Array of :class:`~matplotlib.axes.Axes` instances in ``fig``
with IMU signal plots.
axs_temp : array_like
Array of :class:`~matplotlib.axes.Axes` instances in ``fig``
with temperature plots.
See Also
--------
plot_var_model
plot_standardized
"""
per_da = self.subset_imu(period_idx)
per_var = per_da[var]
per_temp = per_da["temperature"]
def _plot(var, temp, ax):
"""Plot variable and temperature"""
ax_temp = ax.twinx()
var.plot.line(ax=ax, label="measured", color="k",
linewidth=0.5)
temp.plot.line(ax=ax_temp, label="temperature", color="r",
linewidth=0.5, alpha=0.5)
ax.set_title("")
ax.set_xlabel("")
# Adjust ylim to exclude outliers
ax.set_ylim(var.quantile(1e-5).to_numpy().item(),
var.quantile(1 - 1e-5).to_numpy().item())
# Axis locators and formatters
dlocator = mdates.AutoDateLocator(minticks=3, maxticks=7)
dformatter = mdates.ConciseDateFormatter(dlocator)
ax.xaxis.set_major_locator(dlocator)
ax.xaxis.set_major_formatter(dformatter)
ax.xaxis.set_tick_params(rotation=0)
return ax_temp
if units_label is None:
units_label = per_var.attrs["units_label"]
ylabel_pre = "{} [{}]".format(per_var.attrs["full_name"],
units_label)
temp_label = "{} [{}]".format(per_temp.attrs["full_name"],
per_temp.attrs["units_label"])
ndims = len(per_var.dims)
axs_temp = []
if ndims == 1:
fig, axs = plt.subplots(**kwargs)
ax_temp = _plot(per_var, per_temp, axs)
axs.set_xlabel("")
axs.set_title("")
axs.set_ylabel(ylabel_pre)
ax_temp.set_ylabel(temp_label)
axs_temp.append(ax_temp)
else:
fig, axs = plt.subplots(3, 1, sharex=True, **kwargs)
ax_x, ax_y, ax_z = axs
axis_names = per_var[per_var.dims[-1]].to_numpy()
for i, axis in enumerate(axis_names):
ymeasured = per_var.sel(axis=axis)
ax_temp = _plot(ymeasured, per_temp, axs[i])
axs[i].set_title("")
axs[i].set_xlabel("")
axs[i].set_ylabel("{} {}".format(ylabel_pre, axis))
if i == 1:
ax_temp.set_ylabel(temp_label)
else:
ax_temp.set_ylabel("")
axs_temp.append(ax_temp)
ax_z.set_xlabel("")
return fig, axs, axs_temp
def plot_var_model(self, var, use_axis_order=True, units_label=None,
axs=None, **kwargs):
"""Plot IMU variable against temperature and fitted model
A multi-panel plot of the selected variable against temperature
from all periods.
Parameters
----------
var : str
IMU variable to plot.
use_axis_order : bool
Whether to use axis order from the instance. If True, only one
sensor axis per period is considered to have valid calibration
data for the correction. Otherwise, all three axes for each
period are used in the correction. Ignored for uniaxial
variables.
units_label : str
Label for the units of the chosen variable. Defaults to the
"units_label" attribute available in the DataArray.
axs : array_like, optional
Array of Axes instances to plot in.
**kwargs : optional keyword arguments
Arguments passed to :func:`~matplotlib.pyplot.subplots`
(e.g. ``figsize``).
Returns
-------
fig : matplotlib.Figure
axs : array_like
Array of :class:`~matplotlib.axes.Axes` instances in ``fig``
with IMU signal plots.
See Also
--------
plot_experiment
plot_standardized
"""
def _plot_signal(x, y, idx, model_fit, ax):
ax.plot(x, y, ".", markersize=2, alpha=0.03,
label="Period {}".format(idx))
# Adjust ylim to exclude outliers
ax.set_ylim(np.quantile(y, 1e-3), np.quantile(y, 1 - 1e-3))
# Linear model
xpred = np.linspace(x.min(), x.max())
ypreds = (model_fit
.get_prediction(exog=dict(Intercept=1,
temperature=xpred))
.summary_frame())
ypred_0 = ypreds["mean"]
ypred_l = ypreds["obs_ci_lower"]
ypred_u = ypreds["obs_ci_upper"]
ax.plot(xpred, ypred_0, color="k", alpha=0.5)
ax.plot(xpred, ypred_l, color="k", linestyle="dashed",
linewidth=1, alpha=0.5)
ax.plot(xpred, ypred_u, color="k", linestyle="dashed",
linewidth=1, alpha=0.5)
per0 = self.subset_imu(0)
if units_label is None:
units_label = per0[var].attrs["units_label"]
xlabel = "{} [{}]".format(per0["temperature"].attrs["full_name"],
per0["temperature"].attrs["units_label"])
ylabel_pre = "{} [{}]".format(per0[var].attrs["full_name"],
units_label)
nperiods = len(self.periods)
if axs is not None:
fig = plt.gcf()
if var in _MONOAXIAL_VARS:
if axs is None:
fig, axs = plt.subplots(1, nperiods, **kwargs)
for per_i in range(nperiods):
peri = self.subset_imu(per_i)
per_var = peri[var]
per_temp = peri["temperature"]
xdata = per_temp.to_numpy()
ydata = per_var.to_numpy()
# Linear model
model_fit = self.get_model(var, period=per_i,
axis=per_var.dims[0])
ax_i = axs[per_i]
_plot_signal(x=xdata, y=ydata, idx=per_i,
model_fit=model_fit, ax=ax_i)
ax_i.set_xlabel(xlabel)
axs[0].set_ylabel(ylabel_pre)
elif use_axis_order:
if axs is None:
fig, axs = plt.subplots(3, 1, **kwargs)
axs[-1].set_xlabel(xlabel)
for i, axis in enumerate(_AXIS_NAMES):
idx = self.axis_order.index(axis)
peri = self.subset_imu(idx)
xdata = peri["temperature"].to_numpy()
ydata = peri[var].sel(axis=axis).to_numpy()
# Linear model
model_fit = self.get_model(var, period=idx, axis=axis)
ax_i = axs[i]
_plot_signal(xdata, y=ydata, idx=idx,
model_fit=model_fit, ax=ax_i)
ax_i.set_ylabel("{} {}".format(ylabel_pre, axis))
ax_i.legend(loc=9, bbox_to_anchor=(0.5, 1),
frameon=False, borderaxespad=0)
else:
if axs is None:
fig, axs = plt.subplots(3, nperiods, **kwargs)
for vert_i in range(nperiods):
peri = self.subset_imu(vert_i)
xdata = peri["temperature"].to_numpy()
axs_xyz = axs[:, vert_i]
for i, axis in enumerate(_AXIS_NAMES):
ydata = (peri[var].sel(axis=axis).to_numpy())
# Linear model
model_fit = self.get_model(var, period=vert_i,
axis=axis)
ax_i = axs_xyz[i]
_plot_signal(xdata, y=ydata, idx=vert_i,
model_fit=model_fit, ax=ax_i)
ax_i.set_ylabel("{} {}".format(ylabel_pre, axis))
axs_xyz[0].set_title("Period {}".format(vert_i))
axs_xyz[-1].set_xlabel(xlabel)
return fig, axs
def plot_standardized(self, var, use_axis_order=True, units_label=None,
ref_val=None, axs=None, **kwargs):
r"""Plot IMU measured and standardized variable along with temperature
A multi-panel time series plot of the selected variable, measured
and standardized, for all periods.
Parameters
----------
var : str
IMU variable to plot.
use_axis_order : bool, optional
Whether to use axis order from the instance. If True, only one
sensor axis per period is considered to have valid calibration
data for the correction. Otherwise, all three axes for each
period are used in the correction.
units_label : str, optional
Label for the units of the chosen variable. Defaults to the
"units_label" attribute available in the DataArray.
ref_val : float
Reference value for the chosen variable (e.g. gravity, for
acceleration). If provided, a horizontal line is included in
the plot for reference.
axs : array_like, optional
Array of Axes instances to plot in.
**kwargs : optional keyword arguments
Arguments passed to :func:`~matplotlib.pyplot.subplots`
(e.g. ``figsize``).
Returns
-------
fig : matplotlib.Figure
axs : array_like
Array of :class:`~matplotlib.axes.Axes` instances in ``fig``
with IMU signal plots.
axs_temp : array_like
Array of :class:`~matplotlib.axes.Axes` instances in ``fig``
with temperature plots.
See Also
--------
plot_experiment
plot_var_model
"""
def _plot_signal(ymeasured, ystd, temp, ax, neg_ref=False):
ax_temp = ax.twinx()
(ymeasured.plot.line(ax=ax, label="measured", color="k",
linewidth=0.5))
(ystd.plot.line(ax=ax, label="standardized", color="b",
linewidth=0.5, alpha=0.5))
temp.plot.line(ax=ax_temp, label="temperature", color="r",
linewidth=0.5, alpha=0.5)
txt_desc = ystd.attrs["description"]
# Match integer or decimal reference temperature in the description
t_alpha_match = re.search(r'[-+]?\d+(?:\.\d+)?', txt_desc)
ax_temp.axhline(float(txt_desc[t_alpha_match.start():
t_alpha_match.end()]),
linestyle="dashed", linewidth=1,
color="r", label=r"$T_\alpha$")
q0 = ymeasured.quantile(1e-5).to_numpy().item()
q1 = ymeasured.quantile(1 - 1e-5).to_numpy().item()
if ref_val is not None:
# Assumption of FLU with axes pointing against field
if neg_ref:
ref_i = -ref_val
else:
ref_i = ref_val
ax.axhline(ref_i, linestyle="dashdot", color="m",
linewidth=1, label="reference")
ylim0 = np.minimum(q0, ref_i)
ylim1 = np.maximum(q1, ref_i)
else:
ylim0 = q0
ylim1 = q1
ax.set_title("")
ax.set_xlabel("")
# Adjust ylim to exclude outliers
ax.set_ylim(ylim0, ylim1)
# Axis locators and formatters
dlocator = mdates.AutoDateLocator(minticks=3, maxticks=7)
dformatter = mdates.ConciseDateFormatter(dlocator)
ax.xaxis.set_major_locator(dlocator)
ax.xaxis.set_major_formatter(dformatter)
ax.xaxis.set_tick_params(rotation=0)
return ax_temp
per0 = self.subset_imu(0)
if units_label is None:
units_label = per0[var].attrs["units_label"]
ylabel_pre = "{} [{}]".format(per0[var].attrs["full_name"],
units_label)
var_std = var + "_std"
nperiods = len(self.periods)
if axs is not None:
fig = plt.gcf()
std_ds = xr.merge(self._stdda_l)
if var in _MONOAXIAL_VARS:
if axs is None:
fig, axs = plt.subplots(1, nperiods, **kwargs)
axs_temp = np.empty_like(axs)
for per_i in range(nperiods):
peri = self.subset_imu(per_i)
per_var = peri[var]
per_std = std_ds.loc[dict(period=per_i)][var_std]
per_temp = peri["temperature"]
ax_i = axs[per_i]
ax_temp = _plot_signal(per_var, ystd=per_std,
temp=per_temp, ax=ax_i)
ax_i.set_ylabel(ylabel_pre)
axs_temp[per_i] = ax_temp
# legend at center top panel
axs[1].legend(loc=9, bbox_to_anchor=(0.5, 1.15), ncol=3,
frameon=False, borderaxespad=0)
# Temperature legend at the bottom
axs_temp[1].legend(loc=9, bbox_to_anchor=(0.5, -0.23), ncol=2,
frameon=False, borderaxespad=0)
elif use_axis_order:
if axs is None:
fig, axs = plt.subplots(3, 1, sharex=False, **kwargs)
axs_temp = np.empty_like(axs)
for i, axis in enumerate(_AXIS_NAMES):
idx = self.axis_order.index(axis)
peri = self.subset_imu(idx)
per_var = peri[var].sel(axis=axis, drop=True)
per_std = (std_ds.loc[dict(period=idx)][var_std]
.sel(axis=axis, drop=True))
per_temp = peri["temperature"]
ax_i = axs[i]
if axis == "x":
neg_ref = True
else:
neg_ref = False
ax_temp = _plot_signal(per_var, ystd=per_std,
temp=per_temp, ax=ax_i,
neg_ref=neg_ref)
ax_i.set_xlabel("Period {}".format(idx))
ax_i.set_ylabel("{} {}".format(ylabel_pre, axis))
axs_temp[i] = ax_temp
# legend at top panel
axs[0].legend(loc=9, bbox_to_anchor=(0.5, 1.15), ncol=3,
frameon=False, borderaxespad=0)
# Temperature legend at the bottom
axs_temp[i].legend(loc=9, bbox_to_anchor=(0.5, -0.23), ncol=2,
frameon=False, borderaxespad=0)
else:
if axs is None:
fig, axs = plt.subplots(3, nperiods, **kwargs)
axs_temp = np.empty_like(axs)
for vert_i in range(nperiods):
axs_xyz = axs[:, vert_i]
for i, axis in enumerate(_AXIS_NAMES):
peri = self.subset_imu(vert_i)
per_var = peri[var].sel(axis=axis, drop=True)
per_std = (std_ds.loc[dict(period=vert_i)][var_std]
.sel(axis=axis, drop=True))
per_temp = peri["temperature"]
ax_i = axs_xyz[i]
ax_temp = _plot_signal(per_var, ystd=per_std,
temp=per_temp, ax=ax_i)
axs_temp[i, vert_i] = ax_temp
if vert_i == 0:
ax_i.set_ylabel("{} {}".format(ylabel_pre, axis))
else:
ax_i.set_ylabel("")
axs_xyz[0].set_title("Period {}".format(vert_i))
# legend at bottom panel
leg0 = axs[-1, 1].legend(loc=9, bbox_to_anchor=(0.5, -0.23),
ncol=3, frameon=False, borderaxespad=0)
# Temperature legend at bottom panel
leg1 = axs_temp[-1, 1].legend(loc=9, bbox_to_anchor=(0.5, -0.37),
ncol=2, frameon=False,
borderaxespad=0)
axs[-1, 1].add_artist(leg0)
axs_temp[-1, 1].add_artist(leg1)
return fig, axs, axs_temp
def get_model(self, var, period, axis=None):
"""Retrieve linear model for a given IMU sensor axis signal
Parameters
----------
var : str
Name of the variable to calculate offset for.
period : int
Period containing calibration model to use.
axis : str, optional
Name of the sensor axis the signal comes from, if `var` is
tri-axial; ignored otherwise.
Returns
-------
RegressionResultsWrapper
"""
if var in _MONOAXIAL_VARS:
model_d = self.models_l[period][var]
model_fit = [*model_d.values()][0]
else:
model_fit = self.models_l[period][var][axis]
return model_fit
def get_offset(self, var, period, T_alpha, ref_val, axis=None):
"""Calculate signal ofset at given temperature from calibration model
Parameters
----------
var : str
Name of the variable to calculate offset for.
period : int
Period (zero-based) containing calibration model to use.
T_alpha : float
Temperature at which to compute offset.
ref_val : float
Reference value for the chosen variable (e.g. gravity, for
acceleration).
axis : str, optional
Name of the sensor axis the signal comes from, if ``var`` is
tri-axial; ignored otherwise.
Returns
-------
float
Notes
-----
For obtaining offset and gain of magnetometer signals, the
ellipsoid method from the ``ellipsoid`` module yields far more
accurate results, as it allows for the simultaneous
three-dimensional estimation of the offset.
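Examples
--------
Illustrative call, assuming the ``imucal`` instance from the class
docstring example; the period, temperature, and reference value
(standard gravity) are hypothetical choices:
>>> off_z = imucal.get_offset("acceleration", period=2, T_alpha=8,
...                           ref_val=9.80665, axis="z")  # doctest: +SKIP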
"""
if var in _MONOAXIAL_VARS:
model_fit = self.get_model(var, period=period)
else:
model_fit = self.get_model(var, period=period, axis=axis)
ypred = (model_fit.predict(exog=dict(temperature=T_alpha))
.to_numpy().item())
logger.info("Predicted {} ({}, rounded) at T_alpha: {:.3f}"
.format(var, axis, ypred))
offset = ypred - ref_val
return offset
def apply_model(self, var, dataset, T_alpha=None, ref_vals=None,
use_axis_order=True, model_idx=None):
"""Apply fitted temperature compensation model to Dataset
The selected models for tri-axial sensor data are applied to input
Dataset, standardizing signals at :math:`T_{\alpha}`, optionally
subtracting the offset at :math:`T_{\alpha}`.
Parameters
----------
var : str
Name of the variable with tri-axial data.
dataset : xarray.Dataset
Dataset with temperature and tri-axial data to be standardized.
T_alpha : float, optional
Reference temperature at which all measurements will be
adjusted to. Default is the mean temperature in the input
dataset.
ref_vals : list, optional
Sequence of three floats with target values to compare against
the signal from each sensor axis. If provided, the offset of
each signal at :math:`T_{\alpha}` is computed and subtracted from
the temperature-standardized signal. The order should be the
same as in the `axis_order` attribute if `use_axis_order` is
True, or ``x``, ``y``, ``z`` otherwise.
use_axis_order : bool, optional
Whether to use axis order from the instance. If True, retrieve
model to apply using instance's ``axis_order`` attribute.
Otherwise, use the models defined by ``model_idx`` argument.
Ignored if `var` is monoaxial.
model_idx : list or int, optional
Sequence of three integers identifying the period (zero-based)
from which to retrieve the models for ``x``, ``y``, and ``z``
sensor axes, in that order. If ``var`` is monoaxial, an integer
specifying the period for the model to use. Ignored if
``use_axis_order`` is True.
Returns
-------
xarray.DataArray
Standardized signal for `var`.
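Examples
--------
Illustrative call, assuming the ``imucal`` instance from the class
docstring example and a hypothetical Dataset ``imu_field`` with the
signals to be corrected:
>>> acc_std = imucal.apply_model("acceleration", imu_field,
...                              T_alpha=8, use_axis_order=True)  # doctest: +SKIP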
"""
temp_obs = dataset["temperature"]
darray = dataset[var]
if T_alpha is None:
T_alpha = temp_obs.mean().item()
logger.info("T_alpha set to {:.2f}".format(T_alpha))
def _standardize_array(darray, model_fit, period_idx, axis=None):
x_hat = (model_fit
.get_prediction(exog=dict(Intercept=1,
temperature=temp_obs))
.predicted_mean)
x_alpha = (model_fit
.get_prediction(exog=dict(Intercept=1,
temperature=T_alpha))
.predicted_mean)
x_sigma = darray - (x_hat - x_alpha)
if ref_vals is not None:
off = self.get_offset(var, axis=axis, period=period_idx,
T_alpha=T_alpha,
ref_val=ref_vals[period_idx])
x_sigma -= off
return x_sigma
darray_l = []
if var in _MONOAXIAL_VARS:
model_fit = self.get_model(var, period=model_idx)
x_sigma = _standardize_array(darray, model_fit=model_fit,
period_idx=model_idx)
darray_l.append(x_sigma)
elif use_axis_order:
for i, axis in enumerate(_AXIS_NAMES):
idx = self.axis_order.index(axis)
model_fit = self.get_model(var, period=idx, axis=axis)
x_i = darray.sel(axis=axis)
x_sigma = _standardize_array(x_i, model_fit=model_fit,
period_idx=idx, axis=axis)
darray_l.append(x_sigma)
else:
for i, axis in enumerate(_AXIS_NAMES):
model_fit = self.get_model(var, period=model_idx[i],
axis=axis)
x_i = darray.sel(axis=axis)
x_sigma = _standardize_array(x_i, model_fit=model_fit,
period_idx=model_idx[i],
axis=axis)
darray_l.append(x_sigma)
if len(darray_l) > 1:
darray_new = xr.concat(darray_l, dim="axis").transpose()
else:
darray_new = darray_l[0]
darray_new.attrs = darray.attrs
new_history = ("{}: Applied temperature model at: T={}\n"
.format(pd.to_datetime("today")
.strftime("%Y-%m-%d"), T_alpha))
darray_new.attrs["history"] = (darray_new.attrs["history"] +
new_history)
return darray_new
def subset_imu(self, period_idx):
"""Subset IMU dataset given a period index
The dataset is subset using the slice corresponding to the period
index.
Parameters
----------
period_idx : int
Index of the experiment period to subset.
Returns
-------
xarray.Dataset
"""
time_name = self.time_name
return self.imu.loc[{time_name: self.periods[period_idx]}] | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/imutools/imucalibrate.py | 0.785884 | 0.480662 | imucalibrate.py | pypi |
import numpy as np
# Types of ellipsoid accepted fits
_ELLIPSOID_FTYPES = ["rxyz", "xyz", "xy", "xz", "yz", "sxyz"]
def fit_ellipsoid(vectors, f="rxyz"):
"""Fit a (non) rotated ellipsoid or sphere to 3D vector data
Parameters
----------
vectors: (N,3) array
Array of measured x, y, z vector components.
f: str
String indicating the model to fit (one of 'rxyz', 'xyz', 'xy',
'xz', 'yz', or 'sxyz'):
rxyz : rotated ellipsoid (any axes)
xyz : non-rotated ellipsoid
xy : radius x=y
xz : radius x=z
yz : radius y=z
sxyz : radius x=y=z sphere
Returns
-------
otuple : tuple
Tuple with offset, gain, and rotation matrix, in that order.
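Examples
--------
A minimal sketch with synthetic, offset measurements on a sphere of
radius 2 (values are illustrative only):
>>> import numpy as np
>>> rng = np.random.default_rng(123)
>>> xyz = rng.standard_normal((500, 3))
>>> xyz = 2 * xyz / np.linalg.norm(xyz, axis=1, keepdims=True)
>>> vectors = xyz + np.array([0.1, -0.2, 0.3])
>>> offset, gain, rotM = fit_ellipsoid(vectors, f="sxyz")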
"""
if f not in _ELLIPSOID_FTYPES:
raise ValueError("f must be one of: {}"
.format(_ELLIPSOID_FTYPES))
x = vectors[:, 0, np.newaxis]
y = vectors[:, 1, np.newaxis]
z = vectors[:, 2, np.newaxis]
if f == "rxyz":
D = np.hstack((x ** 2, y ** 2, z ** 2,
2 * x * y, 2 * x * z, 2 * y * z,
2 * x, 2 * y, 2 * z))
elif f == "xyz":
D = np.hstack((x ** 2, y ** 2, z ** 2,
2 * x, 2 * y, 2 * z))
elif f == "xy":
D = np.hstack((x ** 2 + y ** 2, z ** 2,
2 * x, 2 * y, 2 * z))
elif f == "xz":
D = np.hstack((x ** 2 + z ** 2, y ** 2,
2 * x, 2 * y, 2 * z))
elif f == "yz":
D = np.hstack((y ** 2 + z ** 2, x ** 2,
2 * x, 2 * y, 2 * z))
else: # sxyz
D = np.hstack((x ** 2 + y ** 2 + z ** 2,
2 * x, 2 * y, 2 * z))
v = np.linalg.lstsq(D, np.ones(D.shape[0]), rcond=None)[0]
if f == "rxyz":
A = np.array([[v[0], v[3], v[4], v[6]],
[v[3], v[1], v[5], v[7]],
[v[4], v[5], v[2], v[8]],
[v[6], v[7], v[8], -1]])
ofs = np.linalg.lstsq(-A[:3, :3], v[[6, 7, 8]], rcond=None)[0]
Tmtx = np.eye(4)
Tmtx[3, :3] = ofs
AT = Tmtx @ A @ Tmtx.T # ellipsoid translated to 0, 0, 0
ev, rotM = np.linalg.eig(AT[:3, :3] / -AT[3, 3])
rotM = np.fliplr(rotM)
ev = np.flip(ev)
gain = np.sqrt(1.0 / ev)
else:
if f == "xyz":
v = np.array([v[0], v[1], v[2], 0, 0, 0, v[3], v[4], v[5]])
elif f == "xy":
v = np.array([v[0], v[0], v[1], 0, 0, 0, v[2], v[3], v[4]])
elif f == "xz":
v = np.array([v[0], v[1], v[0], 0, 0, 0, v[2], v[3], v[4]])
elif f == "yz":
v = np.array([v[1], v[0], v[0], 0, 0, 0, v[2], v[3], v[4]])
else:
v = np.array([v[0], v[0], v[0], 0, 0, 0, v[1], v[2], v[3]])
ofs = -(v[6:] / v[:3])
rotM = np.eye(3)
g = 1 + (v[6] ** 2 / v[0] + v[7] ** 2 / v[1] + v[8] ** 2 / v[2])
gain = (np.sqrt(g / v[:3]))
return (ofs, gain, rotM)
def _refine_ellipsoid_fit(gain, rotM):
"""Refine ellipsoid fit"""
# m = 0
# rm = 0
# cm = 0
pass
def apply_ellipsoid(vectors, offset, gain, rotM, ref_r):
"""Apply ellipsoid fit to vector array"""
vectors_new = vectors.copy() - offset
vectors_new = vectors_new @ rotM
# Scale to sphere
vectors_new = vectors_new / gain * ref_r
return vectors_new | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/imutools/ellipsoid.py | 0.849441 | 0.810066 | ellipsoid.py | pypi |
import logging
from abc import ABCMeta, abstractmethod
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from skdiveMove.helpers import rle_key
logger = logging.getLogger(__name__)
# Add the null handler if importing as a library; whoever uses this
# library should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
def nlsLL(x, coefs):
r"""Generalized log-likelihood for Random Poisson mixtures
This is a generalized form taking any number of Poisson processes.
Parameters
----------
x : array_like
Independent data array described by the function
coefs : array_like
2-D array with coefficients ('a', :math:`\lambda`) in rows for each
process of the model in columns.
Returns
-------
out : array_like
Same shape as `x` with the evaluated log-likelihood.
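Examples
--------
A minimal sketch with two hypothetical processes in columns and
coefficients "a" and "lambda" in rows:
>>> import numpy as np
>>> import pandas as pd
>>> coefs = pd.DataFrame({"fast": [73.0, 0.3], "slow": [5.0, 0.01]},
...                      index=["a", "lambda"])
>>> ll = nlsLL(np.arange(1.0, 20.0), coefs)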
"""
def calc_term(params):
return params[0] * params[1] * np.exp(-params[1] * x)
terms = np.apply_along_axis(calc_term, 0, coefs)
terms_sum = terms.sum(1)
if np.any(terms_sum <= 0):
logger.warning("Negative values at: {}".format(coefs))
return np.log(terms_sum)
def calc_p(coefs):
r"""Calculate `p` (proportion) parameter from `a` coefficients
Parameters
----------
coefs : pandas.DataFrame
DataFrame with model coefficients in columns, and indexed by
parameter names "a" and "lambda".
Returns
-------
p : list
Proportion parameters implied in `coefs`.
lambdas : pandas.Series
A series with the :math:`\lambda` parameters from `coefs`.
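Examples
--------
A minimal sketch with two hypothetical processes:
>>> import pandas as pd
>>> coefs = pd.DataFrame({"fast": [73.0, 0.3], "slow": [5.0, 0.01]},
...                      index=["a", "lambda"])
>>> p, lambdas = calc_p(coefs)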
"""
ncoefs = coefs.shape[1]
coef_arr = np.arange(ncoefs)
pairs = [(i, i + 1) for i in coef_arr[:-1]]
p_ll = [] # build mixing ratios
for pair in pairs:
procn1 = coefs.columns[pair[0]] # name of process 1
procn2 = coefs.columns[pair[1]] # name of process 2
a1 = coefs.loc["a", procn1]
a2 = coefs.loc["a", procn2]
p_i = a1 / (a1 + a2)
p_ll.append(p_i)
return (p_ll, coefs.loc["lambda"])
def ecdf(x, p, lambdas):
r"""Estimated cumulative frequency for Poisson mixture models
ECDF for two- or three-process mixture models.
Parameters
----------
x : array_like
Independent data array described by model with parameters `p`,
:math:`\lambda_f`, and :math:`\lambda_s`.
p : list
List with mixing parameters of the model.
lambdas : pandas.Series
Series with the density parameters (:math:`\lambda`) of the
model. Its length must be length(p) + 1.
Returns
-------
out : array_like
Same shape as `x` with the evaluated function.
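Examples
--------
A minimal sketch evaluating a two-process model at a few hypothetical
points:
>>> import numpy as np
>>> import pandas as pd
>>> lambdas = pd.Series([0.3, 0.01], name="lambda")
>>> cdf = ecdf(np.array([1.0, 10.0, 100.0]), p=[0.9], lambdas=lambdas)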
"""
ncoefs = lambdas.size
# We assume at least two processes
p0 = p[0]
lda0 = lambdas.iloc[0]
term0 = 1 - p0 * np.exp(-lda0 * x)
if ncoefs == 2:
lda1 = lambdas.iloc[1]
term1 = (1 - p0) * np.exp(-lda1 * x)
cdf = term0 - term1
elif ncoefs == 3:
p1 = p[1]
lda1 = lambdas.iloc[1]
term1 = p1 * (1 - p0) * np.exp(-lda1 * x)
lda2 = lambdas.iloc[2]
term2 = (1 - p0) * (1 - p1) * np.exp(-lda2 * x)
cdf = term0 - term1 - term2
else:
msg = "Only mixtures of <= 3 processes are implemented"
raise KeyError(msg)
return cdf
def label_bouts(x, bec, as_diff=False):
"""Classify data into bouts based on bout ending criteria
Parameters
----------
x : pandas.Series
Series with data to classify according to `bec`.
bec : array_like
Array with bout-ending criteria. It is assumed to be sorted.
as_diff : bool, optional
Whether to apply `diff` on `x` so it matches `bec`'s scale.
Returns
-------
out : numpy.ndarray
Integer array with the same shape as `x`.
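Examples
--------
A minimal sketch labelling hypothetical postdive durations with a
single bout-ending criterion of 30 s:
>>> import numpy as np
>>> import pandas as pd
>>> x = pd.Series([5.0, 10.0, 120.0, 8.0, 6.0, 200.0, 7.0])
>>> bout_ids = label_bouts(x, np.array([30.0]))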
"""
if as_diff:
xx = x.diff().fillna(0)
else:
xx = x.copy()
xx_min = np.array(xx.min())
xx_max = np.array(xx.max())
brks = np.append(np.append(xx_min, bec), xx_max)
xx_cat = pd.cut(xx, bins=brks, include_lowest=True)
xx_bouts = rle_key(xx_cat)
return xx_bouts
def _plot_bec(bec_x, bec_y, ax, xytext, horizontalalignment="left"):
"""Plot bout-ending criteria on `Axes`
Private helper function only for convenience here.
Parameters
----------
bec_x : numpy.ndarray, shape (n,)
x coordinate for bout-ending criteria.
bec_y : numpy.ndarray, shape (n,)
y coordinate for bout-ending criteria.
ax : matplotlib.Axes
An Axes instance to use as target.
xytext : 2-tuple
Argument passed to `matplotlib.annotate`; interpreted with
textcoords="offset points".
horizontalalignment : str
Argument passed to `matplotlib.annotate`.
"""
ylims = ax.get_ylim()
ax.vlines(bec_x, ylims[0], bec_y, linestyle="--")
ax.scatter(bec_x, bec_y, c="r", marker="v")
# Annotations
fmtstr = "bec_{0} = {1:.3f}"
if bec_x.size == 1:
bec_x = bec_x.item()
ax.annotate(fmtstr.format(0, bec_x),
(bec_x, bec_y), xytext=xytext,
textcoords="offset points",
horizontalalignment=horizontalalignment)
else:
for i, bec_i in enumerate(bec_x):
ax.annotate(fmtstr.format(i, bec_i),
(bec_i, bec_y[i]), xytext=xytext,
textcoords="offset points",
horizontalalignment=horizontalalignment)
class Bouts(metaclass=ABCMeta):
"""Abstract base class for models of log-transformed frequencies
This is a base class for other classes to build on, and do the model
fitting. `Bouts` is an abstract base class to set up bout
identification procedures. Subclasses must implement `fit` and `bec`
methods, or re-use the default NLS methods in `Bouts`.
Attributes
----------
x : array_like
1D array with input data.
method : str
Method used for calculating the histogram.
lnfreq : pandas.DataFrame
DataFrame with the centers of histogram bins, and corresponding
log-frequencies of `x`.
"""
def __init__(self, x, bw, method="standard"):
"""Histogram of log transformed frequencies of `x`
Parameters
----------
x : array_like
1D array with data where bouts will be identified based on
`method`.
bw : float
Bin width for the histogram
method : {"standard", "seq_diff"}, optional
Method to use for calculating the frequencies: "standard"
simply uses `x`, which "seq_diff" uses the sequential
differences method.
"""
self.x = x
self.method = method
if method == "standard":
upper = x.max()
brks = np.arange(x.min(), upper, bw)
if brks[-1] < upper:
brks = np.append(brks, brks[-1] + bw)
h, edges = np.histogram(x, bins=brks)
elif method == "seq_diff":
x_diff = np.abs(np.diff(x))
upper = x_diff.max()
brks = np.arange(0, upper, bw)
if brks[-1] < upper:
brks = np.append(brks, brks[-1] + bw)
h, edges = np.histogram(x_diff, bins=brks)
ctrs = edges[:-1] + np.diff(edges) / 2
ok = h > 0
ok_at = np.where(ok)[0] + 1 # 1-based indices
freq_adj = h[ok] / np.diff(np.insert(ok_at, 0, 0))
self.lnfreq = pd.DataFrame({"x": ctrs[ok],
"lnfreq": np.log(freq_adj)})
def __str__(self):
method = self.method
lnfreq = self.lnfreq
objcls = ("Class {} object\n".format(self.__class__.__name__))
meth_str = "{0:<20} {1}\n".format("histogram method: ", method)
lnfreq_str = ("{0:<20}\n{1}"
.format("log-frequency histogram:",
lnfreq.describe()))
return objcls + meth_str + lnfreq_str
def init_pars(self, x_break, plot=True, ax=None, **kwargs):
"""Find starting values for mixtures of random Poisson processes
Starting values are calculated using the "broken stick" method.
Parameters
----------
x_break : array_like
One- or two-element array with values determining the break(s)
for broken stick model, such that x < x_break[0] is the first
process, x_break[0] <= x < x_break[1] is the second process,
and x >= x_break[1] is the third one.
plot : bool, optional
Whether to plot the broken stick model.
ax : matplotlib.Axes, optional
An Axes instance to use as target. Default is to create one.
**kwargs : optional keyword arguments
Passed to plotting function.
Returns
-------
out : pandas.DataFrame
DataFrame with coefficients for each process.
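Examples
--------
Illustrative call on a concrete subclass instance (e.g. ``BoutsMLE``);
the data array ``postdives`` and the break value are hypothetical:
>>> bouts_mle = BoutsMLE(postdives, bw=0.1)  # doctest: +SKIP
>>> start = bouts_mle.init_pars([50], plot=False)  # doctest: +SKIP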
"""
nproc = len(x_break)
if (nproc > 2):
msg = "x_break must be length <= 2"
raise IndexError(msg)
lnfreq = self.lnfreq
ctrs = lnfreq["x"]
xmin = ctrs.min()
xmax = ctrs.max()
xbins = [xmin]
xbins.extend(x_break)
xbins.extend([xmax])
procf = pd.cut(ctrs, bins=xbins, right=True,
include_lowest=True)
lnfreq_grp = lnfreq.groupby(procf)
coefs_ll = []
for name, grp in lnfreq_grp:
fit = smf.ols("lnfreq ~ x", data=grp).fit()
coefs_ll.append(fit.params.rename(name))
coefs = pd.concat(coefs_ll, axis=1)
def calculate_pars(p):
"""Poisson process parameters from linear model
"""
lda = -p["x"]
a = np.exp(p["Intercept"]) / lda
return pd.Series({"a": a, "lambda": lda})
pars = coefs.apply(calculate_pars)
if plot:
if ax is None:
ax = plt.gca()
freq_min = lnfreq["lnfreq"].min()
freq_max = lnfreq["lnfreq"].max()
for name, grp in lnfreq_grp:
ax.scatter(x="x", y="lnfreq", data=grp, label=name)
# Plot current "stick"
coef_i = coefs[name]
y_stick = coef_i["Intercept"] + ctrs * coef_i["x"]
# Limit the "stick" line to min/max of data
ok = (y_stick >= freq_min) & (y_stick <= freq_max)
ax.plot(ctrs[ok], y_stick[ok], linestyle="--")
x_pred = np.linspace(xmin, xmax, num=101) # matches R's curve
y_pred = nlsLL(x_pred, pars)
ax.plot(x_pred, y_pred, alpha=0.5, label="model")
ax.legend(loc="upper right")
ax.set_xlabel("x")
ax.set_ylabel("log frequency")
return pars
@abstractmethod
def fit(self, start, **kwargs):
"""Fit Poisson mixture model to log frequencies
Default is non-linear least squares method.
Parameters
----------
start : pandas.DataFrame
DataFrame with coefficients for each process in columns.
**kwargs : optional keyword arguments
Passed to `scipy.optimize.curve_fit`.
Returns
-------
coefs : pandas.DataFrame
Coefficients of the model.
pcov : 2D array
Covariance of coefs.
"""
lnfreq = self.lnfreq
xdata = lnfreq["x"]
ydata = lnfreq["lnfreq"]
def _nlsLL(x, *args):
"""Wrapper to nlsLL to allow for array argument"""
# Pass in original shape, damn it! Note order="F" needed
coefs = np.array(args).reshape(start.shape, order="F")
return nlsLL(x, coefs)
# Rearrange starting values into a 1D array (needs to be flat)
init_flat = start.to_numpy().T.reshape((start.size,))
popt, pcov = curve_fit(_nlsLL, xdata, ydata,
p0=init_flat, **kwargs)
# Reshape coefs back into init shape
coefs = pd.DataFrame(popt.reshape(start.shape, order="F"),
columns=start.columns, index=start.index)
return (coefs, pcov)
@abstractmethod
def bec(self, coefs):
"""Calculate bout ending criteria from model coefficients
Implementing default as from NLS method.
Parameters
----------
coefs : pandas.DataFrame
DataFrame with model coefficients in columns, and indexed by
parameter names "a" and "lambda".
Returns
-------
out : numpy.ndarray, shape (n,)
1-D array with BECs implied by `coefs`. Length is
coefs.shape[1] - 1.
"""
# Find bec's per process by pairing columns
ncoefs = coefs.shape[1]
coef_arr = np.arange(ncoefs)
pairs = [(i, i + 1) for i in coef_arr[:-1]]
becs = []
for pair in pairs:
procn1 = coefs.columns[pair[0]] # name of process 1
procn2 = coefs.columns[pair[1]] # name of process 2
a1 = coefs.loc["a", procn1]
lambda1 = coefs.loc["lambda", procn1]
a2 = coefs.loc["a", procn2]
lambda2 = coefs.loc["lambda", procn2]
bec = (np.log((a1 * lambda1) / (a2 * lambda2)) /
(lambda1 - lambda2))
becs.append(bec)
return np.array(becs)
def plot_fit(self, coefs, ax=None):
"""Plot log frequency histogram and fitted model
Parameters
----------
coefs : pandas.DataFrame
DataFrame with model coefficients in columns, and indexed by
parameter names "a" and "lambda".
ax : matplotlib.Axes instance
An Axes instance to use as target.
Returns
-------
ax : `matplotlib.Axes`
"""
lnfreq = self.lnfreq
ctrs = lnfreq["x"]
xmin = ctrs.min()
xmax = ctrs.max()
x_pred = np.linspace(xmin, xmax, num=101) # matches R's curve
y_pred = nlsLL(x_pred, coefs)
if ax is None:
ax = plt.gca()
# Plot data
ax.scatter(x="x", y="lnfreq", data=lnfreq,
alpha=0.5, label="histogram")
# Plot predicted
ax.plot(x_pred, y_pred, alpha=0.5, label="model")
# Plot BEC (note this plots all BECs in becx)
bec_x = self.bec(coefs) # need an array for nlsLL
bec_y = nlsLL(bec_x, coefs)
_plot_bec(bec_x, bec_y, ax=ax, xytext=(5, 5))
ax.legend(loc=8, bbox_to_anchor=(0.5, 1), frameon=False,
borderaxespad=0.1, ncol=2)
ax.set_xlabel("x")
ax.set_ylabel("log frequency")
return ax
def _plot_ecdf(x_pred_expm1, y_pred, ax):
"""Plot Empirical Frequency Distribution
Plot the ECDF at predicted x and corresponding y locations.
Parameters
----------
x_pred_expm1 : numpy.ndarray, shape (n,)
Values of the variable (back-transformed from the log1p scale) at
which to plot the ECDF.
y_pred : numpy.ndarray, shape (n,)
Values of the ECDF at `x_pred`.
ax : matplotlib.axes.Axes
An Axes instance to use as target.
"""
pass | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/bouts/bouts.py | 0.929424 | 0.57678 | bouts.py | pypi |
import logging
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from scipy.special import logit, expit
from statsmodels.distributions.empirical_distribution import ECDF
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
from . import bouts
logger = logging.getLogger(__name__)
# Add the null handler if importing as a library; whoever uses this
# library should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
def mleLL(x, p, lambdas):
r"""Random Poisson processes function
The current implementation takes two or three random Poisson processes.
Parameters
----------
x : array_like
Independent data array described by model with parameters `p`,
:math:`\lambda_f`, and :math:`\lambda_s`.
p : list
List with mixing parameters of the model.
lambdas : array_like
1-D Array with the density parameters (:math:`\lambda`) of the
model. Its length must be length(p) + 1.
Returns
-------
out : array_like
Same shape as `x` with the evaluated function.
"""
logmsg = "p={0}, lambdas={1}".format(p, lambdas)
logger.info(logmsg)
nproc = lambdas.size
# We assume at least two processes
p0 = p[0]
lda0 = lambdas[0]
term0 = p0 * lda0 * np.exp(-lda0 * x)
if nproc == 2:
lda1 = lambdas[1]
term1 = (1 - p0) * lda1 * np.exp(-lda1 * x)
res = term0 + term1
else: # 3 processes; the number of processes is validated by the caller
p1 = p[1]
lda1 = lambdas[1]
term1 = p1 * (1 - p0) * lda1 * np.exp(-lda1 * x)
lda2 = lambdas[2]
term2 = (1 - p1) * (1 - p0) * lda2 * np.exp(-lda2 * x)
res = term0 + term1 + term2
return np.log(res)
class BoutsMLE(bouts.Bouts):
r"""Maximum Likelihood estimation for models of Poisson process mixtures
Methods for modelling log-frequency data as a mixture of Poisson
processes via maximum likelihood estimation [2]_, [3]_. Mixtures of
two or three Poisson processes are supported.
Even in these relatively simple cases, it is very important to provide
good starting values for the parameters.
One useful strategy to get good starting parameter values is to proceed
in 4 steps. First, fit a broken stick model to the log frequencies of
binned data (see :meth:`~Bouts.init_pars`), to obtain estimates of 4
parameters in a 2-process model [1]_, or 6 in a 3-process model.
Second, calculate parameter(s) :math:`p` from the :math:`\alpha`
parameters obtained by fitting the broken stick model, to get tentative
initial values as in [2]_. Third, obtain MLE estimates for these
parameters, but using a reparameterized version of the -log L2
function. Lastly, obtain the final MLE estimates for the three
parameters by using the estimates from step 3, un-transformed back to
their original scales, maximizing the original parameterization of the
-log L2 function.
:meth:`~Bouts.init_pars` can be used to perform step 1. Calculation of
the mixing parameters :math:`p` in step 2 is trivial from these
estimates. Method :meth:`negMLEll` calculates the negative
log-likelihood for a reparameterized version of the -log L2 function
given by [1]_, so can be used for step 3. This uses a logit
transformation of the mixing parameter :math:`p`, and log
transformations for density parameters :math:`\lambda`. Method
:meth:`negMLEll` is used again to compute the -log L2 function
corresponding to the un-transformed model for step 4.
The :meth:`fit` method performs the main job of maximizing the -log L2
functions, and is essentially a wrapper around
:func:`~scipy.optimize.minimize`. It only takes the -log L2 function,
a `DataFrame` of starting values, and the variable to be modelled, all
of which are passed to :func:`~scipy.optimize.minimize` for
optimization. Additionally, any other arguments are also passed to
:func:`~scipy.optimize.minimize`, hence great control is provided for
fitting any of the -log L2 functions.
In practice, step 3 does not pose major problems using the
reparameterized -log L2 function, but it might be useful to use method
'L-BFGS-B' with appropriate lower and upper bounds. Step 4 can be a
bit more problematic, because the parameters are usually on very
different scales and there can be multiple minima. Therefore, it is
almost always the rule to use method 'L-BFGS-B', again bounding the
parameter search, as well as other settings for controlling the
optimization.
References
----------
.. [2] Langton, S.; Collett, D. and Sibly, R. (1995) Splitting
behaviour into bouts; a maximum likelihood approach. Behaviour 132,
9-10.
.. [3] Luque, S.P. and Guinet, C. (2007) A maximum likelihood approach
for identifying dive bouts improves accuracy, precision, and
objectivity. Behaviour, 144, 1315-1332.
Examples
--------
See :doc:`demo_simulbouts` for a detailed example.
"""
def negMLEll(self, params, x, istransformed=True):
r"""Log likelihood function of parameters given observed data
Parameters
----------
params : array_like
1-D array with parameters to fit. Currently must be either
3-length, with mixing parameter :math:`p`, density parameter
:math:`\lambda_f` and :math:`\lambda_s`, in that order, or
5-length, with :math:`p_f`, :math:`p_fs`, :math:`\lambda_f`,
:math:`\lambda_m`, :math:`\lambda_s`.
x : array_like
Independent data array described by model with parameters
`params`.
istransformed : bool
Whether `params` are transformed and need to be un-transformed
to calculate the likelihood.
Returns
-------
out : float
Negative log-likelihood of the data given `params`.
"""
if len(params) == 3:
# Need list `p` for mle_fun
p = [params[0]]
lambdas = params[1:]
elif len(params) == 5:
p = params[:2]
lambdas = params[2:]
else:
msg = "Only mixtures of <= 3 processes are implemented"
raise KeyError(msg)
if istransformed:
p = expit(p)
lambdas = np.exp(lambdas)
ll = -sum(mleLL(x, p, lambdas))
logger.info("LL={}".format(ll))
return ll
def fit(self, start, fit1_opts=None, fit2_opts=None):
"""Maximum likelihood estimation of log frequencies
Parameters
----------
start : pandas.DataFrame
DataFrame with starting values for coefficients of each process
in columns. These can come from the "broken stick" method as
in :meth:`Bouts.init_pars`, and will be transformed to minimize
the first log likelihood function.
fit1_opts, fit2_opts : dict
Dictionaries with keywords to be passed to
:func:`scipy.optimize.minimize`, for the first and second fits.
Returns
-------
fit1, fit2 : scipy.optimize.OptimizeResult
Objects with the optimization result from the first and second
fit, having a `x` attribute with coefficients of the solution.
Notes
-----
Current implementation handles mixtures of two Poisson processes.
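Examples
--------
Illustrative two-process fit; the ``bouts_mle`` instance, starting
values ``start``, and optimizer options are hypothetical:
>>> opts1 = dict(method="L-BFGS-B")  # doctest: +SKIP
>>> opts2 = dict(method="L-BFGS-B")  # doctest: +SKIP
>>> fit1, fit2 = bouts_mle.fit(start, fit1_opts=opts1,
...                            fit2_opts=opts2)  # doctest: +SKIP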
"""
# Calculate `p`
p0, lambda0 = bouts.calc_p(start)
# transform parameters for first fit
lambda0 = np.log(lambda0)
x0 = np.array([*logit(p0), *lambda0])
logger.info("Starting first fit")
if fit1_opts:
fit1 = minimize(self.negMLEll, x0=x0, args=(self.x,),
**fit1_opts)
else:
fit1 = minimize(self.negMLEll, x0=x0, args=(self.x,))
coef0 = fit1.x
start2 = [expit(coef0[0]), *np.exp(coef0[1:])]
logger.info("Starting second fit")
if fit2_opts:
fit2 = minimize(self.negMLEll, x0=start2,
args=(self.x, False), **fit2_opts)
else:
fit2 = minimize(self.negMLEll, x0=start2,
args=(self.x, False))
logger.info("N iter fit 1: {0}, fit 2: {1}"
.format(fit1.nit, fit2.nit))
return (fit1, fit2)
def bec(self, fit):
"""Calculate bout ending criteria from model coefficients
Parameters
----------
fit : scipy.optimize.OptimizeResult
Object with the optimization result, having a `x` attribute
with coefficients of the solution.
Returns
-------
out : numpy.ndarray
Notes
-----
Current implementation is for a two-process mixture, hence an array
of a single float is returned.
"""
coefs = fit.x
if len(coefs) == 3:
p_hat = coefs[0]
lda1_hat = coefs[1]
lda2_hat = coefs[2]
bec = (np.log((p_hat * lda1_hat) /
((1 - p_hat) * lda2_hat)) /
(lda1_hat - lda2_hat))
elif len(coefs) == 5:
p0_hat, p1_hat = coefs[:2]
lda0_hat, lda1_hat, lda2_hat = coefs[2:]
bec0 = (np.log((p0_hat * lda0_hat) /
((1 - p0_hat) * lda1_hat)) /
(lda0_hat - lda1_hat))
bec1 = (np.log((p1_hat * lda1_hat) /
((1 - p1_hat) * lda2_hat)) /
(lda1_hat - lda2_hat))
bec = [bec0, bec1]
return np.array(bec)
def plot_fit(self, fit, ax=None):
"""Plot log frequency histogram and fitted model
Parameters
----------
fit : scipy.optimize.OptimizeResult
Object with the optimization result, having a `x` attribute
with coefficients of the solution.
ax : matplotlib.axes.Axes instance
An Axes instance to use as target.
Returns
-------
ax :
            :class:`~matplotlib.axes.Axes` instance.
"""
# Method is redefined from Bouts
x = self.x
coefs = fit.x
if len(coefs) == 3:
p_hat = [coefs[0]]
lda_hat = coefs[1:]
elif len(coefs) == 5:
p_hat = coefs[:2]
lda_hat = coefs[2:]
xmin = x.min()
xmax = x.max()
x_pred = np.linspace(xmin, xmax, num=101) # matches R's curve
# Need to transpose to unpack columns rather than rows
y_pred = mleLL(x_pred, p_hat, lda_hat)
if ax is None:
ax = plt.gca()
# Data rug plot
ax.plot(x, np.ones_like(x) * y_pred.max(), "|",
color="k", label="observed")
# Plot predicted
ax.plot(x_pred, y_pred, label="model")
# Plot BEC
bec_x = self.bec(fit)
bec_y = mleLL(bec_x, p_hat, lda_hat)
bouts._plot_bec(bec_x, bec_y, ax=ax, xytext=(5, 5))
ax.legend(loc=8, bbox_to_anchor=(0.5, 1), frameon=False,
borderaxespad=0.1, ncol=2)
ax.set_xlabel("x")
ax.set_ylabel("log frequency")
return ax
def plot_ecdf(self, fit, ax=None, **kwargs):
"""Plot observed and modelled empirical cumulative frequencies
Parameters
----------
fit : scipy.optimize.OptimizeResult
Object with the optimization result, having a `x` attribute
with coefficients of the solution.
ax : matplotlib.axes.Axes instance
An Axes instance to use as target.
**kwargs : optional keyword arguments
Passed to `matplotlib.pyplot.gca`.
Returns
-------
ax :
            :class:`~matplotlib.axes.Axes` instance.
"""
x = self.x
xx = np.log1p(x)
x_ecdf = ECDF(xx)
x_pred = np.linspace(0, xx.max(), num=101)
x_pred_expm1 = np.expm1(x_pred)
y_pred = x_ecdf(x_pred)
if ax is None:
ax = plt.gca(**kwargs)
# Plot ECDF of data
ax.step(x_pred_expm1, y_pred, label="observed")
ax.set_xscale("log")
ax.xaxis.set_major_formatter(ScalarFormatter())
ax.set_xlim(np.exp(xx).min(), np.exp(xx).max())
# Plot estimated CDF
coefs = fit.x
if len(coefs) == 3:
p_hat = [coefs[0]] # list to bouts.ecdf()
lda_hat = pd.Series(coefs[1:], name="lambda")
elif len(coefs) == 5:
p_hat = coefs[:2]
lda_hat = pd.Series(coefs[2:], name="lambda")
y_mod = bouts.ecdf(x_pred_expm1, p_hat, lda_hat)
ax.plot(x_pred_expm1, y_mod, label="model")
# Add a little offset to ylim for visibility
yoffset = (0.05, 1.05)
ax.set_ylim(*yoffset) # add some spacing
# Plot BEC
bec_x = self.bec(fit)
bec_y = bouts.ecdf(bec_x, p=p_hat, lambdas=lda_hat)
bouts._plot_bec(bec_x, bec_y=bec_y, ax=ax, xytext=(-5, 5),
horizontalalignment="right")
ax.legend(loc="upper left")
ax.set_xlabel("x")
ax.set_ylabel("ECDF [x]")
return ax
if __name__ == '__main__':
# Set up info level logging
logging.basicConfig(level=logging.INFO)
from skdiveMove.tests import diveMove2skd
tdrX = diveMove2skd()
pars = {"offset_zoc": 3,
"dry_thr": 70,
"wet_thr": 3610,
"dive_thr": 3,
"dive_model": "unimodal",
"smooth_par": 0.1,
"knot_factor": 20,
"descent_crit_q": 0.01,
"ascent_crit_q": 0}
tdrX.calibrate(zoc_method="offset", offset=pars["offset_zoc"],
                   dry_thr=pars["dry_thr"], wet_thr=pars["wet_thr"],
dive_thr=pars["dive_thr"],
dive_model=pars["dive_model"],
smooth_par=pars["smooth_par"],
knot_factor=pars["knot_factor"],
descent_crit_q=pars["descent_crit_q"],
ascent_crit_q=pars["ascent_crit_q"])
stats = tdrX.dive_stats()
stamps = tdrX.stamp_dives(ignore_z=True)
stats_tab = pd.concat((stamps, stats), axis=1)
# 2=4 here
postdives = stats_tab["postdive_dur"][stats_tab["phase_id"] == 4]
postdives_diff = postdives.dt.total_seconds().diff()[1:].abs()
# Remove isolated dives
postdives_diff = postdives_diff[postdives_diff < 2000]
# Set up instance
bouts_postdive = BoutsMLE(postdives_diff, 0.1)
# Get init parameters from broken stick model
bout_init_pars = bouts_postdive.init_pars([50], plot=False)
    # Parameter bounds: transformed parameters for the first fit,
    # untransformed (positive) parameters for the second fit
p_bnd = (-2, None)
lda1_bnd = (-5, None)
lda2_bnd = (-10, None)
bd1 = (p_bnd, lda1_bnd, lda2_bnd)
p_bnd = (1e-8, None)
lda1_bnd = (1e-8, None)
lda2_bnd = (1e-8, None)
bd2 = (p_bnd, lda1_bnd, lda2_bnd)
fit1, fit2 = bouts_postdive.fit(bout_init_pars,
fit1_opts=dict(method="L-BFGS-B",
bounds=bd1),
fit2_opts=dict(method="L-BFGS-B",
bounds=bd2))
# BEC
becx = bouts_postdive.bec(fit2)
ax = bouts_postdive.plot_fit(fit2)
bouts_postdive.plot_ecdf(fit2) | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/bouts/boutsmle.py | 0.92412 | 0.617916 | boutsmle.py | pypi |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
from statsmodels.distributions.empirical_distribution import ECDF
from . import bouts
class BoutsNLS(bouts.Bouts):
"""Nonlinear Least Squares fitting for models of Poisson process mixtures
Methods for modelling log-frequency data as a mixture of Poisson
processes via nonlinear least squares [1]_.
References
----------
.. [1] Sibly, R.; Nott, H. and Fletcher, D. (1990) Splitting behaviour
into bouts Animal Behaviour 39, 63-69.
Examples
--------
Draw 1000 samples from a mixture where the first process occurs with
    probability :math:`p = 0.7` and the second process occurs with the remaining
probability.
>>> from skdiveMove.tests import random_mixexp
>>> rng = np.random.default_rng(123)
>>> x2 = random_mixexp(1000, p=0.7, lda=np.array([0.05, 0.005]),
... rng=rng)
>>> xbouts2 = BoutsNLS(x2, bw=5)
>>> init_pars = xbouts2.init_pars([80], plot=False)
Fit the model and retrieve coefficients:
>>> coefs, pcov = xbouts2.fit(init_pars)
>>> print(np.round(coefs, 4))
(2.519, 80.0] (80.0, 1297.52]
a 3648.8547 1103.4423
lambda 0.0388 0.0032
Calculate bout-ending criterion (returns array):
>>> print(np.round(xbouts2.bec(coefs), 4))
[103.8648]
Plot observed and predicted data:
>>> xbouts2.plot_fit(coefs) # doctest: +ELLIPSIS
<AxesSubplot:...>
Plot ECDF:
>>> xbouts2.plot_ecdf(coefs) # doctest: +ELLIPSIS
<AxesSubplot:...>
"""
def fit(self, start, **kwargs):
"""Fit non-linear least squares model to log frequencies
        The parent class :class:`bouts.Bouts` implements this method.
Parameters
----------
start : pandas.DataFrame
DataFrame with coefficients for each process in columns.
**kwargs : optional keyword arguments
Passed to `scipy.optimize.curve_fit`.
Returns
-------
coefs : pandas.DataFrame
Coefficients of the model.
pcov : 2D array
Covariance of coefs.
"""
return bouts.Bouts.fit(self, start, **kwargs)
def bec(self, coefs):
"""Calculate bout ending criteria from model coefficients
        The parent class :class:`bouts.Bouts` implements this method.
Parameters
----------
coefs : pandas.DataFrame
DataFrame with model coefficients in columns.
Returns
-------
out : numpy.ndarray, shape (n,)
            1-D array with BECs implied by `coefs`. Length is
            ``coefs.shape[1] - 1`` (one BEC per transition between processes).
"""
        # The parent class implements this method
return bouts.Bouts.bec(self, coefs)
def plot_ecdf(self, coefs, ax=None, **kwargs):
"""Plot observed and modelled empirical cumulative frequencies
Parameters
----------
coefs : pandas.DataFrame
DataFrame with model coefficients in columns.
ax : matplotlib.axes.Axes instance
An Axes instance to use as target.
**kwargs : optional keyword arguments
Passed to `matplotlib.pyplot.gca`.
Returns
-------
ax :
            :class:`~matplotlib.axes.Axes` instance.
"""
x = self.x
xx = np.log1p(x)
x_ecdf = ECDF(xx)
x_pred = np.linspace(0, xx.max(), num=101)
x_pred_expm1 = np.expm1(x_pred)
y_pred = x_ecdf(x_pred)
if ax is None:
ax = plt.gca(**kwargs)
# Plot ECDF of data
ax.step(x_pred_expm1, y_pred, label="observed")
ax.set_xscale("log")
ax.xaxis.set_major_formatter(ScalarFormatter())
ax.set_xlim(np.exp(xx).min(), np.exp(xx).max())
# Plot estimated CDF
p, lambdas = bouts.calc_p(coefs)
y_mod = bouts.ecdf(x_pred_expm1, p, lambdas)
ax.plot(x_pred_expm1, y_mod, label="model")
# Add a little offset to ylim for visibility
yoffset = (0.05, 1.05)
ax.set_ylim(*yoffset) # add some spacing
# Plot BEC
bec_x = self.bec(coefs)
bec_y = bouts.ecdf(bec_x, p=p, lambdas=lambdas)
bouts._plot_bec(bec_x, bec_y=bec_y, ax=ax, xytext=(-5, 5),
horizontalalignment="right")
ax.legend(loc="upper left")
ax.set_xlabel("x")
ax.set_ylabel("ECDF [x]")
return ax
if __name__ == '__main__':
from skdiveMove.tests import diveMove2skd
import pandas as pd
tdrX = diveMove2skd()
pars = {"offset_zoc": 3,
"dry_thr": 70,
"wet_thr": 3610,
"dive_thr": 3,
"dive_model": "unimodal",
"smooth_par": 0.1,
"knot_factor": 20,
"descent_crit_q": 0.01,
"ascent_crit_q": 0}
tdrX.calibrate(zoc_method="offset", offset=pars["offset_zoc"],
                   dry_thr=pars["dry_thr"], wet_thr=pars["wet_thr"],
dive_thr=pars["dive_thr"],
dive_model=pars["dive_model"],
smooth_par=pars["smooth_par"],
knot_factor=pars["knot_factor"],
descent_crit_q=pars["descent_crit_q"],
ascent_crit_q=pars["ascent_crit_q"])
stats = tdrX.dive_stats()
stamps = tdrX.stamp_dives(ignore_z=True)
stats_tab = pd.concat((stamps, stats), axis=1)
# 2=4 here
postdives = stats_tab["postdive_dur"][stats_tab["phase_id"] == 4]
postdives_diff = postdives.dt.total_seconds().diff()[1:].abs()
# Remove isolated dives
postdives_diff = postdives_diff[postdives_diff < 2000]
# Set up instance
bouts_postdive = BoutsNLS(postdives_diff, 0.1)
# Get init parameters
bout_init_pars = bouts_postdive.init_pars([50], plot=False)
nls_coefs, _ = bouts_postdive.fit(bout_init_pars)
# BEC
bouts_postdive.bec(nls_coefs)
bouts_postdive.plot_fit(nls_coefs)
# ECDF
    ax1 = bouts_postdive.plot_ecdf(nls_coefs)
# Try 3 processes
# Get init parameters
bout_init_pars = bouts_postdive.init_pars([50, 550], plot=False)
nls_coefs, _ = bouts_postdive.fit(bout_init_pars)
# BEC
bouts_postdive.bec(nls_coefs)
bouts_postdive.plot_fit(nls_coefs)
# ECDF
    ax2 = bouts_postdive.plot_ecdf(nls_coefs) | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/bouts/boutsnls.py | 0.936677 | 0.774328 | boutsnls.py | pypi |
r"""Tools and classes for the identification of behavioural bouts
A histogram of log-transformed frequencies of `x` with a chosen bin width
and upper limit forms the basis for models. Histogram bins following empty
ones have their frequencies averaged over the number of previous empty bins
plus one. Models attempt to discern the number of random Poisson
processes, and their parameters, generating the underlying distribution of
log-transformed frequencies.
The abstract class :class:`Bouts` provides basic methods.
Abstract class & methods summary
--------------------------------
.. autosummary::
Bouts
Bouts.init_pars
Bouts.fit
Bouts.bec
Bouts.plot_fit
Nonlinear least squares models
------------------------------
Currently, the model describing the histogram as it is built is implemented
in the :class:`BoutsNLS` class. For the case of a mixture of two Poisson
processes, this class would set up the model:
.. math::
:label: 1
y = log[N_f \lambda_f e^{-\lambda_f t} +
N_s \lambda_s e^{-\lambda_s t}]
where :math:`N_f` and :math:`N_s` are the number of events belonging to
process :math:`f` and :math:`s`, respectively; and :math:`\lambda_f` and
:math:`\lambda_s` are the probabilities of an event occurring in each
process. Mixtures of more processes can also be added to the model.
The bout-ending criterion (BEC) corresponding to equation :eq:`1` is:
.. math::
:label: 2
BEC = \frac{1}{\lambda_f - \lambda_s}
log \frac{N_f \lambda_f}{N_s \lambda_s}
Note that there is one BEC per transition between Poisson processes.
The methods of this subclass are provided by the abstract super class
:class:`Bouts`, and defining those listed below.
Methods summary
---------------
.. autosummary::
BoutsNLS.plot_ecdf
Maximum likelihood models
-------------------------
This is the preferred approach to modelling mixtures of random Poisson
processes, as it does not rely on the subjective construction of a
histogram. The histogram is only used to generate reasonable starting
values, but the underlying parameters of the model are obtained via maximum
likelihood, so it is more robust.
For the case of a mixture of two processes, as above, the log likelihood of
all the :math:`N_t` in a mixture can be expressed as:
.. math::
:label: 3
log\ L_2 = \sum_{i=1}^{N_t} log[p \lambda_f e^{-\lambda_f t_i} +
(1-p) \lambda_s e^{-\lambda_s t_i}]
where :math:`p` is a mixing parameter indicating the proportion of fast to
slow process events in the sampled population.
The BEC in this case can be estimated as:
.. math::
:label: 4
BEC = \frac{1}{\lambda_f - \lambda_s}
log \frac{p\lambda_f}{(1-p)\lambda_s}
The subclass :class:`BoutsMLE` offers the framework for these models.
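For example, given fitted values for :math:`p`, :math:`\lambda_f` and
:math:`\lambda_s`, the BEC in equation :eq:`4` can be computed directly.
The snippet below is only an illustrative sketch with assumed parameter
values, not part of the package API::

    import numpy as np

    p, lda_f, lda_s = 0.7, 0.05, 0.005  # assumed example values
    bec = np.log((p * lda_f) / ((1 - p) * lda_s)) / (lda_f - lda_s)
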
Class & methods summary
-----------------------
.. autosummary::
BoutsMLE.negMLEll
BoutsMLE.fit
BoutsMLE.bec
BoutsMLE.plot_fit
BoutsMLE.plot_ecdf
API
---
"""
from .bouts import Bouts, label_bouts
from .boutsnls import BoutsNLS
from .boutsmle import BoutsMLE
from skdiveMove.tests import random_mixexp
__all__ = ["Bouts", "BoutsNLS", "BoutsMLE", "label_bouts",
"random_mixexp"] | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/bouts/__init__.py | 0.916801 | 0.969584 | __init__.py | pypi |
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import xarray as xr
from skdownscale.pointwise_models import BcsdPrecipitation, BcsdTemperature
# utilities for plotting cdfs
def plot_cdf(ax=None, **kwargs):
if ax:
plt.sca(ax)
else:
ax = plt.gca()
for label, X in kwargs.items():
vals = np.sort(X, axis=0)
pp = scipy.stats.mstats.plotting_positions(vals)
ax.plot(pp, vals, label=label)
ax.legend()
return ax
def plot_cdf_by_month(ax=None, **kwargs):
fig, axes = plt.subplots(4, 3, sharex=True, sharey=False, figsize=(12, 8))
for label, X in kwargs.items():
for month, ax in zip(range(1, 13), axes.flat):
vals = np.sort(X[X.index.month == month], axis=0)
pp = scipy.stats.mstats.plotting_positions(vals)
ax.plot(pp, vals, label=label)
ax.set_title(month)
ax.legend()
return ax
# open a small dataset for training
training = xr.open_zarr("../data/downscale_test_data.zarr.zip", group="training")
training
# open a small dataset of observations (targets)
targets = xr.open_zarr("../data/downscale_test_data.zarr.zip", group="targets")
targets
# extract 1 point of training data for precipitation and temperature
X_temp = training.isel(point=0).to_dataframe()[["T2max"]].resample("MS").mean() - 273.13
X_pcp = training.isel(point=0).to_dataframe()[["PREC_TOT"]].resample("MS").sum() * 24
display(X_temp.head(), X_pcp.head())
# extract 1 point of target data for precipitation and temperature
y_temp = targets.isel(point=0).to_dataframe()[["Tmax"]].resample("MS").mean()
y_pcp = targets.isel(point=0).to_dataframe()[["Prec"]].resample("MS").sum()
display(y_temp.head(), y_pcp.head())
# Fit/predict the BCSD Temperature model
bcsd_temp = BcsdTemperature()
bcsd_temp.fit(X_temp, y_temp)
out = bcsd_temp.predict(X_temp) + X_temp
plot_cdf(X=X_temp, y=y_temp, out=out)
out.plot()
plot_cdf_by_month(X=X_temp, y=y_temp, out=out)
# Fit/predict the BCSD Precipitation model
bcsd_pcp = BcsdPrecipitation()
bcsd_pcp.fit(X_pcp, y_pcp)
out = bcsd_pcp.predict(X_pcp) * X_pcp
plot_cdf(X=X_pcp, y=y_pcp, out=out)
plot_cdf_by_month(X=X_pcp, y=y_pcp, out=out)
```
| /scikit-downscale-0.1.5.tar.gz/scikit-downscale-0.1.5/examples/bcsd_example.ipynb | 0.525612 | 0.648209 | bcsd_example.ipynb | pypi |
import numpy as np
import pandas as pd
import probscale
import scipy
import seaborn as sns
import xarray as xr
from matplotlib import pyplot as plt
def get_sample_data(kind):
if kind == 'training':
data = xr.open_zarr('../data/downscale_test_data.zarr.zip', group=kind)
# extract 1 point of training data for precipitation and temperature
df = (
data.isel(point=0)
.to_dataframe()[['T2max', 'PREC_TOT']]
.rename(columns={'T2max': 'tmax', 'PREC_TOT': 'pcp'})
)
df['tmax'] -= 273.13
df['pcp'] *= 24
return df.resample('1d').first()
elif kind == 'targets':
data = xr.open_zarr('../data/downscale_test_data.zarr.zip', group=kind)
# extract 1 point of training data for precipitation and temperature
return (
data.isel(point=0)
.to_dataframe()[['Tmax', 'Prec']]
.rename(columns={'Tmax': 'tmax', 'Prec': 'pcp'})
)
elif kind == 'wind-hist':
return (
xr.open_dataset(
'../data/uas/uas.hist.CanESM2.CRCM5-UQAM.day.NAM-44i.raw.Colorado.19801990.nc'
)['uas']
.sel(lat=40.25, lon=-109.2, method='nearest')
.squeeze()
.to_dataframe()[['uas']]
)
elif kind == 'wind-obs':
return (
xr.open_dataset('../data/uas/uas.gridMET.NAM-44i.Colorado.19801990.nc')['uas']
.sel(lat=40.25, lon=-109.2, method='nearest')
.squeeze()
.to_dataframe()[['uas']]
)
elif kind == 'wind-rcp':
return (
xr.open_dataset(
'../data/uas/uas.rcp85.CanESM2.CRCM5-UQAM.day.NAM-44i.raw.Colorado.19902000.nc'
)['uas']
.sel(lat=40.25, lon=-109.2, method='nearest')
.squeeze()
.to_dataframe()[['uas']]
)
else:
raise ValueError(kind)
def prob_plots(x, y, y_hat, shape=(2, 2), figsize=(8, 8)):
fig, axes = plt.subplots(*shape, sharex=True, sharey=True, figsize=figsize)
scatter_kws = dict(label='', marker=None, linestyle='-')
common_opts = dict(plottype='qq', problabel='', datalabel='')
for ax, (label, series) in zip(axes.flat, y_hat.items()):
scatter_kws['label'] = 'original'
fig = probscale.probplot(x, ax=ax, scatter_kws=scatter_kws, **common_opts)
scatter_kws['label'] = 'target'
fig = probscale.probplot(y, ax=ax, scatter_kws=scatter_kws, **common_opts)
scatter_kws['label'] = 'corrected'
fig = probscale.probplot(series, ax=ax, scatter_kws=scatter_kws, **common_opts)
ax.set_title(label)
ax.legend()
[ax.set_xlabel('Standard Normal Quantiles') for ax in axes[-1]]
[ax.set_ylabel('Temperature [C]') for ax in axes[:, 0]]
[fig.delaxes(ax) for ax in axes.flat[len(y_hat.keys()) :]]
fig.tight_layout()
return fig
def zscore_ds_plot(training, target, future, corrected):
labels = ['training', 'future', 'target', 'corrected']
colors = {k: c for (k, c) in zip(labels, sns.color_palette('Set2', n_colors=4))}
alpha = 0.5
time_target = pd.date_range('1980-01-01', '1989-12-31', freq='D')
time_training = time_target[~((time_target.month == 2) & (time_target.day == 29))]
time_future = pd.date_range('1990-01-01', '1999-12-31', freq='D')
time_future = time_future[~((time_future.month == 2) & (time_future.day == 29))]
plt.figure(figsize=(8, 4))
plt.plot(time_training, training.uas, label='training', alpha=alpha, c=colors['training'])
plt.plot(time_target, target.uas, label='target', alpha=alpha, c=colors['target'])
plt.plot(time_future, future.uas, label='future', alpha=alpha, c=colors['future'])
plt.plot(
time_future,
corrected.uas,
label='corrected',
alpha=alpha,
c=colors['corrected'],
)
plt.xlabel('Time')
plt.ylabel('Eastward Near-Surface Wind (m s-1)')
plt.legend()
return
def zscore_correction_plot(zscore):
training_mean = zscore.fit_stats_dict_['X_mean']
training_std = zscore.fit_stats_dict_['X_std']
target_mean = zscore.fit_stats_dict_['y_mean']
target_std = zscore.fit_stats_dict_['y_std']
future_mean = zscore.predict_stats_dict_['meani']
future_mean = future_mean.groupby(future_mean.index.dayofyear).mean()
future_std = zscore.predict_stats_dict_['stdi']
future_std = future_std.groupby(future_std.index.dayofyear).mean()
corrected_mean = zscore.predict_stats_dict_['meanf']
corrected_mean = corrected_mean.groupby(corrected_mean.index.dayofyear).mean()
corrected_std = zscore.predict_stats_dict_['stdf']
corrected_std = corrected_std.groupby(corrected_std.index.dayofyear).mean()
labels = ['training', 'future', 'target', 'corrected']
colors = {k: c for (k, c) in zip(labels, sns.color_palette('Set2', n_colors=4))}
doy = 20
plt.figure()
x, y = _gaus(training_mean, training_std, doy)
plt.plot(x, y, c=colors['training'], label='training')
x, y = _gaus(target_mean, target_std, doy)
plt.plot(x, y, c=colors['target'], label='target')
x, y = _gaus(future_mean, future_std, doy)
plt.plot(x, y, c=colors['future'], label='future')
x, y = _gaus(corrected_mean, corrected_std, doy)
plt.plot(x, y, c=colors['corrected'], label='corrected')
plt.legend()
return
def _gaus(mean, std, doy):
mu = mean[doy]
sigma = std[doy]
x = np.linspace(mu - 3 * sigma, mu + 3 * sigma, 100)
y = scipy.stats.norm.pdf(x, mu, sigma)
return x, y | /scikit-downscale-0.1.5.tar.gz/scikit-downscale-0.1.5/examples/utils.py | 0.519521 | 0.40392 | utils.py | pypi |
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import xarray as xr
from skdownscale.pointwise_models import AnalogRegression, PureAnalog
# open a small dataset for training
training = xr.open_zarr("../data/downscale_test_data.zarr.zip", group="training")
training
# open a small dataset of observations (targets)
targets = xr.open_zarr("../data/downscale_test_data.zarr.zip", group="targets")
targets
# extract 1 point of training data for precipitation and temperature
X_temp = training.isel(point=0).to_dataframe()[["T2max"]] - 273.13
X_pcp = training.isel(point=0).to_dataframe()[["PREC_TOT"]] * 24
display(X_temp.head(), X_pcp.head())
# extract 1 point of target data for precipitation and temperature
y_temp = targets.isel(point=0).to_dataframe()[["Tmax"]]
y_pcp = targets.isel(point=0).to_dataframe()[["Prec"]]
display(y_temp.head(), y_pcp.head())
# Fit/predict using the PureAnalog class
for kind in ["best_analog", "sample_analogs", "weight_analogs", "mean_analogs"]:
pure_analog = PureAnalog(kind=kind, n_analogs=10)
pure_analog.fit(X_temp[:1000], y_temp[:1000])
out = pure_analog.predict(X_temp[1000:])
plt.plot(out[:300], label=kind)
# Fit/predict using the AnalogRegression class
analog_reg = AnalogRegression(n_analogs=100)
analog_reg.fit(X_temp[:1000], y_temp[:1000])
out = analog_reg.predict(X_temp[1000:])
plt.plot(out[:300], label="AnalogRegression")
plt.legend()
```
| /scikit-downscale-0.1.5.tar.gz/scikit-downscale-0.1.5/examples/gard_example.ipynb | 0.483892 | 0.701432 | gard_example.ipynb | pypi |
import numpy as np
import pandas as pd
from .utils import default_none_kwargs
class GroupedRegressor:
"""Grouped Regressor
    Wrapper supporting fitting separate estimators for distinct groups
Parameters
----------
estimator : object
Estimator object such as derived from `BaseEstimator`. This estimator will be fit to each group
fit_grouper : object
Grouper object, such as `pd.Grouper` or `PaddedDOYGrouper` used to split data into groups during fitting.
predict_grouper : object, func, str
Grouper object, such as `pd.Grouper` used to split data into groups during prediction.
    estimator_kwargs : dict
        Keyword arguments to pass onto the `estimator`'s constructor.
    fit_grouper_kwargs : dict
        Keyword arguments to pass onto the `fit_grouper`'s constructor.
    predict_grouper_kwargs : dict
        Keyword arguments passed to ``X.groupby`` along with `predict_grouper`
        during prediction.
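
    Examples
    --------
    A minimal sketch pairing :class:`PaddedDOYGrouper` for fitting with a
    day-of-year ``groupby`` for prediction; the data below are synthetic
    and only illustrate the expected shapes::

        import numpy as np
        import pandas as pd
        from sklearn.linear_model import LinearRegression

        index = pd.date_range('1980-01-01', '1989-12-31', freq='D')
        X = pd.DataFrame({'tmax': np.random.random(len(index))}, index=index)
        y = pd.DataFrame({'obs': np.random.random(len(index))}, index=index)

        reg = GroupedRegressor(
            LinearRegression,
            fit_grouper=PaddedDOYGrouper,
            predict_grouper=lambda t: t.dayofyear,
            fit_grouper_kwargs={'window': 15},
        )
        y_hat = reg.fit(X, y).predict(X)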
"""
def __init__(
self,
estimator,
fit_grouper,
predict_grouper,
estimator_kwargs=None,
fit_grouper_kwargs=None,
predict_grouper_kwargs=None,
):
self.estimator = estimator
self.estimator_kwargs = estimator_kwargs
self.fit_grouper = fit_grouper
self.fit_grouper_kwargs = fit_grouper_kwargs
self.predict_grouper = predict_grouper
self.predict_grouper_kwargs = predict_grouper_kwargs
def fit(self, X, y, **fit_kwargs):
"""Fit the grouped regressor
Parameters
----------
X : pd.DataFrame, shape (n_samples, n_features)
Training data
y : pd.Series or pd.DataFrame, shape (n_samples, ) or (n_samples, n_targets)
Target values
**fit_kwargs
Additional keyword arguments to pass onto the estimator's fit method
Returns
-------
self : returns an instance of self.
"""
fit_grouper_kwargs = default_none_kwargs(self.fit_grouper_kwargs)
x_groups = self.fit_grouper(X.index, **fit_grouper_kwargs).groups
y_groups = self.fit_grouper(y.index, **fit_grouper_kwargs).groups
self.targets_ = list(y.keys())
estimator_kwargs = default_none_kwargs(self.estimator_kwargs)
self.estimators_ = {key: self.estimator(**estimator_kwargs) for key in x_groups}
for x_key, x_inds in x_groups.items():
y_inds = y_groups[x_key]
self.estimators_[x_key].fit(X.iloc[x_inds], y.iloc[y_inds], **fit_kwargs)
return self
def predict(self, X):
"""Predict estimator target for X
Parameters
----------
X : pd.DataFrame, shape (n_samples, n_features)
Training data
Returns
-------
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The predicted values.
"""
predict_grouper_kwargs = default_none_kwargs(self.predict_grouper_kwargs)
grouper = X.groupby(self.predict_grouper, **predict_grouper_kwargs)
result = np.empty((len(X), len(self.targets_)))
for key, inds in grouper.indices.items():
result[inds, ...] = self.estimators_[key].predict(X.iloc[inds])
return result
class PaddedDOYGrouper:
"""Grouper to group an Index by day-of-year +/ pad
Parameters
----------
index : pd.DatetimeIndex
Pandas DatetimeIndex to be grouped.
window : int
Size of the padded offset for each day of year.
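
    Examples
    --------
    A minimal sketch with a synthetic daily index::

        import pandas as pd

        index = pd.date_range('2000-01-01', '2009-12-31', freq='D')
        grouper = PaddedDOYGrouper(index, window=15)
        groups = grouper.groups  # dict mapping day-of-year -> integer indices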
"""
def __init__(self, index, window):
self.index = index
self.window = window
idoy = index.dayofyear
n = idoy.max()
# day-of-year x day-of-year groups
        temp_groups = np.zeros((n, n), dtype=bool)
for i in range(n):
inds = np.arange(i - self.window, i + self.window + 1)
inds[inds < 0] += n
inds[inds > n - 1] -= n
temp_groups[i, inds] = True
arr = temp_groups[idoy - 1]
self._groups = {doy: np.nonzero(arr[:, doy - 1])[0] for doy in range(1, n + 1)}
@property
def groups(self):
"""Dict {doy -> group indicies}."""
return self._groups | /scikit-downscale-0.1.5.tar.gz/scikit-downscale-0.1.5/skdownscale/pointwise_models/grouping.py | 0.868771 | 0.543651 | grouping.py | pypi |
import warnings
import pandas as pd
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils import check_array, check_X_y
from sklearn.utils.validation import check_is_fitted
class TimeSynchronousDownscaler(BaseEstimator):
def _check_X_y(self, X, y, **kwargs):
if isinstance(X, pd.DataFrame) and isinstance(X, pd.DataFrame):
assert X.index.equals(y.index)
check_X_y(X, y) # this may be inefficient
else:
X, y = check_X_y(X, y)
warnings.warn('X and y do not have pandas DateTimeIndexes, making one up...')
index = pd.date_range(periods=len(X), start='1950', freq='MS')
X = pd.DataFrame(X, index=index)
y = pd.DataFrame(y, index=index)
return X, y
def _check_array(self, array, **kwargs):
if isinstance(array, pd.DataFrame):
check_array(array)
else:
array = check_array(array)
warnings.warn('array does not have a pandas DateTimeIndex, making one up...')
index = pd.date_range(periods=len(array), start='1950', freq=self._timestep)
array = pd.DataFrame(array, index=index)
return array
def _validate_data(self, X, y=None, reset=True, validate_separately=False, **check_params):
"""Validate input data and set or check the `n_features_in_` attribute.
Parameters
----------
X : {array-like, sparse matrix, dataframe} of shape \
(n_samples, n_features)
The input samples.
y : array-like of shape (n_samples,), default=None
The targets. If None, `check_array` is called on `X` and
`check_X_y` is called otherwise.
reset : bool, default=True
Whether to reset the `n_features_in_` attribute.
If False, the input will be checked for consistency with data
provided when reset was last True.
validate_separately : False or tuple of dicts, default=False
Only used if y is not None.
If False, call validate_X_y(). Else, it must be a tuple of kwargs
to be used for calling check_array() on X and y respectively.
**check_params : kwargs
Parameters passed to :func:`sklearn.utils.check_array` or
:func:`sklearn.utils.check_X_y`. Ignored if validate_separately
is not False.
Returns
-------
out : {ndarray, sparse matrix} or tuple of these
The validated input. A tuple is returned if `y` is not None.
"""
if y is None:
if self._get_tags()['requires_y']:
raise ValueError(
f'This {self.__class__.__name__} estimator '
f'requires y to be passed, but the target y is None.'
)
X = self._check_array(X, **check_params)
out = X
else:
if validate_separately:
# We need this because some estimators validate X and y
# separately, and in general, separately calling check_array()
# on X and y isn't equivalent to just calling check_X_y()
# :(
check_X_params, check_y_params = validate_separately
X = self._check_array(X, **check_X_params)
y = self._check_array(y, **check_y_params)
else:
X, y = self._check_X_y(X, y, **check_params)
out = X, y
# TO-DO: add check_n_features attribute
if check_params.get('ensure_2d', True):
self._check_n_features(X, reset=reset)
return out | /scikit-downscale-0.1.5.tar.gz/scikit-downscale-0.1.5/skdownscale/pointwise_models/base.py | 0.880116 | 0.561996 | base.py | pypi |
import collections
import copy
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin, TransformerMixin
from sklearn.linear_model import LinearRegression
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted
from .trend import LinearTrendTransformer
from .utils import check_max_features, default_none_kwargs
SYNTHETIC_MIN = -1e20
SYNTHETIC_MAX = 1e20
Cdf = collections.namedtuple('CDF', ['pp', 'vals'])
def plotting_positions(n, alpha=0.4, beta=0.4):
'''Returns a monotonic array of plotting positions.
Parameters
----------
n : int
Length of plotting positions to return.
alpha, beta : float
        Plotting position parameters. Default is 0.4 for both (Cunnane positions).
Returns
-------
    positions : ndarray
        1-D array of `n` monotonically increasing plotting positions in
        the open interval (0, 1).
See Also
--------
scipy.stats.mstats.plotting_positions
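
    Examples
    --------
    A quick sketch; with the default parameters these are the Cunnane
    plotting positions::

        pp = plotting_positions(3)  # -> [0.1875, 0.5, 0.8125]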
'''
return (np.arange(1, n + 1) - alpha) / (n + 1.0 - alpha - beta)
class QuantileMapper(TransformerMixin, BaseEstimator):
"""Transform features using quantile mapping.
Parameters
----------
detrend : boolean, optional
If True, detrend the data before quantile mapping and add the trend
back after transforming. Default is False.
lt_kwargs : dict, optional
Dictionary of keyword arguments to pass to the LinearTrendTransformer
    qt_kwargs : dict, optional
        Dictionary of keyword arguments to pass to the CunnaneTransformer
Attributes
----------
    x_cdf_fit_ : CunnaneTransformer
        Fitted CunnaneTransformer for fit(X)
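
    Examples
    --------
    A minimal sketch with synthetic single-feature data (shapes assumed)::

        import numpy as np

        X_hist = np.random.gamma(2.0, size=(1000, 1))
        X_sim = np.random.gamma(2.5, size=(1000, 1))

        mapper = QuantileMapper().fit(X_hist)
        X_sim_mapped = mapper.transform(X_sim)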
"""
_fit_attributes = ['x_cdf_fit_']
def __init__(self, detrend=False, lt_kwargs=None, qt_kwargs=None):
self.detrend = detrend
self.lt_kwargs = lt_kwargs
self.qt_kwargs = qt_kwargs
def fit(self, X, y=None):
"""Fit the quantile mapping model.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data
"""
# TO-DO: fix validate data fctn
X = self._validate_data(X)
qt_kws = default_none_kwargs(self.qt_kwargs, copy=True)
# maybe detrend the input datasets
if self.detrend:
lt_kwargs = default_none_kwargs(self.lt_kwargs)
self.x_trend_fit_ = LinearTrendTransformer(**lt_kwargs).fit(X)
x_to_cdf = self.x_trend_fit_.transform(X)
else:
x_to_cdf = X
# calculate the cdfs for X
qt = CunnaneTransformer(**qt_kws)
self.x_cdf_fit_ = qt.fit(x_to_cdf)
return self
def transform(self, X):
"""Perform the quantile mapping.
Parameters
----------
X : array_like, shape [n_samples, n_features]
Samples.
Returns
-------
y : ndarray of shape (n_samples, )
Transformed data
"""
# validate input data
check_is_fitted(self)
# TO-DO: fix validate_data fctn
X = self._validate_data(X)
# maybe detrend the datasets
if self.detrend:
lt_kwargs = default_none_kwargs(self.lt_kwargs)
x_trend = LinearTrendTransformer(**lt_kwargs).fit(X)
x_to_cdf = x_trend.transform(X)
else:
x_to_cdf = X
# do the final mapping
qt_kws = default_none_kwargs(self.qt_kwargs, copy=True)
x_quantiles = CunnaneTransformer(**qt_kws).fit_transform(x_to_cdf)
x_qmapped = self.x_cdf_fit_.inverse_transform(x_quantiles)
# add the trend back
if self.detrend:
x_qmapped = x_trend.inverse_transform(x_qmapped)
# reset the baseline (remove bias)
x_qmapped -= x_trend.lr_model_.intercept_ - self.x_trend_fit_.lr_model_.intercept_
return x_qmapped
def _more_tags(self):
return {
'_xfail_checks': {
'check_estimators_dtypes': 'QuantileMapper only suppers 1 feature',
'check_fit_score_takes_y': 'QuantileMapper only suppers 1 feature',
'check_transformer_data_not_an_array': 'QuantileMapper only suppers 1 feature',
'check_estimators_fit_returns_self': 'QuantileMapper only suppers 1 feature',
'check_estimators_fit_returns_self(readonly_memmap=True)': 'QuantileMapper only suppers 1 feature',
'check_dtype_object': 'QuantileMapper only suppers 1 feature',
'check_pipeline_consistency': 'QuantileMapper only suppers 1 feature',
'check_estimators_nan_inf': 'QuantileMapper only suppers 1 feature',
'check_estimators_overwrite_params': 'QuantileMapper only suppers 1 feature',
'check_estimators_pickle': 'QuantileMapper only suppers 1 feature',
'check_fit2d_predict1d': 'QuantileMapper only suppers 1 feature',
'check_methods_subset_invariance': 'QuantileMapper only suppers 1 feature',
'check_fit2d_1sample': 'QuantileMapper only suppers 1 feature',
'check_dict_unchanged': 'QuantileMapper only suppers 1 feature',
'check_dont_overwrite_parameters': 'QuantileMapper only suppers 1 feature',
'check_fit_idempotent': 'QuantileMapper only suppers 1 feature',
'check_n_features_in': 'QuantileMapper only suppers 1 feature',
'check_estimators_empty_data_messages': 'skip due to odd sklearn string matching in unit test',
'check_fit_check_is_fitted': 'QuantileMapper only suppers 1 feature',
'check_transformer_general': 'QuantileMapper only suppers 1 feature',
'check_transformer_preserve_dtypes': 'QuantileMapper only suppers 1 feature',
'check_methods_sample_order_invariance': 'QuantileMapper only suppers 1 feature',
},
}
class QuantileMappingReressor(RegressorMixin, BaseEstimator):
"""Transform features using quantile mapping.
Parameters
----------
extrapolate : str, optional
How to extend the cdfs at the tails. Valid options include {`'min'`, `'max'`, `'both'`, `'1to1'`, `None`}
n_endpoints : int
Number of endpoints to include when extrapolating the tails of the cdf
Attributes
----------
_X_cdf : Cdf
NamedTuple representing the fit's X cdf
_y_cdf : Cdf
NamedTuple representing the fit's y cdf
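
    Examples
    --------
    A minimal sketch with synthetic single-feature data (shapes assumed)::

        import numpy as np

        X = np.random.gamma(2.0, size=(500, 1))  # e.g. model output
        y = np.random.gamma(2.5, size=(500,))    # e.g. observations

        qmr = QuantileMappingReressor(extrapolate='1to1').fit(X, y)
        y_hat = qmr.predict(X)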
"""
_fit_attributes = ['_X_cdf', '_y_cdf']
def __init__(self, extrapolate=None, n_endpoints=10):
self.extrapolate = extrapolate
self.n_endpoints = n_endpoints
if self.n_endpoints < 2:
raise ValueError('Invalid number of n_endpoints, must be >= 2')
def fit(self, X, y, **kwargs):
"""Fit the quantile mapping regression model.
Parameters
----------
X : array-like, shape [n_samples, 1]
Training data.
Returns
-------
self : object
"""
X = check_array(
X, dtype='numeric', ensure_min_samples=2 * self.n_endpoints + 1, ensure_2d=True
)
y = check_array(
y, dtype='numeric', ensure_min_samples=2 * self.n_endpoints + 1, ensure_2d=False
)
X = check_max_features(X, n=1)
self._X_cdf = self._calc_extrapolated_cdf(X, sort=True, extrapolate=self.extrapolate)
self._y_cdf = self._calc_extrapolated_cdf(y, sort=True, extrapolate=self.extrapolate)
return self
def predict(self, X, **kwargs):
"""Predict regression for target X.
Parameters
----------
X : array_like, shape [n_samples, 1]
Samples.
Returns
-------
y : ndarray of shape (n_samples, )
Predicted data.
"""
check_is_fitted(self, self._fit_attributes)
X = check_array(X, ensure_2d=True)
X = X[:, 0]
sort_inds = np.argsort(X)
X_cdf = self._calc_extrapolated_cdf(X[sort_inds], sort=False, extrapolate=self.extrapolate)
# Fill value for when x < xp[0] or x > xp[-1] (i.e. when X_cdf vals are out of range for self._X_cdf vals)
left = -np.inf if self.extrapolate in ['min', 'both'] else None
right = np.inf if self.extrapolate in ['max', 'both'] else None
# For all values in future X, find the corresponding percentile in historical X
X_cdf.pp[:] = np.interp(
X_cdf.vals, self._X_cdf.vals, self._X_cdf.pp, left=left, right=right
)
        # Extrapolate the tails beyond 1.0 to handle "new extremes", only triggered when the new extremes are even more drastic than
# the linear extrapolation result from historical X at SYNTHETIC_MIN and SYNTHETIC_MAX
if np.isinf(X_cdf.pp).any():
lower_inds = np.nonzero(-np.inf == X_cdf.pp)[0]
upper_inds = np.nonzero(np.inf == X_cdf.pp)[0]
model = LinearRegression()
if len(lower_inds):
s = slice(lower_inds[-1] + 1, lower_inds[-1] + 1 + self.n_endpoints)
model.fit(X_cdf.pp[s].reshape(-1, 1), X_cdf.vals[s].reshape(-1, 1))
X_cdf.pp[lower_inds] = model.predict(X_cdf.vals[lower_inds].reshape(-1, 1))
if len(upper_inds):
s = slice(upper_inds[0] - self.n_endpoints, upper_inds[0])
model.fit(X_cdf.pp[s].reshape(-1, 1), X_cdf.vals[s].reshape(-1, 1))
X_cdf.pp[upper_inds] = model.predict(X_cdf.vals[upper_inds].reshape(-1, 1))
# do the full quantile mapping
y_hat = np.full_like(X, np.nan)
y_hat[sort_inds] = np.interp(X_cdf.pp, self._y_cdf.pp, self._y_cdf.vals)[1:-1]
# If extrapolate is 1to1, apply the offset between X and y to the
# tails of y_hat
if self.extrapolate == '1to1':
y_hat = self._extrapolate_1to1(X, y_hat)
return y_hat
def _extrapolate_1to1(self, X, y_hat):
X_fit_len = len(self._X_cdf.vals)
X_fit_min = self._X_cdf.vals[0]
X_fit_max = self._X_cdf.vals[-1]
y_fit_len = len(self._y_cdf.vals)
y_fit_min = self._y_cdf.vals[0]
y_fit_max = self._y_cdf.vals[-1]
# adjust values over fit max
inds = X > X_fit_max
if inds.any():
if X_fit_len == y_fit_len:
y_hat[inds] = y_fit_max + (X[inds] - X_fit_max)
elif X_fit_len > y_fit_len:
X_fit_at_y_fit_max = np.interp(self._y_cdf.pp[-1], self._X_cdf.pp, self._X_cdf.vals)
y_hat[inds] = y_fit_max + (X[inds] - X_fit_at_y_fit_max)
elif X_fit_len < y_fit_len:
y_fit_at_X_fit_max = np.interp(self._X_cdf.pp[-1], self._y_cdf.pp, self._y_cdf.vals)
y_hat[inds] = y_fit_at_X_fit_max + (X[inds] - X_fit_max)
# adjust values under fit min
inds = X < X_fit_min
if inds.any():
if X_fit_len == y_fit_len:
y_hat[inds] = y_fit_min + (X[inds] - X_fit_min)
elif X_fit_len > y_fit_len:
X_fit_at_y_fit_min = np.interp(self._y_cdf.pp[0], self._X_cdf.pp, self._X_cdf.vals)
y_hat[inds] = X_fit_min + (X[inds] - X_fit_at_y_fit_min)
elif X_fit_len < y_fit_len:
y_fit_at_X_fit_min = np.interp(self._X_cdf.pp[0], self._y_cdf.pp, self._y_cdf.vals)
y_hat[inds] = y_fit_at_X_fit_min + (X[inds] - X_fit_min)
return y_hat
def _calc_extrapolated_cdf(
self, data, sort=True, extrapolate=None, pp_min=SYNTHETIC_MIN, pp_max=SYNTHETIC_MAX
):
"""Calculate a new extrapolated cdf
The goal of this function is to create a CDF with bounds outside the [0, 1] range.
This allows for quantile mapping beyond observed data points.
Parameters
----------
data : array_like, shape [n_samples, 1]
Input data (can be unsorted)
sort : bool
If true, sort the data before building the CDF
extrapolate : str or None
How to extend the cdfs at the tails. Valid options include {`'min'`, `'max'`, `'both'`, `'1to1'`, `None`}
pp_min, pp_max : float
Plotting position min/max values.
Returns
-------
cdf : Cdf (NamedTuple)
"""
n = len(data)
# plotting positions
pp = np.empty(n + 2)
pp[1:-1] = plotting_positions(n)
# extended data values (sorted)
if data.ndim == 2:
data = data[:, 0]
if sort:
data = np.sort(data)
vals = np.full(n + 2, np.nan)
vals[1:-1] = data
vals[0] = data[0]
vals[-1] = data[-1]
# Add endpoints to the vector of plotting positions
if extrapolate in [None, '1to1']:
pp[0] = pp[1]
pp[-1] = pp[-2]
elif extrapolate == 'both':
pp[0] = pp_min
pp[-1] = pp_max
elif extrapolate == 'max':
pp[0] = pp[1]
pp[-1] = pp_max
elif extrapolate == 'min':
pp[0] = pp_min
pp[-1] = pp[-2]
else:
raise ValueError('unknown value for extrapolate: %s' % extrapolate)
if extrapolate in ['min', 'max', 'both']:
model = LinearRegression()
# extrapolate lower end point
if extrapolate in ['min', 'both']:
s = slice(1, self.n_endpoints + 1)
# fit linear model to first n_endpoints
model.fit(pp[s].reshape(-1, 1), vals[s].reshape(-1, 1))
# calculate the data value pp[0]
vals[0] = model.predict(pp[0].reshape(-1, 1))
# extrapolate upper end point
if extrapolate in ['max', 'both']:
s = slice(-self.n_endpoints - 1, -1)
# fit linear model to last n_endpoints
model.fit(pp[s].reshape(-1, 1), vals[s].reshape(-1, 1))
# calculate the data value pp[-1]
vals[-1] = model.predict(pp[-1].reshape(-1, 1))
return Cdf(pp, vals)
def _more_tags(self):
return {
'_xfail_checks': {
'check_estimators_dtypes': 'QuantileMappingReressor only suppers 1 feature',
'check_fit_score_takes_y': 'QuantileMappingReressor only suppers 1 feature',
'check_estimators_fit_returns_self': 'QuantileMappingReressor only suppers 1 feature',
'check_estimators_fit_returns_self(readonly_memmap=True)': 'QuantileMappingReressor only suppers 1 feature',
'check_dtype_object': 'QuantileMappingReressor only suppers 1 feature',
'check_pipeline_consistency': 'QuantileMappingReressor only suppers 1 feature',
'check_estimators_nan_inf': 'QuantileMappingReressor only suppers 1 feature',
'check_estimators_overwrite_params': 'QuantileMappingReressor only suppers 1 feature',
'check_estimators_pickle': 'QuantileMappingReressor only suppers 1 feature',
'check_fit2d_predict1d': 'QuantileMappingReressor only suppers 1 feature',
'check_methods_subset_invariance': 'QuantileMappingReressor only suppers 1 feature',
'check_fit2d_1sample': 'QuantileMappingReressor only suppers 1 feature',
'check_dict_unchanged': 'QuantileMappingReressor only suppers 1 feature',
'check_dont_overwrite_parameters': 'QuantileMappingReressor only suppers 1 feature',
'check_fit_idempotent': 'QuantileMappingReressor only suppers 1 feature',
'check_n_features_in': 'QuantileMappingReressor only suppers 1 feature',
'check_estimators_empty_data_messages': 'skip due to odd sklearn string matching in unit test',
'check_regressors_train': 'QuantileMappingReressor only suppers 1 feature',
'check_regressors_train(readonly_memmap=True)': 'QuantileMappingReressor only suppers 1 feature',
'check_regressors_train(readonly_memmap=True,X_dtype=float32)': 'QuantileMappingReressor only suppers 1 feature',
'check_regressor_data_not_an_array': 'QuantileMappingReressor only suppers 1 feature',
'check_regressors_no_decision_function': 'QuantileMappingReressor only suppers 1 feature',
'check_supervised_y_2d': 'QuantileMappingReressor only suppers 1 feature',
'check_regressors_int': 'QuantileMappingReressor only suppers 1 feature',
'check_methods_sample_order_invariance': 'QuantileMappingReressor only suppers 1 feature',
'check_fit_check_is_fitted': 'QuantileMappingReressor only suppers 1 feature',
'check_requires_y_none': 'QuantileMappingReressor only suppers 1 feature',
},
}
class CunnaneTransformer(TransformerMixin, BaseEstimator):
"""Quantile transform using Cunnane plotting positions with optional extrapolation.
Parameters
----------
alpha : float, optional
Plotting positions parameter. Default is 0.4.
beta : float, optional
Plotting positions parameter. Default is 0.4.
extrapolate : str, optional
How to extend the cdfs at the tails. Valid options include {`'min'`, `'max'`, `'both'`, `'1to1'`, `None`}. Default is None.
n_endpoints : int
Number of endpoints to include when extrapolating the tails of the cdf. Usused if ``extrapolate`` is None. Default is 10.
Attributes
----------
cdf_ : Cdf
NamedTuple representing the fit cdf
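
    Examples
    --------
    A minimal sketch mapping values to plotting positions and back
    (synthetic single-feature data)::

        import numpy as np

        X = np.random.gamma(2.0, size=(200, 1))
        ct = CunnaneTransformer().fit(X)
        pps = ct.transform(X)               # plotting positions in (0, 1)
        X_back = ct.inverse_transform(pps)  # approximate inverse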
"""
_fit_attributes = ['cdf_']
def __init__(self, *, alpha=0.4, beta=0.4, extrapolate='both', n_endpoints=10):
self.alpha = alpha
self.beta = beta
self.extrapolate = extrapolate
self.n_endpoints = n_endpoints
def fit(self, X, y=None):
"""Compute CDF and plotting positions for X.
Parameters
----------
        X : array-like of shape (n_samples, 1)
            The data used to compute the empirical CDF and plotting
            positions. Only a single feature is supported.
y : None
Ignored.
Returns
-------
self : object
Fitted transformer.
"""
X = check_array(X, ensure_2d=True)
if X.shape[1] > 1:
raise ValueError('CunnaneTransformer.fit() only supports a single feature')
X = X[:, 0]
self.cdf_ = Cdf(plotting_positions(len(X)), np.sort(X))
return self
def transform(self, X):
"""Perform the quantile transform.
Parameters
----------
X : array_like, shape [n_samples, 1]
Samples.
Returns
-------
y : ndarray of shape (n_samples, )
Transformed data
"""
X = check_array(X, ensure_2d=True)
if X.shape[1] > 1:
raise ValueError('CunnaneTransformer.transform() only supports a single feature')
X = X[:, 0]
left = -np.inf if self.extrapolate in ['min', 'both'] else None
right = np.inf if self.extrapolate in ['max', 'both'] else None
pps = np.interp(X, self.cdf_.vals, self.cdf_.pp, left=left, right=right)
if np.isinf(pps).any():
lower_inds = np.nonzero(-np.inf == pps)[0]
upper_inds = np.nonzero(np.inf == pps)[0]
model = LinearRegression()
if len(lower_inds):
s = slice(None, self.n_endpoints)
model.fit(self.cdf_.vals[s].reshape(-1, 1), self.cdf_.pp[s].reshape(-1, 1))
                pps[lower_inds] = model.predict(X[lower_inds].reshape(-1, 1)).squeeze()
if len(upper_inds):
s = slice(-self.n_endpoints, None)
model.fit(self.cdf_.vals[s].reshape(-1, 1), self.cdf_.pp[s].reshape(-1, 1))
                pps[upper_inds] = model.predict(X[upper_inds].reshape(-1, 1)).squeeze()
return pps.reshape(-1, 1)
def fit_transform(self, X, y=None):
"""Fit `CunnaneTransform` to `X`, then transform `X`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data used to generate the fit CDF.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
X_new : ndarray of shape (n_samples, n_features)
Transformed data.
"""
return self.fit(X).transform(X)
def inverse_transform(self, X):
X = check_array(X, ensure_2d=True)
X = X[:, 0]
left = -np.inf if self.extrapolate in ['min', 'both'] else None
right = np.inf if self.extrapolate in ['max', 'both'] else None
vals = np.interp(X, self.cdf_.pp, self.cdf_.vals, left=left, right=right)
if np.isinf(vals).any():
lower_inds = np.nonzero(-np.inf == vals)[0]
upper_inds = np.nonzero(np.inf == vals)[0]
model = LinearRegression()
if len(lower_inds):
s = slice(None, self.n_endpoints)
model.fit(self.cdf_.pp[s].reshape(-1, 1), self.cdf_.vals[s].reshape(-1, 1))
vals[lower_inds] = model.predict(X[lower_inds].reshape(-1, 1)).squeeze()
if len(upper_inds):
s = slice(-self.n_endpoints, None)
model.fit(self.cdf_.pp[s].reshape(-1, 1), self.cdf_.vals[s].reshape(-1, 1))
vals[upper_inds] = model.predict(X[upper_inds].reshape(-1, 1)).squeeze()
return vals.reshape(-1, 1)
def _more_tags(self):
return {
'_xfail_checks': {
'check_estimators_dtypes': 'CunnaneTransformer only suppers 1 feature',
'check_fit_score_takes_y': 'CunnaneTransformer only suppers 1 feature',
'check_transformer_data_not_an_array': 'CunnaneTransformer only suppers 1 feature',
'check_estimators_fit_returns_self': 'CunnaneTransformer only suppers 1 feature',
'check_estimators_fit_returns_self(readonly_memmap=True)': 'CunnaneTransformer only suppers 1 feature',
'check_dtype_object': 'CunnaneTransformer only suppers 1 feature',
'check_pipeline_consistency': 'CunnaneTransformer only suppers 1 feature',
'check_estimators_nan_inf': 'CunnaneTransformer only suppers 1 feature',
'check_estimators_overwrite_params': 'CunnaneTransformer only suppers 1 feature',
'check_estimators_pickle': 'CunnaneTransformer only suppers 1 feature',
'check_fit2d_predict1d': 'CunnaneTransformer only suppers 1 feature',
'check_methods_subset_invariance': 'CunnaneTransformer only suppers 1 feature',
'check_fit2d_1sample': 'CunnaneTransformer only suppers 1 feature',
'check_dict_unchanged': 'CunnaneTransformer only suppers 1 feature',
'check_dont_overwrite_parameters': 'CunnaneTransformer only suppers 1 feature',
'check_fit_idempotent': 'CunnaneTransformer only suppers 1 feature',
'check_n_features_in': 'CunnaneTransformer only suppers 1 feature',
'check_estimators_empty_data_messages': 'skip due to odd sklearn string matching in unit test',
'check_fit_check_is_fitted': 'CunnaneTransformer only suppers 1 feature',
'check_transformer_general': 'CunnaneTransformer only suppers 1 feature',
'check_transformer_preserve_dtypes': 'CunnaneTransformer only suppers 1 feature',
'check_methods_sample_order_invariance': 'CunnaneTransformer only suppers 1 feature',
},
}
class EquidistantCdfMatcher(QuantileMappingReressor):
"""Transform features using equidistant CDF matching, a version of quantile mapping that preserves the difference or ratio between X_test and X_train.
Parameters
----------
    kind : str, optional
        Whether to preserve the additive ``'difference'`` or the multiplicative
        ``'ratio'`` between the prediction inputs and the training inputs.
        Default is ``'difference'``.
    extrapolate : str, optional
        How to extend the cdfs at the tails. Valid options include {`'min'`, `'max'`, `'both'`, `'1to1'`, `None`}
    n_endpoints : int
        Number of endpoints to include when extrapolating the tails of the cdf
    max_ratio : float, optional
        Upper bound applied to the ratio when ``kind='ratio'`` (default None).
Attributes
----------
_X_cdf : Cdf
NamedTuple representing the fit's X cdf
_y_cdf : Cdf
NamedTuple representing the fit's y cdf
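
    Examples
    --------
    A minimal sketch using the multiplicative (``'ratio'``) form, as is
    common for precipitation (synthetic single-feature data)::

        import numpy as np

        X = np.random.gamma(2.0, size=(500, 1))
        y = np.random.gamma(2.5, size=(500,))

        matcher = EquidistantCdfMatcher(kind='ratio').fit(X, y)
        y_hat = matcher.predict(X)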
"""
_fit_attributes = ['_X_cdf', '_y_cdf']
def __init__(self, kind='difference', extrapolate=None, n_endpoints=10, max_ratio=None):
if kind not in ['difference', 'ratio']:
raise NotImplementedError('kind must be either difference or ratio')
self.kind = kind
self.extrapolate = extrapolate
self.n_endpoints = n_endpoints
# MACA seems to have a max ratio for precip at 5.0
self.max_ratio = max_ratio
if self.n_endpoints < 2:
raise ValueError('Invalid number of n_endpoints, must be >= 2')
def predict(self, X, **kwargs):
"""Predict regression for target X.
Parameters
----------
X : array_like, shape [n_samples, 1]
Samples.
Returns
-------
y : ndarray of shape (n_samples, )
Predicted data.
"""
check_is_fitted(self, self._fit_attributes)
X = check_array(X, ensure_2d=True)
X = X[:, 0]
sort_inds = np.argsort(X)
X_cdf = self._calc_extrapolated_cdf(X[sort_inds], sort=False, extrapolate=self.extrapolate)
X_train_vals = np.interp(x=X_cdf.pp, xp=self._X_cdf.pp, fp=self._X_cdf.vals)
# generate y value as historical y plus/multiply by quantile difference
if self.kind == 'difference':
diff = X_cdf.vals - X_train_vals
sorted_y_hat = np.interp(x=X_cdf.pp, xp=self._y_cdf.pp, fp=self._y_cdf.vals) + diff
elif self.kind == 'ratio':
ratio = X_cdf.vals / X_train_vals
if self.max_ratio is not None:
                    ratio = np.minimum(ratio, self.max_ratio)
sorted_y_hat = np.interp(x=X_cdf.pp, xp=self._y_cdf.pp, fp=self._y_cdf.vals) * ratio
# put things into the right order
y_hat = np.full_like(X, np.nan)
y_hat[sort_inds] = sorted_y_hat[1:-1]
# If extrapolate is 1to1, apply the offset between X and y to the
# tails of y_hat
if self.extrapolate == '1to1':
y_hat = self._extrapolate_1to1(X, y_hat)
return y_hat
class TrendAwareQuantileMappingRegressor(RegressorMixin, BaseEstimator):
"""Experimental meta estimator for performing trend-aware quantile mapping
Parameters
----------
qm_estimator : object, default=None
        Regressor object such as ``QuantileMappingReressor``.
    trend_transformer : object, default=None
        Transformer used to remove and restore the trend. If None, a
        :class:`LinearTrendTransformer` is used.
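
    Examples
    --------
    A minimal sketch (pandas inputs with a datetime index are assumed,
    since the trend handling uses DataFrame means)::

        import numpy as np
        import pandas as pd

        index = pd.date_range('1950-01-01', periods=240, freq='MS')
        X = pd.DataFrame({'tmax': np.random.random(240) + np.linspace(0, 1, 240)},
                         index=index)
        y = pd.DataFrame({'tmax': np.random.random(240)}, index=index)

        model = TrendAwareQuantileMappingRegressor(QuantileMappingReressor())
        y_hat = model.fit(X, y).predict(X)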
"""
def __init__(self, qm_estimator=None, trend_transformer=None):
self.qm_estimator = qm_estimator
        if trend_transformer is None:
            self.trend_transformer = LinearTrendTransformer()
        else:
            self.trend_transformer = trend_transformer
def fit(self, X, y):
"""Fit the model.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
Returns
-------
self : object
"""
self._X_mean_fit = X.mean()
self._y_mean_fit = y.mean()
y_trend = copy.deepcopy(self.trend_transformer)
y_detrend = y_trend.fit_transform(y)
X_trend = copy.deepcopy(self.trend_transformer)
x_detrend = X_trend.fit_transform(X)
self.qm_estimator.fit(x_detrend, y_detrend)
return self
def predict(self, X):
"""Predict regression for target X.
Parameters
----------
X : array_like, shape [n_samples, n_features]
Samples.
Returns
-------
y : ndarray of shape (n_samples, )
Predicted data.
"""
X_trend = copy.deepcopy(self.trend_transformer)
x_detrend = X_trend.fit_transform(X)
y_hat = self.qm_estimator.predict(x_detrend).reshape(-1, 1)
# add the mean and trend back
# delta: X (predict) - X (fit) + y --> projected change + historical obs mean
delta = (X.mean().values - self._X_mean_fit.values) + self._y_mean_fit.values
# calculate the trendline
# TODO: think about how this would need to change if we're using a rolling average trend
trendline = X_trend.trendline(X)
trendline -= trendline.mean() # center at 0
# apply the trend and delta
y_hat += trendline + delta
return y_hat | /scikit-downscale-0.1.5.tar.gz/scikit-downscale-0.1.5/skdownscale/pointwise_models/quantile.py | 0.905947 | 0.528168 | quantile.py | pypi |
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.linear_model import LinearRegression
from sklearn.utils.validation import check_is_fitted
from .utils import default_none_kwargs
class LinearTrendTransformer(TransformerMixin, BaseEstimator):
"""Transform features by removing linear trends.
Uses Ordinary least squares Linear Regression as implemented in
    sklearn.linear_model.LinearRegression.
Parameters
----------
    lr_kwargs : dict, optional
        Dictionary of keyword arguments to pass to
        sklearn.linear_model.LinearRegression.
Attributes
----------
lr_model_ : sklearn.linear_model.LinearRegression
Linear Regression object.
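
    Examples
    --------
    A minimal sketch with synthetic trended data::

        import numpy as np

        X = np.linspace(0, 2, 100).reshape(-1, 1) + np.random.random((100, 1))
        ltt = LinearTrendTransformer().fit(X)
        detrended = ltt.transform(X)                  # linear trend removed
        restored = ltt.inverse_transform(detrended)   # trend added back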
"""
def __init__(self, lr_kwargs=None):
self.lr_kwargs = lr_kwargs
def fit(self, X, y=None):
"""Compute the linear trend.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
"""
X = self._validate_data(X)
kwargs = default_none_kwargs(self.lr_kwargs)
self.lr_model_ = LinearRegression(**kwargs)
self.lr_model_.fit(np.arange(len(X)).reshape(-1, 1), X)
return self
def transform(self, X):
"""Perform transformation by removing the trend.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data that should be detrended.
"""
# validate input data
check_is_fitted(self)
X = self._validate_data(X)
return X - self.trendline(X)
def inverse_transform(self, X):
"""Add the trend back to the data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data that should be transformed back.
"""
# validate input data
check_is_fitted(self)
X = self._validate_data(X)
return X + self.trendline(X)
def trendline(self, X):
"""helper function to calculate a linear trendline"""
X = self._validate_data(X)
return self.lr_model_.predict(np.arange(len(X)).reshape(-1, 1))
def _more_tags(self):
return {
'_xfail_checks': {
'check_methods_subset_invariance': 'because',
'check_methods_sample_order_invariance': 'temporal order matters',
}
} | /scikit-downscale-0.1.5.tar.gz/scikit-downscale-0.1.5/skdownscale/pointwise_models/trend.py | 0.928433 | 0.530236 | trend.py | pypi |
![Logo](logo.png)
# scikit-dsp-comm
[![pypi](https://img.shields.io/pypi/v/scikit-dsp-comm.svg)](https://pypi.python.org/pypi/scikit-dsp-comm)
[![Anaconda-Server Badge](https://anaconda.org/conda-forge/scikit-dsp-comm/badges/version.svg)](https://anaconda.org/conda-forge/scikit-dsp-comm)
[![Docs](https://readthedocs.org/projects/scikit-dsp-comm/badge/?version=latest)](http://scikit-dsp-comm.readthedocs.io/en/latest/?badge=latest)
## Background
This package originated from writing the book Signals and Systems for Dummies, published by Wiley in 2013. The original module for this book is named `ssd.py`. In `scikit-dsp-comm` this module is renamed to `sigsys.py` to better reflect the fact that signal processing and communications theory is founded in signals and systems, a traditional subject in electrical engineering curricula.
## Package High Level Overview
This package is a collection of functions and classes to support signal processing and communications theory teaching and research. The foundation for this package is `scipy.signal`. The code currently requires Python `>=3.5`.
**There are presently ten modules that make up scikit-dsp-comm:**
1. `sigsys.py` for basic signals and systems functions both continuous-time and discrete-time, including graphical display tools such as pole-zero plots, up-sampling and down-sampling.
2. `digitalcomm.py` for digital modulation theory components, including asynchronous resampling and variable time delay functions, both useful in advanced modem testing.
3. `synchronization.py` which contains phase-locked loop simulation functions and functions for carrier and phase synchronization of digital communications waveforms.
4. `fec_conv.py` for the generation rate one-half and one-third convolutional codes and soft decision Viterbi algorithm decoding, including soft and hard decisions, trellis and trellis-traceback display functions, and puncturing.
5. `fir_design_helper.py` which allows easy design of lowpass, highpass, bandpass, and bandstop filters using the Kaiser window and equal-ripple designs; it also includes a list plotting function for easily comparing magnitude, phase, and group delay frequency responses.
6. `iir_design_helper.py` which allows easy design of lowpass, highpass, bandpass, and bandstop filters using scipy.signal Butterworth, Chebyshev I and II, and elliptical designs, including the use of the cascade of second-order sections (SOS) topology from scipy.signal; it also includes a list plotting function for easily comparing magnitude, phase, and group delay frequency responses.
7. `multirate.py` that encapsulates digital filters into objects for filtering, interpolation by an integer factor, and decimation by an integer factor.
8. `coeff2header.py` writes `C/C++` header files for FIR and IIR filters implemented in `C/C++`, using the cascade of second-order section representation for the IIR case. This last module finds use in real-time signal processing on embedded systems, but can be used for simulation models in `C/C++`.
Presently the collection of modules contains about 125 functions and classes. The authors/maintainers are working to get more detailed documentation in place.
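The top-level import package is named `sk_dsp_comm`. As a quick orientation (the short aliases below are only a suggested convention, not a requirement), a typical session starts with imports such as:

```python
import sk_dsp_comm.sigsys as ss
import sk_dsp_comm.fir_design_helper as fir_d
import sk_dsp_comm.iir_design_helper as iir_d
```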
## Documentation
Documentation is now housed on `readthedocs` which you can get to by clicking the docs badge near the top of this `README`. Example notebooks can be viewed on [GitHub pages](https://mwickert.github.io/scikit-dsp-comm/). In time more notebook postings will be extracted from [Dr. Wickert's Info Center](http://www.eas.uccs.edu/~mwickert/).
## Getting Set-up on Your System
The best way to use this package is to clone this repository and then install it.
```bash
git clone https://github.com/mwickert/scikit-dsp-comm.git
```
There are package dependencies for some modules that you may want to avoid. Specifically these are whenever hardware interfacing is involved. Specific hardware and software configuration details are discussed in [wiki pages](https://github.com/mwickert/SP-Comm-Tutorial-using-scikit-dsp-comm/wiki).
For Windows users `pip` install takes care of almost everything. I assume below you have Python on your path, so for example with [Anaconda](https://www.anaconda.com/download/#macos), I suggest letting the installer set these paths up for you.
### Editable Install with Dependencies
With the terminal in the root directory of the cloned repo perform an editable `pip` install using
```bash
pip install -e .
```
### Why an Editable Install?
The advantage of the editable `pip` install is that it is very easy to keep `scikit-dsp-comm ` up to date. If you know that updates have been pushed to the master branch, you simply go to your local repo folder and
```bash
git pull origin master
```
This will update you local repo and automatically update the Python install without the need to run `pip` again. **Note**: If you have any Python kernels running, such as a Jupyter Notebook, you will need to restart the kernel to insure any module changes get reloaded.
| /scikit-dsp-comm-2.0.3.tar.gz/scikit-dsp-comm-2.0.3/README.md | 0.632616 | 0.993661 | README.md | pypi |
import numpy as np
import scipy.special as special
from .digitalcom import q_fctn
from .fec_conv import binary
from logging import getLogger
log = getLogger(__name__)
class FECHamming(object):
"""
Class responsible for creating hamming block codes and then
encoding and decoding. Methods provided include hamm_gen,
hamm_encoder(), hamm_decoder().
Parameters
----------
j: Hamming code order (in terms of parity bits) where n = 2^j-1,
k = n-j, and the rate is k/n.
Returns
-------
Examples
--------
Andrew Smit November 2018
"""
def __init__(self,j):
self.j = j
self.G, self.H, self.R, self.n, self.k = self.hamm_gen(self.j)
log.info('(%d,%d) hamming code object' %(self.n,self.k))
def hamm_gen(self,j):
"""
Generates parity check matrix (H) and generator
matrix (G).
Parameters
----------
j: Number of Hamming code parity bits with n = 2^j-1 and k = n-j
returns
-------
G: Systematic generator matrix with left-side identity matrix
H: Systematic parity-check matrix with right-side identity matrix
R: k x k identity matrix
n: number of total bits/block
k: number of source bits/block
Andrew Smit November 2018
"""
if(j < 3):
raise ValueError('j must be > 2')
# calculate codeword length
n = 2**j-1
# calculate source bit length
k = n-j
# Allocate memory for Matrices
G = np.zeros((k,n),dtype=int)
H = np.zeros((j,n),dtype=int)
P = np.zeros((j,k),dtype=int)
R = np.zeros((k,n),dtype=int)
# Encode parity-check matrix columns with binary 1-n
for i in range(1,n+1):
b = list(binary(i,j))
for m in range(0,len(b)):
b[m] = int(b[m])
H[:,i-1] = np.array(b)
# Reformat H to be systematic
H1 = np.zeros((1,j),dtype=int)
H2 = np.zeros((1,j),dtype=int)
for i in range(0,j):
idx1 = 2**i-1
idx2 = n-i-1
H1[0,:] = H[:,idx1]
H2[0,:] = H[:,idx2]
H[:,idx1] = H2
H[:,idx2] = H1
# Get parity matrix from H
P = H[:,:k]
# Use P to calcuate generator matrix P
G[:,:k] = np.diag(np.ones(k))
G[:,k:] = P.T
# Get k x k identity matrix
R[:,:k] = np.diag(np.ones(k))
return G, H, R, n, k
def hamm_encoder(self,x):
"""
Encodes input bit array x using hamming block code.
parameters
----------
x: array of source bits to be encoded by block encoder.
returns
-------
codewords: array of code words generated by generator
matrix G and input x.
Andrew Smit November 2018
"""
if(np.dtype(x[0]) != int):
raise ValueError('Error: Invalid data type. Input must be a vector of ints')
if(len(x) % self.k or len(x) < self.k):
raise ValueError('Error: Invalid input vector length. Length must be a multiple of %d' %self.k)
N_symbols = int(len(x)/self.k)
codewords = np.zeros(N_symbols*self.n)
x = np.reshape(x,(1,len(x)))
for i in range(0,N_symbols):
codewords[i*self.n:(i+1)*self.n] = np.matmul(x[:,i*self.k:(i+1)*self.k],self.G)%2
return codewords
def hamm_decoder(self,codewords):
"""
Decode hamming encoded codewords. Make sure code words are of
the appropriate length for the object.
parameters
---------
codewords: bit array of codewords
returns
-------
decoded_bits: bit array of decoded source bits
Andrew Smit November 2018
"""
if(np.dtype(codewords[0]) != int):
raise ValueError('Error: Invalid data type. Input must be a vector of ints')
if(len(codewords) % self.n or len(codewords) < self.n):
raise ValueError('Error: Invalid input vector length. Length must be a multiple of %d' %self.n)
# Calculate the number of symbols (codewords) in the input array
N_symbols = int(len(codewords)/self.n)
# Allocate memory for decoded sourcebits
decoded_bits = np.zeros(N_symbols*self.k)
# Loop through codewords to decode one block at a time
codewords = np.reshape(codewords,(1,len(codewords)))
for i in range(0,N_symbols):
# find the syndrome of each codeword
S = np.matmul(self.H,codewords[:,i*self.n:(i+1)*self.n].T) % 2
# convert binary syndrome to an integer
bits = ''
for m in range(0,len(S)):
bit = str(int(S[m,:]))
bits = bits + bit
error_pos = int(bits,2)
h_pos = self.H[:,error_pos-1]
# Use the syndrome to find the position of an error within the block
bits = ''
for m in range(0,len(S)):
bit = str(int(h_pos[m]))
bits = bits + bit
decoded_pos = int(bits,2)-1
# correct error if present
if(error_pos):
codewords[:,i*self.n+decoded_pos] = (codewords[:,i*self.n+decoded_pos] + 1) % 2
# Decode the corrected codeword
decoded_bits[i*self.k:(i+1)*self.k] = np.matmul(self.R,codewords[:,i*self.n:(i+1)*self.n].T).T % 2
return decoded_bits.astype(int)
class FECCyclic(object):
"""
Class responsible for creating cyclic block codes and then
encoding and decoding. Methods provided include
cyclic_encoder(), cyclic_decoder().
Parameters
----------
G: Generator polynomial used to create cyclic code object
Suggested G values (from Ziemer and Peterson pg 430):
j G
------------
3 G = '1011'
4 G = '10011'
5 G = '101001'
6 G = '1100001'
7 G = '10100001'
8 G = '101110001'
9 G = '1000100001'
10 G = '10010000001'
11 G = '101000000001'
12 G = '1100101000001'
13 G = '11011000000001'
14 G = '110000100010001'
15 G = '1100000000000001'
16 G = '11010000000010001'
17 G = '100100000000000001'
18 G = '1000000100000000001'
19 G = '11100100000000000001'
20 G = '100100000000000000001'
21 G = '1010000000000000000001'
22 G = '11000000000000000000001'
23 G = '100001000000000000000001'
24 G = '1110000100000000000000001'
Returns
-------
Examples
--------
Andrew Smit November 2018
"""
def __init__(self,G='1011'):
self.j = len(G)-1
self.n = 2**self.j - 1
self.k =self.n-self.j
self.G = G
if(G[0] == '0' or G[len(G)-1] == '0'):
raise ValueError('Error: Invalid generator polynomial')
log.info('(%d,%d) cyclic code object' %(self.n,self.k))
def cyclic_encoder(self,x,G='1011'):
"""
Encodes input bit array x using cyclic block code.
parameters
----------
x: vector of source bits to be encoded by block encoder. Numpy array
of integers expected.
returns
-------
codewords: vector of code words generated from input vector
Andrew Smit November 2018
"""
# Check block length
if(len(x) % self.k or len(x) < self.k):
raise ValueError('Error: Incomplete block in input array. Make sure input array length is a multiple of %d' %self.k)
# Check data type of input vector
if(np.dtype(x[0]) != int):
raise ValueError('Error: Input array should be int data type')
# Calculate number of blocks
Num_blocks = int(len(x) / self.k)
codewords = np.zeros((Num_blocks,self.n),dtype=int)
x = np.reshape(x,(Num_blocks,self.k))
#print(x)
for p in range(Num_blocks):
S = np.zeros(len(self.G))
codeword = np.zeros(self.n)
current_block = x[p,:]
#print(current_block)
for i in range(0,self.n):
if(i < self.k):
S[0] = current_block[i]
S0temp = 0
for m in range(0,len(self.G)):
if(self.G[m] == '1'):
S0temp = S0temp + S[m]
#print(j,S0temp,S[j])
S0temp = S0temp % 2
S = np.roll(S,1)
codeword[i] = current_block[i]
S[1] = S0temp
else:
out = 0
for m in range(1,len(self.G)):
if(self.G[m] == '1'):
out = out + S[m]
codeword[i] = out % 2
S = np.roll(S,1)
S[1] = 0
codewords[p,:] = codeword
#print(codeword)
codewords = np.reshape(codewords,np.size(codewords))
return codewords.astype(int)
def cyclic_decoder(self,codewords):
"""
Decodes a vector of cyclic coded codewords.
parameters
----------
codewords: vector of codewords to be decoded. Numpy array of integers expected.
returns
-------
decoded_blocks: vector of decoded bits
Andrew Smit November 2018
"""
# Check block length
if(len(codewords) % self.n or len(codewords) < self.n):
raise ValueError('Error: Incomplete coded block in input array. Make sure coded input array length is a multiple of %d' %self.n)
# Check input data type
if(np.dtype(codewords[0]) != int):
raise ValueError('Error: Input array should be int data type')
# Calculate number of blocks
Num_blocks = int(len(codewords) / self.n)
decoded_blocks = np.zeros((Num_blocks,self.k),dtype=int)
codewords = np.reshape(codewords,(Num_blocks,self.n))
for p in range(Num_blocks):
codeword = codewords[p,:]
Ureg = np.zeros(self.n)
S = np.zeros(len(self.G))
decoded_bits = np.zeros(self.k)
output = np.zeros(self.n)
for i in range(0,self.n): # Switch A closed B open
Ureg = np.roll(Ureg,1)
Ureg[0] = codeword[i]
S0temp = 0
S[0] = codeword[i]
for m in range(len(self.G)):
if(self.G[m] == '1'):
S0temp = S0temp + S[m]
S0 = S
S = np.roll(S,1)
S[1] = S0temp % 2
for i in range(0,self.n): # Switch B closed A open
Stemp = 0
for m in range(1,len(self.G)):
if(self.G[m] == '1'):
Stemp = Stemp + S[m]
S = np.roll(S,1)
S[1] = Stemp % 2
and_out = 1
for m in range(1,len(self.G)):
if(m > 1):
and_out = and_out and ((S[m]+1) % 2)
else:
and_out = and_out and S[m]
output[i] = (and_out + Ureg[len(Ureg)-1]) % 2
Ureg = np.roll(Ureg,1)
Ureg[0] = 0
decoded_bits = output[0:self.k].astype(int)
decoded_blocks[p,:] = decoded_bits
return np.reshape(decoded_blocks,np.size(decoded_blocks)).astype(int)
def ser2ber(q,n,d,t,ps):
"""
Converts symbol error rate to bit error rate. Taken from Ziemer and
Tranter page 650. Necessary when comparing different types of block codes.
parameters
----------
q: size of the code alphabet for given modulation type (BPSK=2)
n: number of channel bits
d: distance (2e+1) where e is the number of correctable errors per code word.
For hamming codes, e=1, so d=3.
t: number of correctable errors per code word
ps: symbol error probability vector
returns
-------
ber: bit error rate
"""
lnps = len(ps) # len of error vector
ber = np.zeros(lnps) # inialize output vector
for k in range(0,lnps): # iterate error vector
ser = ps[k] # channel symbol error rate
sum1 = 0 # initialize sums
sum2 = 0
for i in range(t+1,d+1):
term = special.comb(n,i)*(ser**i)*((1-ser))**(n-i)
sum1 = sum1 + term
for i in range(d+1,n+1):
term = (i)*special.comb(n,i)*(ser**i)*((1-ser)**(n-i))
sum2 = sum2+term
ber[k] = (q/(2*(q-1)))*((d/n)*sum1+(1/n)*sum2)
return ber
def block_single_error_Pb_bound(j,SNRdB,coded=True,M=2):
"""
Finds the bit error probability bounds according to Ziemer and Tranter
page 656.
parameters:
-----------
j: number of parity bits used in single error correction block code
SNRdB: Eb/N0 values in dB
coded: Select single error correction code (True) or uncoded (False)
M: modulation order
returns:
--------
Pb: bit error probability bound
"""
Pb = np.zeros_like(SNRdB)
Ps = np.zeros_like(SNRdB)
SNR = 10.**(SNRdB/10.)
n = 2**j-1
k = n-j
for i,SNRn in enumerate(SNR):
if coded: # compute Hamming code Ps
if M == 2:
Ps[i] = q_fctn(np.sqrt(k * 2. * SNRn / n))
else:
Ps[i] = 4./np.log2(M)*(1 - 1/np.sqrt(M))*\
np.gaussQ(np.sqrt(3*np.log2(M)/(M-1)*SNRn))/k
else: # Compute Uncoded Pb
if M == 2:
Pb[i] = q_fctn(np.sqrt(2. * SNRn))
else:
Pb[i] = 4./np.log2(M)*(1 - 1/np.sqrt(M))*\
np.gaussQ(np.sqrt(3*np.log2(M)/(M-1)*SNRn))
# Convert symbol error probability to bit error probability
if coded:
Pb = ser2ber(M,n,3,1,Ps)
return Pb
# .. ._.. .._ # | /scikit-dsp-comm-2.0.3.tar.gz/scikit-dsp-comm-2.0.3/src/sk_dsp_comm/fec_block.py | 0.793306 | 0.450541 | fec_block.py | pypi |
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
from logging import getLogger
log = getLogger(__name__)
def firwin_lpf(n_taps, fc, fs = 1.0):
"""
Design a windowed FIR lowpass filter in terms of passband
critical frequencies f1 < f2 in Hz relative to sampling rate
fs in Hz. The number of taps must be provided.
Mark Wickert October 2016
"""
return signal.firwin(n_taps, 2 * fc / fs)
def firwin_bpf(n_taps, f1, f2, fs = 1.0, pass_zero=False):
"""
Design a windowed FIR bandpass filter in terms of passband
critical frequencies f1 < f2 in Hz relative to sampling rate
fs in Hz. The number of taps must be provided.
Mark Wickert October 2016
"""
return signal.firwin(n_taps, 2 * (f1, f2) / fs, pass_zero=pass_zero)
def firwin_kaiser_lpf(f_pass, f_stop, d_stop, fs = 1.0, n_bump=0, status = True):
"""
Design an FIR lowpass filter using the sinc() kernel and
a Kaiser window. The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired stopband attenuation
d_stop in dB, all relative to a sampling rate of fs Hz.
Note: the passband ripple cannot be set independent of the
stopband attenuation.
Mark Wickert October 2016
"""
wc = 2*np.pi*(f_pass + f_stop)/2/fs
delta_w = 2*np.pi*(f_stop - f_pass)/fs
# Find the filter order
M = np.ceil((d_stop - 8)/(2.285*delta_w))
# Adjust filter order up or down as needed
M += n_bump
N_taps = M + 1
# Obtain the Kaiser window
beta = signal.kaiser_beta(d_stop)
w_k = signal.kaiser(N_taps,beta)
n = np.arange(N_taps)
b_k = wc/np.pi*np.sinc(wc/np.pi*(n-M/2)) * w_k
b_k /= np.sum(b_k)
if status:
log.info('Kaiser Win filter taps = %d.' % N_taps)
return b_k
def firwin_kaiser_hpf(f_stop, f_pass, d_stop, fs = 1.0, n_bump=0, status = True):
"""
Design an FIR highpass filter using the sinc() kernel and
a Kaiser window. The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired stopband attenuation
d_stop in dB, all relative to a sampling rate of fs Hz.
Note: the passband ripple cannot be set independent of the
stopband attenuation.
Mark Wickert October 2016
"""
# Transform HPF critical frequencies to lowpass equivalent
f_pass_eq = fs/2. - f_pass
f_stop_eq = fs/2. - f_stop
# Design LPF equivalent
wc = 2*np.pi*(f_pass_eq + f_stop_eq)/2/fs
delta_w = 2*np.pi*(f_stop_eq - f_pass_eq)/fs
# Find the filter order
M = np.ceil((d_stop - 8)/(2.285*delta_w))
# Adjust filter order up or down as needed
M += n_bump
N_taps = M + 1
# Obtain the Kaiser window
beta = signal.kaiser_beta(d_stop)
w_k = signal.kaiser(N_taps,beta)
n = np.arange(N_taps)
b_k = wc/np.pi*np.sinc(wc/np.pi*(n-M/2)) * w_k
b_k /= np.sum(b_k)
# Transform LPF equivalent to HPF
n = np.arange(len(b_k))
b_k *= (-1)**n
if status:
log.info('Kaiser Win filter taps = %d.' % N_taps)
return b_k
def firwin_kaiser_bpf(f_stop1, f_pass1, f_pass2, f_stop2, d_stop,
fs = 1.0, n_bump=0, status = True):
"""
Design an FIR bandpass filter using the sinc() kernel and
a Kaiser window. The filter order is determined based on
f_stop1 Hz, f_pass1 Hz, f_pass2 Hz, f_stop2 Hz, and the
desired stopband attenuation d_stop in dB for both stopbands,
all relative to a sampling rate of fs Hz.
Note: the passband ripple cannot be set independent of the
stopband attenuation.
Mark Wickert October 2016
"""
# Design BPF starting from simple LPF equivalent
# The upper and lower stopbands are assumed to have
# the same attenuation level. The LPF equivalent critical
# frequencies:
f_pass = (f_pass2 - f_pass1)/2
f_stop = (f_stop2 - f_stop1)/2
# Continue to design equivalent LPF
wc = 2*np.pi*(f_pass + f_stop)/2/fs
delta_w = 2*np.pi*(f_stop - f_pass)/fs
# Find the filter order
M = np.ceil((d_stop - 8)/(2.285*delta_w))
# Adjust filter order up or down as needed
M += n_bump
N_taps = M + 1
# Obtain the Kaiser window
beta = signal.kaiser_beta(d_stop)
w_k = signal.kaiser(N_taps,beta)
n = np.arange(N_taps)
b_k = wc/np.pi*np.sinc(wc/np.pi*(n-M/2)) * w_k
b_k /= np.sum(b_k)
# Transform LPF to BPF
f0 = (f_pass2 + f_pass1)/2
w0 = 2*np.pi*f0/fs
n = np.arange(len(b_k))
b_k_bp = 2*b_k*np.cos(w0*(n-M/2))
if status:
log.info('Kaiser Win filter taps = %d.' % N_taps)
return b_k_bp
def firwin_kaiser_bsf(f_stop1, f_pass1, f_pass2, f_stop2, d_stop,
fs = 1.0, n_bump=0, status = True):
"""
Design an FIR bandstop filter using the sinc() kernel and
a Kaiser window. The filter order is determined based on
f_stop1 Hz, f_pass1 Hz, f_pass2 Hz, f_stop2 Hz, and the
desired stopband attenuation d_stop in dB for both stopbands,
all relative to a sampling rate of fs Hz.
Note: The passband ripple cannot be set independent of the
stopband attenuation.
Note: The filter order is forced to be even (odd number of taps)
so there is a center tap that can be used to form 1 - H_BPF.
Mark Wickert October 2016
"""
# First design a BPF starting from simple LPF equivalent
# The upper and lower stopbands are assumed to have
# the same attenuation level. The LPF equivalent critical
# frequencies:
f_pass = (f_pass2 - f_pass1)/2
f_stop = (f_stop2 - f_stop1)/2
# Continue to design equivalent LPF
wc = 2*np.pi*(f_pass + f_stop)/2/fs
delta_w = 2*np.pi*(f_stop - f_pass)/fs
# Find the filter order
M = np.ceil((d_stop - 8)/(2.285*delta_w))
# Adjust filter order up or down as needed
M += n_bump
# Make filter order even (odd number of taps)
if ((M+1)/2.0-int((M+1)/2.0)) == 0:
M += 1
N_taps = M + 1
# Obtain the Kaiser window
beta = signal.kaiser_beta(d_stop)
w_k = signal.kaiser(N_taps,beta)
n = np.arange(N_taps)
b_k = wc/np.pi*np.sinc(wc/np.pi*(n-M/2)) * w_k
b_k /= np.sum(b_k)
# Transform LPF to BPF
f0 = (f_pass2 + f_pass1)/2
w0 = 2*np.pi*f0/fs
n = np.arange(len(b_k))
b_k_bs = 2*b_k*np.cos(w0*(n-M/2))
# Transform BPF to BSF via 1 - BPF for odd N_taps
b_k_bs = -b_k_bs
b_k_bs[int(M/2)] += 1
if status:
log.info('Kaiser Win filter taps = %d.' % N_taps)
return b_k_bs
def lowpass_order(f_pass, f_stop, dpass_dB, dstop_dB, fsamp = 1):
"""
Optimal FIR (equal ripple) Lowpass Order Determination
Text reference: Ifeachor, Digital Signal Processing a Practical Approach,
second edition, Prentice Hall, 2002.
Journal paper reference: Herriman et al., Practical Design Rules for Optimum
Finite Imulse Response Digitl Filters, Bell Syst. Tech. J., vol 52, pp.
769-799, July-Aug., 1973.IEEE, 1973.
"""
dpass = 1 - 10**(-dpass_dB/20)
dstop = 10**(-dstop_dB/20)
Df = (f_stop - f_pass)/fsamp
a1 = 5.309e-3
a2 = 7.114e-2
a3 = -4.761e-1
a4 = -2.66e-3
a5 = -5.941e-1
a6 = -4.278e-1
Dinf = np.log10(dstop)*(a1*np.log10(dpass)**2 + a2*np.log10(dpass) + a3) \
+ (a4*np.log10(dpass)**2 + a5*np.log10(dpass) + a6)
f = 11.01217 + 0.51244*(np.log10(dpass) - np.log10(dstop))
N = Dinf/Df - f*Df + 1
ff = 2*np.array([0, f_pass, f_stop, fsamp/2])/fsamp
aa = np.array([1, 1, 0, 0])
wts = np.array([1.0, dpass/dstop])
return int(N), ff, aa, wts
def bandpass_order(f_stop1, f_pass1, f_pass2, f_stop2, dpass_dB, dstop_dB, fsamp = 1):
"""
Optimal FIR (equal ripple) Bandpass Order Determination
Text reference: Ifeachor, Digital Signal Processing a Practical Approach,
second edition, Prentice Hall, 2002.
Journal paper reference: F. Mintzer & B. Liu, Practical Design Rules for Optimum
FIR Bandpass Digital Filters, IEEE Transactions on Acoustics and Speech, pp.
204-206, April,1979.
"""
dpass = 1 - 10**(-dpass_dB/20)
dstop = 10**(-dstop_dB/20)
Df1 = (f_pass1 - f_stop1)/fsamp
Df2 = (f_stop2 - f_pass2)/fsamp
b1 = 0.01201
b2 = 0.09664
b3 = -0.51325
b4 = 0.00203
b5 = -0.5705
b6 = -0.44314
Df = min(Df1, Df2)
Cinf = np.log10(dstop)*(b1*np.log10(dpass)**2 + b2*np.log10(dpass) + b3) \
+ (b4*np.log10(dpass)**2 + b5*np.log10(dpass) + b6)
g = -14.6*np.log10(dpass/dstop) - 16.9
N = Cinf/Df + g*Df + 1
ff = 2*np.array([0, f_stop1, f_pass1, f_pass2, f_stop2, fsamp/2])/fsamp
aa = np.array([0, 0, 1, 1, 0, 0])
wts = np.array([dpass/dstop, 1, dpass/dstop])
return int(N), ff, aa, wts
def bandstop_order(f_stop1, f_pass1, f_pass2, f_stop2, dpass_dB, dstop_dB, fsamp = 1):
"""
Optimal FIR (equal ripple) Bandstop Order Determination
Text reference: Ifeachor, Digital Signal Processing a Practical Approach,
second edition, Prentice Hall, 2002.
Journal paper reference: F. Mintzer & B. Liu, Practical Design Rules for Optimum
FIR Bandpass Digital Filters, IEEE Transactions on Acoustics and Speech, pp.
204-206, April,1979.
"""
dpass = 1 - 10**(-dpass_dB/20)
dstop = 10**(-dstop_dB/20)
Df1 = (f_pass1 - f_stop1)/fsamp
Df2 = (f_stop2 - f_pass2)/fsamp
b1 = 0.01201
b2 = 0.09664
b3 = -0.51325
b4 = 0.00203
b5 = -0.5705
b6 = -0.44314
Df = min(Df1, Df2)
Cinf = np.log10(dstop)*(b1*np.log10(dpass)**2 + b2*np.log10(dpass) + b3) \
+ (b4*np.log10(dpass)**2 + b5*np.log10(dpass) + b6)
g = -14.6*np.log10(dpass/dstop) - 16.9
N = Cinf/Df + g*Df + 1
ff = 2*np.array([0, f_stop1, f_pass1, f_pass2, f_stop2, fsamp/2])/fsamp
aa = np.array([1, 1, 0, 0, 1, 1])
wts = np.array([2, dpass/dstop, 2])
return int(N), ff, aa, wts
def fir_remez_lpf(f_pass, f_stop, d_pass, d_stop, fs = 1.0, n_bump=5, status = True):
"""
Design an FIR lowpass filter using remez with order
determination. The filter order is determined based on
f_pass Hz, fstop Hz, and the desired passband ripple
d_pass dB and stopband attenuation d_stop dB all
relative to a sampling rate of fs Hz.
Mark Wickert October 2016, updated October 2018
"""
n, ff, aa, wts = lowpass_order(f_pass, f_stop, d_pass, d_stop, fsamp=fs)
# Bump up the order by N_bump to bring down the final d_pass & d_stop
N_taps = n
N_taps += n_bump
b = signal.remez(N_taps, ff, aa[0::2], wts,Hz=2)
if status:
log.info('Remez filter taps = %d.' % N_taps)
return b
def fir_remez_hpf(f_stop, f_pass, d_pass, d_stop, fs = 1.0, n_bump=5, status = True):
"""
Design an FIR highpass filter using remez with order
determination. The filter order is determined based on
f_pass Hz, fstop Hz, and the desired passband ripple
d_pass dB and stopband attenuation d_stop dB all
relative to a sampling rate of fs Hz.
Mark Wickert October 2016, updated October 2018
"""
# Transform HPF critical frequencies to lowpass equivalent
f_pass_eq = fs/2. - f_pass
f_stop_eq = fs/2. - f_stop
# Design LPF equivalent
n, ff, aa, wts = lowpass_order(f_pass_eq, f_stop_eq, d_pass, d_stop, fsamp=fs)
# Bump up the order by N_bump to bring down the final d_pass & d_stop
N_taps = n
N_taps += n_bump
b = signal.remez(N_taps, ff, aa[0::2], wts,Hz=2)
# Transform LPF equivalent to HPF
n = np.arange(len(b))
b *= (-1)**n
if status:
log.info('Remez filter taps = %d.' % N_taps)
return b
def fir_remez_bpf(f_stop1, f_pass1, f_pass2, f_stop2, d_pass, d_stop,
fs = 1.0, n_bump=5, status = True):
"""
Design an FIR bandpass filter using remez with order
determination. The filter order is determined based on
f_stop1 Hz, f_pass1 Hz, f_pass2 Hz, f_stop2 Hz, and the
desired passband ripple d_pass dB and stopband attenuation
d_stop dB all relative to a sampling rate of fs Hz.
Mark Wickert October 2016, updated October 2018
"""
n, ff, aa, wts = bandpass_order(f_stop1, f_pass1, f_pass2, f_stop2,
d_pass, d_stop, fsamp=fs)
# Bump up the order by N_bump to bring down the final d_pass & d_stop
N_taps = n
N_taps += n_bump
b = signal.remez(N_taps, ff, aa[0::2], wts,Hz=2)
if status:
log.info('Remez filter taps = %d.' % N_taps)
return b
def fir_remez_bsf(f_pass1, f_stop1, f_stop2, f_pass2, d_pass, d_stop,
fs = 1.0, n_bump=5, status = True):
"""
Design an FIR bandstop filter using remez with order
determination. The filter order is determined based on
f_pass1 Hz, f_stop1 Hz, f_stop2 Hz, f_pass2 Hz, and the
desired passband ripple d_pass dB and stopband attenuation
d_stop dB all relative to a sampling rate of fs Hz.
Mark Wickert October 2016, updated October 2018
"""
n, ff, aa, wts = bandstop_order(f_pass1, f_stop1, f_stop2, f_pass2,
d_pass, d_stop, fsamp=fs)
# Bump up the order by N_bump to bring down the final d_pass & d_stop
# Initially make sure the number of taps is even so N_bump needs to be odd
if np.mod(n,2) != 0:
n += 1
N_taps = n
N_taps += n_bump
b = signal.remez(N_taps, ff, aa[0::2], wts, Hz=2,
maxiter = 25, grid_density = 16)
if status:
log.info('N_bump must be odd to maintain odd filter length')
log.info('Remez filter taps = %d.' % N_taps)
return b
def freqz_resp_list(b, a=np.array([1]), mode = 'dB', fs=1.0, n_pts = 1024, fsize=(6, 4)):
"""
A method for displaying digital filter frequency response magnitude,
phase, and group delay. A plot is produced using matplotlib
freq_resp(self,mode = 'dB',Npts = 1024)
A method for displaying the filter frequency response magnitude,
phase, and group delay. A plot is produced using matplotlib
freqz_resp(b,a=[1],mode = 'dB',Npts = 1024,fsize=(6,4))
b = ndarray of numerator coefficients
a = ndarray of denominator coefficents
mode = display mode: 'dB' magnitude, 'phase' in radians, or
'groupdelay_s' in samples and 'groupdelay_t' in sec,
all versus frequency in Hz
Npts = number of points to plot; default is 1024
fsize = figure size; defult is (6,4) inches
Mark Wickert, January 2015
"""
if type(b) == list:
# We have a list of filters
N_filt = len(b)
f = np.arange(0, n_pts) / (2.0 * n_pts)
for n in range(N_filt):
w,H = signal.freqz(b[n],a[n],2*np.pi*f)
if n == 0:
plt.figure(figsize=fsize)
if mode.lower() == 'db':
plt.plot(f*fs,20*np.log10(np.abs(H)))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.title('Frequency Response - Magnitude')
elif mode.lower() == 'phase':
plt.plot(f*fs,np.angle(H))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Phase (rad)')
plt.title('Frequency Response - Phase')
elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
"""
Notes
-----
Since this calculation involves finding the derivative of the
phase response, care must be taken at phase wrapping points
and when the phase jumps by +/-pi, which occurs when the
amplitude response changes sign. Since the amplitude response
is zero when the sign changes, the jumps do not alter the group
delay results.
"""
theta = np.unwrap(np.angle(H))
# Since theta for an FIR filter is likely to have many pi phase
# jumps too, we unwrap a second time 2*theta and divide by 2
theta2 = np.unwrap(2*theta)/2.
theta_dif = np.diff(theta2)
f_diff = np.diff(f)
Tg = -np.diff(theta2)/np.diff(w)
# For gain almost zero set groupdelay = 0
idx = np.nonzero(np.ravel(20*np.log10(H[:-1]) < -400))[0]
Tg[idx] = np.zeros(len(idx))
max_Tg = np.max(Tg)
#print(max_Tg)
if mode.lower() == 'groupdelay_t':
max_Tg /= fs
plt.plot(f[:-1]*fs,Tg/fs)
plt.ylim([0,1.2*max_Tg])
else:
plt.plot(f[:-1]*fs,Tg)
plt.ylim([0,1.2*max_Tg])
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
if mode.lower() == 'groupdelay_t':
plt.ylabel('Group Delay (s)')
else:
plt.ylabel('Group Delay (samples)')
plt.title('Frequency Response - Group Delay')
else:
s1 = 'Error, mode must be "dB", "phase, '
s2 = '"groupdelay_s", or "groupdelay_t"'
log.info(s1 + s2) | /scikit-dsp-comm-2.0.3.tar.gz/scikit-dsp-comm-2.0.3/src/sk_dsp_comm/fir_design_helper.py | 0.749912 | 0.37088 | fir_design_helper.py | pypi |
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
from logging import getLogger
log = getLogger(__name__)
def IIR_lpf(f_pass, f_stop, Ripple_pass, Atten_stop,
fs = 1.00, ftype = 'butter', status = True):
"""
Design an IIR lowpass filter using scipy.signal.iirdesign.
The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired stopband attenuation
d_stop in dB, all relative to a sampling rate of fs Hz.
Parameters
----------
f_pass : Passband critical frequency in Hz
f_stop : Stopband critical frequency in Hz
Ripple_pass : Filter gain in dB at f_pass
Atten_stop : Filter attenuation in dB at f_stop
fs : Sampling rate in Hz
ftype : Analog prototype from 'butter' 'cheby1', 'cheby2',
'ellip', and 'bessel'
Returns
-------
b : ndarray of the numerator coefficients
a : ndarray of the denominator coefficients
sos : 2D ndarray of second-order section coefficients
Notes
-----
Additionally a text string telling the user the filter order is
written to the console, e.g., IIR cheby1 order = 8.
Examples
--------
>>> fs = 48000
>>> f_pass = 5000
>>> f_stop = 8000
>>> b_but,a_but,sos_but = IIR_lpf(f_pass,f_stop,0.5,60,fs,'butter')
>>> b_cheb1,a_cheb1,sos_cheb1 = IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby1')
>>> b_cheb2,a_cheb2,sos_cheb2 = IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby2')
>>> b_elli,a_elli,sos_elli = IIR_lpf(f_pass,f_stop,0.5,60,fs,'ellip')
Mark Wickert October 2016
"""
b,a = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs,
Ripple_pass, Atten_stop,
ftype = ftype, output='ba')
sos = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs,
Ripple_pass, Atten_stop,
ftype = ftype, output='sos')
tag = 'IIR ' + ftype + ' order'
if status:
log.info('%s = %d.' % (tag,len(a)-1))
return b, a, sos
def IIR_hpf(f_stop, f_pass, Ripple_pass, Atten_stop,
fs = 1.00, ftype = 'butter', status = True):
"""
Design an IIR highpass filter using scipy.signal.iirdesign.
The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired stopband attenuation
d_stop in dB, all relative to a sampling rate of fs Hz.
Parameters
----------
f_stop :
f_pass :
Ripple_pass :
Atten_stop :
fs : sampling rate in Hz
ftype : Analog prototype from 'butter' 'cheby1', 'cheby2',
'ellip', and 'bessel'
Returns
-------
b : ndarray of the numerator coefficients
a : ndarray of the denominator coefficients
sos : 2D ndarray of second-order section coefficients
Examples
--------
>>> fs = 48000
>>> f_pass = 8000
>>> f_stop = 5000
>>> b_but,a_but,sos_but = IIR_hpf(f_stop,f_pass,0.5,60,fs,'butter')
>>> b_cheb1,a_cheb1,sos_cheb1 = IIR_hpf(f_stop,f_pass,0.5,60,fs,'cheby1')
>>> b_cheb2,a_cheb2,sos_cheb2 = IIR_hpf(f_stop,f_pass,0.5,60,fs,'cheby2')
>>> b_elli,a_elli,sos_elli = IIR_hpf(f_stop,f_pass,0.5,60,fs,'ellip')
Mark Wickert October 2016
"""
b,a = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs,
Ripple_pass, Atten_stop,
ftype = ftype, output='ba')
sos = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs,
Ripple_pass, Atten_stop,
ftype =ftype, output='sos')
tag = 'IIR ' + ftype + ' order'
if status:
log.info('%s = %d.' % (tag,len(a)-1))
return b, a, sos
def IIR_bpf(f_stop1, f_pass1, f_pass2, f_stop2, Ripple_pass, Atten_stop,
fs = 1.00, ftype = 'butter', status = True):
"""
Design an IIR bandpass filter using scipy.signal.iirdesign.
The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired stopband attenuation
d_stop in dB, all relative to a sampling rate of fs Hz.
Parameters
----------
f_stop1 : ndarray of the numerator coefficients
f_pass : ndarray of the denominator coefficients
Ripple_pass :
Atten_stop :
fs : sampling rate in Hz
ftype : Analog prototype from 'butter' 'cheby1', 'cheby2',
'ellip', and 'bessel'
Returns
-------
b : ndarray of the numerator coefficients
a : ndarray of the denominator coefficients
sos : 2D ndarray of second-order section coefficients
Examples
--------
>>> fs = 48000
>>> f_pass = 8000
>>> f_stop = 5000
>>> b_but,a_but,sos_but = IIR_hpf(f_stop,f_pass,0.5,60,fs,'butter')
>>> b_cheb1,a_cheb1,sos_cheb1 = IIR_hpf(f_stop,f_pass,0.5,60,fs,'cheby1')
>>> b_cheb2,a_cheb2,sos_cheb2 = IIR_hpf(f_stop,f_pass,0.5,60,fs,'cheby2')
>>> b_elli,a_elli,sos_elli = IIR_hpf(f_stop,f_pass,0.5,60,fs,'ellip')
Mark Wickert October 2016
"""
b,a = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs],
[2*float(f_stop1)/fs, 2*float(f_stop2)/fs],
Ripple_pass, Atten_stop,
ftype = ftype, output='ba')
sos = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs],
[2*float(f_stop1)/fs, 2*float(f_stop2)/fs],
Ripple_pass, Atten_stop,
ftype =ftype, output='sos')
tag = 'IIR ' + ftype + ' order'
if status:
log.info('%s = %d.' % (tag,len(a)-1))
return b, a, sos
def IIR_bsf(f_pass1, f_stop1, f_stop2, f_pass2, Ripple_pass, Atten_stop,
fs = 1.00, ftype = 'butter', status = True):
"""
Design an IIR bandstop filter using scipy.signal.iirdesign.
The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired stopband attenuation
d_stop in dB, all relative to a sampling rate of fs Hz.
Mark Wickert October 2016
"""
b,a = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs],
[2*float(f_stop1)/fs, 2*float(f_stop2)/fs],
Ripple_pass, Atten_stop,
ftype = ftype, output='ba')
sos = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs],
[2*float(f_stop1)/fs, 2*float(f_stop2)/fs],
Ripple_pass, Atten_stop,
ftype =ftype, output='sos')
tag = 'IIR ' + ftype + ' order'
if status:
log.info('%s = %d.' % (tag,len(a)-1))
return b, a, sos
def freqz_resp_list(b,a=np.array([1]),mode = 'dB',fs=1.0,Npts = 1024,fsize=(6,4)):
"""
A method for displaying digital filter frequency response magnitude,
phase, and group delay. A plot is produced using matplotlib
freq_resp(self,mode = 'dB',Npts = 1024)
A method for displaying the filter frequency response magnitude,
phase, and group delay. A plot is produced using matplotlib
freqz_resp(b,a=[1],mode = 'dB',Npts = 1024,fsize=(6,4))
b = ndarray of numerator coefficients
a = ndarray of denominator coefficents
mode = display mode: 'dB' magnitude, 'phase' in radians, or
'groupdelay_s' in samples and 'groupdelay_t' in sec,
all versus frequency in Hz
Npts = number of points to plot; default is 1024
fsize = figure size; defult is (6,4) inches
Mark Wickert, January 2015
"""
if type(b) == list:
# We have a list of filters
N_filt = len(b)
f = np.arange(0,Npts)/(2.0*Npts)
for n in range(N_filt):
w,H = signal.freqz(b[n],a[n],2*np.pi*f)
if n == 0:
plt.figure(figsize=fsize)
if mode.lower() == 'db':
plt.plot(f*fs,20*np.log10(np.abs(H)))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.title('Frequency Response - Magnitude')
elif mode.lower() == 'phase':
plt.plot(f*fs,np.angle(H))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Phase (rad)')
plt.title('Frequency Response - Phase')
elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
"""
Notes
-----
Since this calculation involves finding the derivative of the
phase response, care must be taken at phase wrapping points
and when the phase jumps by +/-pi, which occurs when the
amplitude response changes sign. Since the amplitude response
is zero when the sign changes, the jumps do not alter the group
delay results.
"""
theta = np.unwrap(np.angle(H))
# Since theta for an FIR filter is likely to have many pi phase
# jumps too, we unwrap a second time 2*theta and divide by 2
theta2 = np.unwrap(2*theta)/2.
theta_dif = np.diff(theta2)
f_diff = np.diff(f)
Tg = -np.diff(theta2)/np.diff(w)
# For gain almost zero set groupdelay = 0
idx = np.nonzero(np.ravel(20*np.log10(H[:-1]) < -400))[0]
Tg[idx] = np.zeros(len(idx))
max_Tg = np.max(Tg)
#print(max_Tg)
if mode.lower() == 'groupdelay_t':
max_Tg /= fs
plt.plot(f[:-1]*fs,Tg/fs)
plt.ylim([0,1.2*max_Tg])
else:
plt.plot(f[:-1]*fs,Tg)
plt.ylim([0,1.2*max_Tg])
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
if mode.lower() == 'groupdelay_t':
plt.ylabel('Group Delay (s)')
else:
plt.ylabel('Group Delay (samples)')
plt.title('Frequency Response - Group Delay')
else:
s1 = 'Error, mode must be "dB", "phase, '
s2 = '"groupdelay_s", or "groupdelay_t"'
log.info(s1 + s2)
def freqz_cas(sos,w):
"""
Cascade frequency response
Mark Wickert October 2016
"""
Ns,Mcol = sos.shape
w,Hcas = signal.freqz(sos[0,:3],sos[0,3:],w)
for k in range(1,Ns):
w,Htemp = signal.freqz(sos[k,:3],sos[k,3:],w)
Hcas *= Htemp
return w, Hcas
def freqz_resp_cas_list(sos, mode = 'dB', fs=1.0, n_pts=1024, fsize=(6, 4)):
"""
A method for displaying cascade digital filter form frequency response
magnitude, phase, and group delay. A plot is produced using matplotlib
freq_resp(self,mode = 'dB',Npts = 1024)
A method for displaying the filter frequency response magnitude,
phase, and group delay. A plot is produced using matplotlib
freqz_resp(b,a=[1],mode = 'dB',Npts = 1024,fsize=(6,4))
b = ndarray of numerator coefficients
a = ndarray of denominator coefficents
mode = display mode: 'dB' magnitude, 'phase' in radians, or
'groupdelay_s' in samples and 'groupdelay_t' in sec,
all versus frequency in Hz
Npts = number of points to plot; default is 1024
fsize = figure size; defult is (6,4) inches
Mark Wickert, January 2015
"""
if type(sos) == list:
# We have a list of filters
N_filt = len(sos)
f = np.arange(0, n_pts) / (2.0 * n_pts)
for n in range(N_filt):
w,H = freqz_cas(sos[n],2*np.pi*f)
if n == 0:
plt.figure(figsize=fsize)
if mode.lower() == 'db':
plt.plot(f*fs,20*np.log10(np.abs(H)))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.title('Frequency Response - Magnitude')
elif mode.lower() == 'phase':
plt.plot(f*fs,np.angle(H))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Phase (rad)')
plt.title('Frequency Response - Phase')
elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
"""
Notes
-----
Since this calculation involves finding the derivative of the
phase response, care must be taken at phase wrapping points
and when the phase jumps by +/-pi, which occurs when the
amplitude response changes sign. Since the amplitude response
is zero when the sign changes, the jumps do not alter the group
delay results.
"""
theta = np.unwrap(np.angle(H))
# Since theta for an FIR filter is likely to have many pi phase
# jumps too, we unwrap a second time 2*theta and divide by 2
theta2 = np.unwrap(2*theta)/2.
theta_dif = np.diff(theta2)
f_diff = np.diff(f)
Tg = -np.diff(theta2)/np.diff(w)
# For gain almost zero set groupdelay = 0
idx = np.nonzero(np.ravel(20*np.log10(H[:-1]) < -400))[0]
Tg[idx] = np.zeros(len(idx))
max_Tg = np.max(Tg)
#print(max_Tg)
if mode.lower() == 'groupdelay_t':
max_Tg /= fs
plt.plot(f[:-1]*fs,Tg/fs)
plt.ylim([0,1.2*max_Tg])
else:
plt.plot(f[:-1]*fs,Tg)
plt.ylim([0,1.2*max_Tg])
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
if mode.lower() == 'groupdelay_t':
plt.ylabel('Group Delay (s)')
else:
plt.ylabel('Group Delay (samples)')
plt.title('Frequency Response - Group Delay')
else:
s1 = 'Error, mode must be "dB", "phase, '
s2 = '"groupdelay_s", or "groupdelay_t"'
log.info(s1 + s2)
def unique_cpx_roots(rlist,tol = 0.001):
"""
The average of the root values is used when multiplicity
is greater than one.
Mark Wickert October 2016
"""
uniq = [rlist[0]]
mult = [1]
for k in range(1,len(rlist)):
N_uniq = len(uniq)
for m in range(N_uniq):
if abs(rlist[k]-uniq[m]) <= tol:
mult[m] += 1
uniq[m] = (uniq[m]*(mult[m]-1) + rlist[k])/float(mult[m])
break
uniq = np.hstack((uniq,rlist[k]))
mult = np.hstack((mult,[1]))
return np.array(uniq), np.array(mult)
def sos_cascade(sos1,sos2):
"""
Mark Wickert October 2016
"""
return np.vstack((sos1,sos2))
def sos_zplane(sos,auto_scale=True,size=2,tol = 0.001):
"""
Create an z-plane pole-zero plot.
Create an z-plane pole-zero plot using the numerator
and denominator z-domain system function coefficient
ndarrays b and a respectively. Assume descending powers of z.
Parameters
----------
sos : ndarray of the sos coefficients
auto_scale : bool (default True)
size : plot radius maximum when scale = False
Returns
-------
(M,N) : tuple of zero and pole counts + plot window
Notes
-----
This function tries to identify repeated poles and zeros and will
place the multiplicity number above and to the right of the pole or zero.
The difficulty is setting the tolerance for this detection. Currently it
is set at 1e-3 via the function signal.unique_roots.
Examples
--------
>>> # Here the plot is generated using auto_scale
>>> sos_zplane(sos)
>>> # Here the plot is generated using manual scaling
>>> sos_zplane(sos,False,1.5)
"""
Ns,Mcol = sos.shape
# Extract roots from sos num and den removing z = 0
# roots due to first-order sections
N_roots = []
for k in range(Ns):
N_roots_tmp = np.roots(sos[k,:3])
if N_roots_tmp[1] == 0.:
N_roots = np.hstack((N_roots,N_roots_tmp[0]))
else:
N_roots = np.hstack((N_roots,N_roots_tmp))
D_roots = []
for k in range(Ns):
D_roots_tmp = np.roots(sos[k,3:])
if D_roots_tmp[1] == 0.:
D_roots = np.hstack((D_roots,D_roots_tmp[0]))
else:
D_roots = np.hstack((D_roots,D_roots_tmp))
# Plot labels if multiplicity greater than 1
x_scale = 1.5*size
y_scale = 1.5*size
x_off = 0.02
y_off = 0.01
M = len(N_roots)
N = len(D_roots)
if auto_scale:
if M > 0 and N > 0:
size = max(np.max(np.abs(N_roots)),np.max(np.abs(D_roots)))+.1
elif M > 0:
size = max(np.max(np.abs(N_roots)),1.0)+.1
elif N > 0:
size = max(1.0,np.max(np.abs(D_roots)))+.1
else:
size = 1.1
plt.figure(figsize=(5,5))
plt.axis('equal')
r = np.linspace(0,2*np.pi,200)
plt.plot(np.cos(r),np.sin(r),'r--')
plt.plot([-size,size],[0,0],'k-.')
plt.plot([0,0],[-size,size],'k-.')
if M > 0:
#N_roots = np.roots(b)
N_uniq, N_mult=unique_cpx_roots(N_roots,tol=tol)
plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8)
idx_N_mult = np.nonzero(np.ravel(N_mult>1))[0]
for k in range(len(idx_N_mult)):
x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale
y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),
ha='center',va='bottom',fontsize=10)
if N > 0:
#D_roots = np.roots(a)
D_uniq, D_mult=unique_cpx_roots(D_roots,tol=tol)
plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8)
idx_D_mult = np.nonzero(np.ravel(D_mult>1))[0]
for k in range(len(idx_D_mult)):
x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale
y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),
ha='center',va='bottom',fontsize=10)
if M - N < 0:
plt.plot(0.0,0.0,'bo',mfc='None',ms=8)
elif M - N > 0:
plt.plot(0.0,0.0,'kx',ms=8)
if abs(M - N) > 1:
plt.text(x_off*x_scale,y_off*y_scale,str(abs(M-N)),
ha='center',va='bottom',fontsize=10)
plt.xlabel('Real Part')
plt.ylabel('Imaginary Part')
plt.title('Pole-Zero Plot')
#plt.grid()
plt.axis([-size,size,-size,size])
return M,N | /scikit-dsp-comm-2.0.3.tar.gz/scikit-dsp-comm-2.0.3/src/sk_dsp_comm/iir_design_helper.py | 0.801431 | 0.4856 | iir_design_helper.py | pypi |
from matplotlib import pylab
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as signal
from . import sigsys as ssd
from . import fir_design_helper as fir_d
from . import iir_design_helper as iir_d
from logging import getLogger
log = getLogger(__name__)
import warnings
class rate_change(object):
"""
A simple class for encapsulating the upsample/filter and
filter/downsample operations used in modeling a comm
system. Objects of this class will hold the required filter
coefficients once an object is instantiated.
Mark Wickert February 2015
"""
def __init__(self,M_change = 12,fcutoff=0.9,N_filt_order=8,ftype='butter'):
"""
Object constructor method
"""
self.M = M_change # Rate change factor M or L
self.fc = fcutoff*.5 # must be fs/(2*M), but scale by fcutoff
self.N_forder = N_filt_order
if ftype.lower() == 'butter':
self.b, self.a = signal.butter(self.N_forder,2/self.M*self.fc)
elif ftype.lower() == 'cheby1':
# Set the ripple to 0.05 dB
self.b, self.a = signal.cheby1(self.N_forder,0.05,2/self.M*self.fc)
else:
warnings.warn('ftype must be "butter" or "cheby1"')
def up(self,x):
"""
Upsample and filter the signal
"""
y = self.M*ssd.upsample(x,self.M)
y = signal.lfilter(self.b,self.a,y)
return y
def dn(self,x):
"""
Downsample and filter the signal
"""
y = signal.lfilter(self.b,self.a,x)
y = ssd.downsample(y,self.M)
return y
class multirate_FIR(object):
"""
A simple class for encapsulating FIR filtering, or FIR upsample/
filter, or FIR filter/downsample operations used in modeling a comm
system. Objects of this class will hold the required filter
coefficients once an object is instantiated. Frequency response
and the pole zero plot can also be plotted using supplied class methods.
Mark Wickert March 2017
"""
def __init__(self,b):
"""
Object constructor method
"""
self.N_forder = len(b)
self.b = b
log.info('FIR filter taps = %d' % self.N_forder)
def filter(self,x):
"""
Filter the signal
"""
y = signal.lfilter(self.b,[1],x)
return y
def up(self,x,L_change = 12):
"""
Upsample and filter the signal
"""
y = L_change*ssd.upsample(x,L_change)
y = signal.lfilter(self.b,[1],y)
return y
def dn(self,x,M_change = 12):
"""
Downsample and filter the signal
"""
y = signal.lfilter(self.b,[1],x)
y = ssd.downsample(y,M_change)
return y
def freq_resp(self, mode= 'dB', fs = 8000, ylim = [-100,2]):
"""
"""
fir_d.freqz_resp_list([self.b], [1], mode, fs=fs, n_pts= 1024)
pylab.grid()
pylab.ylim(ylim)
def zplane(self,auto_scale=True,size=2,detect_mult=True,tol=0.001):
"""
Plot the poles and zeros of the FIR filter in the z-plane
"""
ssd.zplane(self.b,[1],auto_scale,size,tol)
class multirate_IIR(object):
"""
A simple class for encapsulating IIR filtering, or IIR upsample/
filter, or IIR filter/downsample operations used in modeling a comm
system. Objects of this class will hold the required filter
coefficients once an object is instantiated. Frequency response
and the pole zero plot can also be plotted using supplied class methods.
For added robustness to floating point quantization all filtering
is done using the scipy.signal cascade of second-order sections filter
method y = sosfilter(sos,x).
Mark Wickert March 2017
"""
def __init__(self,sos):
"""
Object constructor method
"""
self.N_forder = np.sum(np.sign(np.abs(sos[:,2]))) \
+ np.sum(np.sign(np.abs(sos[:,1])))
self.sos = sos
log.info('IIR filter order = %d' % self.N_forder)
def filter(self,x):
"""
Filter the signal using second-order sections
"""
y = signal.sosfilt(self.sos,x)
return y
def up(self,x,L_change = 12):
"""
Upsample and filter the signal
"""
y = L_change*ssd.upsample(x,L_change)
y = signal.sosfilt(self.sos,y)
return y
def dn(self,x,M_change = 12):
"""
Downsample and filter the signal
"""
y = signal.sosfilt(self.sos,x)
y = ssd.downsample(y,M_change)
return y
def freq_resp(self, mode= 'dB', fs = 8000, ylim = [-100,2]):
"""
Frequency response plot
"""
iir_d.freqz_resp_cas_list([self.sos],mode,fs=fs)
pylab.grid()
pylab.ylim(ylim)
def zplane(self,auto_scale=True,size=2,detect_mult=True,tol=0.001):
"""
Plot the poles and zeros of the FIR filter in the z-plane
"""
iir_d.sos_zplane(self.sos,auto_scale,size,tol)
def freqz_resp(b,a=[1],mode = 'dB',fs=1.0,Npts = 1024,fsize=(6,4)):
"""
A method for displaying digital filter frequency response magnitude,
phase, and group delay. A plot is produced using matplotlib
freq_resp(self,mode = 'dB',Npts = 1024)
A method for displaying the filter frequency response magnitude,
phase, and group delay. A plot is produced using matplotlib
freqz_resp(b,a=[1],mode = 'dB',Npts = 1024,fsize=(6,4))
b = ndarray of numerator coefficients
a = ndarray of denominator coefficents
mode = display mode: 'dB' magnitude, 'phase' in radians, or
'groupdelay_s' in samples and 'groupdelay_t' in sec,
all versus frequency in Hz
Npts = number of points to plot; defult is 1024
fsize = figure size; defult is (6,4) inches
Mark Wickert, January 2015
"""
f = np.arange(0,Npts)/(2.0*Npts)
w,H = signal.freqz(b,a,2*np.pi*f)
plt.figure(figsize=fsize)
if mode.lower() == 'db':
plt.plot(f*fs,20*np.log10(np.abs(H)))
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.title('Frequency Response - Magnitude')
elif mode.lower() == 'phase':
plt.plot(f*fs,np.angle(H))
plt.xlabel('Frequency (Hz)')
plt.ylabel('Phase (rad)')
plt.title('Frequency Response - Phase')
elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
"""
Notes
-----
Since this calculation involves finding the derivative of the
phase response, care must be taken at phase wrapping points
and when the phase jumps by +/-pi, which occurs when the
amplitude response changes sign. Since the amplitude response
is zero when the sign changes, the jumps do not alter the group
delay results.
"""
theta = np.unwrap(np.angle(H))
# Since theta for an FIR filter is likely to have many pi phase
# jumps too, we unwrap a second time 2*theta and divide by 2
theta2 = np.unwrap(2*theta)/2.
theta_dif = np.diff(theta2)
f_diff = np.diff(f)
Tg = -np.diff(theta2)/np.diff(w)
# For gain almost zero set groupdelay = 0
idx = pylab.find(20*np.log10(H[:-1]) < -400)
Tg[idx] = np.zeros(len(idx))
max_Tg = np.max(Tg)
#print(max_Tg)
if mode.lower() == 'groupdelay_t':
max_Tg /= fs
plt.plot(f[:-1]*fs,Tg/fs)
plt.ylim([0,1.2*max_Tg])
else:
plt.plot(f[:-1]*fs,Tg)
plt.ylim([0,1.2*max_Tg])
plt.xlabel('Frequency (Hz)')
if mode.lower() == 'groupdelay_t':
plt.ylabel('Group Delay (s)')
else:
plt.ylabel('Group Delay (samples)')
plt.title('Frequency Response - Group Delay')
else:
s1 = 'Error, mode must be "dB", "phase, '
s2 = '"groupdelay_s", or "groupdelay_t"'
warnings.warn(s1 + s2) | /scikit-dsp-comm-2.0.3.tar.gz/scikit-dsp-comm-2.0.3/src/sk_dsp_comm/multirate_helper.py | 0.634317 | 0.493958 | multirate_helper.py | pypi |
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin, clone, is_regressor, is_classifier
from sklearn.utils.validation import check_is_fitted, check_X_y, check_array
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import train_test_split
class QuantileStackRegressor(BaseEstimator, RegressorMixin):
"""
A meta regressor for doing model stacking using underlying
quantile propensity models. The model will first learn a
series of quantile discriminator functions and then stack
them with out of sample predictions into a final regressor.
Particularly useful for zero-inflated or heavily skewed datasets,
`QuantileStackRegressor` consists of a series of classifiers and a regressor.
- The classifier's task is to build a series of propensity models
that predict if the target is above a given threshold.
These are built in a two fold CV, so that out of sample predictions
can be added to the x vector for the final regression model
- The regressor's task is to output the final prediction, aided by the
probabilities added by the underlying quantile classifiers.
At prediction time, the average of the two classifiers is used for all propensity models.
Credits: This structure of this code is based off the zero inflated regressor from sklego:
https://github.com/koaning/scikit-lego
Parameters
----------
classifier : Any, scikit-learn classifier
regressor : Any, scikit-learn regressor
Examples
--------
>>> import numpy as np
>>> from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
>>> np.random.seed(0)
>>> X = np.random.randn(10000, 4)
>>> y = ((X[:, 0]>0) & (X[:, 1]>0)) * np.abs(X[:, 2] * X[:, 3]**2)
>>> z = QuantileStackRegressor(
... classifier=ExtraTreesClassifier(random_state=0),
... regressor=ExtraTreesRegressor(random_state=0)
... )
>>> z.fit(X, y)
QuantileStackRegressor(classifier=ExtraTreesClassifier(random_state=0),
regressor=ExtraTreesRegressor(random_state=0))
>>> z.predict(X)[:5]
array([4.91483294, 0. , 0. , 0.04941909, 0. ])
"""
def __init__(self, classifier, regressor, cuts=[0]) -> None:
"""Initialize."""
self.classifier = classifier
self.regressor = regressor
self.cuts = cuts
def fit(self, X, y, sample_weight=None):
"""
Fit the model.
Parameters
----------
X : np.ndarray of shape (n_samples, n_features)
The training data.
y : np.ndarray, 1-dimensional
The target values.
sample_weight : Optional[np.array], default=None
Individual weights for each sample.
Returns
-------
QuantileStackRegressor
Fitted regressor.
Raises
------
ValueError
If `classifier` is not a classifier or `regressor` is not a regressor.
"""
X, y = check_X_y(X, y)
self._check_n_features(X, reset=True)
if not is_classifier(self.classifier):
raise ValueError(
f"`classifier` has to be a classifier. Received instance of {type(self.classifier)} instead.")
if not is_regressor(self.regressor):
raise ValueError(f"`regressor` has to be a regressor. Received instance of {type(self.regressor)} instead.")
"""
Now we need to internally split the data and build two sets of the classifiers
to prevent target leakage
"""
X_ = [0] * 2
y_ = [0] * 2
X_[0], X_[1], y_[0], y_[1] = train_test_split(X, y, test_size=0.5)
"""
Build two sets of classifiers for each of the required cuts
"""
self.classifiers_ = [0] * 2
for index in [0,1]:
self.classifiers_[index] = [0] * len(self.cuts)
for c, cut in enumerate(self.cuts):
self.classifiers_[index][c] = clone(self.classifier)
self.classifiers_[index][c].fit(X_[index], y_[index] > cut )
"""
Apply those classifier to the out of sample data
"""
Xfinal_ = [0] * 2
for index in [0,1]:
Xfinal_[index] = X_[index].copy()
c_index = 1 - index
for c, cut in enumerate(self.cuts):
preds = self.classifiers_[c_index][c].predict_proba( X_[index] )[:,1]
Xfinal_[index] = np.append(Xfinal_[index], preds.T[:, None], axis=1)
"""
Join the split data into a final dataset for the regression model
"""
Xfinale = np.concatenate((Xfinal_[0], Xfinal_[1] ), axis=0)
Yfinale = np.concatenate((y_[0], y_[1] ), axis=0)
self.regressor_ = clone(self.regressor)
self.regressor_.fit( Xfinale, Yfinale, sample_weight=sample_weight)
return self
def predict(self, X):
"""
Get predictions.
Parameters
----------
X : np.ndarray, shape (n_samples, n_features)
Samples to get predictions of.
Returns
-------
y : np.ndarray, shape (n_samples,)
The predicted values.
"""
check_is_fitted(self)
X = check_array(X)
self._check_n_features(X, reset=False)
"""
Apply classifiers to generate new colums
"""
Xfinale = X.copy()
for c, cut in enumerate(self.cuts):
temp = np.zeros(len(X))
for index in [0,1]:
temp = temp + self.classifiers_[index][c].predict_proba(X)[:,1]
temp = temp/2
Xfinale = np.append(Xfinale, temp[:, None], axis=1)
return self.regressor_.predict(Xfinale) | /scikit-duplo-0.1.7.tar.gz/scikit-duplo-0.1.7/skduplo/meta/quantile_stack_regressor.py | 0.926183 | 0.812012 | quantile_stack_regressor.py | pypi |
from sklearn.base import BaseEstimator, RegressorMixin, clone, is_regressor
from sklearn.utils.validation import check_is_fitted, check_X_y, check_array
from sklearn.exceptions import NotFittedError
import pandas as pd
import numpy as np
class BaselineProportionalRegressor(BaseEstimator, RegressorMixin):
"""
A meta regressor for learning the target value as a proportional difference
relative to a mean value for a subset of other features.
Creates and maintains an internal lookup table for the baseline during the
model fir process.
Parameters
----------
regressor : Any, scikit-learn regressor that will be learned for the adjust target
"""
def __init__(self, baseline_cols, regressor) -> None:
"""Initialize."""
self.baseline_cols = baseline_cols
self.regressor = regressor
self.baseline_func = 'mean'
def generate_baseline(self, df):
self.lookup = df.groupby(self.baseline_cols).agg({'baseline':self.baseline_func}).reset_index()
self.baseline_default = df['baseline'].agg(self.baseline_func)
def get_baseline_predictions(self, df):
new_df = pd.merge(df, self.lookup, how='left', on=self.baseline_cols)
new_df['baseline'] = np.where(new_df['baseline'].isnull(), self.baseline_default, new_df['baseline'])
return new_df['baseline']
def get_relative_target(self, baseline, y):
return (y-baseline)/baseline
def invert_relative_target(self, preds, baseline):
return (preds*baseline)+baseline
def get_params(self, deep=True):
return self.regressor.get_params()
def set_params(self, **parameters):
for parameter, value in parameters.items():
if parameter == "baseline_cols":
self.baseline_cols = value
else:
self.regressor.setattr(parameter, value)
return self
def fit(self, X, y, sample_weight=None):
"""
Fit the model.
Note: this model diverges from the scikit-learn standard in that it needs a
pandas dataframe.
Parameters
----------
X : pandas.DataFrame of shape (n_samples, n_features)
The training data.
y : np.ndarray, 1-dimensional
The target values.
sample_weight : Optional[np.array], default=None
Individual weights for each sample.
Returns
-------
BaselineProportionalRegressor
Fitted regressor.
Raises
------
ValueError
If `regressor` is not a regressor.
"""
column_names = X.columns
X, y = check_X_y(X, y)
self._check_n_features(X, reset=True)
X = pd.DataFrame(X, columns=column_names)
if not is_regressor(self.regressor):
raise ValueError(f"`regressor` has to be a regressor. Received instance of {type(self.regressor)} instead.")
for col in self.baseline_cols:
if col not in X.columns:
raise ValueError(f"pandas.DataFrame required with baseline columns: `{col}` NOT FOUND.")
df = X.copy()
df['baseline'] = y
self.generate_baseline(df)
baseline = self.get_baseline_predictions(X)
Yfinale = self.get_relative_target(baseline, y)
self.regressor_ = clone(self.regressor)
self.regressor_.fit( X, Yfinale, sample_weight=sample_weight)
return self
def predict(self, X):
"""
Get predictions.
Parameters
----------
X : pd.DataFrame - shape (n_samples, n_features)
DataFrame of samples to get predictions for.
Note: DataFrame is required because the baseline uses column names.
Returns
-------
y : np.ndarray, shape (n_samples,)
The predicted values.
"""
check_is_fitted(self)
column_names = X.columns
X = check_array(X)
self._check_n_features(X, reset=False)
X = pd.DataFrame(X, columns=column_names)
for col in self.baseline_cols:
if col not in X.columns:
raise ValueError(f"pandas.DataFrame required with baseline columns: `{col}` NOT FOUND.")
baseline = self.get_baseline_predictions(X)
preds = self.regressor_.predict(X)
return self.invert_relative_target(preds, baseline) | /scikit-duplo-0.1.7.tar.gz/scikit-duplo-0.1.7/skduplo/meta/baseline_proportional_regressor.py | 0.942275 | 0.498901 | baseline_proportional_regressor.py | pypi |
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin, clone, is_regressor, is_classifier
from sklearn.utils.validation import check_is_fitted, check_X_y, check_array
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import train_test_split
class RegressorStack(BaseEstimator, RegressorMixin):
"""
A meta regressor for doing model stacking for regression using underlying
quantile propensity models and internal regressors.
Particularly designed for zero-inflated or heavily skewed datasets,
`RegressorStack` consists of a series of internal regressors
all of which are fitted in an internal cross validation and scored out-of-sample
A final regressor is trained over the original features and the output
of these stacked regression models.
Parameters
----------
regressor : Any, scikit-learn regressor
A regressor for predicting the target.
Examples
--------
>>> import numpy as np
>>> from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
>>> np.random.seed(0)
>>> X = np.random.randn(10000, 4)
>>> y = ((X[:, 0]>0) & (X[:, 1]>0)) * np.abs(X[:, 2] * X[:, 3]**2)
>>> z = RegressorStack(
... [KNeighborsRegressor(), BayesianRidge()],
... regressor=ExtraTreesRegressor(random_state=0)
... )
>>> z.fit(X, y)
RegressorStack([KNeighborsRegressor(), BayesianRidge()],
regressor=ExtraTreesRegressor(random_state=0))
>>> z.predict(X)[:5]
array([4.91483294, 0. , 0. , 0.04941909, 0. ])
"""
def __init__(self, regressor_list, regressor) -> None:
"""Initialize."""
self.regressor_list = regressor_list
self.regressor = regressor
def fit(self, X, y, sample_weight=None):
"""
Fit the model.
Parameters
----------
X : np.ndarray of shape (n_samples, n_features)
The training data.
y : np.ndarray, 1-dimensional
The target values.
sample_weight : Optional[np.array], default=None
Individual weights for each sample.
Returns
-------
StackedRegressor
Fitted regressor.
Raises
------
ValueError
If `regressor` is not a regressor.
"""
X, y = check_X_y(X, y)
self._check_n_features(X, reset=True)
if not is_regressor(self.regressor):
raise ValueError(f"`regressor` has to be a regressor. Received instance of {type(self.regressor)} instead.")
"""
Now we need to internally split the data and build two sets of
internal regressors to prevent leakage
"""
X_ = [0] * 2
y_ = [0] * 2
X_[0], X_[1], y_[0], y_[1] = train_test_split(X, y, test_size=0.5)
"""
Build the internal regressors
"""
self.regressors_ = [0] * 2
for index in [0,1]:
self.regressors_[index] = [0] * len(self.regressor_list)
for c, reg in enumerate(self.regressor_list):
self.regressors_[index][c] = clone(reg)
self.regressors_[index][c].fit(X_[index], y_[index] )
"""
Apply those classifier to the out of sample data
"""
Xfinal_reg_ = [0] * 2
for index in [0,1]:
Xfinal_reg_[index] = X_[index].copy()
c_index = 1 - index
for c, reg in enumerate(self.regressor_list):
preds = self.regressors_[c_index][c].predict( X_[index] )
Xfinal_reg_[index] = np.append(Xfinal_reg_[index], preds.T[:, None], axis=1)
"""
Join the split data into a final dataset for the regression model
"""
Xfinale = np.concatenate((Xfinal_reg_[0], Xfinal_reg_[1] ), axis=0)
Yfinale = np.concatenate((y_[0], y_[1] ), axis=0)
self.regressor_ = clone(self.regressor)
self.regressor_.fit( Xfinale, Yfinale, sample_weight=sample_weight)
return self
def predict(self, X):
"""
Get predictions.
Parameters
----------
X : np.ndarray, shape (n_samples, n_features)
Samples to get predictions of.
Returns
-------
y : np.ndarray, shape (n_samples,)
The predicted values.
"""
check_is_fitted(self)
X = check_array(X)
self._check_n_features(X, reset=False)
Xfinale = X.copy()
for c, reg in enumerate(self.regressor_list):
temp = np.zeros(len(X))
for index in [0,1]:
temp = temp + self.regressors_[index][c].predict( X )
temp = temp/2
Xfinale = np.append(Xfinale, temp[:, None], axis=1)
return self.regressor_.predict(Xfinale) | /scikit-duplo-0.1.7.tar.gz/scikit-duplo-0.1.7/skduplo/meta/regressor_stack.py | 0.933073 | 0.737962 | regressor_stack.py | pypi |
import time
# --------------------------------------
class Timer:
def __init__(self):
# Global Time objects
self.globalStartRef = time.time()
self.globalTime = 0.0
self.globalAdd = 0
# Match Time Variables
self.startRefMatching = 0.0
self.globalMatching = 0.0
# Deletion Time Variables
self.startRefDeletion = 0.0
self.globalDeletion = 0.0
# Subsumption Time Variables
self.startRefSubsumption = 0.0
self.globalSubsumption = 0.0
# Selection Time Variables
self.startRefSelection = 0.0
self.globalSelection = 0.0
# Evaluation Time Variables
self.startRefEvaluation = 0.0
self.globalEvaluation = 0.0
# ************************************************************
def startTimeMatching(self):
""" Tracks MatchSet Time """
self.startRefMatching = time.time()
def stopTimeMatching(self):
""" Tracks MatchSet Time """
diff = time.time() - self.startRefMatching
self.globalMatching += diff
# ************************************************************
def startTimeDeletion(self):
""" Tracks Deletion Time """
self.startRefDeletion = time.time()
def stopTimeDeletion(self):
""" Tracks Deletion Time """
diff = time.time() - self.startRefDeletion
self.globalDeletion += diff
# ************************************************************
def startTimeSubsumption(self):
"""Tracks Subsumption Time """
self.startRefSubsumption = time.time()
def stopTimeSubsumption(self):
"""Tracks Subsumption Time """
diff = time.time() - self.startRefSubsumption
self.globalSubsumption += diff
# ************************************************************
def startTimeSelection(self):
""" Tracks Selection Time """
self.startRefSelection = time.time()
def stopTimeSelection(self):
""" Tracks Selection Time """
diff = time.time() - self.startRefSelection
self.globalSelection += diff
# ************************************************************
def startTimeEvaluation(self):
""" Tracks Evaluation Time """
self.startRefEvaluation = time.time()
def stopTimeEvaluation(self):
""" Tracks Evaluation Time """
diff = time.time() - self.startRefEvaluation
self.globalEvaluation += diff
# ************************************************************
def updateGlobalTime(self):
self.globalTime = (time.time() - self.globalStartRef)+self.globalAdd | /scikit-eLCS-1.2.4.tar.gz/scikit-eLCS-1.2.4/skeLCS/Timer.py | 0.520253 | 0.173743 | Timer.py | pypi |
import random
import copy
import math
class Classifier:
def __init__(self,elcs,a=None,b=None,c=None,d=None):
#Major Parameters
self.specifiedAttList = []
self.condition = []
self.phenotype = None #arbitrary
self.fitness = elcs.init_fit
self.accuracy = 0.0
self.numerosity = 1
self.aveMatchSetSize = None
self.deletionProb = None
# Experience Management
self.timeStampGA = None
self.initTimeStamp = None
# Classifier Accuracy Tracking --------------------------------------
self.matchCount = 0 # Known in many LCS implementations as experience i.e. the total number of times this classifier was in a match set
self.correctCount = 0 # The total number of times this classifier was in a correct set
if isinstance(c, list):
self.classifierCovering(elcs, a, b, c, d)
elif isinstance(a, Classifier):
self.classifierCopy(a, b)
# Classifier Construction Methods
def classifierCovering(self, elcs, setSize, exploreIter, state, phenotype):
# Initialize new classifier parameters----------
self.timeStampGA = exploreIter
self.initTimeStamp = exploreIter
self.aveMatchSetSize = setSize
dataInfo = elcs.env.formatData
# -------------------------------------------------------
# DISCRETE PHENOTYPE
# -------------------------------------------------------
if dataInfo.discretePhenotype:
self.phenotype = phenotype
# -------------------------------------------------------
# CONTINUOUS PHENOTYPE
# -------------------------------------------------------
else:
phenotypeRange = dataInfo.phenotypeList[1] - dataInfo.phenotypeList[0]
rangeRadius = random.randint(25,75) * 0.01 * phenotypeRange / 2.0 # Continuous initialization domain radius.
Low = float(phenotype) - rangeRadius
High = float(phenotype) + rangeRadius
self.phenotype = [Low, High]
while len(self.specifiedAttList) < 1:
for attRef in range(len(state)):
if random.random() < elcs.p_spec and not(state[attRef] == None):
self.specifiedAttList.append(attRef)
self.buildMatch(elcs, attRef, state) # Add classifierConditionElement
def classifierCopy(self, toCopy, exploreIter):
self.specifiedAttList = copy.deepcopy(toCopy.specifiedAttList)
self.condition = copy.deepcopy(toCopy.condition)
self.phenotype = copy.deepcopy(toCopy.phenotype)
self.timeStampGA = exploreIter
self.initTimeStamp = exploreIter
self.aveMatchSetSize = copy.deepcopy(toCopy.aveMatchSetSize)
self.fitness = toCopy.fitness
self.accuracy = toCopy.accuracy
def buildMatch(self, elcs, attRef, state):
attributeInfoType = elcs.env.formatData.attributeInfoType[attRef]
if not(attributeInfoType): #Discrete
attributeInfoValue = elcs.env.formatData.attributeInfoDiscrete[attRef]
else:
attributeInfoValue = elcs.env.formatData.attributeInfoContinuous[attRef]
# Continuous attribute
if attributeInfoType:
attRange = attributeInfoValue[1] - attributeInfoValue[0]
rangeRadius = random.randint(25, 75) * 0.01 * attRange / 2.0 # Continuous initialization domain radius.
ar = state[attRef]
Low = ar - rangeRadius
High = ar + rangeRadius
condList = [Low, High]
self.condition.append(condList)
# Discrete attribute
else:
condList = state[attRef]
self.condition.append(condList)
# Matching
def match(self, state, elcs):
for i in range(len(self.condition)):
specifiedIndex = self.specifiedAttList[i]
attributeInfoType = elcs.env.formatData.attributeInfoType[specifiedIndex]
# Continuous
if attributeInfoType:
instanceValue = state[specifiedIndex]
if elcs.match_for_missingness:
if instanceValue == None:
pass
elif self.condition[i][0] < instanceValue < self.condition[i][1]:
pass
else:
return False
else:
if instanceValue == None:
return False
elif self.condition[i][0] < instanceValue < self.condition[i][1]:
pass
else:
return False
# Discrete
else:
stateRep = state[specifiedIndex]
if elcs.match_for_missingness:
if stateRep == self.condition[i] or stateRep == None:
pass
else:
return False
else:
if stateRep == self.condition[i]:
pass
elif stateRep == None:
return False
else:
return False
return True
def equals(self, elcs, cl):
if cl.phenotype == self.phenotype and len(cl.specifiedAttList) == len(self.specifiedAttList):
clRefs = sorted(cl.specifiedAttList)
selfRefs = sorted(self.specifiedAttList)
if clRefs == selfRefs:
for i in range(len(cl.specifiedAttList)):
tempIndex = self.specifiedAttList.index(cl.specifiedAttList[i])
if not (cl.condition[i] == self.condition[tempIndex]):
return False
return True
return False
def updateNumerosity(self, num):
""" Updates the numberosity of the classifier. Notice that 'num' can be negative! """
self.numerosity += num
def updateExperience(self):
""" Increases the experience of the classifier by one. Once an epoch has completed, rule accuracy can't change."""
self.matchCount += 1
def updateCorrect(self):
""" Increases the correct phenotype tracking by one. Once an epoch has completed, rule accuracy can't change."""
self.correctCount += 1
def updateMatchSetSize(self, elcs, matchSetSize):
""" Updates the average match set size. """
if self.matchCount < 1.0 / elcs.beta:
self.aveMatchSetSize = (self.aveMatchSetSize * (self.matchCount - 1) + matchSetSize) / float(
self.matchCount)
else:
self.aveMatchSetSize = self.aveMatchSetSize + elcs.beta * (matchSetSize - self.aveMatchSetSize)
def updateAccuracy(self):
""" Update the accuracy tracker """
self.accuracy = self.correctCount / float(self.matchCount)
def updateFitness(self, elcs):
""" Update the fitness parameter. """
if elcs.env.formatData.discretePhenotype or (
self.phenotype[1] - self.phenotype[0]) / elcs.env.formatData.phenotypeRange < 0.5:
self.fitness = pow(self.accuracy, elcs.nu)
else:
if (self.phenotype[1] - self.phenotype[0]) >= elcs.env.formatData.phenotypeRange:
self.fitness = 0.0
else:
self.fitness = math.fabs(pow(self.accuracy, elcs.nu) - (
self.phenotype[1] - self.phenotype[0]) / elcs.env.formatData.phenotypeRange)
def isSubsumer(self, elcs):
if self.matchCount > elcs.theta_sub and self.accuracy > elcs.acc_sub:
return True
return False
def isMoreGeneral(self, cl, elcs):
if len(self.specifiedAttList) >= len(cl.specifiedAttList):
return False
for i in range(len(self.specifiedAttList)):
attributeInfoType = elcs.env.formatData.attributeInfoType[self.specifiedAttList[i]]
if self.specifiedAttList[i] not in cl.specifiedAttList:
return False
# Continuous
if attributeInfoType:
otherRef = cl.specifiedAttList.index(self.specifiedAttList[i])
if self.condition[i][0] < cl.condition[otherRef][0]:
return False
if self.condition[i][1] > cl.condition[otherRef][1]:
return False
return True
def uniformCrossover(self, elcs, cl):
if elcs.env.formatData.discretePhenotype or random.random() < 0.5:
p_self_specifiedAttList = copy.deepcopy(self.specifiedAttList)
p_cl_specifiedAttList = copy.deepcopy(cl.specifiedAttList)
# Make list of attribute references appearing in at least one of the parents.-----------------------------
comboAttList = []
for i in p_self_specifiedAttList:
comboAttList.append(i)
for i in p_cl_specifiedAttList:
if i not in comboAttList:
comboAttList.append(i)
elif not elcs.env.formatData.attributeInfoType[i]:
comboAttList.remove(i)
comboAttList.sort()
changed = False
for attRef in comboAttList:
attributeInfoType = elcs.env.formatData.attributeInfoType[attRef]
probability = 0.5
ref = 0
if attRef in p_self_specifiedAttList:
ref += 1
if attRef in p_cl_specifiedAttList:
ref += 1
if ref == 0:
pass
elif ref == 1:
if attRef in p_self_specifiedAttList and random.random() > probability:
i = self.specifiedAttList.index(attRef)
cl.condition.append(self.condition.pop(i))
cl.specifiedAttList.append(attRef)
self.specifiedAttList.remove(attRef)
changed = True
if attRef in p_cl_specifiedAttList and random.random() < probability:
i = cl.specifiedAttList.index(attRef)
self.condition.append(cl.condition.pop(i))
self.specifiedAttList.append(attRef)
cl.specifiedAttList.remove(attRef)
changed = True
else:
# Continuous Attribute
if attributeInfoType:
i_cl1 = self.specifiedAttList.index(attRef)
i_cl2 = cl.specifiedAttList.index(attRef)
tempKey = random.randint(0, 3)
if tempKey == 0:
temp = self.condition[i_cl1][0]
self.condition[i_cl1][0] = cl.condition[i_cl2][0]
cl.condition[i_cl2][0] = temp
elif tempKey == 1:
temp = self.condition[i_cl1][1]
self.condition[i_cl1][1] = cl.condition[i_cl2][1]
cl.condition[i_cl2][1] = temp
else:
allList = self.condition[i_cl1] + cl.condition[i_cl2]
newMin = min(allList)
newMax = max(allList)
if tempKey == 2:
self.condition[i_cl1] = [newMin, newMax]
cl.condition.pop(i_cl2)
cl.specifiedAttList.remove(attRef)
else:
cl.condition[i_cl2] = [newMin, newMax]
self.condition.pop(i_cl1)
self.specifiedAttList.remove(attRef)
# Discrete Attribute
else:
pass
tempList1 = copy.deepcopy(p_self_specifiedAttList)
tempList2 = copy.deepcopy(cl.specifiedAttList)
tempList1.sort()
tempList2.sort()
if changed and len(set(tempList1) & set(tempList2)) == len(tempList2):
changed = False
return changed
else:
return self.phenotypeCrossover(cl)
def phenotypeCrossover(self, cl):
changed = False
if self.phenotype == cl.phenotype:
return changed
else:
tempKey = random.random() < 0.5 # Make random choice between 4 scenarios, Swap minimums, Swap maximums, Children preserve parent phenotypes.
if tempKey: # Swap minimum
temp = self.phenotype[0]
self.phenotype[0] = cl.phenotype[0]
cl.phenotype[0] = temp
changed = True
elif tempKey: # Swap maximum
temp = self.phenotype[1]
self.phenotype[1] = cl.phenotype[1]
cl.phenotype[1] = temp
changed = True
return changed
def Mutation(self, elcs, state, phenotype):
changed = False
# Mutate Condition
for attRef in range(elcs.env.formatData.numAttributes):
attributeInfoType = elcs.env.formatData.attributeInfoType[attRef]
if not (attributeInfoType): # Discrete
attributeInfoValue = elcs.env.formatData.attributeInfoDiscrete[attRef]
else:
attributeInfoValue = elcs.env.formatData.attributeInfoContinuous[attRef]
if random.random() < elcs.mu and not(state[attRef] == None):
# Mutation
if attRef not in self.specifiedAttList:
self.specifiedAttList.append(attRef)
self.buildMatch(elcs, attRef, state)
changed = True
elif attRef in self.specifiedAttList:
i = self.specifiedAttList.index(attRef)
if not attributeInfoType or random.random() > 0.5:
del self.specifiedAttList[i]
del self.condition[i]
changed = True
else:
attRange = float(attributeInfoValue[1]) - float(attributeInfoValue[0])
mutateRange = random.random() * 0.5 * attRange
if random.random() > 0.5:
if random.random() > 0.5:
self.condition[i][0] += mutateRange
else:
self.condition[i][0] -= mutateRange
else:
if random.random() > 0.5:
self.condition[i][1] += mutateRange
else:
self.condition[i][1] -= mutateRange
self.condition[i] = sorted(self.condition[i])
changed = True
else:
pass
# Mutate Phenotype
if elcs.env.formatData.discretePhenotype:
nowChanged = self.discretePhenotypeMutation(elcs)
else:
nowChanged = self.continuousPhenotypeMutation(elcs, phenotype)
if changed or nowChanged:
return True
def discretePhenotypeMutation(self, elcs):
changed = False
if random.random() < elcs.mu:
phenotypeList = copy.deepcopy(elcs.env.formatData.phenotypeList)
phenotypeList.remove(self.phenotype)
newPhenotype = random.choice(phenotypeList)
self.phenotype = newPhenotype
changed = True
return changed
def continuousPhenotypeMutation(self, elcs, phenotype):
changed = False
if random.random() < elcs.mu:
phenRange = self.phenotype[1] - self.phenotype[0]
mutateRange = random.random() * 0.5 * phenRange
tempKey = random.randint(0,2) # Make random choice between 3 scenarios, mutate minimums, mutate maximums, mutate both
if tempKey == 0: # Mutate minimum
if random.random() > 0.5 or self.phenotype[0] + mutateRange <= phenotype: # Checks that mutated range still contains current phenotype
self.phenotype[0] += mutateRange
else: # Subtract
self.phenotype[0] -= mutateRange
changed = True
elif tempKey == 1: # Mutate maximum
if random.random() > 0.5 or self.phenotype[1] - mutateRange >= phenotype: # Checks that mutated range still contains current phenotype
self.phenotype[1] -= mutateRange
else: # Subtract
self.phenotype[1] += mutateRange
changed = True
else: # mutate both
if random.random() > 0.5 or self.phenotype[0] + mutateRange <= phenotype: # Checks that mutated range still contains current phenotype
self.phenotype[0] += mutateRange
else: # Subtract
self.phenotype[0] -= mutateRange
if random.random() > 0.5 or self.phenotype[1] - mutateRange >= phenotype: # Checks that mutated range still contains current phenotype
self.phenotype[1] -= mutateRange
else: # Subtract
self.phenotype[1] += mutateRange
changed = True
self.phenotype.sort()
return changed
def updateTimeStamp(self, ts):
""" Sets the time stamp of the classifier. """
self.timeStampGA = ts
def setAccuracy(self, acc):
""" Sets the accuracy of the classifier """
self.accuracy = acc
def setFitness(self, fit):
""" Sets the fitness of the classifier. """
self.fitness = fit
def subsumes(self, elcs, cl):
# Discrete Phenotype
if elcs.env.formatData.discretePhenotype:
if cl.phenotype == self.phenotype:
if self.isSubsumer(elcs) and self.isMoreGeneral(cl, elcs):
return True
return False
# Continuous Phenotype
else:
if self.phenotype[0] >= cl.phenotype[0] and self.phenotype[1] <= cl.phenotype[1]:
if self.isSubsumer(elcs) and self.isMoreGeneral(cl, elcs):
return True
return False
def getDelProp(self, elcs, meanFitness):
""" Returns the vote for deletion of the classifier. """
if self.fitness / self.numerosity >= elcs.delta * meanFitness or self.matchCount < elcs.theta_del:
deletionVote = self.aveMatchSetSize * self.numerosity
elif self.fitness == 0.0:
deletionVote = self.aveMatchSetSize * self.numerosity * meanFitness / (elcs.init_fit / self.numerosity)
else:
deletionVote = self.aveMatchSetSize * self.numerosity * meanFitness / (self.fitness / self.numerosity)
return deletionVote | /scikit-eLCS-1.2.4.tar.gz/scikit-eLCS-1.2.4/skeLCS/Classifier.py | 0.615088 | 0.229018 | Classifier.py | pypi |
import numpy as np
import scipy as sp
import warnings
from scipy.linalg import LinAlgWarning
from sklearn.exceptions import DataConversionWarning
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils import check_X_y, check_array
warnings.simplefilter("ignore", LinAlgWarning)
class BatchCholeskySolver(BaseEstimator, RegressorMixin):
def __init__(self, alpha=1e-7):
self.alpha = alpha
def _init_XY(self, X, y):
"""Initialize covariance matrices, including a separate bias term.
"""
d_in = X.shape[1]
self._XtX = np.eye(d_in + 1) * self.alpha
self._XtX[0, 0] = 0
if len(y.shape) == 1:
self._XtY = np.zeros((d_in + 1,))
else:
self._XtY = np.zeros((d_in + 1, y.shape[1]))
@property
def XtY_(self):
return self._XtY
@property
def XtX_(self):
return self._XtX
@XtY_.setter
def XtY_(self, value):
self._XtY = value
@XtX_.setter
def XtX_(self, value):
self._XtX = value
def _solve(self):
"""Second stage of solution (X'X)B = X'Y using Cholesky decomposition.
Sets `is_fitted_` to True.
"""
B = sp.linalg.solve(self._XtX, self._XtY, assume_a='sym', overwrite_a=False, overwrite_b=False)
self.coef_ = B[1:]
self.intercept_ = B[0]
self.is_fitted_ = True
def _reset(self):
"""Erase solution and data matrices.
"""
[delattr(self, attr) for attr in ('_XtX', '_XtY', 'coef_', 'intercept_', 'is_fitted_') if hasattr(self, attr)]
def fit(self, X, y):
"""Solves an L2-regularized linear system like Ridge regression, overwrites any previous solutions.
"""
self._reset() # remove old solution
self.partial_fit(X, y, compute_output_weights=True)
return self
def partial_fit(self, X, y, forget=False, compute_output_weights=True):
"""Update model with a new batch of data.
Output weight computation can be temporary turned off for faster processing. This will mark model as
not fit. Enable `compute_output_weights` in the final call to `partial_fit`.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
Training input samples
y : array-like, shape=[n_samples, n_targets]
Training targets
forget : boolean, default False
Performs a negative update, effectively removing the information given by training
samples from the model. Output weights need to be re-computed after forgetting data.
compute_output_weights : boolean, optional, default True
Whether to compute new output weights (coef_, intercept_). Disable this in intermediate `partial_fit`
steps to run computations faster, then enable in the last call to compute the new solution.
.. Note::
Solution can be updated without extra data by setting `X=None` and `y=None`.
"""
if self.alpha < 0:
raise ValueError("Regularization parameter alpha must be non-negative.")
# solution only
if X is None and y is None and compute_output_weights:
self._solve()
return self
# validate parameters
X, y = check_X_y(X, y, accept_sparse=True, multi_output=True, y_numeric=True, ensure_2d=True)
if len(y.shape) > 1 and y.shape[1] == 1:
msg = "A column-vector y was passed when a 1d array was expected.\
Please change the shape of y to (n_samples, ), for example using ravel()."
warnings.warn(msg, DataConversionWarning)
# init temporary data storage
if not hasattr(self, '_XtX'):
self._init_XY(X, y)
else:
if X.shape[1] + 1 != self._XtX.shape[0]:
n_new, n_old = X.shape[1], self._XtX.shape[0] - 1
raise ValueError("Number of features %d does not match previous data %d." % (n_new, n_old))
# compute temporary data
X_sum = safe_sparse_dot(X.T, np.ones((X.shape[0],)))
y_sum = safe_sparse_dot(y.T, np.ones((y.shape[0],)))
if not forget:
self._XtX[0, 0] += X.shape[0]
self._XtX[1:, 0] += X_sum
self._XtX[0, 1:] += X_sum
self._XtX[1:, 1:] += X.T @ X
self._XtY[0] += y_sum
self._XtY[1:] += X.T @ y
else:
print("!!! forgetting")
self._XtX[0, 0] -= X.shape[0]
self._XtX[1:, 0] -= X_sum
self._XtX[0, 1:] -= X_sum
self._XtX[1:, 1:] -= X.T @ X
self._XtY[0] -= y_sum
self._XtY[1:] -= X.T @ y
# solve
if not compute_output_weights:
# mark as not fitted
[delattr(self, attr) for attr in ('coef_', 'intercept_', 'is_fitted_') if hasattr(self, attr)]
else:
self._solve()
return self
def predict(self, X):
check_is_fitted(self, 'is_fitted_')
X = check_array(X, accept_sparse=True)
return safe_sparse_dot(X, self.coef_, dense_output=True) + self.intercept_ | /scikit_elm-0.21a0-py3-none-any.whl/skelm/solver_batch.py | 0.89875 | 0.61086 | solver_batch.py | pypi |
import numpy as np
import warnings
from scipy.special import expit
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.utils.multiclass import unique_labels, type_of_target
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.exceptions import DataConversionWarning, DataDimensionalityWarning
from .hidden_layer import HiddenLayer
from .solver_batch import BatchCholeskySolver
from .utils import _dense
warnings.simplefilter("ignore", DataDimensionalityWarning)
class _BaseELM(BaseEstimator):
def __init__(self, alpha=1e-7, batch_size=None, include_original_features=False,
n_neurons=None, ufunc="tanh", density=None, pairwise_metric=None,
random_state=None):
self.alpha = alpha
self.n_neurons = n_neurons
self.batch_size = batch_size
self.ufunc = ufunc
self.include_original_features = include_original_features
self.density = density
self.pairwise_metric = pairwise_metric
self.random_state = random_state
def _init_hidden_layers(self, X):
"""Init an empty model, creating objects for hidden layers and solver.
Also validates inputs for several hidden layers.
"""
# only one type of neurons
if not hasattr(self.n_neurons, '__iter__'):
hl = HiddenLayer(n_neurons=self.n_neurons, density=self.density, ufunc=self.ufunc,
pairwise_metric=self.pairwise_metric, random_state=self.random_state)
hl.fit(X)
self.hidden_layers_ = (hl, )
# several different types of neurons
else:
k = len(self.n_neurons)
# fix default values
ufuncs = self.ufunc
if isinstance(ufuncs, str) or not hasattr(ufuncs, "__iter__"):
ufuncs = [ufuncs] * k
densities = self.density
if densities is None or not hasattr(densities, "__iter__"):
densities = [densities] * k
pw_metrics = self.pairwise_metric
if pw_metrics is None or isinstance(pw_metrics, str):
pw_metrics = [pw_metrics] * k
if not k == len(ufuncs) == len(densities) == len(pw_metrics):
raise ValueError("Inconsistent parameter lengths for model with {} different types of neurons.\n"
"Set 'ufunc', 'density' and 'pairwise_distances' by lists "
"with {} elements, or leave the default values.".format(k, k))
self.hidden_layers_ = []
for n_neurons, ufunc, density, metric in zip(self.n_neurons, ufuncs, densities, pw_metrics):
hl = HiddenLayer(n_neurons=n_neurons, density=density, ufunc=ufunc,
pairwise_metric=metric, random_state=self.random_state)
hl.fit(X)
self.hidden_layers_.append(hl)
def _reset(self):
[delattr(self, attr) for attr in ('n_features_', 'solver_', 'hidden_layers_', 'is_fitted_', 'label_binarizer_') if hasattr(self, attr)]
@property
def n_neurons_(self):
if not hasattr(self, 'hidden_layers_'):
return None
neurons_count = sum([hl.n_neurons_ for hl in self.hidden_layers_])
if self.include_original_features:
neurons_count += self.n_features_
return neurons_count
@property
def coef_(self):
return self.solver_.coef_
@property
def intercept_(self):
return self.solver_.intercept_
def partial_fit(self, X, y=None, forget=False, compute_output_weights=True):
"""Update model with a new batch of data.
|method_partial_fit|
.. |method_partial_fit| replace:: Output weight computation can be temporary turned off
for faster processing. This will mark model as not fit. Enable `compute_output_weights`
in the final call to `partial_fit`.
.. |param_forget| replace:: Performs a negative update, effectively removing the information
given by training samples from the model. Output weights need to be re-computed after forgetting
data. Forgetting data that have not been learned before leads to unpredictable results.
.. |param_compute_output_weights| replace:: Whether to compute new output weights
(coef_, intercept_). Disable this in intermediate `partial_fit`
steps to run computations faster, then enable in the last call to compute the new solution.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
Training input samples
y : array-like, shape=[n_samples, n_targets]
Training targets
forget : boolean, default False
|param_forget|
compute_output_weights : boolean, optional, default True
|param_compute_output_weights|
.. Note::
Solution can be updated without extra data by setting `X=None` and `y=None`.
Example:
>>> model.partial_fit(X_1, y_1)
... model.partial_fit(X_2, y_2)
... model.partial_fit(X_3, y_3) # doctest: +SKIP
Faster, option 1:
>>> model.partial_fit(X_1, y_1, compute_output_weights=False)
... model.partial_fit(X_2, y_2, compute_output_weights=False)
... model.partial_fit(X_3, y_3) # doctest: +SKIP
Faster, option 2:
>>> model.partial_fit(X_1, y_1, compute_output_weights=False)
... model.partial_fit(X_2, y_2, compute_output_weights=False)
... model.partial_fit(X_3, y_3, compute_output_weights=False)
... model.partial_fit(X=None, y=None) # doctest: +SKIP
"""
# compute output weights only
if X is None and y is None and compute_output_weights:
self.solver_.partial_fit(None, None, compute_output_weights=True)
self.is_fitted_ = True
return self
X, y = check_X_y(X, y, accept_sparse=True, multi_output=True)
if len(y.shape) > 1 and y.shape[1] == 1:
msg = ("A column-vector y was passed when a 1d array was expected. "
"Please change the shape of y to (n_samples, ), for example using ravel().")
warnings.warn(msg, DataConversionWarning)
n_samples, n_features = X.shape
if hasattr(self, 'n_features_') and self.n_features_ != n_features:
raise ValueError('Shape of input is different from what was seen in `fit`')
# set batch size, default is bsize=2000 or all-at-once with less than 10_000 samples
self.bsize_ = self.batch_size
if self.bsize_ is None:
self.bsize_ = n_samples if n_samples < 10 * 1000 else 2000
# init model if not fit yet
if not hasattr(self, 'hidden_layers_'):
self.n_features_ = n_features
self.solver_ = BatchCholeskySolver(alpha=self.alpha)
self._init_hidden_layers(X)
# special case of one-shot processing
if self.bsize_ >= n_samples:
H = [hl.transform(X) for hl in self.hidden_layers_]
H = np.hstack(H if not self.include_original_features else [_dense(X)] + H)
self.solver_.partial_fit(H, y, forget=forget, compute_output_weights=False)
else: # batch processing
for b_start in range(0, n_samples, self.bsize_):
b_end = min(b_start + self.bsize_, n_samples)
b_X = X[b_start:b_end]
b_y = y[b_start:b_end]
b_H = [hl.transform(b_X) for hl in self.hidden_layers_]
b_H = np.hstack(b_H if not self.include_original_features else [_dense(b_X)] + b_H)
self.solver_.partial_fit(b_H, b_y, forget=forget, compute_output_weights=False)
# output weights if needed
if compute_output_weights:
self.solver_.partial_fit(None, None, compute_output_weights=True)
self.is_fitted_ = True
# mark as needing a solution
elif hasattr(self, 'is_fitted_'):
del self.is_fitted_
return self
def fit(self, X, y=None):
"""Reset model and fit on the given data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data samples.
y : array-like, shape (n_samples,) or (n_samples, n_outputs)
Target values used as real numbers.
Returns
-------
self : object
Returns self.
"""
#todo: add X as bunch of files support
self._reset()
self.partial_fit(X, y)
return self
def predict(self, X):
"""Predict real valued outputs for new inputs X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data samples.
Returns
-------
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Predicted outputs for inputs X.
.. attention::
:mod:`predict` always returns a dense matrix of predicted outputs -- unlike
in :meth:`fit`, this may cause memory issues at high number of outputs
and very high number of samples. Feed data by smaller batches in such case.
"""
X = check_array(X, accept_sparse=True)
check_is_fitted(self, "is_fitted_")
H = [hl.transform(X) for hl in self.hidden_layers_]
if self.include_original_features:
H = [_dense(X)] + H
H = np.hstack(H)
return self.solver_.predict(H)
class ELMRegressor(_BaseELM, RegressorMixin):
"""Extreme Learning Machine for regression problems.
This model solves a regression problem, that is a problem of predicting continuous outputs.
It supports multi-variate regression (when ``y`` is a 2d array of shape [n_samples, n_targets].)
ELM uses ``L2`` regularization, and optionally includes the original data features to
capture linear dependencies in the data natively.
Parameters
----------
alpha : float
Regularization strength; must be a positive float. Larger values specify stronger effect.
Regularization improves model stability and reduces over-fitting at the cost of some learning
capacity. The same value is used for all targets in multi-variate regression.
The optimal regularization strength is suggested to select from a large range of logarithmically
distributed values, e.g. :math:`[10^{-5}, 10^{-4}, 10^{-3}, ..., 10^4, 10^5]`. A small default
regularization value of :math:`10^{-7}` should always be present to counter numerical instabilities
in the solution; it does not affect overall model performance.
.. attention::
The model may automatically increase the regularization value if the solution
becomes unfeasible otherwise. The actual used value contains in ``alpha_`` attribute.
batch_size : int, optional
Actual computations will proceed in batches of this size, except the last batch that may be smaller.
Default behavior is to process all data at once with <10,000 samples, otherwise use batches
of size 2000.
include_original_features : boolean, default=False
Adds extra hidden layer neurons that simpy copy the input data features, adding a linear part
to the final model solution that can directly capture linear relations between data and
outputs. Effectively increases `n_neurons` by `n_inputs` leading to a larger model.
Including original features is generally a good thing if the number of data features is low.
n_neurons : int or [int], optional
Number of hidden layer neurons in ELM model, controls model size and learning capacity.
Generally number of neurons should be less than the number of training data samples, as
otherwise the model will learn the training set perfectly resulting in overfitting.
Several different kinds of neurons can be used in the same model by specifying a list of
neuron counts. ELM will create a separate neuron type for each element in the list.
In that case, the following attributes ``ufunc``, ``density`` and ``pairwise_metric``
should be lists of the same length; default values will be automatically expanded into a list.
.. note::
Models with <1,000 neurons are very fast to compute, while GPU acceleration is efficient
starting from 1,000-2,000 neurons. A standard computer should handle up to 10,000 neurons.
Very large models will not fit in memory but can still be trained by an out-of-core solver.
ufunc : {'tanh', 'sigm', 'relu', 'lin' or callable}, or a list of those (see n_neurons)
Transformation function of hidden layer neurons. Includes the following options:
- 'tanh' for hyperbolic tangent
- 'sigm' for sigmoid
- 'relu' for rectified linear unit (clamps negative values to zero)
- 'lin' for linear neurons, transformation function does nothing
- any custom callable function like members of ``Numpu.ufunc``
density : float in range (0, 1], or a list of those (see n_neurons), optional
Specifying density replaces dense projection layer by a sparse one with the specified
density of the connections. For instance, ``density=0.1`` means each hidden neuron will
be connected to a random 10% of input features. Useful for working on very high-dimensional
data, or for large numbers of neurons.
pairwise_metric : {'euclidean', 'cityblock', 'cosine' or other}, or a list of those (see n_neurons), optional
Specifying pairwise metric replaces multiplicative hidden neurons by distance-based hidden
neurons. This ELM model is known as Radial Basis Function ELM (RBF-ELM).
.. note::
Pairwise function neurons ignore ufunc and density.
Typical metrics are `euclidean`, `cityblock` and `cosine`. For a full list of metrics check
the `webpage <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise_distances.html>`_
of :mod:`sklearn.metrics.pairwise_distances`.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when generating random numbers e.g.
for hidden neuron parameters. Random state instance is passed to lower level objects and routines.
Use it for repeatable experiments.
Attributes
----------
n_neurons_ : int
Number of automatically generated neurons.
ufunc_ : function
Tranformation function of hidden neurons.
projection_ : object
Hidden layer projection function.
solver_ : object
Solver instance, read solution from there.
Examples
--------
Combining ten sigmoid and twenty RBF neurons in one model:
>>> model = ELMRegressor(n_neurons=(10, 20),
... ufunc=('sigm', None),
... density=(None, None),
... pairwise_metric=(None, 'euclidean')) # doctest: +SKIP
Default values in multi-neuron ELM are automatically expanded to a list
>>> model = ELMRegressor(n_neurons=(10, 20),
... ufunc=('sigm', None),
... pairwise_metric=(None, 'euclidean')) # doctest: +SKIP
>>> model = ELMRegressor(n_neurons=(30, 30),
... pairwise_metric=('cityblock', 'cosine')) # doctest: +SKIP
"""
pass
class ELMClassifier(_BaseELM, ClassifierMixin):
"""ELM classifier, modified for multi-label classification support.
:param classes: Set of classes to consider in the model; can be expanded at runtime.
Samples of other classes will have their output set to zero.
:param solver: Solver to use, "default" for build-in Least Squares or "ridge" for Ridge regression
Example descr...
Attributes
----------
X_ : ndarray, shape (n_samples, n_features)
The input passed during :meth:`fit`.
y_ : ndarray, shape (n_samples,)
The labels passed during :meth:`fit`.
classes_ : ndarray, shape (n_classes,)
The classes seen at :meth:`fit`.
"""
def __init__(self, classes=None, alpha=1e-7, batch_size=None, include_original_features=False, n_neurons=None,
ufunc="tanh", density=None, pairwise_metric=None, random_state=None):
super().__init__(alpha, batch_size, include_original_features, n_neurons, ufunc, density, pairwise_metric,
random_state)
self.classes = classes
@property
def classes_(self):
return self.label_binarizer_.classes_
def _get_tags(self):
return {"multioutput": True, "multilabel": True}
def _update_classes(self, y):
if not isinstance(self.solver_, BatchCholeskySolver):
raise ValueError("Only iterative solver supports dynamic class update")
old_classes = self.label_binarizer_.classes_
partial_classes = clone(self.label_binarizer_).fit(y).classes_
# no new classes detected
if set(partial_classes) <= set(old_classes):
return
if len(old_classes) < 3:
raise ValueError("Dynamic class update has to start with at least 3 classes to function correctly; "
"provide 3 or more 'classes=[...]' during initialization.")
# get new classes sorted by LabelBinarizer
self.label_binarizer_.fit(np.hstack((old_classes, partial_classes)))
new_classes = self.label_binarizer_.classes_
# convert existing XtY matrix to new classes
if hasattr(self.solver_, 'XtY_'):
XtY_old = self.solver_.XtY_
XtY_new = np.zeros((XtY_old.shape[0], new_classes.shape[0]))
for i, c in enumerate(old_classes):
j = np.where(new_classes == c)[0][0]
XtY_new[:, j] = XtY_old[:, i]
self.solver_.XtY_ = XtY_new
# reset the solution
if hasattr(self.solver_, 'is_fitted_'):
del self.solver_.is_fitted_
def partial_fit(self, X, y=None, forget=False, update_classes=False, compute_output_weights=True):
"""Update classifier with a new batch of data.
|method_partial_fit|
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
Training input samples
y : array-like, shape=[n_samples, n_targets]
Training targets
forget : boolean, default False
|param_forget|
update_classes : boolean, default False
Include new classes from `y` into the model, assuming they were 0 in all previous samples.
compute_output_weights : boolean, optional, default True
|param_compute_output_weights|
"""
#todo: Warning on strongly non-normalized data
X, y = check_X_y(X, y, accept_sparse=True, multi_output=True)
# init label binarizer if needed
if not hasattr(self, 'label_binarizer_'):
self.label_binarizer_ = LabelBinarizer()
if type_of_target(y).endswith("-multioutput"):
self.label_binarizer_ = MultiLabelBinarizer()
self.label_binarizer_.fit(self.classes if self.classes is not None else y)
if update_classes:
self._update_classes(y)
y_numeric = self.label_binarizer_.transform(y)
if len(y_numeric.shape) > 1 and y_numeric.shape[1] == 1:
y_numeric = y_numeric[:, 0]
super().partial_fit(X, y_numeric, forget=forget, compute_output_weights=compute_output_weights)
return self
def fit(self, X, y=None):
"""Fit a classifier erasing any previously trained model.
Returns
-------
self : object
Returns self.
"""
self._reset()
self.partial_fit(X, y, compute_output_weights=True)
return self
def predict(self, X):
"""Predict classes of new inputs X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Returns one most probable class for multi-class problem, or
a binary vector of all relevant classes for multi-label problem.
"""
check_is_fitted(self, "is_fitted_")
scores = super().predict(X)
return self.label_binarizer_.inverse_transform(scores)
def predict_proba(self, X):
"""Probability estimation for all classes.
Positive class probabilities are computed as
1. / (1. + np.exp(-self.decision_function(X)));
multiclass is handled by normalizing that over all classes.
"""
check_is_fitted(self, "is_fitted_")
prob = super().predict(X)
expit(prob, out=prob)
if prob.ndim == 1:
return np.vstack([1 - prob, prob]).T
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob | /scikit_elm-0.21a0-py3-none-any.whl/skelm/elm.py | 0.877844 | 0.352425 | elm.py | pypi |
import scipy as sp
from enum import Enum
from sklearn.metrics import pairwise_distances
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_array, check_is_fitted, check_random_state
class HiddenLayerType(Enum):
RANDOM = 1 # Gaussian random projection
SPARSE = 2 # Sparse Random Projection
PAIRWISE = 3 # Pairwise kernel with a number of centroids
def dummy(x):
return x
def flatten(items):
"""Yield items from any nested iterable."""
for x in items:
# don't break strings into characters
if hasattr(x, '__iter__') and not isinstance(x, (str, bytes)):
yield from flatten(x)
else:
yield x
def _is_list_of_strings(obj):
return obj is not None and all(isinstance(elem, str) for elem in obj)
def _dense(X):
if sp.sparse.issparse(X):
return X.todense()
else:
return X
class PairwiseRandomProjection(BaseEstimator, TransformerMixin):
def __init__(self, n_components=100, pairwise_metric='l2', n_jobs=None, random_state=None):
"""Pairwise distances projection with random centroids.
Parameters
----------
n_components : int
Number of components (centroids) in the projection. Creates the same number of output features.
pairwise_metric : str
A valid pairwise distance metric, see pairwise-distances_.
.. _pairwise-distances: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise_distances.html#sklearn.metrics.pairwise_distances
n_jobs : int or None, optional, default=None
Number of jobs to use in distance computations, or `None` for no parallelism.
Passed to _pairwise-distances function.
random_state
Used for random generation of centroids.
"""
self.n_components = n_components
self.pairwise_metric = pairwise_metric
self.n_jobs = n_jobs
self.random_state = random_state
def fit(self, X, y=None):
"""Generate artificial centroids.
Centroids are sampled from a normal distribution. They work best if the data is normalized.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
Input data
"""
X = check_array(X, accept_sparse=True)
self.random_state_ = check_random_state(self.random_state)
if self.n_components <= 0:
raise ValueError("n_components must be greater than 0, got %s" % self.n_components)
self.components_ = self.random_state_.randn(self.n_components, X.shape[1])
self.n_jobs_ = 1 if self.n_jobs is None else self.n_jobs
return self
def transform(self, X):
"""Compute distance matrix between input data and the centroids.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Input data samples.
Returns
-------
X_dist : numpy array
Distance matrix between input data samples and centroids.
"""
X = check_array(X, accept_sparse=True)
check_is_fitted(self, 'components_')
if X.shape[1] != self.components_.shape[1]:
raise ValueError(
'Impossible to perform projection: X at fit stage had a different number of features. '
'(%s != %s)' % (X.shape[1], self.components_.shape[1]))
try:
X_dist = pairwise_distances(X, self.components_, n_jobs=self.n_jobs_, metric=self.pairwise_metric)
except TypeError:
# scipy distances that don't support sparse matrices
X_dist = pairwise_distances(_dense(X), _dense(self.components_), n_jobs=self.n_jobs_, metric=self.pairwise_metric)
return X_dist | /scikit_elm-0.21a0-py3-none-any.whl/skelm/utils.py | 0.932522 | 0.450359 | utils.py | pypi |
import numpy as np
import scipy as sp
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_array, check_is_fitted
from sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection
from .utils import PairwiseRandomProjection, HiddenLayerType, dummy
# suppress annoying warning of random projection into a higher-dimensional space
import warnings
warnings.filterwarnings("ignore", message="DataDimensionalityWarning")
def auto_neuron_count(n, d):
# computes default number of neurons for `n` data samples with `d` features
return min(int(250 * np.log(1 + d/10) - 15), n//3 + 1)
ufuncs = {"tanh": np.tanh,
"sigm": sp.special.expit,
"relu": lambda x: np.maximum(x, 0),
"lin": dummy,
None: dummy}
class HiddenLayer(BaseEstimator, TransformerMixin):
def __init__(self, n_neurons=None, density=None, ufunc="tanh", pairwise_metric=None, random_state=None):
self.n_neurons = n_neurons
self.density = density
self.ufunc = ufunc
self.pairwise_metric = pairwise_metric
self.random_state = random_state
def _fit_random_projection(self, X):
self.hidden_layer_ = HiddenLayerType.RANDOM
self.projection_ = GaussianRandomProjection(n_components=self.n_neurons_, random_state=self.random_state_)
self.projection_.fit(X)
def _fit_sparse_projection(self, X):
self.hidden_layer_ = HiddenLayerType.SPARSE
self.projection_ = SparseRandomProjection(n_components=self.n_neurons_, density=self.density,
dense_output=True, random_state=self.random_state_)
self.projection_.fit(X)
def _fit_pairwise_projection(self, X):
self.hidden_layer_ = HiddenLayerType.PAIRWISE
self.projection_ = PairwiseRandomProjection(n_components=self.n_neurons_,
pairwise_metric=self.pairwise_metric,
random_state=self.random_state_)
self.projection_.fit(X)
def fit(self, X, y=None):
# basic checks
X = check_array(X, accept_sparse=True)
# handle random state
self.random_state_ = check_random_state(self.random_state)
# get number of neurons
n, d = X.shape
self.n_neurons_ = int(self.n_neurons) if self.n_neurons is not None else auto_neuron_count(n, d)
# fit a projection
if self.pairwise_metric is not None:
self._fit_pairwise_projection(X)
elif self.density is not None:
self._fit_sparse_projection(X)
else:
self._fit_random_projection(X)
if self.ufunc in ufuncs.keys():
self.ufunc_ = ufuncs[self.ufunc]
elif callable(self.ufunc):
self.ufunc_ = self.ufunc
else:
raise ValueError("Ufunc transformation function not understood: ", self.ufunc)
self.is_fitted_ = True
return self
def transform(self, X):
check_is_fitted(self, "is_fitted_")
X = check_array(X, accept_sparse=True)
n_features = self.projection_.components_.shape[1]
if X.shape[1] != n_features:
raise ValueError("X has %d features per sample; expecting %d" % (X.shape[1], n_features))
if self.hidden_layer_ == HiddenLayerType.PAIRWISE:
return self.projection_.transform(X) # pairwise projection ignores ufunc
return self.ufunc_(self.projection_.transform(X)) | /scikit_elm-0.21a0-py3-none-any.whl/skelm/hidden_layer.py | 0.875282 | 0.437343 | hidden_layer.py | pypi |
import numpy as np
import scipy as sp
import warnings
from sklearn.exceptions import DataConversionWarning
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils import check_X_y, check_array
from dask import distributed
from dask.distributed import Client, LocalCluster
import dask.dataframe as dd
import dask.array as da
class DaskCholeskySolver(BaseEstimator, RegressorMixin):
"""Out-of-core linear system solver with Dask back-end.
Parameters
----------
alpha : float, non-negative
L2 regularization parameter, larger value means stronger effect. The value may be
increased if the system fails to converge; actual used value stored in `alpha_` parameter.
batch_size : int
Batch size for **samples and features**. Computations proceed on square blocks of data.
For optimal performance, use a number of features that is equal or a bit less than multiple
of a batch size; e.g. 8912 features with 3000 batch size.
swap_dir : str
Directory for temporary storage of Dask data that does not fit in memory. A large and fast
storage is advised, like a local SSD.
Attributes
----------
cluster_ : object
An instance of `dask.distributed.LocalCluster`.
client_ : object
Dask client for running computations.
"""
def __init__(self, alpha=1e-7, batch_size=2000, swap_dir=None):
self.alpha = alpha
self.batch_size = batch_size
self.swap_dir = swap_dir
def _init_dask(self):
self.cluster_ = LocalCluster( n_workers=2, local_dir=self.swap_dir)
self.client_ = Client(self.cluster_)
print("Running on:")
print(self.client_)
def fit(self, X, y):
self.W_ = da.random.normal
return self
def predict(self, X):
return None
class BBvdsnjvlsdnjhbgfndjvksdjkvlndsf(BaseEstimator, RegressorMixin):
def __init__(self, alpha=1e-7):
self.alpha = alpha
def _init_XY(self, X, y):
"""Initialize covariance matrices, including a separate bias term.
"""
d_in = X.shape[1]
self._XtX = np.eye(d_in + 1) * self.alpha
self._XtX[0, 0] = 0
if len(y.shape) == 1:
self._XtY = np.zeros((d_in + 1,))
else:
self._XtY = np.zeros((d_in + 1, y.shape[1]))
@property
def XtY_(self):
return self._XtY
@property
def XtX_(self):
return self._XtX
@XtY_.setter
def XtY_(self, value):
self._XtY = value
@XtX_.setter
def XtX_(self, value):
self._XtX = value
def _solve(self):
"""Second stage of solution (X'X)B = X'Y using Cholesky decomposition.
Sets `is_fitted_` to True.
"""
B = sp.linalg.solve(self._XtX, self._XtY, assume_a='pos', overwrite_a=False, overwrite_b=False)
self.coef_ = B[1:]
self.intercept_ = B[0]
self.is_fitted_ = True
def _reset(self):
"""Erase solution and data matrices.
"""
[delattr(self, attr) for attr in ('_XtX', '_XtY', 'coef_', 'intercept_', 'is_fitted_') if hasattr(self, attr)]
def fit(self, X, y):
"""Solves an L2-regularized linear system like Ridge regression, overwrites any previous solutions.
"""
self._reset() # remove old solution
self.partial_fit(X, y, compute_output_weights=True)
return self
def partial_fit(self, X, y, compute_output_weights=True):
"""Update model with a new batch of data.
Output weight computation can be temporary turned off for faster processing. This will mark model as
not fit. Enable `compute_output_weights` in the final call to `partial_fit`.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
Training input samples
y : array-like, shape=[n_samples, n_targets]
Training targets
compute_output_weights : boolean, optional, default True
Whether to compute new output weights (coef_, intercept_). Disable this in intermediate `partial_fit`
steps to run computations faster, then enable in the last call to compute the new solution.
.. Note::
Solution can be updated without extra data by setting `X=None` and `y=None`.
"""
if self.alpha < 0:
raise ValueError("Regularization parameter alpha must be non-negative.")
# solution only
if X is None and y is None and compute_output_weights:
self._solve()
return self
# validate parameters
X, y = check_X_y(X, y, accept_sparse=True, multi_output=True, y_numeric=True, ensure_2d=True)
if len(y.shape) > 1 and y.shape[1] == 1:
msg = "A column-vector y was passed when a 1d array was expected.\
Please change the shape of y to (n_samples, ), for example using ravel()."
warnings.warn(msg, DataConversionWarning)
# init temporary data storage
if not hasattr(self, '_XtX'):
self._init_XY(X, y)
else:
if X.shape[1] + 1 != self._XtX.shape[0]:
n_new, n_old = X.shape[1], self._XtX.shape[0] - 1
raise ValueError("Number of features %d does not match previous data %d." % (n_new, n_old))
# compute temporary data
X_sum = safe_sparse_dot(X.T, np.ones((X.shape[0],)))
y_sum = safe_sparse_dot(y.T, np.ones((y.shape[0],)))
self._XtX[0, 0] += X.shape[0]
self._XtX[1:, 0] += X_sum
self._XtX[0, 1:] += X_sum
self._XtX[1:, 1:] += X.T @ X
self._XtY[0] += y_sum
self._XtY[1:] += X.T @ y
# solve
if not compute_output_weights:
# mark as not fitted
[delattr(self, attr) for attr in ('coef_', 'intercept_', 'is_fitted_') if hasattr(self, attr)]
else:
self._solve()
return self
def predict(self, X):
check_is_fitted(self, 'is_fitted_')
X = check_array(X, accept_sparse=True)
return safe_sparse_dot(X, self.coef_, dense_output=True) + self.intercept_ | /scikit_elm-0.21a0-py3-none-any.whl/skelm/solver_dask.py | 0.84075 | 0.609292 | solver_dask.py | pypi |
# scikit-embeddings
Utilities for training word, document and sentence embeddings in scikit-learn pipelines.
## Features
- Train Word, Paragraph or Sentence embeddings in scikit-learn compatible pipelines.
- Stream texts easily from disk and chunk them so you can use large datasets for training embeddings.
- spaCy tokenizers with lemmatization, stop-word removal and augmentation with POS tags/morphological information, for the highest-quality embeddings in literary analysis.
- Fast, trainable tokenizer components from the Hugging Face `tokenizers` library.
- Easy to integrate components and pipelines in your scikit-learn workflows and machine learning pipelines.
- Easy serialization and integration with the Hugging Face Hub for quickly publishing your embedding pipelines.
### What scikit-embeddings is not for:
- Using pretrained embeddings in scikit-learn pipelines (for these purposes I recommend [embetter](https://github.com/koaning/embetter/tree/main))
- Training transformer models and deep neural language models (if you want to do this, do it with [transformers](https://huggingface.co/docs/transformers/index))
## Examples
### Streams
scikit-embeddings comes with a handful of utilities for streaming data from disk or other sources,
chunking and filtering. Here's an example of how you would go about obtaining chunks of text from jsonl files with a "content" field.
```python
from skembeddings.streams import Stream
# let's say you have a list of file paths
files: list[str] = [...]
# Stream text chunks from jsonl files with a 'content' field.
text_chunks = (
Stream(files)
.read_files(lines=True)
.json()
.grab("content")
.chunk(10_000)
)
```
### Word Embeddings
You can train classic vanilla word embeddings by building a pipeline that contains a `WordLevel` tokenizer and an embedding model:
```python
from skembeddings.tokenizers import WordLevelTokenizer
from skembeddings.models import Word2VecEmbedding
from skembeddings.pipeline import EmbeddingPipeline
embedding_pipe = EmbeddingPipeline(
WordLevelTokenizer(),
Word2VecEmbedding(n_components=100, algorithm="cbow")
)
embedding_pipe.fit(texts)
```
### Fasttext-like
You can train an embedding pipeline that uses subword information by choosing a tokenizer that produces subword units,
such as `Unigram`, `BPE` or `WordPiece`.
Fasttext also uses skip-gram by default, so let's switch to that.
```python
from skembeddings.tokenizers import UnigramTokenizer
from skembeddings.models import Word2VecEmbedding
from skembeddings.pipeline import EmbeddingPipeline
embedding_pipe = EmbeddingPipeline(
UnigramTokenizer(),
Word2VecEmbedding(n_components=250, algorithm="sg")
)
embedding_pipe.fit(texts)
```
### Sense2Vec
We provide a spaCy tokenizer that can lemmatize tokens and append morphological information so you can get fine-grained
semantic information even on relatively small corpora. I recommend using this for literary analysis.
```python
from skembeddings.models import Word2VecEmbedding
from skembeddings.tokenizers import SpacyTokenizer
from skembeddings.pipeline import EmbeddingPipeline
# Single token pattern that lets alphabetical tokens pass, but not stopwords
pattern = [[{"IS_ALPHA": True, "IS_STOP": False}]]
# Build tokenizer that lemmatizes and appends POS-tags to the lemmas
tokenizer = SpacyTokenizer(
"en_core_web_sm",
out_attrs=("LEMMA", "UPOS"),
patterns=pattern,
)
# Build a pipeline
embedding_pipeline = EmbeddingPipeline(
tokenizer,
Word2VecEmbedding(50, algorithm="cbow")
)
# Fitting pipeline on corpus
embedding_pipeline.fit(corpus)
```
### Paragraph Embeddings
You can train Doc2Vec paragraph embeddings with your choice of tokenization.
```python
from skembeddings.tokenizers import WordPieceTokenizer
from skembeddings.models import ParagraphEmbedding
from skembeddings.pipeline import EmbeddingPipeline
embedding_pipe = EmbeddingPipeline(
WordPieceTokenizer(),
ParagraphEmbedding(n_components=250, algorithm="dm")
)
embedding_pipe.fit(texts)
```
### Iterative training
In the case of large datasets you can train on individual chunks with `partial_fit()`.
```python
for chunk in text_chunks:
embedding_pipe.partial_fit(chunk)
```
### Serialization
Pipelines can be safely serialized to disk:
```python
embedding_pipe.to_disk("output_folder/")
embedding_pipe = EmbeddingPipeline.from_disk("output_folder/")
```
Or published to the Hugging Face Hub:
```python
from huggingface_hub import login
login()
embedding_pipe.to_hub("username/name_of_pipeline")
embedding_pipe = EmbeddingPipeline.from_hub("username/name_of_pipeline")
```
### Text Classification
You can include an embedding model in your classification pipelines by adding a classification head.
```python
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.pipeline import make_pipeline
X_train, X_test, y_train, y_test = train_test_split(X, y)
cls_pipe = make_pipeline(embedding_pipe, LogisticRegression())
cls_pipe.fit(X_train, y_train)
y_pred = cls_pipe.predict(X_test)
print(classification_report(y_test, y_pred))
```
### Feature Extraction
If you intend to use the features produced by tokenizers in other text pipelines, such as topic models,
you can use `ListCountVectorizer` or `Joiner`.
Here's an example of an NMF topic model that uses lemmata enriched with POS tags.
```python
from sklearn.decomposition import NMF
from sklearn.pipeline import make_pipeline
from sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer
from skembeddings.tokenizers import SpacyTokenizer
from skembeddings.feature_extraction import ListCountVectorizer
from skembeddings.preprocessing import Joiner
# Single token pattern that lets alphabetical tokens pass, but not stopwords
pattern = [[{"IS_ALPHA": True, "IS_STOP": False}]]
# Build tokenizer that lemmatizes and appends POS-tags to the lemmas
tokenizer = SpacyTokenizer(
"en_core_web_sm",
out_attrs=("LEMMA", "UPOS"),
patterns=pattern,
)
# Example with ListCountVectorizer
topic_pipeline = make_pipeline(
tokenizer,
ListCountVectorizer(),
TfidfTransformer(), # tf-idf weighting (optional)
NMF(15), # 15 topics in the model
)
# Alternatively you can just join the tokens together with whitespace
topic_pipeline = make_pipeline(
tokenizer,
Joiner(),
TfidfVectorizer(),
NMF(15),
)
```
| /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/README.md | 0.762954 | 0.936576 | README.md | pypi |
import tempfile
from pathlib import Path
from typing import Union
from confection import Config, registry
from huggingface_hub import HfApi, snapshot_download
from sklearn.pipeline import Pipeline
# THIS IS IMPORTANT DO NOT REMOVE
from skembeddings import models, tokenizers
from skembeddings._hub import DEFAULT_README
from skembeddings.base import Serializable
class EmbeddingPipeline(Pipeline):
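    """Scikit-learn Pipeline wrapping a tokenizer and an embedding model.
    The pipeline can be frozen to skip further training, serialized with
    to_disk()/from_disk() and shared on the Hugging Face Hub with
    to_hub()/from_hub().
    """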
def __init__(
self,
tokenizer: Serializable,
model: Serializable,
frozen: bool = False,
):
self.tokenizer = tokenizer
self.model = model
self.frozen = frozen
steps = [("tokenizer_model", tokenizer), ("embedding_model", model)]
super().__init__(steps=steps)
def freeze(self):
self.frozen = True
return self
def unfreeze(self):
self.frozen = False
return self
    def fit(self, X, y=None, **kwargs):
        if self.frozen:
            return self
        super().fit(X, y=y, **kwargs)
        return self
def partial_fit(self, X, y=None, classes=None, **kwargs):
"""
        Fits the components, but allows for fitting in batches.
"""
if self.frozen:
return self
for name, step in self.steps:
if not hasattr(step, "partial_fit"):
raise ValueError(
f"Step {name} is a {step} which does"
"not have `.partial_fit` implemented."
)
for name, step in self.steps:
if hasattr(step, "predict"):
step.partial_fit(X, y, classes=classes, **kwargs)
else:
step.partial_fit(X, y)
if hasattr(step, "transform"):
X = step.transform(X)
return self
@property
def config(self) -> Config:
embedding: Serializable = self["embedding_model"] # type: ignore
tokenizer: Serializable = self["tokenizer_model"] # type: ignore
return tokenizer.config.merge(embedding.config)
def to_disk(self, path: Union[str, Path]) -> None:
embedding: Serializable = self["embedding_model"] # type: ignore
tokenizer: Serializable = self["tokenizer_model"] # type: ignore
path = Path(path)
path.mkdir(exist_ok=True)
config_path = path.joinpath("config.cfg")
tokenizer_path = path.joinpath("tokenizer.bin")
embedding_path = path.joinpath("embedding.bin")
with open(embedding_path, "wb") as embedding_file:
embedding_file.write(embedding.to_bytes())
with open(tokenizer_path, "wb") as tokenizer_file:
tokenizer_file.write(tokenizer.to_bytes())
self.config.to_disk(config_path)
@classmethod
def from_disk(cls, path: Union[str, Path]) -> "EmbeddingPipeline":
path = Path(path)
config_path = path.joinpath("config.cfg")
tokenizer_path = path.joinpath("tokenizer.bin")
embedding_path = path.joinpath("embedding.bin")
config = Config().from_disk(config_path)
resolved = registry.resolve(config)
with open(tokenizer_path, "rb") as tokenizer_file:
tokenizer = resolved["tokenizer"].from_bytes(tokenizer_file.read())
with open(embedding_path, "rb") as embedding_file:
embedding = resolved["embedding"].from_bytes(embedding_file.read())
return cls(tokenizer, embedding)
def to_hub(self, repo_id: str, add_readme: bool = True) -> None:
api = HfApi()
api.create_repo(repo_id, exist_ok=True)
with tempfile.TemporaryDirectory() as tmp_dir:
self.to_disk(tmp_dir)
if add_readme:
with open(
Path(tmp_dir).joinpath("README.md"), "w"
) as readme_f:
readme_f.write(DEFAULT_README.format(repo=repo_id))
api.upload_folder(
folder_path=tmp_dir, repo_id=repo_id, repo_type="model"
)
@classmethod
def from_hub(cls, repo_id: str) -> "EmbeddingPipeline":
in_dir = snapshot_download(repo_id=repo_id)
res = cls.from_disk(in_dir)
return res.freeze() | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/pipeline.py | 0.817829 | 0.197212 | pipeline.py | pypi |
from abc import ABC, abstractmethod
from typing import Iterable
from confection import Config, registry
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from tokenizers import Tokenizer
from tokenizers.models import BPE, Unigram, WordLevel, WordPiece
from tokenizers.normalizers import BertNormalizer, Normalizer
from tokenizers.pre_tokenizers import ByteLevel, Whitespace
from tokenizers.trainers import (
BpeTrainer,
Trainer,
UnigramTrainer,
WordLevelTrainer,
WordPieceTrainer,
)
from skembeddings.base import Serializable
class HuggingFaceTokenizerBase(
BaseEstimator, TransformerMixin, Serializable, ABC
):
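    """Base class for trainable tokenizers backed by the `tokenizers` library.
    Subclasses provide the concrete tokenizer model and trainer through
    _init_tokenizer()/_init_trainer(); fitting trains on an iterable of texts
    and transform() returns a list of token strings per document.
    """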
def __init__(self, normalizer: Normalizer = BertNormalizer()):
self.tokenizer = None
self.trainer = None
self.normalizer = normalizer
@abstractmethod
def _init_tokenizer(self) -> Tokenizer:
pass
@abstractmethod
def _init_trainer(self) -> Trainer:
pass
def fit(self, X: Iterable[str], y=None):
self.tokenizer = self._init_tokenizer()
self.trainer = self._init_trainer()
self.tokenizer.train_from_iterator(X, self.trainer)
return self
def partial_fit(self, X: Iterable[str], y=None):
if (self.tokenizer is None) or (self.trainer is None):
self.fit(X)
else:
new_tokenizer = self._init_tokenizer()
new_tokenizer.train_from_iterator(X, self.trainer)
new_vocab = new_tokenizer.get_vocab()
self.tokenizer.add_tokens(new_vocab)
return self
def transform(self, X: Iterable[str]) -> list[list[str]]:
if self.tokenizer is None:
raise NotFittedError("Tokenizer has not been trained yet.")
if isinstance(X, str):
raise TypeError(
"str passed instead of iterable, did you mean to pass [X]?"
)
res = []
for text in X:
encoding = self.tokenizer.encode(text)
res.append(encoding.tokens)
return res
def get_feature_names_out(self, input_features=None):
return None
def to_bytes(self) -> bytes:
if self.tokenizer is None:
raise NotFittedError(
"Tokenizer has not been fitted, cannot serialize."
)
return self.tokenizer.to_str().encode("utf-8")
def from_bytes(self, data: bytes):
tokenizer = Tokenizer.from_str(data.decode("utf-8"))
self.tokenizer = tokenizer
return self
class WordPieceTokenizer(HuggingFaceTokenizerBase):
def _init_tokenizer(self) -> Tokenizer:
tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()
tokenizer.normalizer = self.normalizer
return tokenizer
def _init_trainer(self) -> Trainer:
return WordPieceTrainer(special_tokens=["[UNK]"])
@property
def config(self) -> Config:
return Config(
{
"tokenizer": {
"@tokenizers": "wordpiece_tokenizer.v1",
}
}
)
@classmethod
def from_config(cls, config: Config) -> "WordPieceTokenizer":
resolved = registry.resolve(config)
return resolved["tokenizer"]
class WordLevelTokenizer(HuggingFaceTokenizerBase):
def _init_tokenizer(self) -> Tokenizer:
tokenizer = Tokenizer(WordLevel(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()
tokenizer.normalizer = self.normalizer
return tokenizer
def _init_trainer(self) -> Trainer:
return WordLevelTrainer(special_tokens=["[UNK]"])
@property
def config(self) -> Config:
return Config(
{
"tokenizer": {
"@tokenizers": "word_level_tokenizer.v1",
}
}
)
@classmethod
def from_config(cls, config: Config) -> "WordLevelTokenizer":
resolved = registry.resolve(config)
return resolved["tokenizer"]
class UnigramTokenizer(HuggingFaceTokenizerBase):
def _init_tokenizer(self) -> Tokenizer:
tokenizer = Tokenizer(Unigram())
tokenizer.pre_tokenizer = ByteLevel()
tokenizer.normalizer = self.normalizer
return tokenizer
def _init_trainer(self) -> Trainer:
return UnigramTrainer(unk_token="[UNK]", special_tokens=["[UNK]"])
@property
def config(self) -> Config:
return Config(
{
"tokenizer": {
"@tokenizers": "unigram_tokenizer.v1",
}
}
)
@classmethod
def from_config(cls, config: Config) -> "UnigramTokenizer":
resolved = registry.resolve(config)
return resolved["tokenizer"]
class BPETokenizer(HuggingFaceTokenizerBase):
def _init_tokenizer(self) -> Tokenizer:
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = ByteLevel()
tokenizer.normalizer = self.normalizer
return tokenizer
def _init_trainer(self) -> Trainer:
return BpeTrainer(special_tokens=["[UNK]"])
@property
def config(self) -> Config:
return Config(
{
"tokenizer": {
"@tokenizers": "bpe_tokenizer.v1",
}
}
)
@classmethod
def from_config(cls, config: Config) -> "BPETokenizer":
resolved = registry.resolve(config)
return resolved["tokenizer"] | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/tokenizers/_huggingface.py | 0.893655 | 0.183832 | _huggingface.py | pypi |
from typing import Any, Iterable, Optional, Union
import spacy
from sklearn.base import BaseEstimator, TransformerMixin
from spacy.language import Language
from spacy.matcher import Matcher
from spacy.tokens import Doc, Token
from skembeddings.base import Serializable
# We create a new extension on tokens.
if not Token.has_extension("filter_pass"):
Token.set_extension("filter_pass", default=False)
ATTRIBUTES = {
"ORTH": "orth_",
"NORM": "norm_",
"LEMMA": "lemma_",
"UPOS": "pos_",
"TAG": "tag_",
"DEP": "dep_",
"LOWER": "lower_",
"SHAPE": "shape_",
"ENT_TYPE": "ent_type_",
}
class SpacyTokenizer(BaseEstimator, TransformerMixin, Serializable):
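    """Tokenizer that runs a spaCy pipeline and keeps pattern-matched tokens.
    Only tokens matched by `patterns` (or all tokens if `patterns` is None)
    are kept, and each token is rendered by joining the attributes listed in
    `out_attrs` with "|" (for example LEMMA|UPOS).
    """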
tokenizer_type_ = "spacy_tokenizer"
def __init__(
self,
model: Union[str, Language] = "en_core_web_sm",
patterns: Optional[list[list[dict[str, Any]]]] = None,
out_attrs: Iterable[str] = ("NORM",),
):
self.model = model
if isinstance(model, Language):
self.nlp = model
elif isinstance(model, str):
self.nlp = spacy.load(model)
else:
raise TypeError(
"'model' either has to be a spaCy"
"nlp object or the name of a model."
)
self.patterns = patterns
self.out_attrs = tuple(out_attrs)
for attr in self.out_attrs:
if attr not in ATTRIBUTES:
raise ValueError(f"{attr} is not a valid out attribute.")
self.matcher = Matcher(self.nlp.vocab)
self.matcher.add(
"FILTER_PASS",
patterns=[] if self.patterns is None else self.patterns,
)
def fit(self, X, y=None):
"""Exists for compatiblity, doesn't do anything."""
return self
def partial_fit(self, X, y=None):
"""Exists for compatiblity, doesn't do anything."""
return self
def label_matching_tokens(self, docs: list[Doc]):
"""Labels tokens that match one of the given patterns."""
for doc in docs:
if self.patterns is not None:
matches = self.matcher(doc)
else:
matches = [(None, 0, len(doc))]
for _, start, end in matches:
for token in doc[start:end]:
token._.set("filter_pass", True)
def token_to_str(self, token: Token) -> str:
"""Returns textual representation of token."""
attributes = [
getattr(token, ATTRIBUTES[attr]) for attr in self.out_attrs
]
return "|".join(attributes)
def transform(self, X: Iterable[str]) -> list[list[str]]:
if isinstance(X, str):
raise TypeError(
"str passed instead of iterable, did you mean to pass [X]?"
)
docs = list(self.nlp.pipe(X))
# Label all tokens according to the patterns.
self.label_matching_tokens(docs)
res: list[list[str]] = []
for doc in docs:
tokens = [
self.token_to_str(token)
for token in doc
if token._.filter_pass
]
res.append(tokens)
return res
def get_feature_names_out(self, input_features=None):
return None | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/tokenizers/spacy.py | 0.905659 | 0.204025 | spacy.py | pypi |
import tempfile
from typing import Iterable, Literal
import numpy as np
from confection import Config, registry
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from sklearn.utils import murmurhash3_32
from skembeddings.base import Serializable
from skembeddings.streams.utils import deeplist
def _tag_enumerate(docs: Iterable[list[str]]) -> list[TaggedDocument]:
"""Tags documents with their integer positions."""
return [TaggedDocument(doc, [i]) for i, doc in enumerate(docs)]
class ParagraphEmbedding(BaseEstimator, TransformerMixin, Serializable):
"""Scikit-learn compatible Doc2Vec model."""
def __init__(
self,
n_components: int = 100,
window: int = 5,
algorithm: Literal["dm", "dbow"] = "dm",
tagging_scheme: Literal["hash", "closest"] = "hash",
max_docs: int = 100_000,
epochs: int = 10,
random_state: int = 0,
negative: int = 5,
ns_exponent: float = 0.75,
dm_agg: Literal["mean", "sum", "concat"] = "mean",
dm_tag_count: int = 1,
dbow_words: bool = False,
sample: float = 0.001,
hs: bool = False,
batch_words: int = 10000,
shrink_windows: bool = True,
learning_rate: float = 0.025,
min_learning_rate: float = 0.0001,
n_jobs: int = 1,
):
self.model_ = None
self.loss_: list[float] = []
self.seen_docs_ = 0
if tagging_scheme not in ["hash", "closest"]:
raise ValueError(
"Tagging scheme should either be 'hash' or 'closest'"
)
self.algorithm = algorithm
self.max_docs = max_docs
self.n_components = n_components
self.n_jobs = n_jobs
self.window = window
self.tagging_scheme = tagging_scheme
self.epochs = epochs
self.random_state = random_state
self.negative = negative
self.ns_exponent = ns_exponent
self.dm_agg = dm_agg
self.dm_tag_count = dm_tag_count
self.dbow_words = dbow_words
self.sample = sample
self.hs = hs
self.batch_words = batch_words
self.shrink_windows = shrink_windows
self.learning_rate = learning_rate
self.min_learning_rate = min_learning_rate
def _tag_documents(
self, documents: list[list[str]]
) -> list[TaggedDocument]:
if self.model_ is None:
raise TypeError(
"You should not call _tag_documents"
"before model is initialised."
)
res = []
for document in documents:
# While we have available slots we just add new documents to those
if self.seen_docs_ < self.max_docs:
res.append(TaggedDocument(document, [self.seen_docs_]))
else:
# If we run out, we choose a tag based on a scheme
if self.tagging_scheme == "hash":
# Here we use murmur hash
hash = murmurhash3_32("".join(document))
id = hash % self.max_docs
res.append(TaggedDocument(document, [id]))
elif self.tagging_scheme == "closest":
# We obtain the key of the most semantically
# similar document and use that.
doc_vector = self.model_.infer_vector(document)
key, _ = self.model_.dv.similar_by_key(doc_vector, topn=1)[
0
]
res.append(TaggedDocument(document, [key]))
else:
raise ValueError(
"Tagging scheme should either be 'hash' or 'closest'"
f" but {self.tagging_scheme} was provided."
)
self.seen_docs_ += 1
return res
def _init_model(self, docs=None) -> Doc2Vec:
return Doc2Vec(
documents=docs,
vector_size=self.n_components,
min_count=0,
alpha=self.learning_rate,
window=self.window,
sample=self.sample,
seed=self.random_state,
workers=self.n_jobs,
min_alpha=self.min_learning_rate,
dm=int(self.algorithm == "dm"),
dm_mean=int(self.dm_agg == "mean"),
dm_concat=int(self.dm_agg == "concat"),
dbow_words=int(self.dbow_words),
dm_tag_count=self.dm_tag_count,
hs=int(self.hs),
negative=self.negative,
ns_exponent=self.ns_exponent,
epochs=self.epochs,
trim_rule=None,
batch_words=self.batch_words,
compute_loss=True,
shrink_windows=self.shrink_windows,
)
def _append_loss(self):
self.loss_.append(self.model_.get_latest_training_loss()) # type: ignore
def fit(self, X: Iterable[Iterable[str]], y=None):
"""Fits a new doc2vec model to the given documents."""
self.seen_docs_ = 0
# Forcing evaluation
X_eval: list[list[str]] = deeplist(X)
n_docs = len(X_eval)
if self.max_docs < n_docs:
init_batch = _tag_enumerate(X_eval[: self.max_docs])
self.model_ = self._init_model(init_batch)
self._append_loss()
self.partial_fit(X_eval[self.max_docs :])
return self
docs = _tag_enumerate(X_eval)
self.model_ = self._init_model(docs)
self._append_loss()
return self
def partial_fit(self, X: Iterable[Iterable[str]], y=None):
"""Partially fits doc2vec model (online fitting)."""
# Force evaluation on iterable
X_eval: list[list[str]] = deeplist(X)
if self.model_ is None:
self.fit(X_eval)
return self
# We obtained tagged documents
tagged_docs = self._tag_documents(X_eval)
# Then build vocabulary
self.model_.build_vocab(tagged_docs, update=True)
self.model_.train(
tagged_docs,
total_examples=self.model_.corpus_count,
epochs=1,
compute_loss=True,
)
self._append_loss()
return self
def transform(self, X: Iterable[Iterable[str]]) -> np.ndarray:
"""Infers vectors for all of the given documents."""
if self.model_ is None:
raise NotFittedError(
"Model ha been not fitted, please fit before inference."
)
vectors = [self.model_.infer_vector(list(doc)) for doc in X]
return np.stack(vectors)
@property
def components_(self) -> np.ndarray:
if self.model_ is None:
raise NotFittedError("Model has not been fitted yet.")
return np.array(self.model_.dv.vectors).T
def to_bytes(self) -> bytes:
if self.model_ is None:
raise NotFittedError(
"Can't save model if it hasn't been fitted yet."
)
with tempfile.NamedTemporaryFile(prefix="gensim-model-") as tmp:
temporary_filepath = tmp.name
self.model_.save(temporary_filepath)
with open(temporary_filepath, "rb") as temp_buffer:
return temp_buffer.read()
def from_bytes(self, data: bytes) -> "ParagraphEmbedding":
with tempfile.NamedTemporaryFile(prefix="gensim-model-") as tmp:
tmp.write(data)
model = Doc2Vec.load(tmp.name)
self.model_ = model
return self
@property
def config(self) -> Config:
return Config(
{
"embedding": {
"@models": "paragraph_embedding.v1",
**self.get_params(),
}
}
)
@classmethod
def from_config(cls, config: Config) -> "ParagraphEmbedding":
resolved = registry.resolve(config)
return resolved["embedding"] | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/models/doc2vec.py | 0.863147 | 0.212784 | doc2vec.py | pypi |
import tempfile
from typing import Iterable, Literal
import numpy as np
from confection import Config, registry
from gensim.models import KeyedVectors, Word2Vec
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from skembeddings.base import Serializable
from skembeddings.streams.utils import deeplist
class Word2VecEmbedding(BaseEstimator, TransformerMixin, Serializable):
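    """Scikit-learn compatible Word2Vec embedding model.
    Documents are embedded by aggregating the vectors of their in-vocabulary
    tokens with the strategy given by `agg` ("mean", "max" or "both"; "both"
    concatenates the two and doubles the output width).
    """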
def __init__(
self,
n_components: int = 100,
window: int = 5,
algorithm: Literal["cbow", "sg"] = "cbow",
agg: Literal["mean", "max", "both"] = "mean",
epochs: int = 5,
random_state: int = 0,
negative: int = 5,
ns_exponent: float = 0.75,
cbow_agg: Literal["mean", "sum"] = "mean",
sample: float = 0.001,
hs: bool = False,
batch_words: int = 10000,
shrink_windows: bool = True,
learning_rate: float = 0.025,
min_learning_rate: float = 0.0001,
n_jobs: int = 1,
):
self.agg = agg
self.n_components = n_components
self.n_jobs = n_jobs
self.window = window
self.algorithm = algorithm
self.random_state = random_state
self.learning_rate = learning_rate
self.min_learning_rate = min_learning_rate
self.negative = negative
self.ns_exponent = ns_exponent
self.cbow_agg = cbow_agg
self.sample = sample
self.hs = hs
self.batch_words = batch_words
self.shrink_windows = shrink_windows
self.epochs = epochs
self.model_ = None
self.loss_: list[float] = []
self.n_features_out = (
self.n_components if agg != "both" else self.n_components * 2
)
def _init_model(self, sentences=None) -> Word2Vec:
return Word2Vec(
sentences=sentences,
vector_size=self.n_components,
min_count=0,
alpha=self.learning_rate,
window=self.window,
sample=self.sample,
seed=self.random_state,
workers=self.n_jobs,
min_alpha=self.min_learning_rate,
sg=int(self.algorithm == "sg"),
hs=int(self.hs),
negative=self.negative,
ns_exponent=self.ns_exponent,
cbow_mean=int(self.cbow_agg == "mean"),
epochs=self.epochs,
trim_rule=None,
batch_words=self.batch_words,
compute_loss=True,
shrink_windows=self.shrink_windows,
)
def fit(self, X: Iterable[Iterable[str]], y=None):
self._check_inputs(X)
X = deeplist(X)
self.loss_ = []
self.model_ = self._init_model(sentences=X)
self.loss_.append(self.model_.get_latest_training_loss())
return self
def partial_fit(self, X: Iterable[Iterable[str]], y=None):
self._check_inputs(X)
X = deeplist(X)
if self.model_ is None:
self.fit(X, y)
else:
self.model_.build_vocab(X, update=True)
self.model_.train(
X,
total_examples=self.model_.corpus_count,
epochs=self.model_.epochs,
                compute_loss=True,
)
self.loss_.append(self.model_.get_latest_training_loss())
return self
def _check_inputs(self, X):
options = ["mean", "max", "both"]
if self.agg not in options:
raise ValueError(
f"The `agg` value must be in {options}. Got {self.agg}."
)
def _collect_vectors_single(self, tokens: list[str]) -> np.ndarray:
embeddings = []
for token in tokens:
try:
embeddings.append(self.model_.wv[token]) # type: ignore
except KeyError:
continue
if not embeddings:
return np.full((1, self.n_features_out), np.nan)
return np.stack(embeddings)
def transform(self, X: Iterable[Iterable[str]], y=None):
"""Transforms the phrase text into a numeric
representation using word embeddings."""
self._check_inputs(X)
X: list[list[str]] = deeplist(X)
embeddings = np.empty((len(X), self.n_features_out))
for i_doc, doc in enumerate(X):
            if not len(doc):
                embeddings[i_doc, :] = np.nan
                continue
            doc_vectors = self._collect_vectors_single(doc)
if self.agg == "mean":
embeddings[i_doc, :] = np.mean(doc_vectors, axis=0)
elif self.agg == "max":
embeddings[i_doc, :] = np.max(doc_vectors, axis=0)
elif self.agg == "both":
mean_vector = np.mean(doc_vectors, axis=0)
max_vector = np.max(doc_vectors, axis=0)
embeddings[i_doc, :] = np.concatenate(
(mean_vector, max_vector)
)
return embeddings
@property
def keyed_vectors(self) -> KeyedVectors:
if self.model_ is None:
raise NotFittedError(
"Can't access keyed vectors, model has not been fitted yet."
)
return self.model_.wv
def to_bytes(self) -> bytes:
if self.model_ is None:
raise NotFittedError(
"Can't save model if it hasn't been fitted yet."
)
with tempfile.NamedTemporaryFile(prefix="gensim-model-") as tmp:
temporary_filepath = tmp.name
self.model_.save(temporary_filepath)
with open(temporary_filepath, "rb") as temp_buffer:
return temp_buffer.read()
def from_bytes(self, data: bytes) -> "Word2VecEmbedding":
with tempfile.NamedTemporaryFile(prefix="gensim-model-") as tmp:
tmp.write(data)
model = Word2Vec.load(tmp.name)
self.model_ = model
return self
@property
def config(self) -> Config:
return Config(
{
"embedding": {
"@models": "word2vec_embedding.v1",
**self.get_params(),
}
}
)
@classmethod
def from_config(cls, config: Config) -> "Word2VecEmbedding":
resolved = registry.resolve(config)
return resolved["embedding"] | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/models/word2vec.py | 0.828973 | 0.182753 | word2vec.py | pypi |
import collections
from itertools import islice
from typing import Iterable
import mmh3
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from thinc.api import Adam, CategoricalCrossentropy, Relu, Softmax, chain
from thinc.types import Floats2d
from tqdm import tqdm
from skembeddings.streams.utils import deeplist
def sliding_window(iterable, n):
# sliding_window('ABCDEFG', 4) --> ABCD BCDE CDEF DEFG
it = iter(iterable)
window = collections.deque(islice(it, n), maxlen=n)
if len(window) == n:
yield tuple(window)
for x in it:
window.append(x)
yield tuple(window)
def hash_embed(
tokens: list[str], n_buckets: int, seeds: tuple[int]
) -> np.ndarray:
"""Embeds ids with the bloom hashing trick."""
embedding = np.zeros((len(tokens), n_buckets), dtype=np.float16)
n_seeds = len(seeds)
prob = 1 / n_seeds
for i_token, token in enumerate(tokens):
for seed in seeds:
i_bucket = mmh3.hash(token, seed=seed) % n_buckets
embedding[i_token, i_bucket] = prob
return embedding
class BloomWordEmbedding(BaseEstimator, TransformerMixin):
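    """Word embeddings trained with the bloom/hashing trick.
    Tokens are hashed into `n_buckets` using `n_seeds` different seeds, and a
    small thinc network is trained to predict context tokens from target
    tokens over sliding windows; document vectors are the mean of the encoded
    token vectors.
    """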
def __init__(
self,
vector_size: int = 100,
window_size: int = 5,
n_buckets: int = 1000,
n_seeds: int = 4,
epochs: int = 5,
):
self.vector_size = vector_size
self.n_buckets = n_buckets
self.window_size = window_size
self.epochs = epochs
self.encoder = None
self.seeds = tuple(range(n_seeds))
self.n_seeds = n_seeds
def _extract_target_context(
self, docs: list[list[str]]
) -> tuple[list[str], list[str]]:
target: list[str] = []
context: list[str] = []
for doc in docs:
for window in sliding_window(doc, n=self.window_size * 2 + 1):
middle_index = (len(window) - 1) // 2
_target = window[middle_index]
_context = [
token
for i, token in enumerate(window)
if i != middle_index
]
target.extend([_target] * len(_context))
context.extend(_context)
return target, context
def _init_model(self):
self.encoder = Relu(self.vector_size)
self.context_predictor = chain(self.encoder, Softmax(self.n_buckets))
self.loss_calc = CategoricalCrossentropy()
self.optimizer = Adam(
learn_rate=0.001,
beta1=0.9,
beta2=0.999,
eps=1e-08,
L2=1e-6,
grad_clip=1.0,
use_averages=True,
L2_is_weight_decay=True,
)
def _hash_embed(self, tokens: list[str]) -> Floats2d:
ops = self.context_predictor.ops
emb = hash_embed(tokens, self.n_buckets, self.seeds)
return ops.asarray2f(emb)
def _train_batch(self, batch: tuple[list[str], list[str]]):
targets, contexts = batch
_targets = self._hash_embed(targets)
_contexts = self._hash_embed(contexts)
try:
Yh, backprop = self.context_predictor.begin_update(_targets)
except KeyError:
self.context_predictor.initialize(_targets, _contexts)
Yh, backprop = self.context_predictor.begin_update(_targets)
dYh = self.loss_calc.get_grad(Yh, _contexts)
backprop(dYh)
self.context_predictor.finish_update(self.optimizer)
def fit(self, X: Iterable[Iterable[str]], y=None):
X_eval = deeplist(X)
self._init_model()
ops = self.context_predictor.ops
targets, contexts = self._extract_target_context(X_eval)
batches = ops.multibatch(128, targets, contexts, shuffle=True)
for batch in tqdm(batches):
self._train_batch(batch)
return self
def partial_fit(self, X: Iterable[Iterable[str]], y=None):
if self.encoder is None:
return self.fit(X)
X_eval = deeplist(X)
targets, contexts = self._extract_target_context(X_eval)
ops = self.context_predictor.ops
batches = ops.multibatch(128, targets, contexts, shuffle=True)
for batch in batches:
self._train_batch(batch)
return self
def transform(self, X: Iterable[Iterable[str]], y=None) -> np.ndarray:
"""Transforms the phrase text into a numeric
representation using word embeddings."""
if self.encoder is None:
raise NotFittedError(
"Model has not been trained yet, can't transform."
)
ops = self.encoder.ops
X_eval = deeplist(X)
X_new = []
for doc in X_eval:
doc_emb = hash_embed(doc, self.n_buckets, self.seeds)
doc_emb = ops.asarray2f(doc_emb) # type: ignore
doc_vecs = ops.to_numpy(self.encoder.predict(doc_emb))
X_new.append(np.nanmean(doc_vecs, axis=0))
return np.stack(X_new) | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/models/bloom.py | 0.855791 | 0.228028 | bloom.py | pypi |
from typing import Literal
from confection import registry
from skembeddings.error import NotInstalled
try:
from skembeddings.models.word2vec import Word2VecEmbedding
except ModuleNotFoundError:
Word2VecEmbedding = NotInstalled("Word2VecEmbedding", "gensim")
try:
from skembeddings.models.doc2vec import ParagraphEmbedding
except ModuleNotFoundError:
ParagraphEmbedding = NotInstalled("ParagraphEmbedding", "gensim")
@registry.models.register("word2vec_embedding.v1")
def make_word2vec_embedding(
n_components: int = 100,
window: int = 5,
algorithm: Literal["cbow", "sg"] = "cbow",
agg: Literal["mean", "max", "both"] = "mean",
epochs: int = 5,
random_state: int = 0,
negative: int = 5,
ns_exponent: float = 0.75,
cbow_agg: Literal["mean", "sum"] = "mean",
sample: float = 0.001,
hs: bool = False,
batch_words: int = 10000,
shrink_windows: bool = True,
learning_rate: float = 0.025,
min_learning_rate: float = 0.0001,
n_jobs: int = 1,
):
return Word2VecEmbedding(
n_components=n_components,
window=window,
algorithm=algorithm,
agg=agg,
epochs=epochs,
random_state=random_state,
negative=negative,
ns_exponent=ns_exponent,
cbow_agg=cbow_agg,
sample=sample,
hs=hs,
batch_words=batch_words,
shrink_windows=shrink_windows,
learning_rate=learning_rate,
min_learning_rate=min_learning_rate,
n_jobs=n_jobs,
)
@registry.models.register("paragraph_embedding.v1")
def make_paragraph_embedding(
n_components: int = 100,
window: int = 5,
algorithm: Literal["dm", "dbow"] = "dm",
tagging_scheme: Literal["hash", "closest"] = "hash",
max_docs: int = 100_000,
epochs: int = 10,
random_state: int = 0,
negative: int = 5,
ns_exponent: float = 0.75,
dm_agg: Literal["mean", "sum", "concat"] = "mean",
dm_tag_count: int = 1,
dbow_words: bool = False,
sample: float = 0.001,
hs: bool = False,
batch_words: int = 10000,
shrink_windows: bool = True,
learning_rate: float = 0.025,
min_learning_rate: float = 0.0001,
n_jobs: int = 1,
):
return ParagraphEmbedding(
n_components=n_components,
window=window,
algorithm=algorithm,
tagging_scheme=tagging_scheme,
max_docs=max_docs,
epochs=epochs,
random_state=random_state,
negative=negative,
ns_exponent=ns_exponent,
dm_agg=dm_agg,
dm_tag_count=dm_tag_count,
dbow_words=dbow_words,
sample=sample,
hs=hs,
batch_words=batch_words,
shrink_windows=shrink_windows,
learning_rate=learning_rate,
min_learning_rate=min_learning_rate,
n_jobs=n_jobs,
)
__all__ = ["Word2VecEmbedding", "ParagraphEmbedding"] | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/models/__init__.py | 0.791781 | 0.164315 | __init__.py | pypi |
from typing import Iterable, Literal, Union
import numpy as np
from gensim.models import KeyedVectors
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.cluster import MiniBatchKMeans
from sklearn.exceptions import NotFittedError
from tqdm import tqdm
from skembeddings.streams.utils import deeplist
class VlaweEmbedding(BaseEstimator, TransformerMixin):
"""Scikit-learn compatible VLAWE model."""
def __init__(
self,
word_embeddings: Union[TransformerMixin, KeyedVectors],
prefit: bool = False,
n_clusters: int = 10,
):
self.word_embeddings = word_embeddings
self.prefit = prefit
self.kmeans = None
self.n_clusters = n_clusters
def _collect_vectors_single(self, tokens: list[str]) -> np.ndarray:
if isinstance(self.word_embeddings, KeyedVectors):
kv = self.word_embeddings
embeddings = []
for token in tokens:
try:
embeddings.append(kv[token]) # type: ignore
except KeyError:
continue
if not embeddings:
return np.full((1, kv.vector_size), np.nan)
return np.stack(embeddings)
else:
return self.word_embeddings.transform(tokens)
def _infer_single(self, doc: list[str]) -> np.ndarray:
if self.kmeans is None:
raise NotFittedError(
"Embeddings have not been fitted yet, can't infer."
)
vectors = self._collect_vectors_single(doc)
residuals = []
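        # For every k-means centroid, sum the residuals of all word vectors in
        # the document; concatenating them gives an n_clusters * dim VLAWE vector.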
for centroid in self.kmeans.cluster_centers_:
residual = np.sum(vectors - centroid, axis=0)
residuals.append(residual)
return np.concatenate(residuals)
def fit(self, X: Iterable[Iterable[str]], y=None):
"""Fits a model to the given documents."""
X_eval = deeplist(X)
if (
not isinstance(self.word_embeddings, KeyedVectors)
and not self.prefit
):
print("Fitting word embeddings")
self.word_embeddings.fit(X_eval)
print("Collecting vectors")
all_vecs = np.concatenate(
[self._collect_vectors_single(doc) for doc in X_eval]
)
print("Fitting Kmeans")
self.kmeans = MiniBatchKMeans(n_clusters=self.n_clusters)
self.kmeans.fit(all_vecs)
return self
def partial_fit(self, X: Iterable[Iterable[str]], y=None):
"""Partially fits model (online fitting)."""
if self.kmeans is None:
return self.fit(X)
X_eval = deeplist(X)
if (
not isinstance(self.word_embeddings, KeyedVectors)
and not self.prefit
):
self.word_embeddings.partial_fit(X_eval)
all_vecs = np.concatenate(
[self._collect_vectors_single(doc) for doc in X_eval]
)
self.kmeans.partial_fit(all_vecs)
return self
def transform(self, X: Iterable[Iterable[str]]) -> np.ndarray:
"""Infers vectors for all of the given documents."""
vectors = [self._infer_single(doc) for doc in tqdm(deeplist(X))]
return np.stack(vectors) | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/models/vlawe.py | 0.883808 | 0.287893 | vlawe.py | pypi |
import functools
import random
from itertools import islice
from typing import Callable, Iterable, List, Literal, Optional, TypeVar
from sklearn.base import BaseEstimator
def filter_batches(
chunks: Iterable[list], estimator: BaseEstimator, prefit: bool
) -> Iterable[list]:
for chunk in chunks:
if prefit:
predictions = estimator.predict(chunk) # type: ignore
else:
predictions = estimator.fit_predict(chunk) # type: ignore
passes = predictions != -1
filtered_chunk = [elem for elem, _pass in zip(chunk, passes) if _pass]
yield filtered_chunk
def pipe_streams(*transforms: Callable) -> Callable:
"""Pipes iterator transformations together.
Parameters
----------
*transforms: Callable
        Generator functions that transform an iterable into another iterable.
Returns
-------
Callable
Generator function composing all of the other ones.
"""
def _pipe(x: Iterable) -> Iterable:
for f in transforms:
x = f(x)
return x
return _pipe
def reusable(gen_func: Callable) -> Callable:
"""
Function decorator that turns your generator function into an
iterator, thereby making it reusable.
Parameters
----------
gen_func: Callable
Generator function, that you want to be reusable
Returns
----------
_multigen: Callable
Sneakily created iterator class wrapping the generator function
"""
@functools.wraps(gen_func, updated=())
class _multigen:
def __init__(self, *args, limit=None, **kwargs):
self.__args = args
self.__kwargs = kwargs
self.limit = limit
# functools.update_wrapper(self, gen_func)
def __iter__(self):
if self.limit is not None:
return islice(
gen_func(*self.__args, **self.__kwargs), self.limit
)
return gen_func(*self.__args, **self.__kwargs)
return _multigen
U = TypeVar("U")
def chunk(
iterable: Iterable[U], chunk_size: int, sample_size: Optional[int] = None
) -> Iterable[List[U]]:
"""
Generator function that chunks an iterable for you.
Parameters
----------
    iterable: Iterable of U
The iterable you'd like to chunk.
chunk_size: int
The size of chunks you would like to get back
sample_size: int or None, default None
If specified the yielded lists will be randomly sampled with the buffer
with replacement. Sample size determines how big you want
those lists to be.
Yields
------
    buffer: list of U
sample_size or chunk_size sized lists chunked from
the original iterable.
"""
buffer = []
for index, elem in enumerate(iterable):
buffer.append(elem)
if (index % chunk_size == (chunk_size - 1)) and (index != 0):
if sample_size is None:
yield buffer
else:
yield random.choices(buffer, k=sample_size)
buffer = []
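# Note: chunk() only yields full chunks; a trailing partial buffer with fewer
# than chunk_size elements is dropped rather than yielded.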
def stream_files(
paths: Iterable[str],
lines: bool = False,
not_found_action: Literal["exception", "none", "drop"] = "exception",
) -> Iterable[Optional[str]]:
"""Streams text contents from files on disk.
Parameters
----------
paths: iterable of str
Iterable of file paths on disk.
lines: bool, default False
Indicates whether you want to get a stream over lines
or file contents.
not_found_action: {'exception', 'none', 'drop'}, default 'exception'
Indicates what should happen if a file was not found.
'exception' propagates the exception to top level, 'none' yields
None for each file that fails, 'drop' ignores them completely.
Yields
------
str or None
File contents or lines in files if lines is True.
Can only yield None if not_found_action is 'none'.
"""
for path in paths:
try:
with open(path) as in_file:
if lines:
for line in in_file:
yield line
else:
yield in_file.read()
except FileNotFoundError as e:
if not_found_action == "exception":
raise FileNotFoundError(
f"Streaming failed as file {path} could not be found"
) from e
elif not_found_action == "none":
yield None
elif not_found_action == "drop":
continue
else:
raise ValueError(
"""Unrecognized `not_found_action`.
Please chose one of `"exception", "none", "drop"`"""
)
def flatten_stream(nested: Iterable, axis: int = 1) -> Iterable:
"""Turns nested stream into a flat stream.
If multiple levels are nested, the iterable will be flattenned along
the given axis.
To match the behaviour of Awkward Array flattening, axis=0 only
removes None elements from the array along the outermost axis.
Negative axis values are not yet supported.
Parameters
----------
nested: iterable
Iterable of iterables of unknown depth.
axis: int, default 1
Axis/level of depth at which the iterable should be flattened.
Returns
-------
iterable
Iterable with one lower level of nesting.
"""
if not isinstance(nested, Iterable):
raise ValueError(
f"Nesting is too deep, values at level {axis} are not iterables"
)
if axis == 0:
return (elem for elem in nested if elem is not None and (elem != []))
if axis == 1:
for sub in nested:
for elem in sub:
yield elem
elif axis > 1:
for sub in nested:
yield flatten_stream(sub, axis=axis - 1)
else:
raise ValueError("Flattening axis needs to be greater than 0.")
def deeplist(nested) -> list:
"""Recursively turns nested iterable to list.
Parameters
----------
nested: iterable
Nested iterable.
Returns
-------
list
Nested list.
"""
if not isinstance(nested, Iterable) or isinstance(nested, str):
return nested # type: ignore
else:
return [deeplist(sub) for sub in nested] | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/streams/utils.py | 0.868381 | 0.326352 | utils.py | pypi |
import functools
import json
from dataclasses import dataclass
from itertools import islice
from typing import Callable, Iterable, Literal
from sklearn.base import BaseEstimator
from skembeddings.streams.utils import (chunk, deeplist, filter_batches,
flatten_stream, reusable, stream_files)
@dataclass
class Stream:
"""Utility class for streaming, batching and filtering texts
from an external source.
Parameters
----------
iterable: Iterable
Core iterable object in the stream.
"""
iterable: Iterable
def __iter__(self):
return iter(self.iterable)
def filter(self, func: Callable, *args, **kwargs):
"""Filters the stream given a function that returns a bool."""
@functools.wraps(func)
def _func(elem):
return func(elem, *args, **kwargs)
_iterable = reusable(filter)(_func, self.iterable)
return Stream(_iterable)
def map(self, func: Callable, *args, **kwargs):
"""Maps a function over the stream."""
@functools.wraps(func)
def _func(elem):
return func(elem, *args, **kwargs)
_iterable = reusable(map)(_func, self.iterable)
return Stream(_iterable)
def pipe(self, func: Callable, *args, **kwargs):
"""Pipes the stream into a function that takes
the whole stream and returns a new one."""
@functools.wraps(func)
def _func(iterable):
return func(iterable, *args, **kwargs)
_iterable = reusable(_func)(self.iterable)
return Stream(_iterable)
def islice(self, *args):
"""Equivalent to itertools.islice()."""
return self.pipe(islice, *args)
def evaluate(self, deep: bool = False):
"""Evaluates the entire iterable and collects it into
a list.
Parameters
----------
deep: bool, default False
Indicates whether nested iterables should be deeply
evaluated. Uses deeplist() internally.
"""
if deep:
_iterable = deeplist(self.iterable)
else:
_iterable = list(self.iterable)
return Stream(_iterable)
def read_files(
self,
lines: bool = True,
not_found_action: Literal["exception", "none", "drop"] = "exception",
):
"""Reads a stream of file paths from disk.
Parameters
----------
lines: bool, default True
Indicates whether lines should be streamed or not.
not_found_action: str, default 'exception'
Indicates what should be done if a given file is not found.
'exception' raises an exception,
'drop' ignores it,
'none' returns a None for each nonexistent file.
"""
return self.pipe(
stream_files,
lines=lines,
not_found_action=not_found_action,
)
def json(self):
"""Parses a stream of texts into JSON objects."""
return self.map(json.loads)
def grab(self, field: str):
"""Grabs one field from a stream of records."""
return self.map(lambda record: record[field])
def flatten(self, axis=1):
"""Flattens a nested stream along a given axis."""
return self.pipe(flatten_stream, axis=axis)
def chunk(self, size: int):
"""Chunks stream with the given batch size."""
return self.pipe(chunk, chunk_size=size)
def filter_batches(self, estimator: BaseEstimator, prefit: bool = True):
"""Filters batches with a scikit-learn compatible
estimator.
Parameters
----------
estimator: BaseEstimator
Scikit-learn estimator to use for filtering the batches.
Either needs a .predict() or .fit_predict() method.
Every sample that gets labeled -1 will be removed from the
batch.
prefit: bool, default True
Indicates whether the estimator is prefit.
If it is .predict() will be used (novelty detection), else
.fit_predict() will be used (outlier detection).
"""
return self.pipe(filter_batches, estimator=estimator, prefit=prefit)
def collect(self, deep: bool = False):
"""Does the same as evaluate()."""
return self.evaluate(deep) | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/streams/_stream.py | 0.906366 | 0.326258 | _stream.py | pypi |
.. figure:: https://github.com/Ibotta/pure-predict/blob/master/doc/images/pure-predict.png
:alt: pure-predict
pure-predict: Machine learning prediction in pure Python
========================================================
|License| |Build Status| |PyPI Package| |Downloads| |Python Versions|
``pure-predict`` speeds up and slims down machine learning prediction applications. It is
a foundational tool for serverless inference or small batch prediction with popular machine
learning frameworks like `scikit-learn <https://scikit-learn.org/stable/>`__ and `fasttext <https://fasttext.cc/>`__.
It implements the predict methods of these frameworks in pure Python.
Primary Use Cases
-----------------
The primary use case for ``pure-predict`` is the following scenario:
#. A model is trained in an environment without strong container footprint constraints. Perhaps a long running "offline" job on one or many machines where installing a number of python packages from PyPI is not at all problematic.
#. At prediction time the model needs to be served behind an API. Typical access patterns are to request a prediction for one "record" (one "row" in a ``numpy`` array or one string of text to classify) per request or a mini-batch of records per request.
#. Preferred infrastructure for the prediction service is either serverless (`AWS Lambda <https://aws.amazon.com/lambda/>`__) or a container service where the memory footprint of the container is constrained.
#. The fitted model object's artifacts needed for prediction (coefficients, weights, vocabulary, decision tree artifacts, etc.) are relatively small (10s to 100s of MBs).
.. figure:: https://github.com/Ibotta/pure-predict/blob/master/doc/images/diagram.png
:alt: diagram
In this scenario, a container service with a large dependency footprint can be overkill for a microservice, particularly if the access patterns favor the pricing model of a serverless application. Additionally, for smaller models and single record predictions per request, the ``numpy`` and ``scipy`` functionality in the prediction methods of popular machine learning frameworks work against the application in terms of latency, `underperforming pure python <https://github.com/Ibotta/pure-predict/blob/master/examples/performance_rf.py>`__ in some cases.
Check out the `blog post <https://medium.com/building-ibotta/predict-with-sklearn-20x-faster-9f2803944446>`__
for more information on the motivation and use cases of ``pure-predict``.
Package Details
---------------
It is a Python package for machine learning prediction distributed under
the `Apache 2.0 software license <https://github.com/Ibotta/sk-dist/blob/master/LICENSE>`__.
It contains multiple subpackages which mirror their open source
counterpart (``scikit-learn``, ``fasttext``, etc.). Each subpackage has utilities to
convert a fitted machine learning model into a custom object containing prediction methods
that mirror their native counterparts, but converted to pure python. Additionally, all
relevant model artifacts needed for prediction are converted to pure python.
A ``pure-predict`` model object can then be pickled and later
unpickled without any 3rd party dependencies other than ``pure-predict``.
This eliminates the need to have large dependency packages installed in order to
make predictions with fitted machine learning models using popular open source packages for
training models. These dependencies (``numpy``, ``scipy``, ``scikit-learn``, ``fasttext``, etc.)
are large in size and `not always necessary to make fast and accurate
predictions <https://github.com/Ibotta/pure-predict/blob/master/examples/performance_rf.py>`__.
Additionally, they rely on C extensions that may not be ideal for serverless applications with a python runtime.
Quick Start Example
-------------------
In a Python environment with ``scikit-learn`` and its dependencies installed:
.. code-block:: python
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import load_iris
from pure_sklearn.map import convert_estimator
# fit sklearn estimator
X, y = load_iris(return_X_y=True)
clf = RandomForestClassifier()
clf.fit(X, y)
# convert to pure python estimator
clf_pure_predict = convert_estimator(clf)
with open("model.pkl", "wb") as f:
pickle.dump(clf_pure_predict, f)
# make prediction with sklearn estimator
y_pred = clf.predict([[0.25, 2.0, 8.3, 1.0]])
print(y_pred)
[2]
In a Python environment with only ``pure-predict`` installed:
.. code-block:: python
import pickle
# load pickled model
with open("model.pkl", "rb") as f:
clf = pickle.load(f)
# make prediction with pure-predict object
y_pred = clf.predict([[0.25, 2.0, 8.3, 1.0]])
print(y_pred)
[2]
Subpackages
-----------
`pure_sklearn <https://github.com/Ibotta/pure-predict/tree/master/pure_sklearn>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Prediction in pure python for a subset of ``scikit-learn`` estimators and transformers.
- **estimators**
- **linear models** - supports the majority of linear models for classification
- **trees** - decision trees, random forests, gradient boosting and xgboost
- **naive bayes** - a number of popular naive bayes classifiers
- **svm** - linear SVC
- **transformers**
- **preprocessing** - normalization and onehot/ordinal encoders
- **impute** - simple imputation
- **feature extraction** - text (tfidf, count vectorizer, hashing vectorizer) and dictionary vectorization
- **pipeline** - pipelines and feature unions
Sparse data - supports a custom pure Python sparse data object; sparse data is handled as expected by the relevant transformers and estimators
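Because pipelines and text vectorizers are supported, a fitted text-classification pipeline can be converted the same way as the single estimator shown in the quick start. The snippet below is a minimal sketch; the toy training texts and labels are illustrative only.
.. code-block:: python
    from sklearn.pipeline import Pipeline
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.linear_model import LogisticRegression
    from pure_sklearn.map import convert_estimator
    # fit a small text classification pipeline
    pipe = Pipeline([
        ("tfidf", TfidfVectorizer()),
        ("clf", LogisticRegression()),
    ])
    pipe.fit(["free money now", "meeting at noon", "win a prize"], [1, 0, 1])
    # convert to a pure python object and predict without numpy/scipy/sklearn
    pipe_pure = convert_estimator(pipe)
    print(pipe_pure.predict(["free prize money"]))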
`pure_fasttext <https://github.com/Ibotta/pure-predict/tree/master/pure_fasttext>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Prediction in pure python for ``fasttext``.
- **supervised** - predicts labels for supervised models; no support for quantized models (blocked by `this issue <https://github.com/facebookresearch/fastText/issues/984>`__)
- **unsupervised** - lookup of word or sentence embeddings given input text
Installation
------------
Dependencies
~~~~~~~~~~~~
``pure-predict`` requires:
- `Python <https://www.python.org/>`__ (>= 3.6)
Dependency Notes
~~~~~~~~~~~~~~~~
- ``pure_sklearn`` has been tested with ``scikit-learn`` versions >= 0.20 -- certain functionality may work with lower versions but is not guaranteed. Some functionality is explicitly not supported for certain ``scikit-learn`` versions and exceptions will be raised as appropriate.
- ``xgboost`` requires version >= 0.82 for support with ``pure_sklearn``.
- ``pure-predict`` is not supported with Python 2.
- ``fasttext`` versions <= 0.9.1 have been tested.
User Installation
~~~~~~~~~~~~~~~~~
The easiest way to install ``pure-predict`` is with ``pip``:
::
pip install --upgrade pure-predict
You can also download the source code:
::
git clone https://github.com/Ibotta/pure-predict.git
Testing
~~~~~~~
With ``pytest`` installed, you can run tests locally:
::
pytest pure-predict
Examples
--------
The package contains `examples <https://github.com/Ibotta/pure-predict/tree/master/examples>`__
on how to use ``pure-predict`` in practice.
Calls for Contributors
----------------------
Contributing to ``pure-predict`` is `welcomed by any contributors <https://github.com/Ibotta/pure-predict/blob/master/CONTRIBUTING.md>`__. Specific calls for contribution are as follows:
#. Examples, tests and documentation -- particularly more detailed examples with performance testing of various estimators under various constraints.
#. Adding more ``pure_sklearn`` estimators. The ``scikit-learn`` package is extensive and only partially covered by ``pure_sklearn``. `Regression <https://scikit-learn.org/stable/supervised_learning.html#supervised-learning>`__ tasks in particular missing from ``pure_sklearn``. `Clustering <https://scikit-learn.org/stable/modules/clustering.html#clustering>`__, `dimensionality reduction <https://scikit-learn.org/stable/modules/decomposition.html#decompositions>`__, `nearest neighbors <https://scikit-learn.org/stable/modules/neighbors.html>`__, `feature selection <https://scikit-learn.org/stable/modules/feature_selection.html>`__, non-linear `SVM <https://scikit-learn.org/stable/modules/svm.html>`__, and more are also omitted and would be good candidates for extending ``pure_sklearn``.
#. General efficiency. There is likely low hanging fruit for improving the efficiency of the ``numpy`` and ``scipy`` functionality that has been ported to ``pure-predict``.
#. `Threading <https://docs.python.org/3/library/threading.html>`__ could be considered to improve performance -- particularly for making predictions with multiple records.
#. A public `AWS lambda layer <https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html>`__ containing ``pure-predict``.
Background
----------
The project was started at `Ibotta
Inc. <https://medium.com/building-ibotta>`__ on the machine learning
team and open sourced in 2020. It is currently maintained by the machine
learning team at Ibotta.
Acknowledgements
~~~~~~~~~~~~~~~~
Thanks to `David Mitchell <https://github.com/dlmitchell>`__ and `Andrew Tilley <https://github.com/tilleyand>`__ for internal review before open source. Thanks to `James Foley <https://github.com/chadfoley36>`__ for logo artwork.
.. figure:: https://github.com/Ibotta/pure-predict/blob/master/doc/images/ibottaml.png
:alt: IbottaML
.. |License| image:: https://img.shields.io/badge/License-Apache%202.0-blue.svg
:target: https://opensource.org/licenses/Apache-2.0
.. |Build Status| image:: https://travis-ci.com/Ibotta/pure-predict.png?branch=master
:target: https://travis-ci.com/Ibotta/pure-predict
.. |PyPI Package| image:: https://badge.fury.io/py/pure-predict.svg
:target: https://pypi.org/project/pure-predict/
.. |Downloads| image:: https://pepy.tech/badge/pure-predict
:target: https://pepy.tech/project/pure-predict
.. |Python Versions| image:: https://img.shields.io/pypi/pyversions/pure-predict
:target: https://pypi.org/project/pure-predict/
| /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/README.rst | 0.968738 | 0.825027 | README.rst | pypi |
MAPPING = {
"LogisticRegression": "scikit_endpoint.linear_model.LogisticRegressionPure",
"RidgeClassifier": "scikit_endpoint.linear_model.RidgeClassifierPure",
"SGDClassifier": "scikit_endpoint.linear_model.SGDClassifierPure",
"Perceptron": "scikit_endpoint.linear_model.PerceptronPure",
"PassiveAggressiveClassifier": "scikit_endpoint.linear_model.PassiveAggressiveClassifierPure",
"LinearSVC": "scikit_endpoint.svm.LinearSVCPure",
"DecisionTreeClassifier": "scikit_endpoint.tree.DecisionTreeClassifierPure",
"DecisionTreeRegressor": "scikit_endpoint.tree.DecisionTreeRegressorPure",
"ExtraTreeClassifier": "scikit_endpoint.tree.ExtraTreeClassifierPure",
"ExtraTreeRegressor": "scikit_endpoint.tree.ExtraTreeRegressorPure",
"RandomForestClassifier": "scikit_endpoint.ensemble.RandomForestClassifierPure",
"BaggingClassifier": "scikit_endpoint.ensemble.BaggingClassifierPure",
"GradientBoostingClassifier": "scikit_endpoint.ensemble.GradientBoostingClassifierPure",
"XGBClassifier": "scikit_endpoint.xgboost.XGBClassifierPure",
"ExtraTreesClassifier": "scikit_endpoint.ensemble.ExtraTreesClassifierPure",
"GaussianNB": "scikit_endpoint.naive_bayes.GaussianNBPure",
"MultinomialNB": "scikit_endpoint.naive_bayes.MultinomialNBPure",
"ComplementNB": "scikit_endpoint.naive_bayes.ComplementNBPure",
"SimpleImputer": "scikit_endpoint.impute.SimpleImputerPure",
"MissingIndicator": "scikit_endpoint.impute.MissingIndicatorPure",
"DummyClassifier": "scikit_endpoint.dummy.DummyClassifierPure",
"Pipeline": "scikit_endpoint.pipeline.PipelinePure",
"FeatureUnion": "scikit_endpoint.pipeline.FeatureUnionPure",
"OneHotEncoder": "scikit_endpoint.preprocessing.OneHotEncoderPure",
"OrdinalEncoder": "scikit_endpoint.preprocessing.OrdinalEncoderPure",
"StandardScaler": "scikit_endpoint.preprocessing.StandardScalerPure",
"MinMaxScaler": "scikit_endpoint.preprocessing.MinMaxScalerPure",
"MaxAbsScaler": "scikit_endpoint.preprocessing.MaxAbsScalerPure",
"Normalizer": "scikit_endpoint.preprocessing.NormalizerPure",
"DictVectorizer": "scikit_endpoint.feature_extraction.DictVectorizerPure",
"TfidfVectorizer": "scikit_endpoint.feature_extraction.text.TfidfVectorizerPure",
"CountVectorizer": "scikit_endpoint.feature_extraction.text.CountVectorizerPure",
"TfidfTransformer": "scikit_endpoint.feature_extraction.text.TfidfTransformerPure",
"HashingVectorizer": "scikit_endpoint.feature_extraction.text.HashingVectorizerPure",
"VarianceThreshold": "scikit_endpoint.feature_selection.VarianceThresholdPure",
}
def _instantiate_class(module, name):
module = __import__(module, fromlist=[name])
return getattr(module, name)
def convert_estimator(est, min_version=None):
"""Convert scikit-learn estimator to its scikit_endpoint counterpart"""
est_name = est.__class__.__name__
pure_est_name = MAPPING.get(est_name)
if pure_est_name is None:
raise ValueError(
"Cannot find 'scikit_endpoint' counterpart for {}".format(est_name)
)
module = ".".join(pure_est_name.split(".")[:-1])
name = pure_est_name.split(".")[-1]
return _instantiate_class(module, name)(est) | /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/scikit_endpoint/map.py | 0.580828 | 0.511717 | map.py | pypi |
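# Minimal usage sketch of ``convert_estimator``: convert a fitted scikit-learn
# estimator listed in MAPPING and predict with the pure-python counterpart.
# The dataset and the choice of LogisticRegression below are illustrative
# assumptions; scikit-learn must be installed for the fitting step.
def _example_convert_estimator():
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression

    X, y = load_iris(return_X_y=True)
    clf = LogisticRegression(max_iter=200).fit(X, y)
    clf_pure = convert_estimator(clf)
    # the converted estimator is expected to accept plain python lists
    return clf_pure.predict(X[:5].tolist())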
from math import exp, log
from operator import mul
from .utils import shape, sparse_list, issparse
def dot(A, B):
"""
    Dot product between a 1-D vector ``A`` and a 2-D list ``B``.
    ``A`` may be a dense list or a sparse dict mapping index -> value;
    ``B`` is a list of rows, each of length ``len(A)``, and the result
    has one value per row of ``B``.
"""
arr = []
for i in range(len(B)):
if isinstance(A, dict):
val = sum([v * B[i][k] for k, v in A.items()])
else:
val = sum(map(mul, A, B[i]))
arr.append(val)
return arr
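# Illustrative sketch of ``dot``: each row of ``B`` is reduced against ``A``,
# giving one output value per row (the inputs below are assumed toy values).
def _example_dot():
    A = [1.0, 2.0, 3.0]
    B = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
    return dot(A, B)  # -> [1.0, 2.0]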
def dot_2d(A, B):
"""
Dot product between two arrays.
A -> n_dim = 2
B -> n_dim = 2
"""
return [dot(a, B) for a in A]
def matmult_same_dim(A, B):
"""Multiply two matrices of the same dimension"""
shape_A = shape(A)
issparse_A = issparse(A)
issparse_B = issparse(B)
if shape_A != shape(B):
raise ValueError("Shape A must equal shape B.")
if not (issparse_A == issparse_B):
raise ValueError("Both A and B must be sparse or dense.")
X = []
if not issparse_A:
for i in range(shape_A[0]):
X.append([(A[i][j] * B[i][j]) for j in range(shape_A[1])])
else:
for i in range(shape_A[0]):
nested_res = [
[(k_b, v_a * v_b) for k_b, v_b in B[i].items() if k_b == k_a]
for k_a, v_a in A[i].items()
]
X.append(dict([item for sublist in nested_res for item in sublist]))
X = sparse_list(X, size=A.size, dtype=A.dtype)
return X
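# Illustrative sketch of ``matmult_same_dim``: elementwise (Hadamard) product
# of two equally shaped dense matrices (toy values assumed).
def _example_matmult_same_dim():
    A = [[1.0, 2.0], [3.0, 4.0]]
    B = [[10.0, 0.0], [0.0, 0.5]]
    return matmult_same_dim(A, B)  # -> [[10.0, 0.0], [0.0, 2.0]]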
def transpose(A):
"""Transpose 2-D list"""
if issparse(A):
raise ValueError("Sparse input not supported.")
return list(map(list, [*zip(*A)]))
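# Illustrative sketch of ``transpose`` on a dense 2-D list (toy values assumed).
def _example_transpose():
    return transpose([[1, 2, 3], [4, 5, 6]])  # -> [[1, 4], [2, 5], [3, 6]]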
def expit(x):
"""Expit function for scaler input"""
return 1.0 / (1.0 + safe_exp(-x))
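# Illustrative sketch of ``expit``: the logistic sigmoid maps 0 to exactly 0.5.
def _example_expit():
    return expit(0.0)  # -> 0.5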
def sfmax(arr):
"""Softmax function for 1-D list or a single sparse_list element"""
if isinstance(arr, dict):
expons = {k: safe_exp(v) for k, v in arr.items()}
denom = sum(expons.values())
out = {k: (v / float(denom)) for k, v in expons.items()}
else:
        expons = list(map(safe_exp, arr))
        denom = float(sum(expons))
        out = [x / denom for x in expons]
return out
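# Illustrative sketch of ``sfmax``: equal inputs give equal probabilities that
# sum to 1 (toy values assumed).
def _example_sfmax():
    return sfmax([0.0, 0.0])  # -> [0.5, 0.5]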
def safe_log(x):
"""Equivalent to numpy log with scalar input"""
if x == 0:
return -float("Inf")
elif x < 0:
return float("Nan")
else:
return log(x)
def safe_exp(x):
"""Equivalent to numpy exp with scalar input"""
try:
return exp(x)
except OverflowError:
return float("Inf")
def operate_2d(A, B, func):
"""Apply elementwise function to 2-D lists"""
if issparse(A) or issparse(B):
raise ValueError("Sparse input not supported.")
if shape(A) != shape(B):
raise ValueError("'A' and 'B' must have the same shape")
return [list(map(func, A[index], B[index])) for index in range(len(A))]
def apply_2d(A, func):
"""Apply function to every element of 2-D list"""
if issparse(A):
raise ValueError("Sparse input not supported.")
return [list(map(func, a)) for a in A]
def apply_2d_sparse(A, func):
"""Apply function to every non-zero element of sparse_list"""
if not issparse(A):
raise ValueError("Dense input not supported.")
A_ = [{k: func(v) for k, v in a.items()} for a in A]
return sparse_list(A_, size=A.size, dtype=A.dtype)
def apply_axis_2d(A, func, axis=1):
"""
Apply function along axis of 2-D list or non-zero
elements of sparse_list.
"""
if issparse(A) and (axis == 0):
raise ValueError("Sparse input not supported when axis=0.")
if axis == 1:
if issparse(A):
return [func(a.values()) for a in A]
else:
return [func(a) for a in A]
elif axis == 0:
return [func(a) for a in transpose(A)]
else:
raise ValueError("Input 'axis' must be 0 or 1")
def ravel(A):
"""Equivalent of numpy ravel on 2-D list"""
if issparse(A):
raise ValueError("Sparse input not supported.")
return list(transpose(A)[0])
def slice_column(A, idx):
"""Slice columns from 2-D list A. Handles sparse data"""
if isinstance(idx, int):
if issparse(A):
return [a.get(idx, A.dtype(0)) for a in A]
else:
return [a[idx] for a in A]
if isinstance(idx, (list, tuple)):
if issparse(A):
A_ = [{k: v for k, v in a.items() if k in idx} for a in A]
return sparse_list(A_, size=A.size, dtype=A.dtype)
else:
return [[a[i] for i in idx] for a in A]
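# Illustrative sketch of ``slice_column``: an int index returns a flat column,
# while a list/tuple of indices returns a reduced 2-D list (toy values assumed).
def _example_slice_column():
    A = [[1, 2, 3], [4, 5, 6]]
    first_col = slice_column(A, 0)      # -> [1, 4]
    last_two = slice_column(A, (1, 2))  # -> [[2, 3], [5, 6]]
    return first_col, last_two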
def accumu(lis):
"""Cumulative sum of list"""
total = 0
for x in lis:
total += x
yield total | /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/scikit_endpoint/base.py | 0.723798 | 0.71867 | base.py | pypi |
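# Illustrative sketch of ``accumu``: materialize the generator to get the
# running cumulative sum of a list.
def _example_accumu():
    return list(accumu([1, 2, 3, 4]))  # -> [1, 3, 6, 10]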
import pickle
import time
from warnings import warn
from distutils.version import LooseVersion
CONTAINERS = (list, dict, tuple)
TYPES = (int, float, str, bool, type)
MIN_VERSION = "0.20"
def check_types(obj, containers=CONTAINERS, types=TYPES):
"""
Checks if input object is an allowed type. Objects can be
acceptable containers or acceptable types themselves.
Containers are checked recursively to ensure all contained
types are valid. If object is a `scikit_endpoint` type, its
attributes are all recursively checked.
"""
if isinstance(obj, containers):
if isinstance(obj, (list, tuple)):
for ob in obj:
check_types(ob)
else:
for k, v in obj.items():
check_types(k)
check_types(v)
elif isinstance(obj, types):
pass
elif "scikit_endpoint" in str(type(obj)):
for attr in vars(obj):
check_types(getattr(obj, attr))
elif obj is None:
pass
else:
raise ValueError("Object contains invalid type: {}".format(type(obj)))
def check_version(estimator, min_version=None):
"""Checks the version of the scikit-learn estimator"""
warning_str = (
"Estimators fitted with sklearn version < {} are not guaranteed to work".format(
MIN_VERSION
)
)
try:
version_ = estimator.__getstate__()["_sklearn_version"]
except: # noqa E722
warn(warning_str)
return
if (min_version is not None) and (
LooseVersion(version_) < LooseVersion(min_version)
):
raise Exception(
"The sklearn version is too low for this estimator; must be >= {}".format(
min_version
)
)
elif LooseVersion(version_) < LooseVersion(MIN_VERSION):
warn(warning_str)
def convert_type(dtype):
"""Converts a datatype to its pure python equivalent"""
val = dtype(0)
if hasattr(val, "item"):
return type(val.item())
else:
return dtype
def check_array(X, handle_sparse="error"):
"""
Checks if array is compatible for prediction with
`scikit_endpoint` classes. Input 'X' should be a non-empty
    `list` or `sparse_list`. If 'X' is sparse, handling is controlled by
    'handle_sparse': the default 'error' raises on sparse input, while
    'allow' passes it through unchanged.
"""
if issparse(X):
if handle_sparse == "allow":
return X
elif handle_sparse == "error":
raise ValueError("Sparse input is not supported " "for this estimator")
else:
raise ValueError(
"Invalid value for 'handle_sparse' "
"input. Acceptable values are 'allow' or 'error'"
)
if not isinstance(X, list):
raise TypeError("Input 'X' must be a list")
    if len(X) == 0:
        raise ValueError("Input 'X' must not be empty")
return X
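# Illustrative sketch of ``check_array``: valid dense input is returned
# unchanged and non-list input raises TypeError (toy inputs assumed).
def _example_check_array():
    dense = [[1.0, 2.0], [3.0, 4.0]]
    assert check_array(dense) is dense
    try:
        check_array("not a list")
    except TypeError as exc:
        return str(exc)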
def shape(X):
"""
Checks the shape of input list. Similar to
numpy `ndarray.shape()`. Handles `list` or
`sparse_list` input.
"""
if ndim(X) == 1:
return (len(X),)
elif ndim(X) == 2:
if issparse(X):
return (len(X), X.size)
else:
return (len(X), len(X[0]))
def ndim(X):
"""Computes the dimension of input list"""
if isinstance(X[0], (list, dict)):
return 2
else:
return 1
def tosparse(A):
"""Converts input dense list to a `sparse_list`"""
return sparse_list(A)
def todense(A):
"""Converts input `sparse_list` to a dense list"""
return A.todense()
def issparse(A):
"""Checks if input list is a `sparse_list`"""
return isinstance(A, sparse_list)
class sparse_list(list):
"""
Pure python implementation of a 2-D sparse data structure.
The data structure is a list of dictionaries. Each dictionary
represents a 'row' of data. The dictionary keys correspond to the
indices of 'columns' and the dictionary values correspond to the
data value associated with that index. Missing keys are assumed
to have values of 0.
Args:
A (list): 2-D list of lists or list of dicts
size (int): Number of 'columns' of the data structure
dtype (type): Data type of data values
Examples:
        >>> A = [[0, 1, 0], [0, 1, 1]]
        >>> print(sparse_list(A))
        [{1: 1}, {1: 1, 2: 1}]
        >>>
        >>> B = [{3: 0.5}, {1: 0.9, 10: 0.2}]
        >>> print(sparse_list(B, size=11, dtype=float))
        [{3: 0.5}, {1: 0.9, 10: 0.2}]
"""
def __init__(self, A, size=None, dtype=None):
if isinstance(A[0], dict):
self.dtype = float if dtype is None else dtype
self.size = size
for row in A:
self.append(row)
else:
A = check_array(A)
self.size = shape(A)[1]
self.dtype = type(A[0][0])
for row in A:
self.append(
dict([(i, row[i]) for i in range(self.size) if row[i] != 0])
)
def todense(self):
"""Converts `sparse_list` instance to a dense list"""
A_dense = []
zero_val = self.dtype(0)
for row in self:
A_dense.append([row.get(i, zero_val) for i in range(self.size)])
return A_dense
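# Illustrative sketch of ``sparse_list``: build it from a dense 2-D list,
# inspect its shape, and recover the dense representation (toy values assumed).
def _example_sparse_list():
    dense = [[0, 1, 0], [0, 1, 1]]
    sparse = sparse_list(dense)
    assert shape(sparse) == (2, 3)
    return sparse.todense()  # -> [[0, 1, 0], [0, 1, 1]]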
def performance_comparison(sklearn_estimator, pure_sklearn_estimator, X):
"""
Profile performance characteristics between sklearn estimator and
corresponding pure-predict estimator.
Args:
sklearn_estimator (object)
pure_sklearn_estimator (object)
X (numpy ndarray): features for prediction
"""
# -- profile pickled object size: sklearn vs pure-predict
pickled = pickle.dumps(sklearn_estimator)
pickled_ = pickle.dumps(pure_sklearn_estimator)
print("Pickle Size sklearn: {}".format(len(pickled)))
print("Pickle Size pure-predict: {}".format(len(pickled_)))
print("Difference: {}".format(len(pickled_) / float(len(pickled))))
# -- profile unpickle time: sklearn vs pure-predict
start = time.time()
_ = pickle.loads(pickled)
pickle_t = time.time() - start
print("Unpickle time sklearn: {}".format(pickle_t))
start = time.time()
_ = pickle.loads(pickled_)
pickle_t_ = time.time() - start
print("Unpickle time pure-predict: {}".format(pickle_t_))
print("Difference: {}".format(pickle_t_ / pickle_t))
# -- profile single record predict latency: sklearn vs pure-predict
X_pred = X[:1]
X_pred_ = X_pred if isinstance(X_pred, list) else X_pred.tolist()
start = time.time()
_ = sklearn_estimator.predict(X_pred)
pred_t = time.time() - start
print("Predict 1 record sklearn: {}".format(pred_t))
start = time.time()
_ = pure_sklearn_estimator.predict(X_pred_)
pred_t_ = time.time() - start
print("Predict 1 record pure-predict: {}".format(pred_t_))
print("Difference: {}".format(pred_t_ / pred_t)) | /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/scikit_endpoint/utils.py | 0.785267 | 0.296508 | utils.py | pypi |