import pandas as pd
from rdkit.Chem import GetDistanceMatrix
from rdkit.DataStructs import ConvertToNumpyArray
from rdkit.Chem.rdMolDescriptors import (GetMorganFingerprint,
GetHashedMorganFingerprint,
GetMorganFingerprintAsBitVect,
GetAtomPairFingerprint,
GetHashedAtomPairFingerprint,
GetHashedAtomPairFingerprintAsBitVect,
GetTopologicalTorsionFingerprint,
GetHashedTopologicalTorsionFingerprint,
GetHashedTopologicalTorsionFingerprintAsBitVect,
GetMACCSKeysFingerprint,
GetFeatureInvariants,
GetConnectivityInvariants)
from rdkit.Chem.rdReducedGraphs import GetErGFingerprint
from rdkit.Chem.rdmolops import RDKFingerprint
import numpy as np
from ..base import Transformer, Featurizer
class MorganFeaturizer(Transformer, Featurizer):
""" Morgan fingerprints, implemented by RDKit.
Notes:
Currently, folded bits are by far the fastest implementation.
Examples:
>>> import skchem
>>> import pandas as pd
>>> pd.options.display.max_rows = pd.options.display.max_columns = 5
>>> mf = skchem.descriptors.MorganFeaturizer()
>>> m = skchem.Mol.from_smiles('CCC')
Can transform an individual molecule to yield a Series:
>>> mf.transform(m)
morgan_fp_idx
0 0
1 0
..
2046 0
2047 0
Name: MorganFeaturizer, dtype: uint8
Can transform a list of molecules to yield a DataFrame:
>>> mf.transform([m])
morgan_fp_idx 0 1 ... 2046 2047
0 0 0 ... 0 0
<BLANKLINE>
[1 rows x 2048 columns]
Change the number of features the fingerprint is folded down to using `n_feats`.
>>> mf.n_feats = 1024
>>> mf.transform(m)
morgan_fp_idx
0 0
1 0
..
1022 0
1023 0
Name: MorganFeaturizer, dtype: uint8
Count fingerprints with `as_bits` = False
>>> mf.as_bits = False
>>> res = mf.transform(m); res[res > 0]
morgan_fp_idx
33 2
80 1
294 2
320 1
Name: MorganFeaturizer, dtype: int64
Pseudo-gradient with `grad` shows which atoms contributed to which feature.
>>> mf.grad(m)[res > 0]
atom_idx 0 1 2
features
33 1 0 1
80 0 1 0
294 1 2 1
320 1 1 1
"""
def __init__(self, radius=2, n_feats=2048, as_bits=True, use_features=False,
use_bond_types=True, use_chirality=False, **kwargs):
""" Initialize the fingerprinter object.
Args:
radius (int):
The maximum radius for atom environments.
Default is `2`.
n_feats (int):
The number of features to which to fold the fingerprint down.
For unfolded, use `-1`.
Default is `2048`.
as_bits (bool):
Whether to return bits (`True`) or counts (`False`).
Default is `True`.
use_features (bool):
Whether to map atom types to generic features (FCFP analog).
Default is `False`.
use_bond_types (bool):
Whether to use bond types to differentiate environments.
Default is `True`.
use_chirality (bool):
Whether to use chirality to differentiate environments.
Default is `False`.
"""
super(MorganFeaturizer, self).__init__(**kwargs)
self.radius = radius
self.n_feats = n_feats
self.sparse = self.n_feats < 0
self.as_bits = as_bits
self.use_features = use_features
self.use_bond_types = use_bond_types
self.use_chirality = use_chirality
def _transform_mol(self, mol):
"""Private method to transform a skchem molecule.
Use `transform` for the public method, which genericizes the argument to
iterables of mols.
Args:
mol (skchem.Mol): Molecule to calculate fingerprint for.
Returns:
np.array or dict:
Fingerprint as an array (or a dict if sparse).
"""
if self.as_bits and self.n_feats > 0:
fp = GetMorganFingerprintAsBitVect(mol, self.radius,
nBits=self.n_feats,
useFeatures=self.use_features,
useBondTypes=self.use_bond_types,
useChirality=self.use_chirality)
res = np.array(0)
ConvertToNumpyArray(fp, res)
res = res.astype(np.uint8)
else:
if self.n_feats <= 0:
res = GetMorganFingerprint(mol, self.radius,
useFeatures=self.use_features,
useBondTypes=self.use_bond_types,
useChirality=self.use_chirality)
res = res.GetNonzeroElements()
if self.as_bits:
res = {k: int(v > 0) for k, v in res.items()}
else:
res = GetHashedMorganFingerprint(mol, self.radius,
nBits=self.n_feats,
useFeatures=self.use_features,
useBondTypes=self.use_bond_types,
useChirality=self.use_chirality)
res = np.array(list(res))
return res
@property
def name(self):
return 'morg'
@property
def columns(self):
return pd.RangeIndex(self.n_feats, name='morgan_fp_idx')
def grad(self, mol):
""" Calculate the pseudo gradient with respect to the atoms.
The pseudo gradient is the number of times each atom set that particular
bit.
Args:
mol (skchem.Mol):
The molecule for which to calculate the pseudo gradient.
Returns:
pandas.DataFrame:
Dataframe of pseudogradients, with columns corresponding to
atoms, and rows corresponding to features of the fingerprint.
"""
cols = pd.Index(list(range(len(mol.atoms))), name='atom_idx')
dist = GetDistanceMatrix(mol)
info = {}
if self.n_feats < 0:
res = GetMorganFingerprint(mol, self.radius,
useFeatures=self.use_features,
useBondTypes=self.use_bond_types,
useChirality=self.use_chirality,
bitInfo=info).GetNonzeroElements()
idx_list = list(res.keys())
idx = pd.Index(idx_list, name='features')
grad = np.zeros((len(idx), len(cols)))
for bit in info:
for atom_idx, radius in info[bit]:
grad[idx_list.index(bit)] += (dist <= radius)[atom_idx]
else:
res = list(GetHashedMorganFingerprint(mol, self.radius,
nBits=self.n_feats,
useFeatures=self.use_features,
useBondTypes=self.use_bond_types,
useChirality=self.use_chirality,
bitInfo=info))
idx = pd.Index(range(self.n_feats), name='features')
grad = np.zeros((len(idx), len(cols)))
for bit in info:
for atom_idx, radius in info[bit]:
grad[bit] += (dist <= radius)[atom_idx]
grad = pd.DataFrame(grad, index=idx, columns=cols)
if self.as_bits:
grad = (grad > 0)
return grad.astype(int)
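
# Illustrative sketch, not part of the original module: shows how `transform`
# and `grad` could be combined to list, for each active Morgan bit, the atom
# indices that contributed to it. It assumes the behaviour documented in the
# MorganFeaturizer docstring above; the helper name is hypothetical.
def _example_atoms_per_active_bit(mol, radius=2, n_feats=2048):
    """ Map each active Morgan bit of ``mol`` to its contributing atom indices. """
    mf = MorganFeaturizer(radius=radius, n_feats=n_feats, as_bits=True)
    fp = mf.transform(mol)      # pd.Series of 0/1, indexed by morgan_fp_idx
    grad = mf.grad(mol)         # pd.DataFrame: rows are features, columns atom_idx
    return {bit: grad.loc[bit][grad.loc[bit] > 0].index.tolist()
            for bit in fp[fp > 0].index}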
class AtomPairFeaturizer(Transformer, Featurizer):
""" Atom Pair Fingerprints, implemented by RDKit. """
def __init__(self, min_length=1, max_length=30, n_feats=2048, as_bits=False,
use_chirality=False, **kwargs):
""" Instantiate an atom pair fingerprinter.
Args:
min_length (int):
The minimum length of paths between pairs.
Default is `1`, i.e. pairs can be bonded together.
max_length (int):
The maximum length of paths between pairs.
Default is `30`.
n_feats (int):
The number of features to which to fold the fingerprint down.
For unfolded, use `-1`.
Default is `2048`.
as_bits (bool):
Whether to return bits (`True`) or counts (`False`).
Default is `False`.
use_chirality (bool):
Whether to use chirality to differentiate environments.
Default is `False`.
"""
super(AtomPairFeaturizer, self).__init__(**kwargs)
self.min_length = min_length
self.max_length = max_length
self.n_feats = n_feats
self.sparse = self.n_feats < 0
self.as_bits = as_bits
self.use_chirality = use_chirality
def _transform_mol(self, mol):
"""Private method to transform a skchem molecule.
Use `transform` for the public method, which genericizes the argument to
iterables of mols.
Args:
mol (skchem.Mol): Molecule to calculate fingerprint for.
Returns:
np.array or dict:
Fingerprint as an array (or a dict if sparse).
"""
if self.as_bits and self.n_feats > 0:
fp = GetHashedAtomPairFingerprintAsBitVect(mol, nBits=self.n_feats,
minLength=self.min_length,
maxLength=self.max_length,
includeChirality=self.use_chirality)
res = np.array(0)
ConvertToNumpyArray(fp, res)
res = res.astype(np.uint8)
else:
if self.n_feats <= 0:
# unfolded (sparse) fingerprint: the unhashed variant takes no nBits argument
res = GetAtomPairFingerprint(mol,
minLength=self.min_length,
maxLength=self.max_length,
includeChirality=self.use_chirality)
res = res.GetNonzeroElements()
if self.as_bits:
res = {k: int(v > 0) for k, v in res.items()}
else:
res = GetHashedAtomPairFingerprint(mol, nBits=self.n_feats,
minLength=self.min_length,
maxLength=self.max_length,
includeChirality=self.use_chirality)
res = np.array(list(res))
return res
@property
def name(self):
return 'atom_pair'
@property
def columns(self):
return pd.RangeIndex(self.n_feats, name='ap_fp_idx')
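
# Illustrative sketch, not part of the original module: minimal usage of
# AtomPairFeaturizer, assuming an skchem.Mol can be built from SMILES as in
# the MorganFeaturizer doctests above. With `as_bits=False` the hashed
# fingerprint holds pair counts; with `as_bits=True` it holds presence bits.
def _example_atom_pair_counts(smiles='CCO'):
    from skchem import Mol  # assumed import path, as used in the doctests above
    mol = Mol.from_smiles(smiles)
    counts = AtomPairFeaturizer(n_feats=2048, as_bits=False).transform(mol)
    bits = AtomPairFeaturizer(n_feats=2048, as_bits=True).transform(mol)
    return counts[counts > 0], bits[bits > 0]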
class TopologicalTorsionFeaturizer(Transformer, Featurizer):
""" Topological Torsion fingerprints, implemented by RDKit. """
def __init__(self, target_size=4, n_feats=2048, as_bits=False,
use_chirality=False, **kwargs):
"""
Args:
target_size (int):
The number of atoms to include in each torsion.
Default is `4`.
n_feats (int):
The number of features to which to fold the fingerprint down.
For unfolded, use `-1`.
Default is `2048`.
as_bits (bool):
Whether to return bits (`True`) or counts (`False`).
Default is `False`.
use_chirality (bool):
Whether to use chirality to differentiate environments.
Default is `False`.
"""
self.target_size = target_size
self.n_feats = n_feats
self.sparse = self.n_feats < 0
self.as_bits = as_bits
self.use_chirality = use_chirality
super(TopologicalTorsionFeaturizer, self).__init__(**kwargs)
def _transform_mol(self, mol):
""" Private method to transform a skchem molecule.
Args:
mol (skchem.Mol): Molecule to calculate fingerprint for.
Returns:
np.array or dict:
Fingerprint as an array (or a dict if sparse).
"""
if self.as_bits and self.n_feats > 0:
fp = GetHashedTopologicalTorsionFingerprintAsBitVect(mol, nBits=self.n_feats,
targetSize=self.target_size,
includeChirality=self.use_chirality)
res = np.array(0)
ConvertToNumpyArray(fp, res)
res = res.astype(np.uint8)
else:
if self.n_feats <= 0:
# unfolded (sparse) fingerprint: the unhashed variant takes no nBits argument
res = GetTopologicalTorsionFingerprint(mol,
targetSize=self.target_size,
includeChirality=self.use_chirality)
res = res.GetNonzeroElements()
if self.as_bits:
res = {k: int(v > 0) for k, v in res.items()}
else:
res = GetHashedTopologicalTorsionFingerprint(mol, nBits=self.n_feats,
targetSize=self.target_size,
includeChirality=self.use_chirality)
res = np.array(list(res))
return res
@property
def name(self):
return 'top_tort'
@property
def columns(self):
return pd.RangeIndex(self.n_feats, name='tt_fp_idx')
class MACCSFeaturizer(Transformer, Featurizer):
""" MACCS Keys Fingerprints """
def __init__(self, **kwargs):
super(MACCSFeaturizer, self).__init__(**kwargs)
self.n_feats = 166
def _transform_mol(self, mol):
return np.array(list(GetMACCSKeysFingerprint(mol)))[1:]
@property
def name(self):
return 'maccs'
@property
def columns(self):
return pd.Index(
['ISOTOPE', '103 < ATOMIC NO. < 256', 'GROUP IVA,VA,VIA PERIODS 4-6 (Ge...)', 'ACTINIDE',
'GROUP IIIB,IVB (Sc...)', 'LANTHANIDE', 'GROUP VB,VIB,VIIB (V...)', 'QAAA@1', 'GROUP VIII (Fe...)',
'GROUP IIA (ALKALINE EARTH)', '4M RING', 'GROUP IB,IIB (Cu...)', 'ON(C)C', 'S-S', 'OC(O)O', 'QAA@1', 'CTC',
'GROUP IIIA (B...)', '7M RING', 'SI', 'C=C(Q)Q', '3M RING', 'NC(O)O', 'N-O', 'NC(N)N', 'C$=C($A)$A', 'I',
'QCH2Q', 'P', 'CQ(C)(C)A', 'QX', 'CSN', 'NS', 'CH2=A', 'GROUP IA (ALKALI METAL)', 'S HETEROCYCLE',
'NC(O)N', 'NC(C)N', 'OS(O)O', 'S-O', 'CTN', 'F', 'QHAQH', 'OTHER', 'C=CN', 'BR', 'SAN', 'OQ(O)O', 'CHARGE',
'C=C(C)C', 'CSO', 'NN', 'QHAAAQH', 'QHAAQH', 'OSO', 'ON(O)C', 'O HETEROCYCLE', 'QSQ', 'Snot%A%A', 'S=O',
'AS(A)A', 'A$A!A$A', 'N=O', 'A$A!S', 'C%N', 'CC(C)(C)A', 'QS', 'QHQH (&...)', 'QQH', 'QNQ', 'NO', 'OAAO',
'S=A', 'CH3ACH3', 'A!N$A', 'C=C(A)A', 'NAN', 'C=N', 'NAAN', 'NAAAN', 'SA(A)A', 'ACH2QH', 'QAAAA@1', 'NH2',
'CN(C)C', 'CH2QCH2', 'X!A$A', 'S', 'OAAAO', 'QHAACH2A', 'QHAAACH2A', 'OC(N)C', 'QCH3', 'QN', 'NAAO',
'5M RING', 'NAAAO', 'QAAAAA@1', 'C=C', 'ACH2N', '8M RING', 'QO', 'CL', 'QHACH2A', 'A$A($A)$A', 'QA(Q)Q',
'XA(A)A', 'CH3AAACH2A', 'ACH2O', 'NCO', 'NACH2A', 'AA(A)(A)A', 'Onot%A%A', 'CH3CH2A', 'CH3ACH2A',
'CH3AACH2A', 'NAO', 'ACH2CH2A > 1', 'N=A', 'HETEROCYCLIC ATOM > 1 (&...)', 'N HETEROCYCLE', 'AN(A)A',
'OCO', 'QQ', 'AROMATIC RING > 1', 'A!O!A', 'A$A!O > 1 (&...)', 'ACH2AAACH2A', 'ACH2AACH2A',
'QQ > 1 (&...)', 'QH > 1', 'OACH2A', 'A$A!N', 'X (HALOGEN)', 'Nnot%A%A', 'O=A > 1', 'HETEROCYCLE',
'QCH2A > 1 (&...)', 'OH', 'O > 3 (&...)', 'CH3 > 2 (&...)', 'N > 1', 'A$A!O', 'Anot%A%Anot%A',
'6M RING > 1', 'O > 2', 'ACH2CH2A', 'AQ(A)A', 'CH3 > 1', 'A!A$A!A', 'NH', 'OC(C)C', 'QCH2A', 'C=O',
'A!CH2!A', 'NA(A)A', 'C-O', 'C-N', 'O > 1', 'CH3', 'N', 'AROMATIC', '6M RING', 'O', 'RING', 'FRAGMENTS'],
name='maccs_idx')
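
# Illustrative sketch, not part of the original module: one way the featurizers
# defined in this module could be combined into a single wide feature table.
# It assumes that transforming a list of molecules returns a DataFrame per
# featurizer, as in the MorganFeaturizer doctest above.
def _example_combined_features(mols):
    featurizers = {
        'morgan': MorganFeaturizer(n_feats=1024),
        'maccs': MACCSFeaturizer(),
    }
    frames = {name: f.transform(mols) for name, f in featurizers.items()}
    # dict keys become the first level of a column MultiIndex
    return pd.concat(frames, axis=1)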
class ErGFeaturizer(Transformer, Featurizer):
""" Extended Reduced Graph Fingerprints.
Implemented in RDKit."""
def __init__(self, atom_types=0, fuzz_increment=0.3, min_path=1, max_path=15, **kwargs):
super(ErGFeaturizer, self).__init__(**kwargs)
self.atom_types = atom_types
self.fuzz_increment = fuzz_increment
self.min_path = min_path
self.max_path = max_path
self.n_feats = 315
def _transform_mol(self, mol):
# forward the parameters stored in __init__ (keyword names per the RDKit API)
return np.array(GetErGFingerprint(mol, atomTypes=self.atom_types,
fuzzIncrement=self.fuzz_increment,
minPath=self.min_path,
maxPath=self.max_path))
@property
def name(self):
return 'erg'
@property
def columns(self):
return pd.RangeIndex(self.n_feats, name='erg_fp_idx')
class FeatureInvariantsFeaturizer(Transformer, Featurizer):
""" Feature invariants fingerprints. """
def __init__(self, **kwargs):
super(FeatureInvariantsFeaturizer, self).__init__(**kwargs)
def _transform_mol(self, mol):
return np.array(GetFeatureInvariants(mol))
@property
def name(self):
return 'feat_inv'
@property
def columns(self):
return None
class ConnectivityInvariantsFeaturizer(Transformer, Featurizer):
""" Connectivity invariants fingerprints """
def __init__(self, include_ring_membership=True, **kwargs):
super(ConnectivityInvariantsFeaturizer, self).__init__(**kwargs)
self.include_ring_membership = include_ring_membership
raise NotImplementedError # this is a sparse descriptor
def _transform_mol(self, mol):
return np.array(GetConnectivityInvariants(mol))
@property
def name(self):
return 'conn_inv'
@property
def columns(self):
return None
class RDKFeaturizer(Transformer, Featurizer):
""" RDKit fingerprint """
# TODO: finish docstring
def __init__(self, min_path=1, max_path=7, n_feats=2048, n_bits_per_hash=2,
use_hs=True, target_density=0.0, min_size=128,
branched_paths=True, use_bond_types=True, **kwargs):
""" RDK fingerprints
Args:
min_path (int):
minimum number of bonds to include in the subgraphs.
max_path (int):
maximum number of bonds to include in the subgraphs.
n_feats (int):
The number of features to which to fold the fingerprint down. For unfolded, use `-1`.
n_bits_per_hash (int):
number of bits to set per path.
use_hs (bool):
include paths involving Hs in the fingerprint if the molecule has explicit Hs.
target_density (float):
fold the fingerprint until this minimum density has been reached.
min_size (int):
the minimum size the fingerprint will be folded to when trying to reach tgtDensity.
branched_paths (bool):
if set, both branched and unbranched paths will be used in the fingerprint.
use_bond_types (bool):
if set, bond orders will be used in the path hashes.
"""
super(RDKFeaturizer, self).__init__(**kwargs)
self.min_path = min_path
self.max_path = max_path
self.n_feats = n_feats
self.n_bits_per_hash = n_bits_per_hash
self.use_hs = use_hs
self.target_density = target_density
self.min_size = min_size
self.branched_paths = branched_paths
self.use_bond_types = use_bond_types
def _transform_mol(self, mol):
return np.array(list(RDKFingerprint(mol, minPath=self.min_path,
maxPath=self.max_path,
fpSize=self.n_feats,
nBitsPerHash=self.n_bits_per_hash,
useHs=self.use_hs,
tgtDensity=self.target_density,
minSize=self.min_size,
branchedPaths=self.branched_paths,
useBondOrder=self.use_bond_types)))
@property
def name(self):
return 'rdkit'
@property
def columns(self):
return pd.RangeIndex(self.n_feats, name='rdk_fp_idx')

# --- end of skchem/descriptors/fingerprints.py ---
import matplotlib.pyplot as plt
from .. import descriptors
from .. import core
from .. import vis
from ipywidgets import Dropdown, Text, VBox, HBox, Valid, HTML
from IPython import get_ipython
from IPython.display import clear_output, display
class Visualizer(object):
def __init__(self, fper='morgan', smiles='c1ccccc1O', dpi=200):
self.initialize_ipython()
if isinstance(fper, str):
self.fper = descriptors.get(fper)
else:
self.fper = fper
self.smiles_input = Text(smiles, description='smiles')
self.smiles_input.on_submit(self.update_smiles)
self.smiles_input.observe(self.typing)
self.valid = Valid(True)
self.dropdown = Dropdown(options=[], description='bit')
self.dropdown.observe(self.plot)
self.dpi_input = Text(str(dpi), description='dpi')
self.dpi_input.on_submit(self.plot)
self.ui = VBox([
HTML('<h2>Visualizer</h2>'),
HBox([self.smiles_input, self.valid]),
self.dropdown,
self.dpi_input])
self.update_smiles(None)
self.display()
def initialize_ipython(self):
ipython = get_ipython()
try:
ipython.magic('matplotlib inline')
except Exception:
pass
def typing(self, _):
self.valid.visible = False
@property
def dpi(self):
try:
return int(self.dpi_input.value)
except ValueError:
return 50
@dpi.setter
def dpi(self, value):
self.dpi_input.value = str(value)
def display(self):
display(self.ui)
def update_smiles(self, _):
try:
self._mol = core.Mol.from_smiles(self.smiles_input.value)
self.valid.value = True
except ValueError:
self.valid.value = False
return
finally:
self.valid.visible = True
return self.calculate()
def calculate(self):
fp = self.fper.transform(self.mol)
self.fp = fp[fp == 1].index
self.fpg = self.fper.grad(self.mol).ix[self.fp]
return self.update_dropdown()
def update_dropdown(self):
self.dropdown.options.append(self.fp[0])
self.dropdown.value = self.fp[0]
self.dropdown.options = self.fp.tolist()
return self.plot(self.dropdown.value)
@property
def mol(self):
return self._mol
@mol.setter
def mol(self, mol):
self._mol = mol
self.smiles_input.value = mol.to_smiles()
self.calculate()
@property
def current_smiles(self):
return self.smiles_input.value
@property
def current_bit(self):
return self.dropdown.value
def plot(self, _):
clear_output()
plt.clf()
plt.rcParams['savefig.dpi'] = self.dpi
vis.plot_weights(self.mol, self.fpg.ix[self.current_bit], quality=4, ax=plt.gca())

# --- end of skchem/interact/desc_vis.py ---
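
# Illustrative usage sketch, not part of the original module: how the
# Visualizer above might be launched from a Jupyter notebook cell. The module
# path in the import is assumed from the file path above; the featurizer name
# 'morgan' and the SMILES are just example values accepted by the constructor.
def _example_visualizer_session():
    from skchem.interact.desc_vis import Visualizer  # assumed module path
    viz = Visualizer(fper='morgan', smiles='c1ccccc1O', dpi=150)
    # The widgets display themselves on construction; editing the SMILES box
    # and pressing enter recomputes the fingerprint and redraws the atom
    # weights for the bit selected in the dropdown.
    return viz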
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
import pandas as pd
class ChemicalObject(object):
""" A mixin for each chemical object in scikit-chem """
@classmethod
def from_super(cls, obj):
"""A method that converts the class of an object of parent class to that of the child. """
obj.__class__ = cls
return obj
class AtomView(object):
""" Atom interface wrapper """
def __init__(self, owner):
self.owner = owner
self.props = AtomPropertyView(self)
def __getitem__(self, index):
from .atom import Atom
return Atom.from_super(self.owner.GetAtomWithIdx(index))
def __len__(self):
return self.owner.GetNumAtoms()
def __iter__(self):
return AtomIterator(self.owner)
def __str__(self):
return str(list(str(atom) for atom in self))
@property
def elements(self):
return pd.Series((atom.element for atom in self), index=self.index)
@property
def atomic_number(self):
return pd.Series((atom.atomic_number for atom in self), index=self.index)
@property
def atomic_mass(self):
return pd.Series((atom.mass for atom in self), index=self.index)
@property
def index(self):
return pd.RangeIndex(len(self), name='atom_idx')
def __repr__(self):
return '<{class_} values="{values}" at {address}>'.format(
class_=self.__class__.__name__,
values=str(self),
address=hex(id(self)))
class AtomIterator(AtomView):
""" Atom iterator """
def __init__(self, owner):
super(AtomIterator, self).__init__(owner)
self._current = 0
self._high = self.owner.GetNumAtoms()
def __next__(self):
if self._current >= self._high:
raise StopIteration
else:
self._current += 1
return self[self._current - 1]
# py2 compat
next = __next__
class View(object):
""" View wrapper interface """
__metaclass__ = ABCMeta
@abstractmethod
def keys(self):
return []
def get(self, index, default=None):
if index in self.keys():
return self[index]
else:
return default
def pop(self, index, default=None):
if default:
val = self.get(index, default)
else:
val = self[index]
self.remove(index)
return val
def clear(self):
for idx in self.keys():
self.remove(idx)
def items(self):
return list((k, self[k]) for k in self.keys())
def remove(self, key):
self.__delitem__(key)
def __getitem__(self, key):
raise NotImplementedError
def __setitem__(self, key, value):
raise NotImplementedError
def __delitem__(self, key):
raise NotImplementedError
def __iter__(self):
return iter(self.keys())
def __str__(self):
return str(dict(self))
def __len__(self):
return len(self.keys())
def __repr__(self):
return '<{klass} values="{values}" at {address}>'.format(
klass=self.__class__.__name__,
values=str(self),
address=hex(id(self)))
class PropertyView(View):
""" Property object wrapper """
def __init__(self, owner):
self._owner = owner
def keys(self):
return list(k for k in self._owner.GetPropNames() if k[:1] != '_')
def __getitem__(self, key):
# we manually work out if it was a float that was stored, as GetProp
# returns floats and ints set by SetDoubleProp and SetIntProp as strings
value = self._owner.GetProp(str(key))
try:
return int(value)
except ValueError:
try:
return float(value)
except ValueError:
return value
def __setitem__(self, key, value):
if not isinstance(key, str):
warnings.warn("RDKit property keys can only be of type `str`. Using `{key}` as a `str`.".format(key=key))
key = str(key)
if key[0] == '_':
warnings.warn("`{value}` is a private RDKit property key. "
"Using this may have unintended consequences.".format(value=value))
if isinstance(value, str):
self._owner.SetProp(key, value)
elif isinstance(value, (int, np.int64, np.int32)):
self._owner.SetIntProp(key, value)
elif isinstance(value, (float, np.float64, np.float32)):
self._owner.SetDoubleProp(key, value)
else:
warnings.warn("RDKit property keys can only be `str`, `int` or `float`."
"Using `{value}` as a `str`.".format(value=value))
self._owner.SetProp(key, str(value))
def __delitem__(self, index):
self._owner.ClearProp(index)
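
# Illustrative sketch, not part of the original module: demonstrates the
# round-trip type coercion implemented in PropertyView.__getitem__ above.
# RDKit stores every property as a string, and the view tries int, then float,
# before falling back to str. It assumes an skchem.Mol exposes this view
# through `mol.props`, as used elsewhere in the package.
def _example_property_round_trip(mol):
    mol.props['n_rings'] = 2          # stored via SetIntProp
    mol.props['logp'] = 1.5           # stored via SetDoubleProp
    mol.props['source'] = 'assay-7'   # stored via SetProp
    # each value comes back with its original Python type: int, float, str
    return [type(mol.props[k]) for k in ('n_rings', 'logp', 'source')]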
class AtomPropertyView(View):
""" Atom property wrapper """
def __init__(self, atom_view):
self._atom_view = atom_view
def keys(self):
res = set()
for atom in self._atom_view:
res = res.union(set(atom.props.keys()))
return list(res)
def get(self, key, default=None):
return [a.props.get(key, default) for a in self._atom_view]
def __getitem__(self, key):
if key not in self.keys():
raise KeyError('No atoms have the property set.')
return self.get(key, None)
def __setitem__(self, key, value):
assert len(self._atom_view) == len(value), "Must pass same number of values as atoms."
for atom, val in zip(self._atom_view, value):
atom.props[key] = val
def __delitem__(self, key):
for atom in self._atom_view:
atom.props.remove(key)

# --- end of skchem/core/base.py ---
import warnings
import tempfile
import os
import pandas as pd
from fuel.datasets import H5PYDataset
from fuel.utils import find_in_data_path
from fuel import config
class Dataset(H5PYDataset):
""" Abstract base class providing an interface to the skchem data format."""
def __init__(self, **kwargs):
kwargs.setdefault('load_in_memory', True)
super(Dataset, self).__init__(
file_or_path=find_in_data_path(self.filename), **kwargs)
@classmethod
def load_set(cls, set_name, sources=()):
""" Load the sources for a single set.
Args:
set_name (str):
The set name.
sources (tuple[str]):
The sources to return data for.
Returns:
tuple[np.array]
The requested sources for the requested set.
"""
if set_name == 'all':
set_name = cls.set_names
else:
set_name = (set_name,)
if sources == 'all':
sources = cls.sources_names
return cls(which_sets=set_name, sources=sources, load_in_memory=True).data_sources
@classmethod
def load_data(cls, sets=(), sources=()):
""" Load a set of sources.
Args:
sets (tuple[str]):
The sets to return data for.
sources:
The sources to return data for.
Example:
(X_train, y_train), (X_test, y_test) = Dataset.load_data(sets=('train', 'test'), sources=('X', 'y'))
"""
for set_name in sets:
yield cls.load_set(set_name, sources)
@classmethod
def read_frame(cls, key, *args, **kwargs):
""" Load a set of features from the dataset as a pandas object.
Args:
key (str):
The HDF5 key for required data. Typically, this will be one of
- structure: for the raw molecules
- smiles: for the smiles
- features/{feat_name}: for the features
- targets/{targ_name}: for the targets
Returns:
pd.Series or pd.DataFrame or pd.Panel
The data as a dataframe.
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
data = pd.read_hdf(find_in_data_path(cls.filename), key, *args, **kwargs)
if isinstance(data, pd.Panel):
data = data.transpose(2, 1, 0)
return data
@classmethod
def download(cls, output_directory=None, download_directory=None):
""" Download the dataset and convert it.
Args:
output_directory (str):
The directory to save the data to. Defaults to the first
directory in the fuel data path.
download_directory (str):
The directory to save the raw files to. Defaults to a temporary
directory.
Returns:
str:
The path of the downloaded and processed dataset.
"""
if not output_directory:
output_directory = config.config['data_path']['yaml'].split(':')[0]
output_directory = os.path.expanduser(output_directory)
if not download_directory:
download_directory = tempfile.mkdtemp()
cls.downloader.download(directory=download_directory)
return cls.converter.convert(directory=download_directory,
output_directory=output_directory)

# --- end of skchem/data/datasets/base.py ---
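
# Illustrative sketch, not part of the original package: a minimal concrete
# Dataset subclass. The attribute names follow how the base class above
# references them (`filename`, `set_names`, `sources_names`); the values, the
# source keys and the module path in the import are assumptions, and the
# `downloader`/`converter` attributes needed by `download` are omitted.
def _example_dataset_subclass():
    from skchem.data.datasets.base import Dataset  # assumed path, per the file above

    class ExampleTox21(Dataset):
        filename = 'tox21.h5'                      # resolved via fuel's data path
        set_names = ('train', 'valid', 'test')
        sources_names = ('X_morg', 'y')

    # (X_train, y_train), = ExampleTox21.load_data(sets=('train',),
    #                                              sources=('X_morg', 'y'))
    return ExampleTox21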
import warnings
import logging
import os
from collections import namedtuple
import numpy as np
import pandas as pd
import h5py
from fuel.datasets import H5PYDataset
from ... import forcefields
from ... import filters
from ... import descriptors
from ... import standardizers
from ... import pipeline
logger = logging.getLogger(__name__)
def default_pipeline():
""" Return a default pipeline to be used for general datasets. """
return pipeline.Pipeline([
standardizers.ChemAxonStandardizer(keep_failed=True, warn_on_fail=False),
forcefields.UFF(add_hs=True, warn_on_fail=False),
filters.OrganicFilter(),
filters.AtomNumberFilter(above=5, below=100, include_hydrogens=True),
filters.MassFilter(below=1000)
])
DEFAULT_PYTABLES_KW = {
'complib': 'bzip2',
'complevel': 9
}
def contiguous_order(to_order, splits):
""" Determine a contiguous order from non-overlapping splits, and put data in that order.
Args:
to_order (iterable<pd.Series, pd.DataFrame, pd.Panel>):
The pandas objects to put in contiguous order.
splits (iterable<pd.Series>):
The non-overlapping splits, as boolean masks.
Returns:
iterable<pd.Series, pd.DataFrame, pd.Panel>: The data in contiguous order.
"""
member = pd.Series(0, index=splits[0].index)
for i, split in enumerate(splits):
member[split] = i
idx = member.sort_values().index
return (order.reindex(idx) for order in to_order)
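
# Illustrative sketch, not part of the original module: a small worked example
# of `contiguous_order`. Three non-overlapping boolean masks over six rows are
# turned into a membership vector, the index is sorted by membership, and each
# passed object is reindexed so every split occupies one contiguous block of
# rows -- which is what the (low, high) slices written by Split.to_dict below
# rely on. The values are made up.
def _example_contiguous_order():
    idx = pd.RangeIndex(6)
    y = pd.Series([10, 11, 12, 13, 14, 15], index=idx)
    train = pd.Series([True, False, True, False, True, False], index=idx)
    valid = pd.Series([False, True, False, False, False, True], index=idx)
    test = pd.Series([False, False, False, True, False, False], index=idx)
    (y_ordered,) = contiguous_order((y,), (train, valid, test))
    return y_ordered  # rows 0, 2, 4 first, then 1, 5, then 3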
Feature = namedtuple('Feature', ['fper', 'key', 'axis_names'])
def default_features():
return (
Feature(fper=descriptors.MorganFeaturizer(),
key='X_morg',
axis_names=['batch', 'features']),
Feature(fper=descriptors.PhysicochemicalFeaturizer(),
key='X_pc',
axis_names=['batch', 'features']),
Feature(fper=descriptors.AtomFeaturizer(max_atoms=100),
key='A',
axis_names=['batch', 'atom_idx', 'features']),
Feature(fper=descriptors.GraphDistanceTransformer(max_atoms=100),
key='G',
axis_names=['batch', 'atom_idx', 'atom_idx']),
Feature(fper=descriptors.SpacialDistanceTransformer(max_atoms=100),
key='G_d',
axis_names=['batch', 'atom_idx', 'atom_idx']),
Feature(fper=descriptors.ChemAxonFeaturizer(features='all'),
key='X_cx',
axis_names=['batch', 'features']),
Feature(fper=descriptors.ChemAxonAtomFeaturizer(features='all', max_atoms=100),
key='A_cx',
axis_names=['batch', 'atom_idx', 'features'])
)
class Split(object):
def __init__(self, mask, name, converter):
self.mask = mask
self.name = name
self.converter = converter
@property
def contiguous(self):
diff = np.ediff1d(self.mask.astype(int))
if self.mask.iloc[0] != 0:
diff[0] = 1
if self.mask.iloc[-1] != 0:
diff[-1] = -1
return sum(diff == -1) == 1 or sum(diff == 1) == 1
@property
def indices(self):
return np.nonzero(self.mask)[0]
def save(self):
self.converter.data_file[self.name + '_indices'] = self.indices
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.mask.to_hdf(self.converter.data_file.filename, '/indices/' + self.name)
@property
def ref(self):
return self.converter.data_file[self.name + '_indices'].ref
def to_dict(self):
idx = self.indices
if self.contiguous:
low, high = min(idx), max(idx)
return {source: (low, high) for source in self.converter.source_names}
else:
return {source: (-1, -1, self.ref) for source in self.converter.source_names}
class Converter(object):
""" Create a fuel dataset from molecules and targets. """
def __init__(self, directory, output_directory, output_filename='default.h5'):
raise NotImplementedError
def run(self, ms, y, output_path, splits=None, features=None, pytables_kws=DEFAULT_PYTABLES_KW):
"""
Args:
ms (pd.Series):
The molecules of the dataset.
ys (pd.Series or pd.DataFrame):
The target labels of the dataset.
output_path (str):
The path to which the dataset should be saved.
features (list[Feature]):
The features to calculate. Defaults are used if `None`.
splits (iterable<(name, split)>):
An iterable of name, split tuples. Splits are provided as boolean arrays of the whole data.
"""
self.output_path = output_path
self.pytables_kws = pytables_kws
self.features = features if features is not None else default_features()
self.feature_names = [feat.key for feat in self.features]
self.task_names = ['y']
self.splits = [Split(split, name, self) for name, split in splits]
self.create_file(output_path)
self.save_splits()
self.save_molecules(ms)
self.save_targets(y)
self.save_features(ms)
@property
def source_names(self):
return self.feature_names + self.task_names
@property
def split_names(self):
return self.splits
def create_file(self, path):
logger.info('Creating h5 file at %s...', self.output_path)
self.data_file = h5py.File(path, 'w')
return self.data_file
def save_molecules(self, mols):
""" Save the molecules to the data file. """
logger.info('Writing molecules to file...')
logger.debug('Writing %s molecules to %s', len(mols), self.data_file.filename)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
mols.to_hdf(self.data_file.filename, 'structure', **self.pytables_kws)
mols.apply(lambda m: m.to_smiles().encode('utf-8')).to_hdf(self.data_file.filename, 'smiles')
def save_frame(self, data, name, prefix='targets'):
""" Save the a frame to the data file. """
logger.info('Writing %s', name)
logger.debug('Writing data of shape %s to %s', data.shape, self.data_file.filename)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if len(data.shape) > 2:
data = data.transpose(2, 1, 0) # panel serializes backwards for some reason...
data.to_hdf(self.data_file.filename,
key='/{prefix}/{name}'.format(prefix=prefix, name=name),
**self.pytables_kws)
if isinstance(data, pd.Series):
self.data_file[name] = h5py.SoftLink('/{prefix}/{name}/values'.format(prefix=prefix, name=name))
self.data_file[name].dims[0].label = data.index.name
elif isinstance(data, pd.DataFrame):
self.data_file[name] = h5py.SoftLink('/{prefix}/{name}/block0_values'.format(prefix=prefix, name=name))
self.data_file[name].dims[0].label = data.index.name
self.data_file[name].dims[1].label = data.columns.name
elif isinstance(data, pd.Panel):
self.data_file[name] = h5py.SoftLink('/{prefix}/{name}/block0_values'.format(prefix=prefix, name=name))
self.data_file[name].dims[0].label = data.minor_axis.name # as panel serializes backwards
self.data_file[name].dims[1].label = data.major_axis.name
self.data_file[name].dims[2].label = data.items.name
def save_targets(self, y):
self.save_frame(y, name='y', prefix='targets')
def save_features(self, ms):
""" Save all features for the dataset. """
logger.debug('Saving features')
for feat in self.features:
self._save_feature(ms, feat)
def _save_feature(self, ms, feat):
""" Calculate and save a feature to the data file. """
logger.info('Calculating %s', feat.key)
fps = feat.fper.transform(ms)
self.save_frame(fps, name=feat.key, prefix='feats')
def save_splits(self):
""" Save the splits to the data file. """
logger.info('Producing dataset splits...')
for split in self.splits:
split.save()
split_dict = {split.name: split.to_dict() for split in self.splits}
splits = H5PYDataset.create_split_array(split_dict)
logger.debug('split: %s', splits)
logger.info('Saving splits...')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.data_file.attrs['split'] = splits
@classmethod
def convert(cls, **kwargs):
kwargs.setdefault('directory', os.getcwd())
kwargs.setdefault('output_directory', os.getcwd())
return cls(**kwargs).output_path,
@classmethod
def fill_subparser(cls, subparser):
return cls.convert | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/data/converters/base.py | 0.787768 | 0.400046 | base.py | pypi |
import zipfile
import os
import logging
LOGGER = logging.getLogger(__name__)
import numpy as np
import pandas as pd
from .base import Converter, default_pipeline
from ... import io
from ... import core
class Tox21Converter(Converter):
""" Class to build tox21 dataset.
"""
def __init__(self, directory, output_directory, output_filename='tox21.h5'):
output_path = os.path.join(output_directory, output_filename)
# extract data
train, valid, test = self.extract(directory)
# read data
train = self.read_train(train)
valid = self.read_valid(valid)
test = self.read_test(test, os.path.join(directory, 'test.txt'))
# combine into full dataset
data = pd.concat([train, valid, test], keys=['train', 'valid', 'test']).sort_index()
data.index.names = 'ds', 'id'
ms, y = data.structure, data.drop('structure', axis=1)
pipeline = default_pipeline()
ms, y = pipeline.transform_filter(ms, y)
# generate splits
ms, y = ms.reset_index(0), y.reset_index(0)
split_arr = ms.pop('ds')
y.pop('ds')
splits = [(split, split_arr == split) for split in ('train', 'valid', 'test')]
y.columns.name = 'tasks'
# call the Converter to make the final dataset
self.run(ms, y, output_path, splits=splits)
@staticmethod
def fix_id(s):
return s.split('-')[0]
@staticmethod
def fix_assay_name(s):
return s.replace('-', '_')
@staticmethod
def patch_test(test):
test_1 = pd.Series({
'structure': core.Mol.from_smiles('FC(F)(F)c1[nH]c(c(C#N)c1Br)C1=CC=C(Cl)C=C1', name='NCGC00357062'),
'stochiometry': 0,
'Compound ID': 'NCGC00357062',
'Sample ID': 'NCGC00357062-01'}, name='NCGC00357062')
test['NCGC00357062'] = test_1
return test
def read_train(self, train):
train = io.read_sdf(train)
train.columns = train.columns.to_series().apply(self.fix_assay_name)
train.index = train.index.to_series().apply(self.fix_id)
self.assays = train.columns[-12:]
self.keep_cols = ['structure'] + self.assays.tolist()
train[self.assays] = train[self.assays].astype(float)
train = train[self.keep_cols]
train = train.sort_index()
ms = train.structure[~train.index.duplicated()]
train = train[self.assays].groupby(train.index).max()
train = ms.to_frame().join(train)
return train
def read_valid(self, valid):
valid = io.read_sdf(valid)
valid.columns = valid.columns.to_series().apply(self.fix_assay_name)
valid = valid[self.keep_cols]
valid[self.assays] = valid[self.assays].astype(float)
return valid
def read_test(self, test, test_data):
test = io.read_sdf(test)
test = self.patch_test(test)
test_data = pd.read_table(test_data)
test_data['Sample ID'] = test_data['Sample ID'].apply(self.fix_id)
test = test.join(test_data.set_index('Sample ID'))
test.columns = test.columns.to_series().apply(self.fix_assay_name)
test = test[self.keep_cols]
test[test == 'x'] = np.nan
test[self.assays] = test[self.assays].astype(float)
return test
def extract(self, directory):
with zipfile.ZipFile(os.path.join(directory, 'train.sdf.zip')) as f:
train = f.extract('tox21_10k_data_all.sdf')
with zipfile.ZipFile(os.path.join(directory, 'valid.sdf.zip')) as f:
valid = f.extract('tox21_10k_challenge_test.sdf')
with zipfile.ZipFile(os.path.join(directory, 'test.sdf.zip')) as f:
test = f.extract('tox21_10k_challenge_score.sdf')
return train, valid, test
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
LOGGER.info('Converting Tox21 Dataset...')
Tox21Converter.convert()

# --- end of skchem/data/converters/tox21.py ---
import os
import logging
import itertools
from collections import defaultdict
import pandas as pd
import numpy as np
from sklearn import metrics
from .base import Converter, default_pipeline, contiguous_order
from ... import io
from ... import utils
from ...cross_validation import SimThresholdSplit
LOGGER = logging.getLogger(__file__)
class NMRShiftDB2Converter(Converter):
def __init__(self, directory, output_directory, output_filename='nmrshiftdb2.h5'):
output_path = os.path.join(output_directory, output_filename)
input_path = os.path.join(directory, 'nmrshiftdb2.sdf')
data = self.parse_data(input_path)
ys = self.get_spectra(data)
ys = self.process_spectra(ys)
ys = self.combine_duplicates(ys)
self.log_dists(ys)
self.log_duplicates(ys)
ys = self.squash_duplicates(ys)
c13s = self.to_frame(ys.loc[ys['13c'].notnull(), '13c'])
data = data[['structure']].join(c13s, how='right')
ms, y = data.structure, data.drop('structure', axis=1)
pipeline = default_pipeline()
ms, y = pipeline.transform_filter(ms, y)
y.columns.name = 'shifts'
cv = SimThresholdSplit(ms, min_threshold=0.6, block_width=4000, n_jobs=-1)
train, valid, test = cv.split((70, 15, 15))
(ms, y, train, valid, test) = contiguous_order((ms, y, train, valid, test), (train, valid, test))
splits = (('train', train), ('valid', valid), ('test', test))
self.run(ms, y, output_path=output_path, splits=splits)
@staticmethod
def parse_data(filepath):
""" Reads the raw datafile. """
LOGGER.info('Reading file: %s', filepath)
data = io.read_sdf(filepath, removeHs=False, warn_bad_mol=False)
data.index = data['nmrshiftdb2 ID'].astype(int)
data.index.name = 'nmrshiftdb2_id'
data.columns = data.columns.to_series().apply(utils.free_to_snail)
data = data.sort_index()
LOGGER.info('Read %s molecules.', len(data))
return data
@staticmethod
def get_spectra(data):
""" Retrieves spectra from raw data. """
LOGGER.info('Retrieving spectra from raw data...')
isotopes = [
'1h',
'11b',
'13c',
'15n',
'17o',
'19f',
'29si',
'31p',
'33s',
'73ge',
'195pt'
]
def is_spectrum(col_name, ele='c'):
return any(isotope in col_name for isotope in isotopes)
spectrum_cols = [c for c in data if is_spectrum(c)]
data = data[spectrum_cols]
def index_pair(s):
return s[0], int(s[1])
data.columns = pd.MultiIndex.from_tuples([index_pair(i.split('_')[1:]) for i in data.columns])
return data
@staticmethod
def process_spectra(data):
""" Turn the string representations found in sdf file into a dictionary. """
def spectrum_dict(spectrum_string):
if not isinstance(spectrum_string, str):
return np.nan # no spectra are still nan
if spectrum_string == '':
return np.nan # empty spectra are nan
sigs = spectrum_string.strip().strip('|').strip().split('|') # extract signals
sig_tup = [tuple(s.split(';')) for s in sigs] # take tuples as (signal, coupling, atom)
return {int(s[2]): float(s[0]) for s in sig_tup} # make spectrum a dictionary of atom to signal
return data.applymap(spectrum_dict)
@staticmethod
def combine_duplicates(data):
""" Collect duplicate spectra into one dictionary. All shifts are collected into lists. """
def aggregate_dicts(ds):
res = defaultdict(list)
for d in ds:
if not isinstance(d, dict): continue
for k, v in d.items():
res[k].append(v)
return dict(res) if len(res) else np.nan
return data.groupby(level=0, axis=1).apply(lambda s: s.apply(aggregate_dicts, axis=1))
@staticmethod
def squash_duplicates(data):
""" Take the mean of all the duplicates. This is where we could do a bit more checking. """
def squash(d):
if not isinstance(d, dict):
return np.nan
else:
return {k: np.mean(v) for k, v in d.items()}
return data.applymap(squash)
@staticmethod
def to_frame(data):
""" Convert a series of dictionaries to a dataframe. """
res = pd.DataFrame(data.tolist(), index=data.index)
res.columns.name = 'atom_idx'
return res
@staticmethod
def extract_duplicates(data, kind='13c'):
""" Get all 13c duplicates. """
def is_duplicate(ele):
if not isinstance(ele, dict):
return False
else:
return len(list(ele.values())[0]) > 1
return data.loc[data[kind].apply(is_duplicate), kind]
@staticmethod
def log_dists(data):
def n_spect(ele):
return isinstance(ele, dict)
def n_shifts(ele):
return len(ele) if isinstance(ele, dict) else 0
def log_message(func):
return ' '.join('{k}: {v}'.format(k=k, v=v) for k, v in data.applymap(func).sum().to_dict().items())
LOGGER.info('Number of spectra: %s', log_message(n_spect))
LOGGER.info('Extracted shifts: %s', log_message(n_shifts))
def log_duplicates(self, data):
for kind in '1h', '13c':
dups = self.extract_duplicates(data, kind)
LOGGER.info('Number of duplicate %s spectra: %s', kind, len(dups))
res = pd.DataFrame(sum((list(itertools.combinations(l, 2)) for s in dups for k, l in s.items()), []))
LOGGER.info('Number of duplicate %s pairs: %f', kind, len(res))
LOGGER.info('MAE for duplicate %s: %.4f', kind, metrics.mean_absolute_error(res[0], res[1]))
LOGGER.info('MSE for duplicate %s: %.4f', kind, metrics.mean_squared_error(res[0], res[1]))
LOGGER.info('r2 for duplicate %s: %.4f', kind, metrics.r2_score(res[0], res[1]))
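
# Illustrative sketch, not part of the original module: a worked example of the
# raw spectrum format parsed by NMRShiftDB2Converter.process_spectra above.
# Signals are '|'-separated, and each signal is 'shift;coupling;atom_index'.
# The example string is made up for illustration.
def _example_parse_spectrum():
    raw = '17.6;0.0;0|29.9;0.0;1|17.6;0.0;2|'
    frame = pd.DataFrame({'13c': [raw]})
    parsed = NMRShiftDB2Converter.process_spectra(frame)
    return parsed.loc[0, '13c']  # -> {0: 17.6, 1: 29.9, 2: 17.6}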
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
LOGGER.info('Converting NMRShiftDB2 Dataset...')
NMRShiftDB2Converter.convert()

# --- end of skchem/data/converters/nmrshiftdb2.py ---
import os
import zipfile
import logging
LOGGER = logging.getLogger(__name__)
import pandas as pd
import numpy as np
import skchem
from .base import Converter
from ... import standardizers
PATCHES = {
'820-75-7': r'NNC(=O)CNC(=O)C=[N+]=[N-]',
'2435-76-9': r'[N-]=[N+]=C1C=NC(=O)NC1=O',
'817-99-2': r'NC(=O)CNC(=O)\C=[N+]=[N-]',
'116539-70-9': r'CCCCN(CC(O)C1=C\C(=[N+]=[N-])\C(=O)C=C1)N=O',
'115-02-6': r'NC(COC(=O)\C=[N+]=[N-])C(=O)O',
'122341-55-3': r'NC(COC(=O)\C=[N+]=[N-])C(=O)O'
}
class MullerAmesConverter(Converter):
def __init__(self, directory, output_directory, output_filename='muller_ames.h5'):
"""
Args:
directory (str):
Directory in which input files reside.
output_directory (str):
Directory in which to save the converted dataset.
output_filename (str):
Name of the saved dataset. Defaults to `muller_ames.h5`.
Returns:
tuple of str:
Single-element tuple containing the path to the converted dataset.
"""
zip_path = os.path.join(directory, 'ci900161g_si_001.zip')
output_path = os.path.join(output_directory, output_filename)
with zipfile.ZipFile(zip_path) as f:
f.extractall()
# create dataframe
data = pd.read_csv(os.path.join(directory, 'smiles_cas_N6512.smi'),
delimiter='\t', index_col=1,
converters={1: lambda s: s.strip()},
header=None, names=['structure', 'id', 'is_mutagen'])
data = self.patch_data(data, PATCHES)
data['structure'] = data.structure.apply(skchem.Mol.from_smiles)
data = self.standardize(data)
data = self.optimize(data)
keep = self.filter(data)
ms, ys = keep.structure, keep.is_mutagen
indices = data.reset_index().index.difference(keep.reset_index().index)
train = self.parse_splits(os.path.join(directory, 'splits_train_N6512.csv'))
train = self.drop_indices(train, indices)
splits = self.create_split_dict(train, 'train')
test = self.parse_splits(os.path.join(directory, 'splits_test_N6512.csv'))
test = self.drop_indices(test, indices)
splits.update(self.create_split_dict(test, 'test'))
# Converter.run expects an iterable of (name, split) pairs
self.run(ms, ys, output_path, splits=list(splits.items()))
def patch_data(self, data, patches):
""" Patch smiles in a DataFrame with rewritten ones that specify diazo
groups in rdkit friendly way. """
LOGGER.info('Patching data...')
for cas, smiles in patches.items():
data.loc[cas, 'structure'] = smiles
return data
def parse_splits(self, f_path):
LOGGER.info('Parsing splits...')
with open(f_path) as f:
splits = [split for split in f.read().strip().splitlines()]
splits = [[n for n in split.strip().split(',')] for split in splits]
splits = [sorted(int(n) for n in split) for split in splits] # sorted ints
return [np.array(split) - 1 for split in splits] # zero based indexing
def drop_indices(self, splits, indices):
LOGGER.info('Dropping failed compounds from split indices...')
for i, split in enumerate(splits):
split = split - sum(split > ix for ix in indices)
splits[i] = np.delete(split, indices)
return splits
def create_split_dict(self, splits, name):
return {'{}_{}'.format(name, i + 1): split \
for i, split in enumerate(splits)}
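
# Illustrative sketch, not part of the original module: parse_splits above
# expects one fold per line of comma-separated, one-based compound numbers and
# returns sorted, zero-based index arrays. The tiny file written here is made
# up for illustration; `self` is unused by parse_splits, so None is passed.
def _example_parse_splits():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as f:
        f.write('3,1,2\n6,4,5\n')
        path = f.name
    return MullerAmesConverter.parse_splits(None, path)
    # -> [array([0, 1, 2]), array([3, 4, 5])]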
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
LOGGER.info('Converting Muller Ames Dataset...')
MullerAmesConverter.convert()

# --- end of skchem/data/converters/muller_ames.py ---
import os
import zipfile
import logging
LOGGER = logging.getLogger(__name__)
import pandas as pd
import numpy as np
from ... import io
from .base import Converter, contiguous_order
from ...cross_validation import SimThresholdSplit
TXT_COLUMNS = [l.lower() for l in """CAS
Formula
Mol_Weight
Chemical_Name
WS
WS_temp
WS_type
WS_reference
LogP
LogP_temp
LogP_type
LogP_reference
VP
VP_temp
VP_type
VP_reference
DC_pKa
DC_temp
DC_type
DC_reference
henry_law Constant
HL_temp
HL_type
HL_reference
OH
OH_temp
OH_type
OH_reference
BP_pressure
MP
BP
FP""".split('\n')]
class PhysPropConverter(Converter):
def __init__(self, directory, output_directory, output_filename='physprop.h5'):
output_path = os.path.join(output_directory, output_filename)
sdf, txt = self.extract(directory)
mols, data = self.process_sdf(sdf), self.process_txt(txt)
LOGGER.debug('Compounds with data extracted: %s', len(data))
data = mols.to_frame().join(data)
data = self.drop_inconsistencies(data)
y = self.process_targets(data)
LOGGER.debug('Compounds with experimental: %s', len(y))
data = data.ix[y.index]
data.columns.name = 'targets'
ms, y = data.structure, data.drop('structure', axis=1)
cv = SimThresholdSplit(ms, min_threshold=0.6, block_width=4000, n_jobs=-1)
train, valid, test = cv.split((70, 15, 15))
(ms, y, train, valid, test) = contiguous_order((ms, y, train, valid, test), (train, valid, test))
splits = (('train', train), ('valid', valid), ('test', test))
self.run(ms, y, output_path=output_path, splits=splits)
def extract(self, directory):
LOGGER.info('Extracting from %s', directory)
with zipfile.ZipFile(os.path.join(directory, 'phys_sdf.zip')) as f:
sdf = f.extract('PhysProp.sdf')
with zipfile.ZipFile(os.path.join(directory, 'phys_txt.zip')) as f:
txt = f.extract('PhysProp.txt')
return sdf, txt
def process_sdf(self, path):
LOGGER.info('Processing sdf at %s', path)
mols = io.read_sdf(path, read_props=False).structure
mols.index = mols.apply(lambda m: m.GetProp('CAS'))
mols.index.name = 'cas'
LOGGER.debug('Structures extracted: %s', len(mols))
return mols
def process_txt(self, path):
LOGGER.info('Processing txt at %s', path)
data = pd.read_table(path, header=None, engine='python').iloc[:, :32]
data.columns = TXT_COLUMNS
data_types = data.columns[[s.endswith('_type') for s in data.columns]]
data[data_types] = data[data_types].fillna('NAN')
data = data.set_index('cas')
return data
def drop_inconsistencies(self, data):
LOGGER.info('Dropping inconsistent data...')
formula = data.structure.apply(lambda m: m.to_formula())
LOGGER.info('Inconsistent compounds: %s', (formula != data.formula).sum())
data = data[formula == data.formula]
return data
def process_targets(self, data):
LOGGER.info('Dropping estimated data...')
data = pd.concat([self.process_logS(data),
self.process_logP(data),
self.process_mp(data),
self.process_bp(data)], axis=1)
LOGGER.info('Dropped compounds: %s', data.isnull().all(axis=1).sum())
data = data[data.notnull().any(axis=1)]
LOGGER.debug('Compounds with experimental activities: %s', len(data))
return data
def process_logS(self, data):
cleaned = pd.DataFrame(index=data.index)
S = 0.001 * data.ws / data.mol_weight
logS = np.log10(S)
return logS[data.ws_type == 'EXP']
def process_logP(self, data):
logP = data.logp[data.logp_type == 'EXP']
return logP[logP > -10]
def process_mp(self, data):
return data.mp.apply(self.fix_temp)
def process_bp(self, data):
return data.bp.apply(self.fix_temp)
@staticmethod
def fix_temp(s, mean_range=5):
try:
return float(s)
except ValueError:
if '<' in s or '>' in s:
return np.nan
s = s.strip(' dec')
s = s.strip(' sub')
if '-' in s and mean_range:
rng = [float(n) for n in s.split('-')]
if len(rng) > 2:
return np.nan
if np.abs(rng[1] - rng[0]) < mean_range:
return (rng[0] + rng[1])/2
try:
return float(s)
except ValueError:
return np.nan
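
# Illustrative sketch, not part of the original module: a few made-up inputs
# showing how fix_temp above normalises the free-text melting/boiling point
# fields. Plain numbers pass through, narrow ranges are averaged, and
# qualified values ('<', '>') are dropped as NaN.
def _example_fix_temp():
    fix = PhysPropConverter.fix_temp
    return [
        fix('12.5'),     # -> 12.5
        fix('12-14'),    # -> 13.0 (ranges narrower than 5 degrees are averaged)
        fix('> 300'),    # -> nan
        fix('152 dec'),  # -> 152.0 (trailing ' dec' annotation stripped)
    ]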
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
LOGGER.info('Converting PhysProp Dataset...')
PhysPropConverter.convert()

# --- end of skchem/data/converters/physprop.py ---
import os
import logging
logger = logging.getLogger(__name__)
import pandas as pd
from .base import Converter, default_pipeline, contiguous_order
from ...core import Mol
from ...cross_validation import SimThresholdSplit
class BradleyOpenMPConverter(Converter):
def __init__(self, directory, output_directory, output_filename='bradley_open_mp.h5'):
output_path = os.path.join(output_directory, output_filename)
data = self.parse_data(os.path.join(directory, 'bradley_melting_point_dataset.xlsx'))
data = self.filter_bad(data)
def parse_smiles(smi):
try:
return Mol.from_smiles(smi)
except ValueError:
return None
data['structure'] = data.smiles.apply(parse_smiles)
data = data[data.structure.notnull()]
ms, y = data.structure, self.fix_mp(data)
pipeline = default_pipeline()
ms, y = pipeline.transform_filter(ms, y)
cv = SimThresholdSplit(ms, min_threshold=0.6, n_jobs=-1)
train, valid, test = cv.split((70, 15, 15))
(ms, y, train, valid, test) = contiguous_order((ms, y, train, valid, test), (train, valid, test))
splits = (('train', train), ('valid', valid), ('test', test))
self.run(ms, y, output_path=output_path, splits=splits)
@staticmethod
def parse_data(path):
logger.info('Parsing data at %s...', path)
return pd.read_excel(path, index_col=0)
@staticmethod
def filter_bad(data):
logger.info('Removing manually annotated errors...')
bad_data = data.donotuse.notnull()
logger.debug('Removed %s', bad_data.sum())
return data[~bad_data]
@staticmethod
def fix_mp(data):
logger.info('Converting temperature to Kelvin...')
return data.mpC + 273.15
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
logger.info('Converting Bradley Open Melting Point Dataset...')
BradleyOpenMPConverter.convert()

# --- end of skchem/data/converters/bradley_open_mp.py ---
from functools import wraps
import warnings
from rdkit import Chem
import pandas as pd
from ..core import Mol
from ..utils import Suppressor, squeeze
def _drop_props(row):
for prop in row.structure.props.keys():
row.structure.ClearProp(prop)
def _set_props(row, cols):
for i in cols:
row.structure.SetProp(str(i), str(row[i])) # rdkit props can only be str
def _set_name(row):
row.structure.name = str(row.name) # rdkit props can only be strs
def read_sdf(sdf, error_bad_mol=False, warn_bad_mol=True, nmols=None,
skipmols=None, skipfooter=None, read_props=True, mol_props=False,
*args, **kwargs):
"""Read an sdf file into a `pd.DataFrame`.
The function wraps the RDKit `ForwardSDMolSupplier` object.
Args:
sdf (str or file-like):
The location of data to load, as a file path, or a file-like object.
error_bad_mol (bool):
Whether an error should be raised if a molecule fails to parse.
Default is False.
warn_bad_mol (bool):
Whether a warning should be output if a molecule fails to parse.
Default is True.
nmols (int):
The number of molecules to read. If `None`, read all molecules.
Default is `None`.
skipmols (int):
The number of molecules to skip at start.
Default is `0`.
skipfooter (int):
The number of molecules to skip from the end.
Default is `0`.
read_props (bool):
Whether to read the properties into the data frame.
Default is `True`.
mol_props (bool):
Whether to keep properties in the molecule dictionary after they are
extracted to the dataframe.
Default is `False`.
args, kwargs:
Arguments will be passed to rdkit's ForwardSDMolSupplier.
Returns:
pandas.DataFrame:
The loaded data frame, with Mols supplied in the `structure` field.
See also:
rdkit.Chem.SDForwardMolSupplier
skchem.read_smiles
"""
# nmols is actually the index to cutoff. If we skip some at start, we need
# to add this number
if skipmols:
nmols += skipmols
if isinstance(sdf, str):
sdf = open(sdf, 'rb') # use read bytes for python 3 compatibility
# use the suppression context manager to not pollute our stdout with rdkit
# errors and warnings.
# perhaps this should be captured better by Mol etc.
with Suppressor():
mol_supp = Chem.ForwardSDMolSupplier(sdf, *args, **kwargs)
mols = []
# single loop through sdf
for i, mol in enumerate(mol_supp):
if skipmols and i < skipmols:
continue
if nmols and i >= nmols:
break
# rdkit returns None if it fails to parse a molecule. We will raise
# errors unless force is used.
if mol is None:
msg = 'Molecule {} could not be decoded.'.format(i + 1)
if error_bad_mol:
raise ValueError(msg)
elif warn_bad_mol:
warnings.warn(msg)
continue
mols.append(Mol(mol))
if skipfooter:
mols = mols[:-skipfooter]
idx = pd.Index((m.name for m in mols), name='name')
data = pd.DataFrame(mols, columns=['structure'])
if read_props:
props = pd.DataFrame([{k: v for (k, v) in mol.props.items()} for mol in mols])
data = pd.concat([data, props], axis=1)
# now we have extracted the props, we can delete if required
if not mol_props:
data.apply(_drop_props, axis=1)
data.index = idx
return squeeze(data, axis=1)
def write_sdf(data, sdf, write_cols=True, index_as_name=True, mol_props=False,
*args, **kwargs):
""" Write an sdf file from a dataframe.
Args:
data (pandas.Series or pandas.DataFrame):
Pandas data structure with a `structure` column containing compounds
to serialize.
sdf (str or file-like):
A file path or file-like object specifying where to write the
compound data.
write_cols (bool):
Whether columns should be written as props. Default `True`.
index_as_name (bool):
Whether to use index as the header, or the molecule's name.
Default is `True`.
mol_props (bool):
Whether to write properties in the Mol dictionary in addition to
fields in the frame.
Warn:
This function will change the names of the compounds if the
`index_as_name` argument is `True`, and will delete all properties in
the molecule dictionary if `mol_props` is `False`.
"""
if isinstance(data, pd.Series):
data = data.to_frame(name='structure')
names = [m.name for m in data.structure]
writer = Chem.SDWriter(sdf, *args, **kwargs)
cols = list(data.columns.drop('structure'))
if not mol_props:
data.apply(_drop_props, axis=1)
if write_cols:
data.apply(_set_props, cols=cols, axis=1)
if index_as_name:
data.apply(_set_name, axis=1)
data.structure.apply(writer.write)
# rdkit writer changes names sometimes
for mol, name in zip(data.structure, names):
mol.name = name
@wraps(write_sdf)
def _to_sdf_series(self, *args, **kwargs):
return write_sdf(self, write_cols=False, *args, **kwargs)
@wraps(write_sdf)
def _to_sdf_df(self, *args, **kwargs):
return write_sdf(self, *args, **kwargs)
pd.Series.to_sdf = _to_sdf_series
pd.DataFrame.to_sdf = _to_sdf_df
@classmethod
@wraps(read_sdf)
def _from_sdf_df(_, *args, **kwargs):
return read_sdf(*args, **kwargs)
pd.DataFrame.from_sdf = _from_sdf_df
@classmethod
@wraps(read_sdf)
def _from_sdf_series(_, *args, **kwargs):
return read_sdf(*args, **kwargs).structure
pd.Series.from_sdf = _from_sdf_series

# --- end of skchem/io/sdf.py ---
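
# Illustrative sketch, not part of the original package: a minimal round trip
# through the write_sdf/read_sdf functions defined in the module above. The
# SMILES, property values and file name are made up; `skchem.io.read_sdf`,
# `skchem.io.write_sdf` and `skchem.Mol` are assumed to be exposed as they are
# used in the converter modules earlier in this document.
def _example_sdf_round_trip(path='example.sdf'):
    import pandas as pd
    import skchem
    mols = pd.Series([skchem.Mol.from_smiles('CCO', name='ethanol'),
                      skchem.Mol.from_smiles('c1ccccc1', name='benzene')],
                     name='structure')
    frame = mols.to_frame()
    frame['mp'] = [159.0, 278.7]          # arbitrary example property column
    skchem.io.write_sdf(frame, path)      # property columns become SD tags
    return skchem.io.read_sdf(path)       # -> DataFrame with structure and mp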
from collections import Counter
import numpy as np
import pandas as pd
from ..resource import ORGANIC, PERIODIC_TABLE
from .base import Filter
class ElementFilter(Filter):
""" Filter by elements.
Args:
elements (list[str]):
A list of elements to filter with. If an element not in the list is
found in a molecule, return False, else return True.
as_bits (bool):
Whether to return integer counts or booleans for atoms if mode is `count`.
Examples:
Basic usage on molecules:
>>> import skchem
>>> has_halogen = skchem.filters.ElementFilter(['F', 'Cl', 'Br', 'I'], agg='any')
Molecules with one of the atoms transform to `True`.
>>> m1 = skchem.Mol.from_smiles('ClC(Cl)Cl', name='chloroform')
>>> has_halogen.transform(m1)
True
Molecules with none of the atoms transform to `False`.
>>> m2 = skchem.Mol.from_smiles('CC', name='ethane')
>>> has_halogen.transform(m2)
False
Can see the atom breakdown by passing `agg` == `False`:
>>> has_halogen.transform(m1, agg=False)
has_element
F 0
Cl 3
Br 0
I 0
Name: ElementFilter, dtype: int64
Can transform series.
>>> ms = [m1, m2]
>>> has_halogen.transform(ms)
chloroform True
ethane False
dtype: bool
>>> has_halogen.transform(ms, agg=False)
has_element F Cl Br I
chloroform 0 3 0 0
ethane 0 0 0 0
Can also filter series:
>>> has_halogen.filter(ms)
chloroform <Mol: ClC(Cl)Cl>
Name: structure, dtype: object
>>> has_halogen.filter(ms, neg=True)
ethane <Mol: CC>
Name: structure, dtype: object
"""
def __init__(self, elements=None, as_bits=False, **kwargs):
self.elements = elements
self.as_bits = as_bits
super(ElementFilter, self).__init__(**kwargs)
@property
def elements(self):
return self._elements
@elements.setter
def elements(self, val):
if val is None:
self._elements = PERIODIC_TABLE.symbol.tolist()
else:
self._elements = val
@property
def columns(self):
return pd.Index(self.elements, name='has_element')
def _transform_mol(self, mol):
counter = Counter(atom.element for atom in mol.atoms)
res = pd.Series(counter)
res = res[self.elements].fillna(0).astype(int)
if self.as_bits:
res = (res > 0).astype(np.uint8)
return res
class OrganicFilter(ElementFilter):
""" Whether a molecule is organic.
For the purpose of this function, an organic molecule is defined as having
atoms with elements only in the set H, B, C, N, O, F, P, S, Cl, Br, I.
Args:
mol (skchem.Mol):
The molecule to be tested.
Returns:
bool:
Whether the molecule is organic.
Examples:
Basic usage as a function on molecules:
>>> import skchem
>>> of = skchem.filters.OrganicFilter()
>>> benzene = skchem.Mol.from_smiles('c1ccccc1', name='benzene')
>>> of.transform(benzene)
True
>>> ferrocene = skchem.Mol.from_smiles('[cH-]1cccc1.[cH-]1cccc1.[Fe+2]',
... name='ferrocene')
>>> of.transform(ferrocene)
False
More useful on collections:
>>> sa = skchem.Mol.from_smiles('CC(=O)[O-].[Na+]', name='sodium acetate')
>>> norbornane = skchem.Mol.from_smiles('C12CCC(C2)CC1', name='norbornane')
>>> data = [benzene, ferrocene, norbornane, sa]
>>> of.transform(data)
benzene True
ferrocene False
norbornane True
sodium acetate False
dtype: bool
>>> of.filter(data)
benzene <Mol: c1ccccc1>
norbornane <Mol: C1CC2CCC1C2>
Name: structure, dtype: object
>>> of.filter(data, neg=True)
ferrocene <Mol: [Fe+2].c1cc[cH-]c1.c1cc[cH-]c1>
sodium acetate <Mol: CC(=O)[O-].[Na+]>
Name: structure, dtype: object
"""
def __init__(self):
super(OrganicFilter, self).__init__(elements=None, agg='not any')
self.elements = [element for element in self.elements if element not in ORGANIC]
def n_atoms(mol, above=2, below=75, include_hydrogens=False):
""" Whether the number of atoms in a molecule falls in a defined interval.
``above <= n_atoms < below``
Args:
mol: (skchem.Mol):
The molecule to be tested.
above (int):
The lower threshold number of atoms (inclusive).
Defaults to 2.
below (int):
The upper threshold number of atoms (exclusive).
Defaults to 75.
Returns:
bool:
Whether the number of atoms in the molecule falls in the interval.
Examples:
Basic usage as a function on molecules:
>>> import skchem
>>> m = skchem.Mol.from_smiles('c1ccccc1') # benzene has 6 atoms.
Lower threshold:
>>> skchem.filters.n_atoms(m, above=3)
True
>>> skchem.filters.n_atoms(m, above=8)
False
Higher threshold:
>>> skchem.filters.n_atoms(m, below=8)
True
>>> skchem.filters.n_atoms(m, below=3)
False
Bounds work like Python slices - inclusive lower, exclusive upper:
>>> skchem.filters.n_atoms(m, above=6)
True
>>> skchem.filters.n_atoms(m, below=6)
False
Both can be used at once:
>>> skchem.filters.n_atoms(m, above=3, below=8)
True
Can include hydrogens:
>>> skchem.filters.n_atoms(m, above=3, below=8, include_hydrogens=True)
False
>>> skchem.filters.n_atoms(m, above=9, below=14, include_hydrogens=True)
True
"""
assert above < below, 'Interval {} < a < {} undefined.'.format(above, below)
n_a = len(mol.atoms)
if include_hydrogens:
n_a += sum(atom.GetNumImplicitHs() + atom.GetNumExplicitHs() for atom in mol.atoms)
return above <= n_a < below
class AtomNumberFilter(Filter):
"""Filter for whether the number of atoms in a molecule falls in a defined interval.
``above <= n_atoms < below``
Args:
above (int):
The lower threshold number of atoms (inclusive).
Defaults to 3.
below (int):
The upper threshold number of atoms (exclusive).
Defaults to 60.
Examples:
>>> import skchem
>>> data = [
... skchem.Mol.from_smiles('CC', name='ethane'),
... skchem.Mol.from_smiles('CCCC', name='butane'),
... skchem.Mol.from_smiles('NC(C)C(=O)O', name='alanine'),
... skchem.Mol.from_smiles('C12C=CC(C=C2)C=C1', name='barrelene')
... ]
>>> af = skchem.filters.AtomNumberFilter(above=3, below=7)
>>> af.transform(data)
ethane False
butane True
alanine True
barrelene False
Name: num_atoms_in_range, dtype: bool
>>> af.filter(data)
butane <Mol: CCCC>
alanine <Mol: CC(N)C(=O)O>
Name: structure, dtype: object
>>> af = skchem.filters.AtomNumberFilter(above=5, below=15, include_hydrogens=True)
>>> af.transform(data)
ethane True
butane True
alanine True
barrelene False
Name: num_atoms_in_range, dtype: bool
"""
def __init__(self, above=3, below=60, include_hydrogens=False, **kwargs):
assert above < below, 'Interval {} < a < {} undefined.'.format(above, below)
self.above = above
self.below = below
self.include_hydrogens = include_hydrogens
super(AtomNumberFilter, self).__init__(**kwargs)
def _transform_mol(self, mol):
return n_atoms(mol, above=self.above, below=self.below, include_hydrogens=self.include_hydrogens)
@property
def columns(self):
return pd.Index(['num_atoms_in_range'])
def mass(mol, above=10, below=900):
""" Whether a the molecular weight of a molecule is lower than a threshold.
``above <= mass < below``
Args:
mol: (skchem.Mol):
The molecule to be tested.
above (float):
The lower threshold on the mass (inclusive).
Defaults to 10.
below (float):
The upper threshold on the mass (exclusive).
Defaults to 900.
Returns:
bool:
Whether the mass of the molecule falls in the interval.
Examples:
Basic usage as a function on molecules:
>>> import skchem
>>> m = skchem.Mol.from_smiles('c1ccccc1') # benzene has M_r = 78.
>>> skchem.filters.mass(m, above=70)
True
>>> skchem.filters.mass(m, above=80)
False
>>> skchem.filters.mass(m, below=80)
True
>>> skchem.filters.mass(m, below=70)
False
>>> skchem.filters.mass(m, above=70, below=80)
True
"""
return above <= mol.mass < below
class MassFilter(Filter):
""" Filter whether a the molecular weight of a molecule is lower than a threshold.
``above <= mass < below``
Args:
above (float):
The lower threshold on the mass (inclusive).
Defaults to 3.
below (float):
The upper threshold on the mass (exclusive).
Defaults to 900.
Examples:
>>> import skchem
>>> data = [
... skchem.Mol.from_smiles('CC', name='ethane'),
... skchem.Mol.from_smiles('CCCC', name='butane'),
... skchem.Mol.from_smiles('NC(C)C(=O)O', name='alanine'),
... skchem.Mol.from_smiles('C12C=CC(C=C2)C=C1', name='barrelene')
... ]
>>> mf = skchem.filters.MassFilter(above=31, below=100)
>>> mf.transform(data)
ethane False
butane True
alanine True
barrelene False
Name: mass_in_range, dtype: bool
>>> mf.filter(data)
butane <Mol: CCCC>
alanine <Mol: CC(N)C(=O)O>
Name: structure, dtype: object
"""
def __init__(self, above=3, below=900, **kwargs):
assert above < below, 'Interval {} < a < {} undefined.'.format(above, below)
self.above = above
self.below = below
super(MassFilter, self).__init__(**kwargs)
def _transform_mol(self, mol):
return mass(mol, above=self.above, below=self.below)
@property
def columns(self):
return pd.Index(['mass_in_range']) | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/filters/simple.py | 0.896438 | 0.505188 | simple.py | pypi |
from rdkit import RDConfig
import os
import pandas as pd
from .base import Filter
from ..core import Mol
class SMARTSFilter(Filter):
""" Filter a molecule based on smarts.
Args:
smarts (pd.Series):
A series of SMARTS to use in the filter.
agg (function):
Option specifying the mode of the filter.
- None : No filtering takes place.
- any: Return True if any of the substructures are in the molecule.
- all: Return True only if all of the substructures are in the molecule.
Examples:
>>> import skchem
>>> data = [
... skchem.Mol.from_smiles('CC', name='ethane'),
... skchem.Mol.from_smiles('c1ccccc1', name='benzene'),
... skchem.Mol.from_smiles('c1ccccc1-c2c(C=O)ccnc2', name='big')
... ]
>>> f = skchem.filters.SMARTSFilter({'benzene': 'c1ccccc1', 'pyridine': 'c1ccccn1', 'acetyl': 'C=O'}, agg='any')
>>> f.transform(data, agg=False)
acetyl benzene pyridine
ethane False False False
benzene False True False
big True True True
>>> f.transform(data)
ethane False
benzene True
big True
dtype: bool
>>> f.filter(data)
benzene <Mol: c1ccccc1>
big <Mol: O=Cc1ccncc1-c1ccccc1>
Name: structure, dtype: object
>>> f.agg = all
>>> f.filter(data)
big <Mol: O=Cc1ccncc1-c1ccccc1>
Name: structure, dtype: object
"""
def __init__(self, smarts, **kwargs):
def read_smarts(s):
if isinstance(s, str):
return Mol.from_smarts(s, mergeHs=True)
else:
return s
self.smarts = pd.Series(smarts).apply(read_smarts)
super(SMARTSFilter, self).__init__(**kwargs)
def _transform_mol(self, mol):
return self.smarts.apply(lambda smarts: smarts in mol).values
@property
def columns(self):
return self.smarts.index
class PAINSFilter(SMARTSFilter):
""" Whether a molecule passes the Pan Assay INterference (PAINS) filters.
These are supplied with RDKit, and were originally proposed by Baell et al.
References:
[The original paper](http://dx.doi.org/10.1021/jm901137j)
Examples:
Basic usage as a function on molecules:
>>> import skchem
>>> benzene = skchem.Mol.from_smiles('c1ccccc1', name='benzene')
>>> pf = skchem.filters.PAINSFilter()
>>> pf.transform(benzene)
True
>>> catechol = skchem.Mol.from_smiles('Oc1c(O)cccc1', name='catechol')
>>> pf.transform(catechol)
False
>>> res = pf.transform(catechol, agg=False)
>>> res[res]
names
catechol_A(92) True
Name: PAINSFilter, dtype: bool
More useful in combination with pandas DataFrames:
>>> data = [benzene, catechol]
>>> pf.transform(data)
benzene True
catechol False
dtype: bool
>>> pf.filter(data)
benzene <Mol: c1ccccc1>
Name: structure, dtype: object
"""
def __init__(self):
super(PAINSFilter, self).__init__(self._load_pains(), agg='not any')
@classmethod
def _load_pains(cls):
""" Load PAINS included in rdkit into a pandas dataframe and cache as class attribute. """
if not hasattr(cls, '_pains'):
path = os.path.join(RDConfig.RDDataDir, 'Pains', 'wehi_pains.csv')
pains = pd.read_csv(path, names=['pains', 'names'])
pains['names'] = pains.names.str.lstrip('<regId=').str.rstrip('>')
pains = pains.set_index('names').pains.apply(Mol.from_smarts, mergeHs=True)
cls._pains = pains
return cls._pains | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/filters/smarts.py | 0.837985 | 0.489198 | smarts.py | pypi |
from rdkit.Chem.Draw import MolToImage, DrawingOptions
import numpy as np
from matplotlib import pyplot as plt
def plot_weights(mol, weights, quality=1, l=0.4, step=50, levels=20, contour_opacity=0.5, cmap='RdBu', ax=None, **kwargs):
""" Plot weights as a sum of gaussians across a structure image.
Args:
mol (skchem.Mol):
Molecule to visualize weights for.
weights (iterable<float>):
Array of weights in atom index order.
quality (int):
Resolution multiplier for the underlying structure image.
l (float):
Lengthscale of the gaussians to visualize, as a multiple of bond length.
step (int):
Size of the grid edge used to calculate the gaussians.
levels (int):
Number of contours to plot.
contour_opacity (float):
Alpha applied to the contour layer.
ax (plt.axis):
Axis to apply the plot to. Defaults to current axis.
cmap (plt.cm):
Colormap to use for the contour.
**kwargs:
Passed to contourf function.
Returns:
matplotlib.AxesSubplot: The plot.
"""
if not ax:
ax = plt.gca()
ax.grid('off')
ax.axis('off')
opts = DrawingOptions()
opts.dotsPerAngstrom *= quality
opts.atomLabelFontSize *= quality
opts.bondLineWidth *= quality
size = 300 * quality
img, canvas, drawer = MolToImage(mol, size=(size, size), options=opts, returnCanvas=True)
canvas.flush()
coords = np.array([[i / size, 1 - j / size] for k, (i, j) in list(drawer.atomPs.values())[0].items()])
b = mol.bonds[0]
begin, end = b.GetBeginAtom().GetIdx(), b.GetEndAtom().GetIdx()
length = np.linalg.norm(coords[end] - coords[begin])
x = np.linspace(0, 1, 500)
y = np.linspace(0, 1, 500)
x, y = np.meshgrid(x, y)
def gaussian(x, y, mu=np.zeros(2), sigma=np.identity(2), size=50):
return (1 / (2 * np.pi * sigma[0, 0] * sigma[1, 1]) * np.exp(-((x - mu[0]) ** 2 / (2 * sigma[0, 0] ** 2)
+ (y - mu[1]) ** 2 / (2 * sigma[1, 1] ** 2))))
if not np.max(weights) == np.min(weights) == 0:
z = sum([w * gaussian(x, y, mu, sigma=l * length * np.identity(2)) for mu, w in zip(coords, weights)])
v = np.max((np.abs(z.min()), np.abs(z.max())))
else:
z = np.zeros(x.shape)
v = 1
if z.min() >= 0:
levels = int(levels/2)
cf = ax.contourf(x, y, z, levels, alpha=contour_opacity, extent=(0, 1, 0, 1), vmin=-v, vmax=v, cmap=cmap, **kwargs)
ax.imshow(img, extent=(0, 1, 0, 1))
return ax | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/vis/atom.py | 0.967506 | 0.670804 | atom.py | pypi |
import argparse
import ci
import os
class _OptionalStep(argparse.Action):
"""Custom action making the ``step`` positional argument with choices
optional.
Setting the ``choices`` attribute directly would fail with an *invalid choice* error, so this action validates the value manually.
Adapted from http://stackoverflow.com/questions/8526675/python-argparse-optional-append-argument-with-choices/8527629#8527629
""" # noqa: E501
def __call__(self, parser, namespace, value, option_string=None):
if value:
if value not in ci.STEPS:
message = ("invalid choice: {0!r} (choose from {1})"
.format(value,
', '.join([repr(action)
for action in
ci.STEPS])))
raise argparse.ArgumentError(self, message)
setattr(namespace, self.dest, value)
def main():
"""The main entry point to ``ci.py``.
This is installed as the script entry point.
"""
version_str = ("This is scikit-ci version %s, imported from %s\n" %
(ci.__version__, os.path.abspath(ci.__file__)))
parser = argparse.ArgumentParser(description=ci.__doc__)
parser.add_argument(
"step", type=str, nargs='?', default=ci.STEPS[-1],
action=_OptionalStep, metavar='STEP',
help="name of the step to execute. "
"Choose from: {}. "
"If no step is specified, all are executed.".format(", ".join(
[repr(action) for action in ci.STEPS]))
)
parser.add_argument(
"--force", action="store_true",
help="always execute the steps"
)
parser.add_argument(
"--without-deps", action="store_false",
help="do not execute dependent steps", dest='with_dependencies'
)
parser.add_argument(
"--clear-cached-env", action="store_true",
help="clear cached environment (removes 'env.json' file)"
)
parser.add_argument(
"--version", action="version",
version=version_str,
help="display scikit-ci version and import information.")
args = parser.parse_args()
try:
ci.execute_step(
args.step,
force=args.force,
with_dependencies=args.with_dependencies,
clear_cached_env=args.clear_cached_env
)
except ci.SKCIError as exc:
exit(exc)
if __name__ == '__main__': # pragma: no cover
main() | /scikit-ci-0.21.0.tar.gz/scikit-ci-0.21.0/ci/__main__.py | 0.583559 | 0.174621 | __main__.py | pypi |
Scikit-clean
==================
**scikit-clean** is a Python ML library for classification in the presence of \
label noise. Aimed primarily at researchers, it provides implementations of \
several state-of-the-art algorithms, along with tools to simulate artificial noise, create complex pipelines \
and evaluate them.
This library is fully scikit-learn API compatible, which means \
all of scikit-learn's building blocks can be seamlessly integrated into your workflow. \
Like scikit-learn estimators, most of the methods also support features like \
parallelization and reproducibility.
Example Usage
***************
A typical label noise research workflow begins with clean labels, simulates \
label noise into the training set, and then evaluates how a model handles that noise \
using a clean test set. In scikit-clean, this looks like:
.. code-block:: python
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

from skclean.simulate_noise import flip_labels_uniform
from skclean.models import RobustLR # Robust Logistic Regression

X, y = make_classification(n_samples=200, n_features=4)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.20)
y_train_noisy = flip_labels_uniform(y_train, .3) # Flip labels of 30% samples
clf = RobustLR().fit(X_train,y_train_noisy)
print(clf.score(X_test, y_test))
scikit-clean provides a customized `Pipeline` for more complex workflows. Many noise-robust \
algorithms can be broken down into two steps: detecting the noise likelihood of each sample
in the dataset, and training robust classifiers using that information. This fits
nicely with Pipeline's API:
.. code-block:: python
# ---Import scikit-learn stuff----
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import VarianceThreshold
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score

# ---Import scikit-clean stuff----
from skclean.simulate_noise import UniformNoise
from skclean.detectors import KDN
from skclean.handlers import Filter
from skclean.pipeline import Pipeline, make_pipeline # Importing from skclean, not sklearn
clf = Pipeline([
('scale', StandardScaler()), # Scale features
('feat_sel', VarianceThreshold(.2)), # Feature selection
('detector', KDN()), # Detect mislabeled samples
('handler', Filter(SVC())), # Filter out likely mislabeled samples and then train a SVM
])
clf_g = GridSearchCV(clf,{'detector__n_neighbors':[2,5,10]})
n_clf_g = make_pipeline(UniformNoise(.3),clf_g) # Create label noise at the very first step
print(cross_val_score(n_clf_g, X, y, cv=5).mean()) # 5-fold cross validation
Please see this notebook_ before you begin for a more detailed introduction, \
and this_ for the complete API.
.. _notebook: https://scikit-clean.readthedocs.io/en/latest/examples/Introduction%20to%20Scikit-clean.html
.. _this: https://scikit-clean.readthedocs.io/en/latest/api.html
Installation
******************
The simplest option is probably to use pip::
pip install scikit-clean
If you intend to modify the code, install in editable mode::
git clone https://github.com/Shihab-Shahriar/scikit-clean.git
cd scikit-clean
pip install -e .
If you're only interested in a small part of this library, say one or two algorithms, feel free to simply \
copy/paste the relevant code into your project.
Alternatives
**************
There are several open source tools to handle label noise; some of them are:
1. Cleanlab_
2. Snorkel_
3. NoiseFiltersR_
.. _Cleanlab: https://github.com/cgnorthcutt/cleanlab
.. _Snorkel: https://github.com/snorkel-team/snorkel
.. _NoiseFiltersR: https://journal.r-project.org/archive/2017/RJ-2017-027/RJ-2017-027.pdf
`NoiseFiltersR` is closest in objective to ours, though it's implemented in R and doesn't \
appear to be actively maintained.
`Cleanlab` and `Snorkel` are both in Python, though they have somewhat different \
priorities than us. While our goal is to implement as many algorithms as \
possible, these tools usually focus on one or a few related papers. They have also been \
in development for some time, meaning they are more stable, well-optimized and better suited \
for practitioners/engineers than `scikit-clean`.
Credits
**************
We want to thank `scikit-learn`, `imbalanced-learn` and `Cleanlab`: these implementations \
are inspired by, and directly borrow code from, those libraries.
We also want to thank the authors of the original papers. Here is a list of papers partially \
or fully implemented by `scikit-clean`:
* Taghi M Khoshgoftaar and Pierre Rebours. Improving software quality prediction by noise filtering techniques. Journal of Computer Science and Technology, 22(3):387–396, 2007.
* Sunghun Kim, Hongyu Zhang, Rongxin Wu, and Liang Gong. Dealing with noise in defect prediction. In 2011 33rd International Conference on Software Engineering (ICSE), 481–490. IEEE, 2011.
* Alexander Hanbo Li and Andrew Martin. Forest-type regression with general losses and robust forest. In International Conference on Machine Learning, 2091–2100. 2017.
* Aditya Krishna Menon, Brendan Van Rooyen, and Nagarajan Natarajan. Learning from binary labels with instance-dependent noise. Machine Learning, 107(8-10):1561–1595, 2018.
* Nagarajan Natarajan, Inderjit S Dhillon, Pradeep K Ravikumar, and Ambuj Tewari. Learning with noisy labels. In Advances in neural information processing systems, 1196–1204. 2013.
* Maryam Sabzevari, Gonzalo Martínez-Muñoz, and Alberto Suárez. A two-stage ensemble method for the detection of class-label noise. Neurocomputing, 275:2374–2383, 2018.
* Michael R Smith, Tony Martinez, and Christophe Giraud-Carrier. An instance level analysis of data complexity. Machine learning, 95(2):225–256, 2014.
* Felipe N Walmsley, George DC Cavalcanti, Dayvid VR Oliveira, Rafael MO Cruz, and Robert Sabourin. An ensemble generation method based on instance hardness. In 2018 International Joint Conference on Neural Networks (IJCNN), 1–8. IEEE, 2018.
* Bianca Zadrozny, John Langford, and Naoki Abe. Cost-sensitive learning by cost-proportionate example weighting. In Third IEEE international conference on data mining, 435–442. IEEE, 2003.
* Zijin Zhao, Lingyang Chu, Dacheng Tao, and Jian Pei. Classification with label noise: a markov chain sampling framework. Data Mining and Knowledge Discovery, 33(5):1468–1504, 2019.
A note about naming
-----------------------------------------------
"There are 2 hard problems in computer science: cache invalidation, naming things, and \
off-by-1 errors."
The majority of the algorithms in `scikit-clean` were not explicitly named by their authors. \
In some rare cases, similar or very similar ideas appear under different names (e.g. `KDN`). \
We tried to name things as best we could. However, if you're the author of any of these \
methods and want to rename it, we'll happily oblige.
| /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/README.rst | 0.875121 | 0.862178 | README.rst | pypi |
## Introduction to Scikit-clean
`scikit-clean` is a Python ML library for classification in the presence of label noise. Aimed primarily at researchers, it provides implementations of several state-of-the-art algorithms, along with tools to simulate artificial noise, create complex pipelines and evaluate them.
### Example Usage
Before we dive into the details, let's take a quick look to see how it works. scikit-clean, as the name implies, is built on top of scikit-learn and is fully compatible* with the scikit-learn API. scikit-clean classifiers can be used as a drop-in replacement for scikit-learn classifiers.
In the simple example below, we corrupt a dataset using artificial label noise, and then train a model using robust logistic regression:
```
from sklearn.datasets import make_classification, load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score
from skclean.simulate_noise import flip_labels_uniform, UniformNoise, CCNoise
from skclean.models import RobustLR
from skclean.pipeline import Pipeline, make_pipeline
SEED = 42
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.30, random_state=SEED)
y_train_noisy = flip_labels_uniform(y_train, .3, random_state=SEED) # Flip labels of 30% samples
clf = RobustLR(random_state=SEED).fit(X_train,y_train_noisy)
print(clf.score(X_test, y_test))
```
You can use scikit-learn's built-in tools with scikit-clean. For example, let's tune one hyper-parameter of `RobustLR` used above, and evaluate the resulting model using cross-validation:
```
from sklearn.model_selection import GridSearchCV, cross_val_score
grid_clf = GridSearchCV(RobustLR(),{'PN':[.1,.2,.4]},cv=3)
cross_val_score(grid_clf, X, y, cv=5, n_jobs=5).mean()  # Note: we're training & testing on clean data here for simplicity
```
### Algorithms
Algorithms implemented in scikit-clean can be broadly categorized into two types. First we have ones that are *inherently* robust to label noise. They often modify or replace the loss functions of existing well-known algorithms like SVM, Logistic Regression etc., and do not explicitly try to detect mislabeled samples in the data. `RobustLR` used above is a robust variant of regular Logistic Regression. These methods can currently be found in the `skclean.models` module, though this part of the API is likely to change in the future as the number of implementations grows.
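For instance, `RobustForest` (used again in the evaluation notebook) lives in the same module. A minimal sketch, reusing the noisy train/test split from above:
```
from skclean.models import RobustForest

# Another inherently robust model shipped in skclean.models
forest = RobustForest(n_estimators=100).fit(X_train, y_train_noisy)
print(forest.score(X_test, y_test))
```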
On the other hand we have *Dataset-focused* algorithms: their focus is more on identifying or cleaning the dataset, and they usually rely on other existing classifiers to do the actual learning. The majority of current scikit-clean implementations fall under this category, so we describe them in a bit more detail in the next section.
### Detectors and Handlers
Many robust algorithms designed to handle label noise can be essentially broken down into two sequential steps: detect samples which have (probably) been mislabeled, and use that information to build robust meta-classifiers on top of existing classifiers. This allows us to easily create new robust classifiers by mixing the noise detector of one paper with the noise-handler of another.
In scikit-clean, the classes that implement those two tasks are called `Detector` and `Handler` respectively. During training, `Detector` will find for each sample the probability that it has been *correctly* labeled (i.e. `conf_score`). `Handler` can use that information in many ways, like removing likely noisy instances from the dataset (the `Filter` class), or assigning more weight to reliable samples (the `example_weighting` module) etc.
Let's rewrite the above example. We'll use `KDN`: a simple neighborhood-based noise detector, and `WeightedBagging`: a variant of regular bagging that takes sample reliability into account.
```
from skclean.detectors import KDN
from skclean.handlers import WeightedBagging
conf_score = KDN(n_neighbors=5).detect(X_train, y_train_noisy)
clf = WeightedBagging(n_estimators=50).fit(X_train, y_train_noisy, conf_score)
print(clf.score(X_test, y_test))
```
The above code is fine for a very simple workflow. However, real-world data modeling usually includes lots of sequential steps for preprocessing, feature selection etc. Moreover, hyper-parameter tuning and cross-validation further complicate the process, which, among other things, frequently leads to [Information Leakage](https://machinelearningmastery.com/data-leakage-machine-learning/). An elegant solution for managing this complexity is `Pipeline`.
### Pipeline
`scikit-clean` provides a customized `Pipeline` to manage modeling that involves lots of sequential steps, including noise detection and handling. Below is an example of such a `Pipeline`. At the very first step, we introduce some label noise on the training set. Some preprocessing like scaling and feature selection comes next. The final two steps are noise detection and handling respectively; these two must always be the last steps.
```
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import VarianceThreshold
from sklearn.svm import SVC
from sklearn.model_selection import ShuffleSplit, StratifiedKFold
from skclean.handlers import Filter
from skclean.pipeline import Pipeline # Importing from skclean, not sklearn
clf = Pipeline([
('scale', StandardScaler()), # Scale features
('feat_sel', VarianceThreshold(.2)), # Feature selection
('detector', KDN()), # Detect mislabeled samples
('handler', Filter(SVC())), # Filter out likely mislabeled samples and then train a SVM
])
inner_cv = ShuffleSplit(n_splits=5,test_size=.2,random_state=1)
outer_cv = StratifiedKFold(n_splits=5,shuffle=True,random_state=2)
clf_g = GridSearchCV(clf,{'detector__n_neighbors':[2,5,10]},cv=inner_cv)
n_clf_g = make_pipeline(UniformNoise(.3),clf_g) # Create label noise at the very first step
print(cross_val_score(n_clf_g, X, y, cv=outer_cv).mean()) # 5-fold cross validation
```
There are two important things to note here. First, don't use scikit-learn's `Pipeline`; import from `skclean.pipeline` instead.
Secondly, a group of noise handlers are iterative: they call the `detect` method of noise detectors multiple times (`CLNI`, `IPF` etc.). Since they don't exactly follow the sequential noise detection->handling pattern, you must pass the detector in the constructor of those `Handler`s.
```
from skclean.handlers import CLNI
clf = CLNI(classifier=SVC(), detector=KDN())
```
All `Handler`s *can* be instantiated this way, but this is a *must* for iterative ones. (Use the `iterative` attribute to check; see the short sketch below.)
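A small sketch of checking that attribute, assuming (as stated above) that non-iterative handlers like `Filter` also accept the detector through their constructor:
```
from sklearn.svm import SVC
from skclean.detectors import KDN
from skclean.handlers import Filter, CLNI

# Filter works either way; CLNI is iterative, so its detector must go in the constructor
for handler in (Filter(SVC(), detector=KDN()), CLNI(classifier=SVC(), detector=KDN())):
    print(type(handler).__name__, 'iterative:', handler.iterative)
```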
### Noise Simulation
Remember that, as a library written primarily for researchers, scikit-clean expects you to have access to "true" or "clean" labels, and to introduce noise into the training data by flipping those true labels. `scikit-clean` provides several commonly used noise simulators; take a look at [this example](./Noise%20SImulators.ipynb) to understand their differences. Here we mainly focus on how to use them.
Perhaps the most important thing to remember is that noise simulation should usually be the very first thing you do to your training data. In the code below, `GridSearchCV` creates a validation set *before* the noise is introduced and uses clean labels for the inner loop, leading to information leakage. This is probably NOT what you want.
```
clf = Pipeline([
('simulate_noise', UniformNoise(.3)), # Create label noise at first step
('scale', StandardScaler()), # Scale features
('feat_sel', VarianceThreshold(.2)), # Feature selection
('detector', KDN()), # Detect mislabeled samples
('handler', Filter(SVC())), # Filter out likely mislabeled samples and then train a SVM
])
clf_g = GridSearchCV(clf,{'detector__n_neighbors':[2,5,10]},cv=inner_cv)
print(cross_val_score(clf_g, X, y, cv=outer_cv).mean()) # 5-fold cross validation
```
You can also use noise simulators outside a `Pipeline`; all `NoiseSimulator` classes are simple wrappers around functions. `UniformNoise` is a wrapper of `flip_labels_uniform`, as the first example of this document shows.
### Datasets & Performance Evaluation
Unlike deep learning datasets, which tend to be massive in size, tabular datasets are usually a lot smaller. Any new algorithm is therefore compared against baselines using multiple datasets. The `skclean.utils` module provides two important functions to help researchers with these tasks:
1. `load_data`: to load several small to medium-sized preprocessed datasets on memory.
2. `compare`: This function takes several algorithms and datasets, and outputs the performances in a CSV file. It supports automatic resumption from partially computed results, especially helpful for comparing long-running, computationally expensive methods on big datasets.
Take a look at [this notebook](./Evaluating%20Robust%20Methods.ipynb) to see how they are used.
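A condensed sketch of the two functions, mirroring the calls in the evaluation notebook linked above (the dataset names and parameters are just examples):
```
from sklearn.model_selection import StratifiedKFold
from skclean.models import RobustForest
from skclean.utils import load_data, compare

X, y = load_data('breast_cancer', stats=True)       # one of the bundled datasets

# Evaluate one (or more) models on several datasets; df_path=None skips writing a CSV
results = compare({'RobustForest': RobustForest(n_estimators=100)},
                  ['iris', 'breast_cancer'],
                  cv=StratifiedKFold(n_splits=5, shuffle=True, random_state=0),
                  df_path=None, random_state=0, scoring='accuracy',
                  n_jobs=2, verbose=False)
print(results)
```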
| /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/doc/examples/Introduction to Scikit-clean.ipynb | 0.844601 | 0.986585 | Introduction to Scikit-clean.ipynb | pypi |
## Evaluating Detectors
In `scikit-clean`, a `Detector` only identifies/detects the mislabelled samples. It's not a complete classifier (rather a part of one), so the procedure for evaluating it is different.
We can view a noise detector as a binary classifier: its job is to provide a probability denoting whether a sample is "mislabelled" or "clean". We can therefore use binary classification metrics that work on continuous output: Brier score, log loss, area under the ROC curve etc.
```
# Suppress warnings, you should remove this before modifying this notebook
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.metrics import brier_score_loss, log_loss, roc_auc_score
from skclean.tests.common_stuff import NOISE_DETECTORS # All noise detectors in skclean
from skclean.utils import load_data
from skclean.detectors.base import BaseDetector
from skclean.simulate_noise import flip_labels_uniform
class DummyDetector(BaseDetector):
def detect(self, X, y):
return np.random.uniform(size=y.shape)
from skclean.detectors import KDN, RkDN
class WkDN:
def detect(self,X,y):
return .5 * KDN().detect(X,y) + .5 * RkDN().detect(X,y)
ALL_DETECTORS = [DummyDetector(), WkDN()] + NOISE_DETECTORS
X, y = make_classification(1800, 10)
#X, y = load_data('breast_cancer')
yn = flip_labels_uniform(y, .3) # 30% label noise
clean_idx = (y==yn) # Indices of correctly labelled samples
df = pd.DataFrame()
for d in ALL_DETECTORS:
conf_score = d.detect(X, yn)
for name,loss_func in zip(['log','brier','roc'],
[log_loss, brier_score_loss, roc_auc_score]):
loss = loss_func(clean_idx, conf_score)
df.at[d.__class__.__name__,name] = np.round(loss,3)
df
```
Note that in the case of `roc_auc_score`, higher is better, whereas for log loss and the Brier score lower is better.
| /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/doc/examples/Evaluating Detectors.ipynb | 0.634996 | 0.881564 | Evaluating Detectors.ipynb | pypi |
## Evaluating Robust Models
The goal of this notebook is to show how to compare several methods across several datasets. This will also serve as an introduction to two important `scikit-clean` functions: `load_data` and `compare`.
We'll (roughly) implement the core idea of 3 existing papers on robust classification in the presence of label noise, and see how they compare on our 4 datasets readily available in `scikit-clean`. Those papers are:
1. Forest-type Regression with General Losses and Robust Forest - ICML'17 (`RobustForest` below in `MODELS` dictionary)
2. An Ensemble Generation Method Based on Instance Hardness- IJCNN'18 (`EGIH`)
3. Classification with label noise- a Markov chain sampling framework - ECML-PKDD'18 (`MCS`)
```
import os
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score, make_scorer
from skclean.detectors import KDN, InstanceHardness, MCS
from skclean.handlers import WeightedBagging, SampleWeight, Filter
from skclean.models import RobustForest
from skclean.pipeline import Pipeline, make_pipeline
from skclean.utils import load_data, compare
import seaborn as sns
```
We'll use 4 datasets here, all of which come preloaded with `scikit-clean`. If you want to load new datasets through this function, put the CSV-formatted dataset file in the `datasets` folder (use `os.path.dirname(skclean.datasets.__file__)` to get its location). Make sure the labels are in the last column and the features are all real numbers; check the source code of `load_data` for more details. A rough sketch of this convention is shown below.
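A hedged sketch of that file convention -- the dataset name `my_data` is hypothetical, and details like header handling aren't shown here, so check `load_data`'s source as suggested:
```
import os
import numpy as np
import skclean.datasets
from sklearn.datasets import make_classification

# Build a toy table: features first, label as the last column, all numeric
X_new, y_new = make_classification(n_samples=100, n_features=5, random_state=0)
table = np.hstack([X_new, y_new.reshape(-1, 1)])

folder = os.path.dirname(skclean.datasets.__file__)   # the `datasets` folder mentioned above
np.savetxt(os.path.join(folder, 'my_data.csv'), table, delimiter=',')
# X_new, y_new = load_data('my_data')                 # should then be loadable by name
```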
```
DATASETS = ['iris', 'breast_cancer', 'optdigits', 'spambase']
SEED = 42 # For reproducibility
N_JOBS = 8 # No of cpu cores to use in parallel
CV = StratifiedKFold(n_splits=5, shuffle=True, random_state=SEED+1)
SCORING = 'accuracy'
MODELS = {
'RobustForest': RobustForest(n_estimators=100),
'EGIH':make_pipeline(KDN(), WeightedBagging()),
'MCS': make_pipeline(MCS(), SampleWeight(LogisticRegression()))
}
```
We'll create 30% uniform label noise for all our datasets using `UniformNoise`. Note that we're treating noise simulation as a data transformation step and attaching it before our models in a pipeline. This way, noise only affects the training set, and testing is performed on clean labels.
```
from skclean.simulate_noise import UniformNoise
N_MODELS = {}
for name, clf in MODELS.items():
N_MODELS[name] = make_pipeline(UniformNoise(.3), clf)
```
`scikit-clean` models are compatible with the `scikit-learn` API, so for evaluation we'll use scikit-learn's `cross_val_score` function: this will create multiple train/test splits according to the `CV` variable we defined at the beginning, and compute performance. It also allows easy parallelization using `n_jobs`.
```
from time import perf_counter # Wall time
from sklearn.model_selection import cross_val_score
for data_name in DATASETS:
X,y = load_data(data_name, stats=True)
for clf_name, clf in N_MODELS.items():
start_at = perf_counter()
r = cross_val_score(clf, X, y, cv=CV, n_jobs=N_JOBS, scoring=SCORING).mean()
print(f"{data_name}, {clf_name}: {r:.4f} in {perf_counter()-start_at:.2f} secs")
print()
```
The `compare` function does basically the same thing as the cell above. In addition, it stores the results in CSV format, with datasets in rows and algorithms in columns, and it can automatically resume after an interruption.
```
%%time
result_path = "noisy.csv"
dfn = compare(N_MODELS, DATASETS, cv=CV, df_path=result_path, random_state=SEED,
scoring=SCORING,n_jobs=N_JOBS, verbose=False)
dfn
```
Let's compare the values above with ones computed on clean labels:
```
dfc = compare(MODELS, DATASETS, cv=CV, df_path=None, random_state=SEED,
scoring=SCORING,n_jobs=N_JOBS, verbose=False)
dfc
dfc = dfc.assign(label='clean')
dfn = dfn.assign(label='noisy')
df = pd.concat([dfc,dfn]).melt(id_vars='label')
df.rename(columns={'variable':'classifier','value':SCORING},inplace=True)
sns.boxplot(data=df,hue='label',x='classifier',y=SCORING,width=.4);
os.remove(result_path)
```
Note: This is a simple example, not a replication study, and shouldn't be taken as such.
| /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/doc/examples/Evaluating Robust Methods.ipynb | 0.447943 | 0.970882 | Evaluating Robust Methods.ipynb | pypi |
## Introduction to Scikit-clean
`scikit-clean` is a python ML library for classification in the presence of label noise. Aimed primarily at researchers, this provides implementations of several state-of-the-art algorithms, along with tools to simulate artificial noise, create complex pipelines and evaluate them.
### Example Usage
Before we dive into the details, let's take a quick look to see how it works. scikit-clean, as the name implies, is built on top of scikit-learn and is fully compatible* with scikit-learn API. scikit-clean classifiers can be used as a drop in replacement for scikit-learn classifiers.
In the simple example below, we corrupt a dataset using artifical label noise, and then train a model using robust logistic regression:
```
from sklearn.datasets import make_classification, load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score
from skclean.simulate_noise import flip_labels_uniform, UniformNoise, CCNoise
from skclean.models import RobustLR
from skclean.pipeline import Pipeline, make_pipeline
SEED = 42
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.30, random_state=SEED)
y_train_noisy = flip_labels_uniform(y_train, .3, random_state=SEED) # Flip labels of 30% samples
clf = RobustLR(random_state=SEED).fit(X_train,y_train_noisy)
print(clf.score(X_test, y_test))
```
You can use scikit-learn's built in tools with scikit-clean. For example, let's tune one hyper-parameter of `RobustLR` used above, and evaluate the resulting model using cross-validation:
```
from sklearn.model_selection import GridSearchCV, cross_val_score
grid_clf = GridSearchCV(RobustLR(),{'PN':[.1,.2,.4]},cv=3)
cross_val_score(grid_clf, X, y, cv=5, n_jobs=5).mean() # Note: here we're training & testing here on clean data for simplicity
```
### Algorithms
Algorithms implemented in scikit-clean can be broadly categorized into two types. First we have ones that are *inherently* robust to label noise. They often modify or replace the loss functions of existing well known algorithms like SVM, Logistic Regression etc. and do not explcitly try to detect mislabeled samples in data. `RobustLR` used above is a robust variant of regular Logistic Regression. These methods can currently be found in `skclean.models` module, though this part of API is likely to change in future as no. of implementations grow.
On the other hand we have *Dataset-focused* algorithms: their focus is more on identifying or cleaning the dataset, they usually rely on other existing classifiers to do the actual learning. Majority of current scikit-clean implementations fall under this category, so we describe them in a bit more detail in next section.
### Detectors and Handlers
Many robust algorithms designed to handle label noise can be essentially broken down to two sequential steps: detect samples which has (probably) been mislabeled, and use that information to build robust meta classifiers on top of existing classifiers. This allows us to easily create new robust classifiers by mixing the noise detector of one paper with the noise-handler of another.
In scikit-clean, the classes that implement those two tasks are called `Detector` and `Handler` respectively. During training, `Detector` will find for each sample the probability that it has been *correctly* labeled (i.e. `conf_score`). `Handler` can use that information in many ways, like removing likely noisy instances from dataset (`Filtering` class), or assigning more weight on reliable samples (`example_weighting` module) etc.
Let's rewrite the above example. We'll use `KDN`: a simple neighborhood-based noise detector, and `WeightedBagging`: a variant of regular bagging that takes sample reliability into account.
```
from skclean.detectors import KDN
from skclean.handlers import WeightedBagging
conf_score = KDN(n_neighbors=5).detect(X_train, y_train_noisy)
clf = WeightedBagging(n_estimators=50).fit(X_train, y_train_noisy, conf_score)
print(clf.score(X_test, y_test))
```
The above code is fine for very simple workflow. However, real world data modeling usually includes lots of sequential steps for preprocesing, feature selection etc. Moreover, hyper-paramter tuning, cross-validation further complicates the process, which, among other things, frequently leads to [Information Leakage](https://machinelearningmastery.com/data-leakage-machine-learning/). An elegant solution to this complexity management is `Pipeline`.
### Pipeline
`scikit-clean` provides a customized `Pipeline` to manage modeling which involves lots of sequential steps, including noise detection and handling. Below is an example of `pipeline`. At the very first step, we introduce some label noise on training set. Some preprocessing like scaling and feature selection comes next. The last two steps are noise detection and handling respectively, these two must always be the last steps.
```
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import VarianceThreshold
from sklearn.svm import SVC
from sklearn.model_selection import ShuffleSplit, StratifiedKFold
from skclean.handlers import Filter
from skclean.simulate_noise import UniformNoise
from skclean.pipeline import Pipeline, make_pipeline   # Importing from skclean, not sklearn
clf = Pipeline([
('scale', StandardScaler()), # Scale features
('feat_sel', VarianceThreshold(.2)), # Feature selection
('detector', KDN()), # Detect mislabeled samples
('handler', Filter(SVC())), # Filter out likely mislabeled samples and then train a SVM
])
inner_cv = ShuffleSplit(n_splits=5,test_size=.2,random_state=1)
outer_cv = StratifiedKFold(n_splits=5,shuffle=True,random_state=2)
clf_g = GridSearchCV(clf,{'detector__n_neighbors':[2,5,10]},cv=inner_cv)
n_clf_g = make_pipeline(UniformNoise(.3),clf_g) # Create label noise at the very first step
print(cross_val_score(n_clf_g, X, y, cv=outer_cv).mean()) # 5-fold cross validation
```
There are two important things to note here. First, don't use the `Pipeline` of `scikit-learn`; import it from `skclean.pipeline` instead.
Secondly, a group of noise handlers are iterative: they call the `detect` method of noise detectors multiple times (`CLNI`, `IPF` etc.). Since they don't exactly follow the sequential noise detection->handling pattern, you must pass the detector in the constructor of those `Handler`s.
```
from skclean.handlers import CLNI
clf = CLNI(classifier=SVC(), detector=KDN())
```
All `Handler`s *can* be instantiated this way, but this is a *must* for iterative ones. (Use the `iterative` attribute to check.)
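For example, here is a minimal check using classes already imported in this notebook:
```
from skclean.handlers import Filter, CLNI

f = Filter(SVC(), detector=KDN())            # non-iterative; detector in constructor is optional
c = CLNI(classifier=SVC(), detector=KDN())   # iterative; detector is required
print(f.iterative, c.iterative)              # False True
```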
### Noise Simulation
Remember that, as a library written primarily for researchers, scikit-clean expects you to have access to "true" or "clean" labels, and then introduce noise to the training data by flipping those true labels. `scikit-clean` provides several commonly used noise simulators - take a look at [this example](./Noise%20SImulators.ipynb) to understand their differences. Here we mainly focus on how to use them.
Perhaps the most important thing to remember is that noise simulation should usually be the very first thing you do to your training data. In the code below, `GridSearchCV` creates a validation set *before* introducing noise and uses clean labels for the inner loop, leading to information leakage. This is probably NOT what you want.
```
clf = Pipeline([
('simulate_noise', UniformNoise(.3)), # Create label noise at first step
('scale', StandardScaler()), # Scale features
('feat_sel', VarianceThreshold(.2)), # Feature selection
('detector', KDN()), # Detect mislabeled samples
('handler', Filter(SVC())), # Filter out likely mislabeled samples and then train a SVM
])
clf_g = GridSearchCV(clf,{'detector__n_neighbors':[2,5,10]},cv=inner_cv)
print(cross_val_score(clf_g, X, y, cv=outer_cv).mean()) # 5-fold cross validation
```
You can use noise simulators outside a `Pipeline`; all `NoiseSimulator` classes are simple wrappers around functions. `UniformNoise` is a wrapper of `flip_labels_uniform`, as the first example of this document shows.
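For example, here is a minimal sketch of using them directly, assuming `X` and `y` are already in memory:
```
from skclean.simulate_noise import UniformNoise, flip_labels_uniform

y_noisy = flip_labels_uniform(y, .3)                 # plain function
X, y_noisy = UniformNoise(.3).simulate_noise(X, y)   # equivalent wrapper class
```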
### Datasets & Performance Evaluation
Unlike deep learning datasets, which tend to be massive in size, tabular datasets are usually a lot smaller. A new algorithm is therefore typically compared against baselines on multiple datasets. The `skclean.utils` module provides two important functions to help researchers with these tasks:
1. `load_data`: loads several small to medium-sized preprocessed datasets into memory.
2. `compare`: takes several algorithms and datasets, and outputs the performances in a CSV file. It supports automatic resumption of partially computed results, which is especially helpful for comparing long-running, computationally expensive methods on big datasets.
Take a look at [this notebook](./Evaluating%20Robust%20Methods.ipynb) to see how they are used.
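As a minimal sketch (the chosen model, dataset names and the `results.csv` path below are only illustrative):
```
from sklearn.model_selection import StratifiedKFold
from skclean.utils import load_data, compare
from skclean.models import RobustForest

X, y = load_data('iris', stats=True)   # one of the bundled datasets
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)

# Results land in results.csv; an interrupted run resumes from that file
df = compare({'RF': RobustForest()}, ['iris', 'breast_cancer'], cv=cv,
             df_path='results.csv', random_state=0, scoring='accuracy',
             n_jobs=1, verbose=False)
```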
| /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/doc/_build/html/examples/Introduction to Scikit-clean.ipynb | 0.844601 | 0.986585 | Introduction to Scikit-clean.ipynb | pypi |
## Evaluating Detectors
In `scikit-clean`, a `Detector` only identifies/detects the mislabelled samples. It's not a complete classifier (rather a part of one), so the procedure for evaluating it is different.
We can view a noise detector as a binary classifier: its job is to provide a probability denoting whether a sample is "mislabelled" or "clean". We can therefore use binary classification metrics that work on continuous output: Brier score, log loss, area under the ROC curve etc.
```
# Suppress warnings, you should remove this before modifying this notebook
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.metrics import brier_score_loss, log_loss, roc_auc_score
from skclean.tests.common_stuff import NOISE_DETECTORS # All noise detectors in skclean
from skclean.utils import load_data
from skclean.detectors.base import BaseDetector
from skclean.simulate_noise import flip_labels_uniform
class DummyDetector(BaseDetector):
def detect(self, X, y):
return np.random.uniform(size=y.shape)
from skclean.detectors import KDN, RkDN
class WkDN:
def detect(self,X,y):
return .5 * KDN().detect(X,y) + .5 * RkDN().detect(X,y)
ALL_DETECTORS = [DummyDetector(), WkDN()] + NOISE_DETECTORS
X, y = make_classification(1800, 10)
#X, y = load_data('breast_cancer')
yn = flip_labels_uniform(y, .3) # 30% label noise
clean_idx = (y==yn) # Indices of correctly labelled samples
df = pd.DataFrame()
for d in ALL_DETECTORS:
conf_score = d.detect(X, yn)
for name,loss_func in zip(['log','brier','roc'],
[log_loss, brier_score_loss, roc_auc_score]):
loss = loss_func(clean_idx, conf_score)
df.at[d.__class__.__name__,name] = np.round(loss,3)
df
```
Note that in the case of `roc_auc_score`, higher is better; for the two loss functions, lower is better.
| /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/doc/_build/html/examples/Evaluating Detectors.ipynb | 0.634996 | 0.881564 | Evaluating Detectors.ipynb | pypi |
## Evaluating Robust Models
The goal of this notebook is to show how to compare several methods across several datasets. This will also serve as an introduction to two important `scikit-clean` functions: `load_data` and `compare`.
We'll (roughly) implement the core idea of 3 existing papers on robust classification in the presence of label noise, and see how they compare on 4 datasets readily available in `scikit-clean`. Those papers are:
1. Forest-type Regression with General Losses and Robust Forest - ICML'17 (`RobustForest` below in `MODELS` dictionary)
2. An Ensemble Generation Method Based on Instance Hardness- IJCNN'18 (`EGIH`)
3. Classification with label noise- a Markov chain sampling framework - ECML-PKDD'18 (`MCS`)
```
import os
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score, make_scorer
from skclean.detectors import KDN, InstanceHardness, MCS
from skclean.handlers import WeightedBagging, SampleWeight, Filter
from skclean.models import RobustForest
from skclean.pipeline import Pipeline, make_pipeline
from skclean.utils import load_data, compare
import seaborn as sns
```
We'll use 4 datasets here; all come preloaded with `scikit-clean`. If you want to load new datasets through this function, put the CSV-formatted dataset file in the `datasets` folder (use `os.path.dirname(skclean.datasets.__file__)` to get its location). Make sure labels are in the last column, and features are all real numbers. Check the source code of `load_data` for more details.
```
DATASETS = ['iris', 'breast_cancer', 'optdigits', 'spambase']
SEED = 42 # For reproducibility
N_JOBS = 8 # No of cpu cores to use in parallel
CV = StratifiedKFold(n_splits=5, shuffle=True, random_state=SEED+1)
SCORING = 'accuracy'
MODELS = {
'RobustForest': RobustForest(n_estimators=100),
'EGIH':make_pipeline(KDN(), WeightedBagging()),
'MCS': make_pipeline(MCS(), SampleWeight(LogisticRegression()))
}
```
We'll create 30% uniform label noise for all our datasets using `UniformNoise`. Note that we're treating noise simulation as a data transformation step and attaching it before our models in a pipeline. This way, noise will only impact the training set, and testing will be performed on clean labels.
```
from skclean.simulate_noise import UniformNoise
N_MODELS = {}
for name, clf in MODELS.items():
N_MODELS[name] = make_pipeline(UniformNoise(.3), clf)
```
`scikit-clean` models are compatible with the `scikit-learn` API. So for evaluation, we'll use scikit-learn's `cross_val_score` function - this will create multiple train/test splits according to the `CV` variable we defined at the beginning, and compute performance. It also makes it easy to parallelize the code using `n_jobs`.
```
from time import perf_counter # Wall time
from sklearn.model_selection import cross_val_score
for data_name in DATASETS:
X,y = load_data(data_name, stats=True)
for clf_name, clf in N_MODELS.items():
start_at = perf_counter()
r = cross_val_score(clf, X, y, cv=CV, n_jobs=N_JOBS, scoring=SCORING).mean()
print(f"{data_name}, {clf_name}: {r:.4f} in {perf_counter()-start_at:.2f} secs")
print()
```
The `compare` function does basically the same thing as the above cell. In addition, it stores the results in CSV format, with datasets in rows and algorithms in columns, and it can automatically resume after interruption.
```
%%time
result_path = "noisy.csv"
dfn = compare(N_MODELS, DATASETS, cv=CV, df_path=result_path, random_state=SEED,
scoring=SCORING,n_jobs=N_JOBS, verbose=False)
dfn
```
Let's compare above values with ones computed with clean labels:
```
dfc = compare(MODELS, DATASETS, cv=CV, df_path=None, random_state=SEED,
scoring=SCORING,n_jobs=N_JOBS, verbose=False)
dfc
dfc = dfc.assign(label='clean')
dfn = dfn.assign(label='noisy')
df = pd.concat([dfc,dfn]).melt(id_vars='label')
df.rename(columns={'variable':'classifier','value':SCORING},inplace=True)
sns.boxplot(data=df,hue='label',x='classifier',y=SCORING,width=.4);
os.remove(result_path)
```
Note: This is a simple example, not a replication study, and shouldn't be taken as such.
| /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/doc/_build/html/examples/Evaluating Robust Methods.ipynb | 0.447943 | 0.970882 | Evaluating Robust Methods.ipynb | pypi |
import numpy as np
from scipy.stats import entropy
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn.preprocessing import minmax_scale
from sklearn.utils import check_random_state
from skclean.utils.noise_generation import gen_simple_noise_mat
def _flip_idx(Y, target_idx, random_state=None):
"""Flip the labels of `target_idx` to random label"""
labels = np.unique(Y)
random_state = check_random_state(random_state)
target_mask = np.full(Y.shape, False, dtype=bool)
target_mask[target_idx] = 1
yn = Y.copy()
mask = target_mask.copy()
while True:
left = mask.sum()
if left == 0:
break
new_labels = random_state.choice(labels, size=left)
yn[mask] = new_labels
mask = mask & (yn == Y)
return yn
def flip_labels_uniform(Y: np.ndarray, noise_level: float, *, random_state=None, exact=True):
"""
All labels are equally likely to be flipped, irrespective of their true \
label or feature. The new (noisy) label is also chosen with uniform \
probability from alternative class labels.
Parameters
-----------------
Y: np.ndarray
1-D array of labels
noise_level: float
percentage of labels to flip
random_state : int, default=None
Set this value for reproducibility
exact: bool default=True
If True, the generated noise will be as close to `noise_level` as possible.
The approximate version (i.e. exact=False) is faster but less accurate.
Returns
-----------
Yn: np.ndarray
1-D array of flipped labels
"""
if not exact:
labels = np.unique(Y)
n_labels = len(labels)
lcm = np.full((n_labels, n_labels), noise_level / (n_labels - 1))
np.fill_diagonal(lcm, 1 - noise_level)
return flip_labels_cc(Y, lcm, random_state=random_state)
random_state = check_random_state(random_state)
nns = int(len(Y) * noise_level)
target_idx = random_state.choice(len(Y), size=nns, replace=False)
yn = _flip_idx(Y, target_idx, random_state=random_state)
assert (yn[target_idx] == Y[target_idx]).sum() == 0
return yn
# TODO: create an *exact* version of this
def flip_labels_cc(y, lcm, random_state=None):
"""
Class Conditional Noise: general version of flip_labels_uniform, a \
sample's probability of getting mislabelled and its new (noisy) \
label depend on its true label, but not on its features.
Parameters
-----------------
Y: np.ndarray
1-D array of labels
lcm: np.ndarray
Short for Label Confusion Matrix. `lcm[i,j]` denotes the probability \
of a sample with true label `i` getting mislabelled as `j`.
random_state : int, default=None
Set this value for reproducibility
Returns
-----------
Yn: np.ndarray
1-D array of flipped labels
"""
lcm = np.array(lcm)
lcm = lcm / lcm.sum(axis=1).reshape(-1, 1) # Each row sums to 1
a = lcm[y]
s = a.cumsum(axis=1)
random_state = check_random_state(random_state)
r = random_state.rand(a.shape[0])[:, None]
yn = (s > r).argmax(axis=1)
return yn
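# Illustrative example (the matrix values below are arbitrary): with 3 classes,
# the row for true label 0 keeps the label with probability .7, flips it to 1
# with probability .2, and flips it to 2 with probability .1:
#     lcm = [[.70, .20, .10],
#            [.10, .80, .10],
#            [.05, .05, .90]]
#     yn = flip_labels_cc(y, lcm, random_state=0)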
# -----------------------------------------------------------------
class NoiseSimulator(BaseEstimator, TransformerMixin):
def __init__(self, random_state=None):
self.random_state = random_state
def simulate_noise(self, X, y):
raise NotImplementedError("Attempt to instantiate abstract class")
def fit_transform(self, X, y=None, **fit_params):
return self.simulate_noise(X, y)
def transform(self, X):
return X
class UniformNoise(NoiseSimulator):
"""
All labels are equally likely to be flipped, irrespective of their true \
label or feature. The new (noisy) label is also chosen with uniform \
probability from alternative class labels. Simple wrapper around \
`flip_labels_uniform` mainly for use in `Pipeline`.
Parameters
-----------------
noise_level: float
percentage of labels to flip
exact: bool default=True
If True, the generated noise will be as close to `noise_level` as possible.
The approximate version (i.e. exact=False) is faster but less accurate.
random_state : int, default=None
Set this value for reproducibility
"""
def __init__(self, noise_level, exact=True, random_state=None):
super().__init__(random_state=random_state)
self.noise_level = noise_level
self.exact = exact
def simulate_noise(self, X, y):
X, y = self._validate_data(X, y)
yn = flip_labels_uniform(y, self.noise_level, random_state=self.random_state,
exact=self.exact)
return X, yn
class CCNoise(NoiseSimulator):
"""
Class Conditional Noise: general version of `flip_labels_uniform`- \
a sample's probability of getting mislabelled and its new (noisy) \
label depend on its true label, but not on its features. Simple wrapper \
around `flip_labels_cc` mainly for use in `Pipeline`.
Parameters
-----------------
lcm: np.ndarray
Short for Label Confusion Matrix. `lcm[i,j]` denotes the probability \
of a sample with true label `i` getting mislabelled as `j`.
random_state : int, default=None
Set this value for reproducibility
"""
def __init__(self, lcm=None, random_state=None):
super().__init__(random_state=random_state)
self.lcm = lcm
def simulate_noise(self, X, y):
lcm = self.lcm
if self.lcm is None or isinstance(self.lcm, float):
noise_level = self.lcm or .2
K = len(np.unique(y))
lcm = gen_simple_noise_mat(K, noise_level, self.random_state)
X, y = self._validate_data(X, y)
yn = flip_labels_cc(y, lcm, self.random_state)
return X, yn
class BCNoise(NoiseSimulator):
"""
Boundary Consistent Noise- instances closer to the decision boundary are more likely to \
be noisy. In this implementation, "closeness" to the decision boundary of a \
sample is measured using the entropy of its class probabilities. A
classifier with support for well calibrated class probabilities (i.e. \
`predict_proba` of scikit-learn API) is required.
Only supports binary classification for now. See :cite:`idnoise18` for \
details.
Parameters
-------------------
classifier : object
A classifier instance supporting sklearn API.
noise_level: float
percentage of labels to flip
random_state : int, default=None
Set this value for reproducibility
"""
def __init__(self, classifier, noise_level, random_state=None):
self.classifier = classifier
self.noise_level = noise_level
self.random_state = random_state
def simulate_noise(self, X, y):
X, y = self._validate_data(X, y)
rns = check_random_state(self.random_state)
if 'random_state' in self.classifier.get_params():
self.classifier.set_params(random_state=rns.randint(10**3))
probs = self.classifier.fit(X, y).predict_proba(X)
e = entropy(probs, axis=1) + .01 # Otherwise 0-entropy samples would never be selected
e = e / e.max()
nns = int(len(y) * self.noise_level)
target_idx = rns.choice(len(y), size=nns, replace=False, p= e/e.sum())
return X, _flip_idx(y, target_idx, random_state=self.random_state) | /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/skclean/simulate_noise.py | 0.899096 | 0.57523 | simulate_noise.py | pypi |
import warnings
import numpy as np
from sklearn.base import ClassifierMixin, clone
from sklearn.model_selection import StratifiedKFold
from sklearn.utils import shuffle, check_random_state
from .base import BaseHandler
class Filter(BaseHandler, ClassifierMixin):
"""
Removes from dataset samples most likely to be noisy. Samples-to-be-removed
can be selected in two ways: either a specified percentage of samples
with lowest `conf_score`, or samples with lower `conf_score` than a
specified threshold.
Parameters
----------------
classifier: object
A classifier instance supporting sklearn API.
detector : `BaseDetector` or None, default=None
To compute `conf_score`. Set it to `None` only if `conf_score` is \
expected in `fit()` (e.g. when used inside a Pipeline with a \
`BaseDetector` preceding it). Otherwise a Detector must be supplied \
during instantiation.
threshold: float, default=.5
Samples with higher conf_score will be kept, rest will be filtered out. A \
value of .5 implies majority voting, whereas .99 (i.e. a value closer to, \
but less than 1.0) implies consensus voting.
frac_to_filter: float, default=None
Percentages of samples to filter out. Exactly one of either threshold or \
frac_to_filter must be set.
n_jobs : int, default=1
No of parallel cpu cores to use
random_state : int, default=None
Set this value for reproducibility
"""
def __init__(self, classifier, detector=None, threshold: float = .5,
frac_to_filter: float = None,
n_jobs=1, random_state=None):
super().__init__(classifier, detector, n_jobs=n_jobs, random_state=random_state)
self.threshold = threshold
self.frac_to_filter = frac_to_filter
def fit(self, X, y, conf_score=None):
X, y, conf_score = self._check_everything(X, y, conf_score)
if not self.threshold and not self.frac_to_filter:
raise ValueError("At least one of threshold or frac_to_filter must "
"be supplied")
if self.threshold is not None and self.frac_to_filter is not None:
raise ValueError("Both threshold and frac_to_filter can not be supplied "
"together, choose one.")
if self.frac_to_filter is None:
clean_idx = conf_score > self.threshold
else:
to_take = int(len(conf_score) * (1 - self.frac_to_filter))
clean_idx = np.argsort(-conf_score)[:to_take]
self.classifier.fit(X[clean_idx], y[clean_idx])
return self
# TODO: Support RandomState obj everywhere
# TODO: Change all "See X for details" to details/usage
class FilterCV(BaseHandler, ClassifierMixin):
"""
For quickly finding best cutoff point for Filter i.e. `threshold` or \
`fraction_to_filter`. This avoids recomputing `conf_score` for each \
hyper-parameter value as opposed to say GridSearchCV. See \
:cite:`twostage18` for details/usage.
Parameters
-------------------
classifier: object
A classifier instance supporting sklearn API.
detector : `BaseDetector` or None, default=None
To compute `conf_score`. Set it to `None` only if `conf_score` is \
expected in `fit()` (e.g. when used inside a Pipeline with a \
`BaseDetector` preceding it). Otherwise a Detector must be supplied \
during instantiation.
thresholds : list, default=None
A list of thresholds to choose the best one from
fracs_to_filter : list, default=None
A list of percentages to choose the best one from
cv : int, cross-validation generator or an iterable, default=None
If None, uses 5-fold stratified k-fold
if int, no of folds to use in stratified k-fold
n_jobs : int, default=1
No of parallel cpu cores to use
random_state : int, default=None
Set this value for reproducibility
"""
def __init__(self, classifier, detector=None, thresholds=None,
fracs_to_filter=None, cv=5,
n_jobs=1, random_state=None):
super().__init__(classifier, detector, n_jobs=n_jobs, random_state=random_state)
self.thresholds = thresholds
self.fracs_to_filter = fracs_to_filter
self.cv = StratifiedKFold(n_splits=cv) if isinstance(cv, int) else cv
def _get_clean_idx(self, point, conf_score):
if self.thresholds is not None:
return np.argwhere(conf_score > point).reshape(-1)
to_take = int(len(conf_score) * (1 - point))
return np.argsort(-conf_score)[:to_take]
def _find_cutoff(self, X, y, conf_score):
"""Find the best cutoff point (either threshold or frac_to_filter)
using cross_validation"""
self.cv.random_state = check_random_state(self.random_state).randint(10 ** 8)
cutoff_points = self.thresholds or self.fracs_to_filter
best_acc, best_cutoff = 0.0, cutoff_points[0]
for point in cutoff_points:
clean_idx = self._get_clean_idx(point, conf_score)
accs = []
for tr_idx, test_idx in self.cv.split(X, y):
train_idx = set(tr_idx).intersection(clean_idx)
train_idx = np.array(list(train_idx))
if len(train_idx) == 0:
warnings.warn("All training instances of identified as noisy, skipping this fold")
continue
clf = clone(self.classifier).fit(X[train_idx], y[train_idx])
acc = clf.score(X[test_idx], y[test_idx])
accs.append(acc)
avg_acc = sum(accs) / len(accs) if len(accs) > 0 else 0.0
print(point, avg_acc)
if avg_acc > best_acc:
best_acc = avg_acc
best_cutoff = point
return best_cutoff
def fit(self, X, y, conf_score=None):
X, y, conf_score = self._check_everything(X, y, conf_score)
cutoff = self._find_cutoff(X, y, conf_score)
clean_idx = self._get_clean_idx(cutoff, conf_score)
self.classifier.fit(X[clean_idx], y[clean_idx])
return self
# TODO: Support frac_to_filter, maybe using Filter? - nah, w/o FIlter
class CLNI(BaseHandler, ClassifierMixin):
"""
Iteratively detects and filters out mislabelled samples unless
a stopping criterion is met. See :cite:`clni11` for details/usage.
Parameters
-----------------
classifier: object
A classifier instance supporting sklearn API.
detector : `BaseDetector`
To compute `conf_score`. All iterative handlers require this.
threshold : float, default=.4
Samples with lower conf_score will be filtered out.
eps : float, default=.99
Stopping criterion for main detection->cleaning loop, indicates ratio \
of total number of mislabelled samples identified in two successive \
iterations.
n_jobs : int, default=1
No of parallel cpu cores to use
random_state : int, default=None
Set this value for reproducibility
"""
def __init__(self, classifier, detector, threshold=.4, eps=.99,
n_jobs=1, random_state=None):
super().__init__(classifier, detector, n_jobs=n_jobs, random_state=random_state)
self.threshold = threshold
self.eps = eps
def clean(self, X, y):
X, y, conf_score = self._check_everything(X, y, conf_score=None)
Xt, yt = X.copy(), y.copy()
while True:
clean_idx = conf_score > self.threshold
N = len(X) - len(Xt) # size of A_(j-1) i.e. no of noisy instances detected so far
Xa, ya = Xt[clean_idx], yt[clean_idx]
# If new labels have fewer classes than original...
if len(np.unique(y)) != len(np.unique(ya)):
warnings.warn("One or more of the classes has been completely "
"filtered out, stopping iteration.")
break
else:
Xt, yt = Xa, ya
if len(X) == len(Xt):
warnings.warn("No noisy sample found, stopping at first iteration")
break
if N / (len(X) - len(Xt)) >= self.eps:
break
conf_score = self.detector.detect(Xt, yt)
return Xt, yt
def fit(self, X, y, conf_score=None):
if conf_score is not None:
raise RuntimeWarning("conf_score will be ignored. Iterative handlers only use "
"Detector passed during construction.")
Xf, yf = self.clean(X, y)
self.classifier.fit(Xf, yf)
return self
@property
def iterative(self): # Does this Handler call Detector multiple times?
return True
# TODO: Throw this away? merge with CLNI?
class IPF(BaseHandler, ClassifierMixin):
"""
Iteratively detects and filters out mislabelled samples unless \
a stopping criterion is met. See :cite:`ipf07` for details/usage.
Differs slightly from `CLNI` in terms of how stopping criterion is \
implemented.
Parameters
-----------------
classifier: object
A classifier instance supporting sklearn API.
detector : `BaseDetector`
To compute `conf_score`. All iterative handlers require this.
n_estimator : int, default=5
No of classifiers used per filtering iteration in the original IPF algorithm.
max_iter : int, default=3
Maximum no of consecutive iterations with negligible change (less than \
1% of the original dataset filtered out) allowed before stopping.
n_jobs : int, default=1
No of parallel cpu cores to use
random_state : int, default=None
Set this value for reproducibility
"""
def __init__(self, classifier, detector, n_estimator=5, max_iter=3,
n_jobs=1, random_state=None):
super().__init__(classifier, detector, n_jobs=n_jobs, random_state=random_state)
self.n_estimator = n_estimator
self.max_iter = max_iter
def clean(self, X, y):
Xf, yf = shuffle(X, y, random_state=self.random_state)
orig_size = len(X)
n_iters_with_small_change = 0
tmp = 0
Xf, yf, conf_score = self._check_everything(Xf, yf, conf_score=None)
while n_iters_with_small_change < self.max_iter:
tmp += 1
cur_size = len(Xf)
clean_idx = conf_score > .5 # Idx of clean samples
Xa, ya = Xf[clean_idx], yf[clean_idx]
# If new labels have fewer classes than original...
if len(np.unique(y)) != len(np.unique(ya)):
warnings.warn("One or more of the classes has been completely "
"filtered out, stopping iteration.")
break
else:
Xf, yf = Xa, ya
conf_score = self.detector.detect(Xf, yf) # Calling detect once more than necessary
cur_change = cur_size - len(Xf)
if cur_change <= .01 * orig_size:
n_iters_with_small_change += 1
else:
n_iters_with_small_change = 0  # The change must stay small for `max_iter` consecutive iterations
return Xf, yf
# Duplicate fit, a IterativeHandlerBase?
def fit(self, X, y, conf_score=None):
if conf_score is not None:
raise RuntimeWarning("conf_score will be ignored. Iterative handlers only use "
"Detector passed during construction.")
Xf, yf = self.clean(X, y)
assert len(np.unique(y)) == len(np.unique(yf)), "One or more of the classes has been completely filtered out"
self.classifier.fit(Xf, yf)
return self
@property
def iterative(self): # Does this Handler call Detector multiple times?
return True | /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/skclean/handlers/filters.py | 0.842896 | 0.552298 | filters.py | pypi |
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import check_random_state
from ..detectors.base import BaseDetector
from sklearn.utils.validation import _check_sample_weight
def _check_data_params(obj, X, y, conf_score):
"""Extracted out of BaseHandler for WeightedBag & Costing"""
# Reproducibility
rns = check_random_state(obj.random_state)
for k, v in obj.get_params().items():
if isinstance(v, BaseEstimator) and 'random_state' in v.get_params():
v.set_params(random_state=rns.randint(10**8))
# Parallelization
if obj.classifier and 'n_jobs' in obj.classifier.get_params():
obj.classifier.set_params(n_jobs=obj.n_jobs)
if obj.detector and 'n_jobs' in obj.detector.get_params():
obj.detector.set_params(n_jobs=obj.n_jobs)
if conf_score is None and obj.detector is None:
raise ValueError("Neither conf_score or Detector is supplied to Handler")
if conf_score is None: # outside Pipeline/ inside Iterative Handler
conf_score = obj.detector.detect(X, y)
X, y = obj._validate_data(X, y)
obj.classes_ = np.unique(y)
conf_score = _check_sample_weight(conf_score, X)
return X, y, conf_score
# Non-iterative Handlers can be used both w/ pipeline and H(c=C(),d=D()) format
class BaseHandler(BaseEstimator):
def __init__(self, classifier=None, detector=None, *, n_jobs=1, random_state=None):
self.classifier = classifier
self.detector = detector
self.n_jobs = n_jobs
self.random_state = random_state
def _check_everything(self, X, y, conf_score):
"""Check hyparams suppiled in __init__ & data"""
return _check_data_params(self, X, y, conf_score)
def fit(self, X, y, conf_score=None):
raise NotImplementedError("Attempt to instantiate abstract class")
# problem with ensemble handlers: i.e. many copies of obj.classifier
def predict(self, X):
return self.classifier.predict(X)
def predict_proba(self, X):
return self.classifier.predict_proba(X)
@property
def iterative(self): # Does this Handler call Detector multiple times?
return False | /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/skclean/handlers/base.py | 0.757346 | 0.243187 | base.py | pypi |
import warnings
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_random_state
from skclean.handlers.base import BaseHandler, _check_data_params
class SampleWeight(BaseHandler, ClassifierMixin):
"""
Simply passes `conf_score` (computed with `detector`) as sample weight
to underlying classifier.
Parameters
------------
classifier: object
A classifier instance supporting sklearn API. Must support `sample_weight`
in `fit()` method.
detector : `BaseDetector` or None, default=None
To compute `conf_score`. Set it to `None` only if `conf_score` is \
expected in `fit()` (e.g. when used inside a Pipeline with a \
`BaseDetector` preceding it). Otherwise a Detector must be supplied \
during instantiation.
n_jobs : int, default=1
No of parallel cpu cores to use
random_state : int, default=None
Set this value for reproducibility
"""
def __init__(self, classifier, detector=None, *, n_jobs=1, random_state=None):
super().__init__(classifier, detector, n_jobs=n_jobs,
random_state=random_state)
def fit(self, X, y, conf_score=None):
X, y, conf_score = self._check_everything(X, y, conf_score)
self.classifier.fit(X, y, sample_weight=conf_score)
return self
class Relabeling:
"""
Flip labels when confident about some other samples. Will require access to
decision_function (predict_proba i.e. N*L shape).
Raise error early if detector doesn't support it. Also, show available detectors
that do support it by providing sample code to run to get that list.
Find another Relabeling algo, and Put them in a 3rd file?
"""
class _WBBase(ClassifierMixin, BaseEstimator):
def __init__(self, estimator, replacement, sampling_ratio, sample_weight=None):
super().__init__()
self.estimator = estimator
self.replacement = replacement
self.sampling_ratio = sampling_ratio
self.sample_weight = sample_weight
def fit(self, X, y):
rng = check_random_state(self.estimator.random_state)
to_sample = int(self.sampling_ratio * len(y))
target_idx = rng.choice(len(y), size=to_sample, replace=self.replacement, p=self.sample_weight)
X, y = X[target_idx], y[target_idx]
self.estimator.fit(X, y)
return self
def predict(self, X):
return self.estimator.predict(X)
class WeightedBagging(BaggingClassifier):
"""
Similar to regular bagging- except cleaner samples will be chosen
more often during bagging. That is, a sample's probability of
getting selected in the bootstrapping process is directly proportional to
its `conf_score`. See :cite:`ensih18` for details.
Parameters
------------------
classifier: object
A classifier instance supporting sklearn API. Same as `base_estimator`
of scikit-learn's BaggingClassifier.
detector : `BaseDetector` or None, default=None
To compute `conf_score`. Set it to `None` only if `conf_score` is \
expected in `fit()` (e.g. when used inside a Pipeline with a \
`BaseDetector` preceding it). Otherwise a Detector must be supplied \
during instantiation.
n_estimators : int, default=10
The number of base classifiers in the ensemble.
replacement : bool, default=True
Whether to sample instances with/without replacement at each base classifier
sampling_ratio : float, 0.0 to 1.0, default=1.0
No of samples drawn at each tree equals: len(X) * sampling_ratio
n_jobs : int, default=1
No of parallel cpu cores to use
random_state : int, default=None
Set this value for reproducibility
verbose : int, default=0
Controls the verbosity when fitting and predicting
"""
def __init__(self,
classifier=None,
detector=None,
n_estimators=100,
replacement=True,
sampling_ratio=1.0,
n_jobs=1,
random_state=None,
verbose=0):
BaggingClassifier.__init__(
self,
base_estimator=classifier,
warm_start=False,
n_estimators=n_estimators,
n_jobs=n_jobs,
random_state=random_state,
bootstrap=False,
bootstrap_features=False,
verbose=verbose)
self.classifier = classifier
self.detector = detector
self.replacement = replacement
self.sampling_ratio = sampling_ratio
@property
def classifier(self):
return self.base_estimator
@classifier.setter
def classifier(self, clf):
self.base_estimator = clf
def _validate_estimator(self, default=DecisionTreeClassifier()):
super()._validate_estimator()
self.base_estimator_ = _WBBase(self.base_estimator_, self.replacement, self.sampling_ratio, self.conf_score_)
def fit(self, X, y, conf_score=None, **kwargs):
X, y, conf_score = _check_data_params(self, X, y, conf_score)
conf_score += 1 / (len(y))
self.conf_score_ = conf_score/conf_score.sum() # Sum to one
return super().fit(X, y)
@property
def iterative(self): # Does this Handler call Detector multiple times?
return False
class _RSBase(ClassifierMixin, BaseEstimator): # Rejection sampling Base
def __init__(self, estimator, sample_weight=None):
super().__init__()
self.estimator = estimator
self.sample_weight = sample_weight
def fit(self, X, y):
rng = check_random_state(self.estimator.random_state)
r = rng.uniform(self.sample_weight.min(), self.sample_weight.max(), size=y.shape)
target_idx = r <= self.sample_weight
if len(np.unique(y[target_idx])) != len(np.unique(y)):
warnings.warn("One or more classes are not present after resampling")
X, y = X[target_idx], y[target_idx]
self.estimator = self.estimator.fit(X, y)
return self
def predict(self, X):
return self.estimator.predict(X)
class Costing(BaggingClassifier):
"""
Implements *costing*, a method combining cost-proportionate rejection
sampling and ensemble aggregation. At each base classifier, samples
are selected for training with probability equal to `conf_score`.
See :cite:`costing03` for details.
Parameters
------------------
classifier: object
A classifier instance supporting sklearn API. Same as `base_estimator`
of scikit-learn's BaggingClassifier.
detector : `BaseDetector` or None, default=None
To compute `conf_score`. Set it to `None` only if `conf_score` is \
expected in `fit()` (e.g. when used inside a Pipeline with a \
`BaseDetector` preceding it). Otherwise a Detector must be supplied \
during instantiation.
n_estimators : int, default=10
The number of base classifiers in the ensemble.
n_jobs : int, default=1
No of parallel cpu cores to use
random_state : int, default=None
Set this value for reproducibility
verbose : int, default=0
Controls the verbosity when fitting and predicting
"""
def __init__(self,
classifier=None,
detector=None,
n_estimators=100,
n_jobs=1,
random_state=None,
verbose=0):
BaggingClassifier.__init__(
self,
base_estimator=classifier,
n_estimators=n_estimators,
warm_start=False,
n_jobs=n_jobs,
random_state=random_state,
bootstrap=False,
bootstrap_features=False,
verbose=verbose)
self.classifier = classifier
self.detector = detector
@property
def classifier(self):
return self.base_estimator
@classifier.setter
def classifier(self, clf):
self.base_estimator = clf
def _validate_estimator(self, default=DecisionTreeClassifier()):
super()._validate_estimator()
self.base_estimator_ = _RSBase(self.base_estimator_, self.conf_score_)
# Duplicate fit
def fit(self, X, y, conf_score=None, **kwargs):
X, y, conf_score = _check_data_params(self, X, y, conf_score)
conf_score += 1 / (len(y))
self.conf_score_ = conf_score/conf_score.sum() # Sum to one
return super().fit(X, y)
@property
def iterative(self): # Does this Handler call Detector multiple times?
return False | /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/skclean/handlers/example_weighting.py | 0.92222 | 0.453625 | example_weighting.py | pypi |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.base import ClassifierMixin, BaseEstimator
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors._base import _get_weights
from sklearn.utils.extmath import weighted_mode
# TODO: support all sklearn Random Forest parameters
class RobustForest(BaseEstimator, ClassifierMixin):
"""
Uses a random forest to compute pairwise similarity/distance, and then \
a simple K Nearest Neighbor that works on that similarity matrix. For
a pair of samples, the similarity value is proportional to how frequently \
they belong to the same leaf. See :cite:`forestkdn17` for details.
Parameters
------------
method : string, default='simple'
There are two different ways to compute similarity matrix. In 'simple'
method, the similarity value is simply the percentage of times two \
samples belong to same leaf. 'weighted' method also takes the size of \
those leaves into account- it exactly matches above paper's algorithm, \
but it is computationally slow.
K : int, default=5
No of nearest neighbors to consider for final classification
n_estimators : int, default=100
No of trees in Random Forest.
max_leaf_nodes : int, default=128
Maximum no of leaves in each tree.
n_jobs : int, default=1
No of parallel cpu cores to use
random_state : int, default=None
Set this value for reproducibility
"""
def __init__(self, method='simple', K=5, n_estimators=100, max_leaf_nodes=128,
random_state=None, n_jobs=None):
self.method = method
self.K = K
self.n_estimators = n_estimators
self.max_leaf_nodes = max_leaf_nodes
self.random_state = random_state
self.n_jobs = n_jobs
def fit(self, X, y):
X, y = self._validate_data(X, y)
self.forest_ = RandomForestClassifier(
n_estimators=self.n_estimators, max_leaf_nodes=self.max_leaf_nodes,
n_jobs=self.n_jobs, random_state=self.random_state
).fit(X, y)
self.data_ = (X, y)
return self
def _pairwise_distance_weighted(self, train_X, test_X):
out_shape = (test_X.shape[0], train_X.shape[0])
mat = np.zeros(out_shape, dtype='float32')
temp = np.zeros(out_shape, dtype='float32')
to_add = np.zeros(out_shape, dtype='float32')
ones = np.ones(out_shape, dtype='float32')
for tree in self.forest_.estimators_:
train_leaves = tree.apply(train_X)
test_leaves = tree.apply(test_X)
match = test_leaves.reshape(-1, 1) == train_leaves.reshape(1, -1) # Samples w/ same leaf as mine:mates
no_of_mates = match.sum(axis=1, dtype='float') # No of My Leaf mates
np.multiply(match, no_of_mates.reshape(-1, 1),
out=temp) # assigning weight to each leaf-mate, proportional to no of mates
to_add.fill(0)
np.divide(ones, temp, out=to_add, where=temp != 0) # Now making that inversely proportional
assert np.allclose(to_add.sum(axis=1), 1)
assert match.shape == (len(test_X), len(train_X)) == to_add.shape == temp.shape
assert no_of_mates.shape == (len(test_X),)
np.add(mat, to_add, out=mat)
return 1 - mat / len(self.forest_.estimators_)
def _pairwise_distance_simple(self, train_X, test_X):
train_leaves = self.forest_.apply(train_X) # (train_X,n_estimators)
test_leaves = self.forest_.apply(test_X) # (test_X,n_estimators)
dist = cdist(test_leaves, train_leaves, metric='hamming')
assert dist.shape == (len(test_X), len(train_X))
return dist
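# Note: the hamming distance above is the fraction of trees in which the two
# samples land in different leaves, so for the 'simple' method similarity = 1 - distance.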
def pairwise_distance(self, train_X, test_X):
if self.method == 'simple':
return self._pairwise_distance_simple(train_X, test_X)
elif self.method == 'weighted':
return self._pairwise_distance_weighted(train_X, test_X)
raise Exception("method not recognized")
def predict(self, X):
train_X, train_Y = self.data_
dist = self.pairwise_distance(train_X, X)
assert np.all(dist >= 0)
idx = np.argsort(dist, axis=1)
nn_idx = idx[:, :self.K]
nn_dist = dist[np.arange(len(X))[:, None], nn_idx]
nn_labels = train_Y[nn_idx]
weights = _get_weights(nn_dist, 'distance') # Weighted KNN
a, _ = weighted_mode(nn_labels, weights, axis=1)
return a.reshape(-1) | /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/skclean/models/ensemble.py | 0.764892 | 0.613787 | ensemble.py | pypi |
import numpy as np
from scipy.optimize import minimize
from sklearn.linear_model import LogisticRegression
from sklearn.utils.extmath import log_logistic
from sklearn.utils.multiclass import unique_labels
def log_loss(wp, X, target, C, PN, NP):
"""
It is minimized using "L-BFGS-B" method of "scipy.optimize.minimize" function, and results in
similar coefficients as sklearn's Logistic Regression when PN=NP=0.0.
Parameters
-------------
wp: Coefficients & Intercept
X: (N,M) shaped data matrix
target: (N,) shaped 1-D array of targets
C: Regularization
PN: % of Positive samples labeled as Negative
NP: % of Negative samples labeled as Positive
Returns
------------
loss_value: float
"""
c = wp[-1]
w = wp[:-1]
z = np.dot(X, w) + c
yz = target * z # to compute l(t,y)
nyz = -target * z # to compute l(t,-y)
ls = -log_logistic(yz) # l(t,y)
nls = -log_logistic(nyz) # l(t,-y)
idx = target == 1 # indexes of samples w/ P label
loss = ls.copy() # To store l-hat
loss[idx] = (1 - NP) * ls[idx] - PN * nls[idx] # Modified loss for P samples
loss[~idx] = (1 - PN) * ls[~idx] - NP * nls[~idx] # Modified loss for N samples
loss = loss / (1 - PN - NP) + .5 * (1. / C) * np.dot(w, w) # Normalization & regularization
return loss.sum() # Final loss
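# Note: `log_loss` above computes the noise-corrected surrogate loss of
# :cite:`natarajan13` (see RobustLR below), per sample:
#     l_hat(t, y=+1) = ((1 - NP) * l(t, +1) - PN * l(t, -1)) / (1 - PN - NP)
#     l_hat(t, y=-1) = ((1 - PN) * l(t, -1) - NP * l(t, +1)) / (1 - PN - NP)
# with an L2 penalty of .5 * (1 / C) * ||w||^2 added to each sample before summing.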
class RobustLR(LogisticRegression):
"""
Modifies the logistic loss using class dependent (estimated) noise rates \
for robustness. This implementation is for binary classification tasks only.
See :cite:`natarajan13` for details.
Parameters
----------------
PN : float, default=.2
Percentage of Positive labels flipped to Negative.
NP : float, default=.2
Percentage of Negative labels flipped to Positive.
C : float
Inverse of regularization strength, must be a positive float.
random_state : int, default=None
Set this value for reproducibility
"""
def __init__(self, PN=.2, NP=.2, C=np.inf, max_iter=4000, random_state=None):
super().__init__(C=C, max_iter=max_iter, random_state=random_state)
self.PN = PN
self.NP = NP
# TODO: Support `sample_weight`
def fit(self, X, y, sample_weight=None):
X, y = self._validate_data(X, y)
self.classes_ = unique_labels(y)
w0 = np.zeros(X.shape[1] + 1)
target = y.copy()
target[target == 0] = -1
self.r_ = minimize(log_loss, w0, method="L-BFGS-B", args=(X, target, self.C, self.PN, self.NP),
options={"maxiter": self.max_iter})
self.coef_ = self.r_.x[:-1].reshape(1, -1)
self.intercept_ = self.r_.x[-1:]
return self | /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/skclean/models/logistic_regression.py | 0.924959 | 0.627438 | logistic_regression.py | pypi |
import numpy as np
from sklearn.exceptions import NotFittedError
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors._base import _get_weights
from .base import BaseDetector
# TODO: Support other distance metrics
class KDN(BaseDetector):
"""
For each sample, the percentage of its nearest neighbors with the same
label serves as its `conf_score`. Euclidean distance is used to
find the nearest neighbors. See :cite:`ensih18,ih14` for details.
Parameters
--------------
n_neighbors : int, default=5
No of nearest neighbors to use to compute `conf_score`
weight : string, default='uniform'
weight function used in prediction. If 'uniform', all points
in each neighborhood are weighted equally. If 'distance', weights
points by the inverse of their distance.
n_jobs : int, default=1
No of parallel cpu cores to use
"""
def __init__(self, n_neighbors=5, weight='uniform', n_jobs=1):
super().__init__(n_jobs=n_jobs, random_state=None)
self.n_neighbors = n_neighbors
self.weight = weight
def _get_kdn(self, knn, y):
dist, kid = knn.kneighbors() # (N, n_neighbors): distances & ids of nn's for every sample in X
weights = _get_weights(dist, self.weight)
if weights is None:
weights = np.ones_like(kid)
agreement = y[kid] == y.reshape(-1, 1)
return np.average(agreement, axis=1, weights=weights)
def detect(self, X, y):
X, y = self._validate_data(X, y) # , accept_sparse=True
knn = KNeighborsClassifier(n_neighbors=self.n_neighbors, weights=self.weight,
n_jobs=self.n_jobs).fit(X, y)
return self._get_kdn(knn, y)
class ForestKDN(KDN):
"""
Like KDN, but a trained Random Forest is used to compute pairwise similarity.
Specifically, for a pair of samples, their similarity is the percentage of
times they belong to the same leaf. See :cite:`forestkdn17` for details.
Parameters
-------------------
n_neighbors : int, default=5
No of nearest neighbors to use to compute `conf_score`
n_estimators : int, default=100
No of trees in Random Forest.
max_leaf_nodes : int, default=64
Maximum no of leaves in each tree.
weight : string, default='distance'
weight function used in prediction. If 'distance', weights
points by the inverse of their distance. If 'uniform', all points
in each neighborhood are weighted equally.
n_jobs : int, default=1
No of parallel cpu cores to use
random_state : int, default=None
Set this value for reproducibility
"""
def __init__(self, n_neighbors=5, n_estimators=100, max_leaf_nodes=64,
weight='distance', n_jobs=1, random_state=None):
super().__init__(n_neighbors=n_neighbors, weight=weight, n_jobs=n_jobs)
self.n_estimators = n_estimators
self.max_leaf_nodes = max_leaf_nodes
self.random_state = random_state
def detect(self, X, y):
X, y = self._check_everything(X, y)
forest = RandomForestClassifier(
n_estimators=self.n_estimators,
max_leaf_nodes=self.max_leaf_nodes, n_jobs=self.n_jobs,
random_state=self.random_state).fit(X, y)
Xs = forest.apply(X)
knn = KNeighborsClassifier(
n_neighbors=self.n_neighbors, metric='hamming', algorithm='brute',
weights=self.weight, n_jobs=self.n_jobs).fit(Xs, y)
return self._get_kdn(knn, y)
# TODO: rename this class (?)
class HybridKDN(KDN):
def __init__(self, classifier, n_neighbors=5, weight='uniform', n_jobs=1):
super().__init__(n_neighbors=n_neighbors, weight=weight, n_jobs=n_jobs)
self.classifier = classifier
def detect(self, X, y):
X, y = self._validate_data(X, y)
try: # classifier may already be trained
yp = self.classifier.predict(X)
except NotFittedError:
yp = self.classifier.fit(X, y).predict(X)
knn = KNeighborsClassifier().fit(X, y)
_, kid = knn.kneighbors()
agr = yp[kid] == y[kid]
return agr.sum(axis=1) / knn.n_neighbors
class RkDN(KDN):
__doc__ = KDN.__doc__
def detect(self, X, y):
X, y = self._validate_data(X, y)
knn = KNeighborsClassifier(n_neighbors=self.n_neighbors, weights=self.weight,
n_jobs=self.n_jobs).fit(X, y)
_, kid = knn.kneighbors()
N = len(X)
M = np.zeros((N, N), dtype='bool')
cols = np.zeros_like(kid) + np.arange(0, N).reshape(-1, 1)
M[kid.reshape(-1), cols.reshape(-1)] = 1
label_agr = y.reshape(1, -1) == y.reshape(-1, 1)
agr = M & label_agr
m = M.sum(axis=1).astype('float')
# Outliers that don't belong to anybody's NN list have conf_score=0
return np.divide(agr.sum(axis=1), m, out=np.zeros_like(m), where=(m != 0)) | /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/skclean/detectors/neighbors.py | 0.787073 | 0.54056 | neighbors.py | pypi |
import warnings
import numpy as np
from sklearn import clone
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_predict
from sklearn.utils import check_random_state
from .base import BaseDetector
class PartitioningDetector(BaseDetector):
"""
Partitions dataset into n subsets, trains a classifier on each.
Trained models are then used to predict on entire dataset.
See :cite:`ipf07` for details.
Parameters
------------
classifier : object, default=None
A classifier instance supporting sklearn API.
If None, `DecisionTreeClassifier` is used.
n_partitions : int, default=5
No of non-overlapping partitions created from dataset.
For small datasets, you might want to use smaller values.
n_jobs : int, default=1
No of parallel cpu cores to use
random_state : int, default=None
Set this value for reproducibility
"""
def __init__(self, classifier=None, n_partitions=5, n_jobs=1, random_state=None):
super().__init__(n_jobs=n_jobs, random_state=random_state)
self.classifier = classifier
self.n_partitions = n_partitions
def detect(self, X, y):
X, y = self._check_everything(X, y)
classifier = clone(self.classifier) if self.classifier else \
DecisionTreeClassifier(max_depth=2, random_state=self.random_state)
breaks = [(len(X) // self.n_partitions) * i
for i in range(1, self.n_partitions)]
Xs, ys = np.split(X, breaks), np.split(y, breaks)
clfs = []
for i in range(self.n_partitions): # All clfs have same random_state but diff data
c = clone(classifier).fit(Xs[i], ys[i])
clfs.append(c)
preds = np.zeros((len(X), self.n_partitions))
for i in range(self.n_partitions):
preds[:, i] = clfs[i].predict(X)
eqs = preds == y.reshape(-1, 1)
return eqs.sum(axis=1) / self.n_partitions
class MCS(BaseDetector):
"""
Detects noise using a sequential Markov Chain Monte Carlo sampling algorithm.
Tested for binary classification; it may perform poorly on multi-class
problems. See :cite:`mcmc19` for details.
Parameters
--------------
classifier : object, default=None
A classifier instance supporting sklearn API.
If None, `LogisticRegression` is used.
n_steps : int, default=20
No of sampling steps to run.
n_jobs : int, default=1
No of parallel cpu cores to use
random_state : int, default=None
Set this value for reproducibility
"""
def __init__(self, classifier=None, n_steps=20, n_jobs=1, random_state=None):
super().__init__(n_jobs=n_jobs, random_state=random_state)
self.classifier = classifier
self.n_steps = n_steps
def detect(self, X, y):
X, y = self._check_everything(X, y)
rns = check_random_state(self.random_state)
seeds = rns.randint(10 ** 8, size=self.n_steps)
classifier = clone(self.classifier) if self.classifier \
else LogisticRegression(random_state=self.random_state)
contain_random_state = 'random_state' in classifier.get_params()
mask = np.ones(y.shape, 'bool')
conf_score = np.zeros(y.shape)
for i in range(self.n_steps):
conf_score[mask] += 1
clf = clone(classifier)
if contain_random_state:
clf.set_params(random_state=seeds[i])
clf.fit(X[mask], y[mask])
probs = clf.predict_proba(X) # (N, n_classes), p(k|x) for all k in classes
pc = probs[range(len(y)), y] # (N,), Prob assigned to correct class
mask = rns.binomial(1, pc).astype('bool')
if not np.all(np.unique(y[mask]) == self.classes_):
warnings.warn(f"One or more classes have been entirely left out "
f"in current iteration {i}, stopping MCMC loop.",
category=RuntimeWarning)
break
return conf_score / self.n_steps
class InstanceHardness(BaseDetector):
"""
A set of classifiers are used to predict labels of each sample
using cross-validation. A sample's `conf_score` is the percentage of
classifiers that correctly predict its label. See :cite:`ih14`
for details.
Parameters
--------------
classifiers : list, default=None
Classifiers used to predict sample labels.
If None, four classifiers are used: `GaussianNB`,
`DecisionTreeClassifier`, `KNeighborsClassifier` and `LogisticRegression`.
cv : int, cross-validation generator or an iterable, default=None
If None, uses 5-fold stratified k-fold
if int, no of folds to use in stratified k-fold
n_jobs : int, default=1
No of parallel cpu cores to use
random_state : int, default=None
Set this value for reproducibility
"""
DEFAULT_CLFS = [DecisionTreeClassifier(max_leaf_nodes=500), GaussianNB(), KNeighborsClassifier(),
LogisticRegression(multi_class='auto', max_iter=4000, solver='lbfgs')]
def __init__(self, classifiers=None, cv=None, n_jobs=1, random_state=None):
super().__init__(n_jobs=n_jobs, random_state=random_state)
self.classifiers = classifiers
self.cv = cv
def detect(self, X, y):
X, y = self._check_everything(X, y)
if self.classifiers is None:
self.classifiers = InstanceHardness.DEFAULT_CLFS
cv = self.cv
if cv is None or type(cv) == int:
n_splits = self.cv or 5
cv = StratifiedKFold(n_splits=n_splits, shuffle=True,
random_state=self.random_state)
N = len(X)
conf_score = np.zeros_like(y, dtype='float64')
rns = check_random_state(self.random_state)
seeds = rns.randint(10 ** 8, size=len(self.classifiers))
for i, clf in enumerate(self.classifiers):
if 'random_state' in clf.get_params():
clf.set_params(random_state=seeds[i])
# probability given to original class of all samples
probs = cross_val_predict(clf, X, y, cv=cv, n_jobs=self.n_jobs,
method='predict_proba')[range(N), y]
conf_score += probs
return conf_score / len(self.classifiers)
class RandomForestDetector(BaseDetector):
"""
Trains a Random Forest first- for each sample, only trees that
didn't select it for training (via bootstrapping) are used to
predict its label. The percentage of trees that correctly predicted
the label is the sample's `conf_score`.
See :cite:`twostage18` for details.
Parameters
--------------
n_estimators : int, default=101
No of trees in Random Forest.
sampling_ratio : float, 0.0 to 1.0, default=None
No of samples drawn at each tree equals: len(X) * sampling_ratio. \
If None, len(X) samples are drawn.
n_jobs : int, default=1
No of parallel cpu cores to use
random_state : int, default=None
Set this value for reproducibility
"""
# TODO: Allow other tree ensembles
def __init__(self, n_estimators=101, sampling_ratio=None, n_jobs=1, random_state=None):
super().__init__(n_jobs=n_jobs, random_state=random_state)
self.n_estimators = n_estimators
self.sampling_ratio = sampling_ratio
def detect(self, X, y):
X, y = self._validate_data(X, y)
rf = RandomForestClassifier(n_estimators=self.n_estimators, oob_score=True,
max_samples=self.sampling_ratio, n_jobs=self.n_jobs,
random_state=self.random_state).fit(X, y)
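# Out-of-bag probability assigned to each sample's given (possibly noisy) label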
conf_score = rf.oob_decision_function_[range(len(X)), y]
return conf_score | /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/skclean/detectors/ensemble.py | 0.870308 | 0.495117 | ensemble.py | pypi |
import numpy as np
from sklearn.utils import check_random_state
def noise_matrix_is_valid(noise_matrix, py, verbose=False):
'''Given a prior py = p(y=k), returns true if the given noise_matrix is a learnable matrix.
Learnability means that it is possible to achieve better than random performance, on average,
for the amount of noise in noise_matrix.'''
# Number of classes
K = len(py)
# Let's assume some number of training examples for code readability,
# but it doesn't matter what we choose as its not actually used.
N = float(10000)
ps = np.dot(noise_matrix, py) # P(s=k)
# P(s=k, y=k')
joint_noise = np.multiply(noise_matrix, py) # / float(N)
# Check that joint_probs is valid probability matrix
if not (abs(joint_noise.sum() - 1.0) < 1e-6):
return False
# Check that noise_matrix is a valid matrix
# i.e. check p(s=k)*p(y=k) < p(s=k, y=k)
for i in range(K):
C = N * joint_noise[i][i]
E1 = N * joint_noise[i].sum() - C
E2 = N * joint_noise.T[i].sum() - C
O = N - E1 - E2 - C
if verbose:
print(
"E1E2/C", round(E1 * E2 / C),
"E1", round(E1),
"E2", round(E2),
"C", round(C),
"|", round(E1 * E2 / C + E1 + E2 + C),
"|", round(E1 * E2 / C), "<", round(O),
)
print(
round(ps[i] * py[i]), "<", round(joint_noise[i][i]),
":", ps[i] * py[i] < joint_noise[i][i],
)
if not (ps[i] * py[i] < joint_noise[i][i]):
return False
return True
def generate_n_rand_probabilities_that_sum_to_m(
n,
m,
seed,
max_prob=1.0,
min_prob=0.0,
):
'''When min_prob=0 and max_prob = 1.0, this method is deprecated.
Instead use np.random.dirichlet(np.ones(n))*m
Generates 'n' random probabilities that sum to 'm'.
Parameters
----------
n : int
Length of np.array of random probabilities to be returned.
m : float
Sum of np.array of random probabilities that is returned.
max_prob : float (0.0, 1.0] | Default value is 1.0
Maximum probability of any entry in the returned np.array.
min_prob : float [0.0, 1.0) | Default value is 0.0
Minimum probability of any entry in the returned np.array.'''
epsilon = 1e-6 # Imprecision allowed for inequalities with floats
rns = check_random_state(seed)
if n == 0:
return np.array([])
if (max_prob + epsilon) < m / float(n):
raise ValueError("max_prob must be greater or equal to m / n, but " +
"max_prob = " + str(max_prob) + ", m = " + str(m) + ", n = " +
str(n) + ", m / n = " + str(m / float(n)))
if min_prob > (m + epsilon) / float(n):
raise ValueError("min_prob must be less or equal to m / n, but " +
"max_prob = " + str(max_prob) + ", m = " + str(m) + ", n = " +
str(n) + ", m / n = " + str(m / float(n)))
# When max_prob = 1, min_prob = 0, the following two lines are equivalent to:
# intermediate = np.sort(np.append(np.random.uniform(0, 1, n-1), [0, 1]))
# result = (intermediate[1:] - intermediate[:-1]) * m
result = rns.dirichlet(np.ones(n)) * m
min_val = min(result)
max_val = max(result)
while max_val > (max_prob + epsilon):
new_min = min_val + (max_val - max_prob)
# This adjustment prevents the new max from always being max_prob.
adjustment = (max_prob - new_min) * rns.rand()
result[np.argmin(result)] = new_min + adjustment
result[np.argmax(result)] = max_prob - adjustment
min_val = min(result)
max_val = max(result)
min_val = min(result)
max_val = max(result)
while min_val < (min_prob - epsilon):
min_val = min(result)
max_val = max(result)
new_max = max_val - (min_prob - min_val)
# This adjustment prevents the new min from always being min_prob.
adjustment = (new_max - min_prob) * rns.rand()
result[np.argmax(result)] = new_max - adjustment
result[np.argmin(result)] = min_prob + adjustment
min_val = min(result)
max_val = max(result)
return result
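# Illustrative usage (added sketch): draw five probabilities that sum to 1.5,
# with every entry capped at 0.6.
def _rand_probabilities_example():
    p = generate_n_rand_probabilities_that_sum_to_m(n=5, m=1.5, seed=0, max_prob=0.6)
    assert abs(p.sum() - 1.5) < 1e-6
    assert p.max() <= 0.6 + 1e-6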
def randomly_distribute_N_balls_into_K_bins(
N, # int
K, # int
seed,
max_balls_per_bin=None,
min_balls_per_bin=None,
):
'''Returns a uniformly random numpy integer array of length N that sums to K.'''
if N == 0:
return np.zeros(K, dtype=int)
if max_balls_per_bin is None:
max_balls_per_bin = N
else:
max_balls_per_bin = min(max_balls_per_bin, N)
if min_balls_per_bin is None:
min_balls_per_bin = 0
else:
min_balls_per_bin = min(min_balls_per_bin, N / K)
if N / float(K) > max_balls_per_bin:
N = max_balls_per_bin * K
arr = np.round(generate_n_rand_probabilities_that_sum_to_m(
n=K,
m=1,
max_prob=max_balls_per_bin / float(N),
min_prob=min_balls_per_bin / float(N),
seed=seed
) * N)
while sum(arr) != N:
while sum(arr) > N: # pragma: no cover
arr[np.argmax(arr)] -= 1
while sum(arr) < N:
arr[np.argmin(arr)] += 1
return arr.astype(int)
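# Illustrative usage (added sketch): split 20 "balls" over 4 bins with at most
# 10 balls in any single bin.
def _distribute_balls_example():
    counts = randomly_distribute_N_balls_into_K_bins(N=20, K=4, seed=0,
                                                     max_balls_per_bin=10)
    assert counts.sum() == 20 and counts.max() <= 10 and len(counts) == 4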
# This can be quite slow
def generate_noise_matrix_from_trace(
K,
trace,
max_trace_prob=1.0,
min_trace_prob=1e-5,
max_noise_rate=1 - 1e-5,
min_noise_rate=0.0,
valid_noise_matrix=True,
py=None,
frac_zero_noise_rates=0.,
seed=0,
max_iter=10000,
):
    '''Generates a K x K noise matrix P(s=k_s|y=k_y) with trace
    as the np.sum(np.diagonal(noise_matrix)).
Parameters
----------
K : int
Creates a noise matrix of shape (K, K). Implies there are
K classes for learning with noisy labels.
    trace : float (0.0, K]
        Sum of the diagonal entries of the np.array of random probabilities that is returned.
        Must be > 1 when valid_noise_matrix == True.
max_trace_prob : float (0.0, 1.0]
Maximum probability of any entry in the trace of the return matrix.
min_trace_prob : float [0.0, 1.0)
Minimum probability of any entry in the trace of the return matrix.
    max_noise_rate : float (0.0, 1.0]
        Maximum noise_rate (non-diagonal entry) in the returned np.array.
    min_noise_rate : float [0.0, 1.0)
        Minimum noise_rate (non-diagonal entry) in the returned np.array.
valid_noise_matrix : bool
If True, returns a matrix having all necessary conditions for
learning with noisy labels. In particular, p(y=k)p(s=k) < p(y=k,s=k)
is satisfied. This requires that Trace > 1.
py : np.array (shape (K, 1))
The fraction (prior probability) of each true, hidden class label, P(y = k).
REQUIRED when valid_noise_matrix == True.
frac_zero_noise_rates : float
        The fraction of the K*(K-1) noise rates that will be set to 0. Note that if
you set a high trace, it may be impossible to also have a low
fraction of zero noise rates without forcing all non-"1" diagonal values.
Instead, when this happens we only guarantee to produce a noise matrix with
frac_zero_noise_rates **or higher**. The opposite occurs with a small trace.
seed : int
Seeds the random number generator for numpy.
max_iter : int (default: 10000)
The max number of tries to produce a valid matrix before returning False.
Output
------
np.array (shape (K, K))
noise matrix P(s=k_s|y=k_y) with trace
as the np.sum(np.diagonal(noise_matrix)).
This a conditional probability matrix and a
left stochastic matrix.'''
if valid_noise_matrix and trace <= 1:
raise ValueError("trace = {}. trace > 1 is necessary for a".format(trace) +
" valid noise matrix to be returned (valid_noise_matrix == True)")
if valid_noise_matrix and py is None and K > 2:
raise ValueError("py must be provided (not None) if the input parameter" +
" valid_noise_matrix == True")
if K <= 1:
raise ValueError('K must be >= 2, but K = {}.'.format(K))
if max_iter < 1:
return False
rns = check_random_state(seed)
# Special (highly constrained) case with faster solution.
# Every 2 x 2 noise matrix with trace > 1 is valid because p(y) is not used
if K == 2:
if frac_zero_noise_rates >= 0.5: # Include a single zero noise rate
noise_mat = np.array([
[1., 1 - (trace - 1.)],
[0., trace - 1.],
])
return noise_mat if rns.rand() > 0.5 else np.rot90(noise_mat, k=2)
else: # No zero noise rates
diag = generate_n_rand_probabilities_that_sum_to_m(2, trace, seed=rns.randint(100))
noise_matrix = np.array([
[diag[0], 1 - diag[1]],
[1 - diag[0], diag[1]],
])
return noise_matrix
# K > 2
for z in range(max_iter):
noise_matrix = np.zeros(shape=(K, K))
# Randomly generate noise_matrix diagonal.
nm_diagonal = generate_n_rand_probabilities_that_sum_to_m(
n=K,
m=trace,
max_prob=max_trace_prob,
min_prob=min_trace_prob,
seed=rns.randint(100)
)
np.fill_diagonal(noise_matrix, nm_diagonal)
# Randomly distribute number of zero-noise-rates across columns
num_col_with_noise = K - np.count_nonzero(1 == nm_diagonal)
num_zero_noise_rates = int(K * (K - 1) * frac_zero_noise_rates)
# Remove zeros already in [1,0,..,0] columns
num_zero_noise_rates -= (K - num_col_with_noise) * (K - 1)
num_zero_noise_rates = np.maximum(num_zero_noise_rates, 0) # Prevent negative
num_zero_noise_rates_per_col = randomly_distribute_N_balls_into_K_bins(
N=num_zero_noise_rates,
K=num_col_with_noise,
max_balls_per_bin=K - 2, # 2 = one for diagonal, and one to sum to 1
min_balls_per_bin=0,
seed=rns.randint(100)
) if K > 2 else np.array([0, 0]) # Special case when K == 2
stack_nonzero_noise_rates_per_col = list(K - 1 - num_zero_noise_rates_per_col)[::-1]
# Randomly generate noise rates for columns with noise.
for col in np.arange(K)[nm_diagonal != 1]:
num_noise = stack_nonzero_noise_rates_per_col.pop()
# Generate num_noise noise_rates for the given column.
noise_rates_col = list(generate_n_rand_probabilities_that_sum_to_m(
n=num_noise,
m=1 - nm_diagonal[col],
max_prob=max_noise_rate,
min_prob=min_noise_rate,
seed=rns.randint(100),
))
# Randomly select which rows of the noisy column to assign the random noise rates
rows = rns.choice([row for row in range(K) if row != col], num_noise, replace=False)
for row in rows:
noise_matrix[row][col] = noise_rates_col.pop()
if not valid_noise_matrix or noise_matrix_is_valid(noise_matrix, py):
break
return noise_matrix
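# Illustrative usage (added sketch): a valid 3-class noise matrix whose
# diagonal sums to the requested trace and whose columns are the conditional
# distributions p(s|y=k).
def _noise_matrix_from_trace_example():
    py = np.full(3, 1 / 3)
    nm = generate_noise_matrix_from_trace(K=3, trace=1.8, py=py,
                                          valid_noise_matrix=True, seed=1)
    assert abs(np.trace(nm) - 1.8) < 1e-6      # diagonal sums to the requested trace
    assert np.allclose(nm.sum(axis=0), 1.0)    # columns are valid distributions
    assert noise_matrix_is_valid(nm, py)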
def gen_simple_noise_mat(K: int, noise_level: float, random_state=None):
    """Generate a simple K x K row-stochastic noise matrix: diagonal entries are
    drawn around 1 - noise_level and the remaining probability mass of each row
    is split randomly (Dirichlet) over the off-diagonal entries."""
    rns = check_random_state(random_state)
    mat = np.zeros((K, K), dtype='float')
    mean = 1 - noise_level
    diag = rns.normal(loc=mean, scale=mean / 10, size=K)
    np.fill_diagonal(mat, diag)
    for i in range(K):
        nl = 1 - mat[i][i]
        cols = [j for j in range(K) if j != i]
        mat[i, cols] = rns.dirichlet(np.ones(K - 1)) * nl
    return mat

# source: scikit-clean-0.1.2/skclean/utils/noise_generation.py (PyPI sdist)
from pathlib import Path
from time import ctime, perf_counter
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score, check_cv
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.utils import shuffle, check_random_state
_intervals = (
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
# Code taken from https://stackoverflow.com/a/24542445/4553309
def _display_time(seconds, granularity=4):
if seconds < 60:
return f"{seconds:.2f} seconds"
result = []
for name, count in _intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append("{} {}".format(value, name))
return ', '.join(result[:granularity])
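# Illustrative usage (added sketch) of the helper above:
def _display_time_example():
    assert _display_time(42.5) == "42.50 seconds"
    assert _display_time(90061) == "1 day, 1 hour, 1 minute, 1 second"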
# TODO: Add support for downloading dataset
def load_data(dataset, stats=False):
path = Path(__file__).parent.parent / f'datasets/{dataset}.csv'
try:
df = pd.read_csv(path, header=None)
except FileNotFoundError as e:
raise FileNotFoundError(f"Dataset file {dataset} does not exist")
df = df.astype('float64')
data = df.values
X, Y = data[:, :-1], data[:, -1]
Y = LabelEncoder().fit_transform(Y)
X = MinMaxScaler().fit_transform(X)
if stats:
labels, freq = np.unique(Y, return_counts=True)
print(f"{dataset}, {X.shape}, {len(labels)}, {freq.min() / freq.max():.3f}\n")
return shuffle(X, Y, random_state=42)
# TODO: Support resuming inside cross_val_score, use Dask?
def compare(models: dict, datasets: list, cv, df_path=None, n_jobs=-1,
scoring='accuracy', random_state=None, verbose=True, **kwargs):
"""
Compare different methods across several datasets, with support for \
parallelization, reproducibility and automatic resumption. Output is \
a csv file where each row represents a dataset and each column \
represents a method/ algorithm. It's basically a wrapper around \
`sklearn.model_selection.cross_val_score`- check this for more details.
Note that support for resumption is somewhat limited, it can only \
recover output of (dataset, method) pair for whom computation is fully \
complete. In other words, if a 10-fold cross-validation is stopped \
after 5-fold, the results of that 5-fold is lost.
Parameters
--------------
models : dict
Keys are model name, values are scikit-learn API compatible classifiers.
datasets : list
A list of either `string`, denoting dataset names to be loaded with \
`load_data`, or a nested tuple of (name, (X, y)), denoting dataset \
name, features and labels respectively.
cv : int, cross-validation generator or an iterable
if int, no of folds to use in stratified k-fold
df_path : string, default=None
Path to (csv) file to store results- will be overwritten if already \
present.
scoring : string, or a scorer callable object / function with signature \
``scorer(estimator, X, y)`` which should return only a single value.
n_jobs : int, default=1
No of parallel cpu cores to use
random_state : int, default=None
Set this value for reproducibility. Note that this will overwrite \
existing random state of methods even if it's already present.
verbose : Controls the verbosity level
kwargs : Other parameters for ``cross_val_score``.
"""
rns = check_random_state(random_state)
cv = check_cv(cv)
cv.random_state = rns.randint(100)
seeds = iter(rns.randint(10 ** 8, size=len(models)*len(datasets)))
try:
df = pd.read_csv(df_path, index_col=0)
if verbose:
print("Result file found, resuming...") # Word 'resuming' is used in test
except (FileNotFoundError, ValueError):
df = pd.DataFrame()
for data in datasets:
if type(data) == str:
X, Y = load_data(data, stats=verbose)
else:
data, (X, Y) = data # Nested tuple of (name, (data, target))
if data not in df.index:
df.loc[data, :] = None
for name, clf in models.items():
if 'n_jobs' in clf.get_params():
clf.set_params(n_jobs=1)
if 'random_state' in clf.get_params():
clf.set_params(random_state=next(seeds))
if name not in df.columns:
df[name] = None
if not pd.isna(df.loc[data, name]):
v = df.loc[data, name]
if verbose:
print(f"Skipping {data},{name} :{v}")
continue
elif verbose:
print(f"Starting: {data}, {name} at {ctime()[-14:-4]}")
start = perf_counter()
res = cross_val_score(clf, X, Y, cv=cv, n_jobs=n_jobs,
scoring=scoring, error_score='raise', **kwargs)
df.at[data, name] = res.mean()
elapsed_time = _display_time(perf_counter() - start)
if verbose:
print(f"Result: {df.loc[data, name]:.4f} in {elapsed_time} \n")
if df_path:
df.to_csv(df_path)
if verbose:
print()
    return df

# source: scikit-clean-0.1.2/skclean/utils/_utils.py (PyPI sdist)
import numpy as np
from scipy.spatial.distance import cdist
from .initialization import initialize_random, initialize_probabilistic
class CMeans:
"""Base class for C-means algorithms.
Parameters
----------
n_clusters : int, optional
The number of clusters to find.
n_init : int, optional
The number of times to attempt convergence with new initial centroids.
max_iter : int, optional
The number of cycles of the alternating optimization routine to run for
*each* convergence.
tol : float, optional
The stopping condition. Convergence is considered to have been reached
when the objective function changes less than `tol`.
verbosity : int, optional
The verbosity of the instance. May be 0, 1, or 2.
.. note:: Very much not yet implemented.
random_state : :obj:`int` or :obj:`np.random.RandomState`, optional
The generator used for initialization. Using an integer fixes the seed.
eps : float, optional
To avoid numerical errors, zeros are sometimes replaced with a very
small number, specified here.
Attributes
----------
metric : :obj:`string` or :obj:`function`
The distance metric used. May be any of the strings specified for
:obj:`cdist`, or a user-specified function.
initialization : function
The method used to initialize the cluster centers.
centers : :obj:`np.ndarray`
(n_clusters, n_features)
The derived or supplied cluster centers.
memberships : :obj:`np.ndarray`
(n_samples, n_clusters)
The derived or supplied cluster memberships.
"""
metric = 'euclidean'
initialization = staticmethod(initialize_random)
def __init__(self, n_clusters=2, n_init=10, max_iter=300, tol=1e-4,
verbosity=0, random_state=None, eps=1e-18, **kwargs):
self.n_clusters = n_clusters
self.n_init = n_init
self.max_iter = max_iter
self.tol = tol
self.verbosity = verbosity
self.random_state = random_state
self.eps = eps
self.params = kwargs
self.centers = None
self.memberships = None
def distances(self, x):
"""Calculates the distance between data x and the centers.
The distance, by default, is calculated according to `metric`, but this
method should be overridden by subclasses if required.
Parameters
----------
x : :obj:`np.ndarray`
(n_samples, n_features)
The original data.
Returns
-------
:obj:`np.ndarray`
(n_samples, n_clusters)
Each entry (i, j) is the distance between sample i and cluster
center j.
"""
return cdist(x, self.centers, metric=self.metric)
def calculate_memberships(self, x):
raise NotImplementedError(
"`calculate_memberships` should be implemented by subclasses.")
def calculate_centers(self, x):
raise NotImplementedError(
"`calculate_centers` should be implemented by subclasses.")
def objective(self, x):
raise NotImplementedError(
"`objective` should be implemented by subclasses.")
def fit(self, x):
"""Optimizes cluster centers by restarting convergence several times.
Parameters
----------
x : :obj:`np.ndarray`
(n_samples, n_features)
The original data.
"""
objective_best = np.infty
memberships_best = None
centers_best = None
j_list = []
for i in range(self.n_init):
self.centers = None
self.memberships = None
self.converge(x)
objective = self.objective(x)
j_list.append(objective)
if objective < objective_best:
memberships_best = self.memberships.copy()
centers_best = self.centers.copy()
objective_best = objective
self.memberships = memberships_best
self.centers = centers_best
return j_list
def converge(self, x):
"""Finds cluster centers through an alternating optimization routine.
Terminates when either the number of cycles reaches `max_iter` or the
objective function changes by less than `tol`.
Parameters
----------
x : :obj:`np.ndarray`
(n_samples, n_features)
The original data.
"""
centers = []
j_new = np.infty
for i in range(self.max_iter):
j_old = j_new
self.update(x)
centers.append(self.centers)
j_new = self.objective(x)
if np.abs(j_old - j_new) < self.tol:
break
return np.array(centers)
def update(self, x):
"""Updates cluster memberships and centers in a single cycle.
If the cluster centers have not already been initialized, they are
chosen according to `initialization`.
Parameters
----------
x : :obj:`np.ndarray`
(n_samples, n_features)
The original data.
"""
self.initialize(x)
self.memberships = self.calculate_memberships(x)
self.centers = self.calculate_centers(x)
def initialize(self, x):
if self.centers is None and self.memberships is None:
self.memberships, self.centers = \
self.initialization(x, self.n_clusters, self.random_state)
elif self.memberships is None:
self.memberships = \
self.initialization(x, self.n_clusters, self.random_state)[0]
elif self.centers is None:
self.centers = \
self.initialization(x, self.n_clusters, self.random_state)[1]
class Hard(CMeans):
"""Hard C-means, equivalent to K-means clustering.
Methods
-------
calculate_memberships(x)
The membership of a sample is 1 to the closest cluster and 0 otherwise.
calculate_centers(x)
New centers are calculated as the mean of the points closest to them.
objective(x)
Interpretable as the data's rotational inertia about the cluster
centers. To be minimised.
"""
def calculate_memberships(self, x):
distances = self.distances(x)
return (np.arange(distances.shape[1])[:, np.newaxis] == np.argmin(
distances, axis=1)).T
def calculate_centers(self, x):
return np.dot(self.memberships.T, x) / \
np.sum(self.memberships, axis=0)[..., np.newaxis]
def objective(self, x):
if self.memberships is None or self.centers is None:
return np.infty
distances = self.distances(x)
return np.sum(self.memberships * distances)
class Fuzzy(CMeans):
"""Base class for fuzzy C-means clusters.
Attributes
----------
m : float
Fuzziness parameter. Higher values reduce the rate of drop-off from
full membership to zero membership.
Methods
-------
fuzzifier(memberships)
Fuzzification operator. By default, for memberships $u$ this is $u^m$.
objective(x)
Interpretable as the data's weighted rotational inertia about the
cluster centers. To be minimised.
"""
m = 2
def fuzzifier(self, memberships):
return np.power(memberships, self.m)
def objective(self, x):
if self.memberships is None or self.centers is None:
return np.infty
distances = self.distances(x)
return np.sum(self.fuzzifier(self.memberships) * distances)
class Probabilistic(Fuzzy):
"""Probabilistic C-means.
In the probabilistic algorithm, sample points have total membership of
unity, distributed equally among each of the centers. This tends to push
cluster centers away from each other.
Methods
-------
calculate_memberships(x)
Memberships are calculated from the distance :math:`d_{ij}` between the
sample :math:`j` and the cluster center :math:`i`.
.. math::
u_{ik} = \left(\sum_j \left( \\frac{d_{ik}}{d_{jk}} \\right)^{\\frac{2}{m - 1}} \\right)^{-1}
calculate_centers(x)
New centers are calculated as the mean of the points closest to them,
weighted by the fuzzified memberships.
.. math:: c_i = \left. \sum_k u_{ik}^m x_k \middle/ \sum_k u_{ik} \\right.
"""
def calculate_memberships(self, x):
distances = self.distances(x)
distances[distances == 0.] = 1e-18
return np.sum(np.power(
np.divide(distances[:, :, np.newaxis], distances[:, np.newaxis, :]),
2 / (self.m - 1)), axis=2) ** -1
def calculate_centers(self, x):
return np.dot(self.fuzzifier(self.memberships).T, x) / \
np.sum(self.fuzzifier(self.memberships).T, axis=1)[..., np.newaxis]
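# Illustrative usage (added sketch, not part of the original module): fuzzy
# probabilistic clustering of two well-separated blobs. The memberships of each
# sample sum to one across clusters, as described in the class docstring above.
def _probabilistic_cmeans_example():
    rng = np.random.RandomState(0)
    x = np.vstack([rng.normal(0.0, 0.2, (50, 2)),
                   rng.normal(3.0, 0.2, (50, 2))])
    clusterer = Probabilistic(n_clusters=2, n_init=3, random_state=0)
    clusterer.fit(x)
    assert clusterer.centers.shape == (2, 2)
    assert np.allclose(clusterer.memberships.sum(axis=1), 1.0)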
class Possibilistic(Fuzzy):
"""Possibilistic C-means.
In the possibilistic algorithm, sample points are assigned memberships
according to their relative proximity to the centers. This is controlled
through a weighting to the cluster centers, approximately the variance of
each cluster.
Methods
-------
calculate_memberships(x)
Memberships are calculated from the distance :math:`d_{ij}` between the
sample :math:`j` and the cluster center :math:`i`, and the weighting
:math:`w_i` of each center.
.. math::
u_{ik} = \left(1 + \left(\\frac{d_{ik}}{w_i}\\right)^\\frac{1}{m
-1} \\right)^{-1}
calculate_centers(x)
New centers are calculated as the mean of the points closest to them,
weighted by the fuzzified memberships.
.. math::
c_i = \left. \sum_k u_{ik}^m x_k \middle/ \sum_k u_{ik} \\right.
"""
initialization = staticmethod(initialize_probabilistic)
_weights = None
def weights(self, x):
if self._weights is None:
distances = self.distances(x)
memberships = self.memberships
self._weights = np.sum(self.fuzzifier(memberships) * distances,
axis=0) / np.sum(self.fuzzifier(memberships),
axis=0)
return self._weights
def calculate_memberships(self, x):
distances = self.distances(x)
return (1. + (distances / self.weights(x)) ** (
1. / (self.m - 1))) ** -1.
def calculate_centers(self, x):
return np.divide(np.dot(self.fuzzifier(self.memberships).T, x),
np.sum(self.fuzzifier(self.memberships), axis=0)[
..., np.newaxis])
class GustafsonKesselMixin(Fuzzy):
"""Gives clusters ellipsoidal character.
The Gustafson-Kessel algorithm redefines the distance measurement such that
clusters may adopt ellipsoidal shapes. This is achieved through updates to
a covariance matrix assigned to each cluster center.
Examples
--------
Create a algorithm for probabilistic clustering with ellipsoidal clusters:
    >>> class ProbabilisticGustafsonKessel(GustafsonKesselMixin, Probabilistic):
    ...     pass
    >>> pgk = ProbabilisticGustafsonKessel()
    >>> pgk.fit(x)
"""
covariance = None
def fit(self, x):
"""Optimizes cluster centers by restarting convergence several times.
Extends the default behaviour by recalculating the covariance matrix
with resultant memberships and centers.
Parameters
----------
x : :obj:`np.ndarray`
(n_samples, n_features)
The original data.
"""
j_list = super(GustafsonKesselMixin, self).fit(x)
self.covariance = self.calculate_covariance(x)
return j_list
def update(self, x):
"""Single update of the cluster algorithm.
Extends the default behaviour by including a covariance calculation
after updating the centers
Parameters
----------
x : :obj:`np.ndarray`
(n_samples, n_features)
The original data.
"""
self.initialize(x)
self.centers = self.calculate_centers(x)
self.covariance = self.calculate_covariance(x)
self.memberships = self.calculate_memberships(x)
def distances(self, x):
covariance = self.covariance if self.covariance is not None \
else self.calculate_covariance(x)
d = x - self.centers[:, np.newaxis]
left_multiplier = \
np.einsum('...ij,...jk', d, np.linalg.inv(covariance))
return np.sum(left_multiplier * d, axis=2).T
def calculate_covariance(self, x):
"""Calculates the covariance of the data `u` with cluster centers `v`.
Parameters
----------
x : :obj:`np.ndarray`
(n_samples, n_features)
The original data.
Returns
-------
:obj:`np.ndarray`
(n_clusters, n_features, n_features)
The covariance matrix of each cluster.
"""
v = self.centers
if v is None:
return None
q, p = v.shape
if self.memberships is None:
# If no memberships have been calculated assume n-spherical clusters
return (np.eye(p)[..., np.newaxis] * np.ones((p, q))).T
q, p = v.shape
vector_difference = x - v[:, np.newaxis]
fuzzy_memberships = self.fuzzifier(self.memberships)
right_multiplier = \
np.einsum('...i,...j->...ij', vector_difference, vector_difference)
einstein_sum = \
np.einsum('i...,...ijk', fuzzy_memberships, right_multiplier) / \
np.sum(fuzzy_memberships, axis=0)[..., np.newaxis, np.newaxis]
return np.nan_to_num(
einstein_sum / (np.linalg.det(einstein_sum) ** (1 / q))[
                ..., np.newaxis, np.newaxis])

# source: scikit-cmeans-0.1/skcmeans/algorithms.py (PyPI sdist)
[![Build Status](https://secure.travis-ci.org/veeresht/CommPy.svg?branch=master)](https://secure.travis-ci.org/veeresht/CommPy)
[![Coverage](https://coveralls.io/repos/veeresht/CommPy/badge.svg?branch=master)](https://coveralls.io/r/veeresht/CommPy)
[![PyPi](https://badge.fury.io/py/scikit-commpy.svg)](https://badge.fury.io/py/scikit-commpy)
[![Docs](https://readthedocs.org/projects/commpy/badge/?version=latest)](http://commpy.readthedocs.io/en/latest/?badge=latest)
CommPy
======
CommPy is an open source toolkit implementing digital communications algorithms
in Python using NumPy and SciPy.
Objectives
----------
- To provide readable and usable implementations of algorithms used in the research, design and implementation of digital communication systems.
Available Features
------------------
[Channel Coding](https://github.com/veeresht/CommPy/tree/master/commpy/channelcoding)
--------------
- Encoder for Convolutional Codes (Polynomial, Recursive Systematic). Supports all rates and puncture matrices.
- Viterbi Decoder for Convolutional Codes (Hard Decision Output).
- MAP Decoder for Convolutional Codes (Based on the BCJR algorithm).
- Encoder for a rate-1/3 systematic parallel concatenated Turbo Code.
- Turbo Decoder for a rate-1/3 systematic parallel concatenated turbo code (Based on the MAP decoder/BCJR algorithm).
- Binary Galois Field GF(2^m) with minimal polynomials and cyclotomic cosets.
- Create all possible generator polynomials for a (n,k) cyclic code.
- Random Interleavers and De-interleavers.
- Belief Propagation (BP) Decoder and triangular systematic encoder for LDPC Codes.
[Channel Models](https://github.com/veeresht/CommPy/blob/master/commpy/channels.py)
--------------
- SISO Channel with Rayleigh or Rician fading.
- MIMO Channel with Rayleigh or Rician fading.
- Binary Erasure Channel (BEC)
- Binary Symmetric Channel (BSC)
- Binary AWGN Channel (BAWGNC)
[Wifi 802.11 Simulation Class](https://github.com/veeresht/CommPy/blob/master/commpy/wifi80211.py)
----------------------------
- A class to simulate the transmission and reception parameters of the 802.11 physical layer (currently up to VHT, i.e. 802.11ac).
[Filters](https://github.com/veeresht/CommPy/blob/master/commpy/filters.py)
-------
- Rectangular
- Raised Cosine (RC), Root Raised Cosine (RRC)
- Gaussian
[Impairments](https://github.com/veeresht/CommPy/blob/master/commpy/impairments.py)
-----------
- Carrier Frequency Offset (CFO)
[Modulation/Demodulation](https://github.com/veeresht/CommPy/blob/master/commpy/modulation.py)
-----------------------
- Phase Shift Keying (PSK)
- Quadrature Amplitude Modulation (QAM) (see the usage sketch after this list)
- OFDM Tx/Rx signal processing
- MIMO Maximum Likelihood (ML) Detection.
- MIMO K-best Schnorr-Euchner Detection.
- MIMO Best-First Detection.
- Convert channel matrix to Bit-level representation.
- Computation of LogLikelihood ratio using max-log approximation.
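
A minimal usage sketch (illustrative only; it exercises just the modem API listed above, with no channel, coding or impairments):

```
import numpy as np
from commpy.modulation import QAMModem

modem = QAMModem(16)                       # Gray-mapped 16-QAM
bits = np.random.randint(0, 2, 4 * 1000)   # 4 bits per 16-QAM symbol
symbols = modem.modulate(bits)
recovered = modem.demodulate(symbols, 'hard')
assert np.array_equal(bits, recovered)     # noise-free round trip is exact
```
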
[Sequences](https://github.com/veeresht/CommPy/blob/master/commpy/sequences.py)
---------
- PN Sequence
- Zadoff-Chu (ZC) Sequence
[Utilities](https://github.com/veeresht/CommPy/blob/master/commpy/utilities.py)
---------
- Decimal to bit-array, bit-array to decimal.
- Hamming distance, Euclidean distance.
- Upsample
- Power of a discrete-time signal
[Links](https://github.com/veeresht/CommPy/blob/master/commpy/links.py)
-----
- Estimate the BER performance of a link model with Monte Carlo simulation.
- Link model object.
- Helper function for MIMO Iteration Detection and Decoding scheme.
FAQs
----
Why are you developing this?
----------------------------
During my coursework in communication theory and systems at UCSD, I realized that the best way to actually learn and understand the theory is to try to implement "the math" in practice :). Having used SciPy before, I thought there should be a similar package for digital communications in Python. This is a start!
What programming languages do you use?
--------------------------------------
CommPy uses Python as its base programming language and python packages like NumPy, SciPy and Matplotlib.
How can I contribute?
---------------------
Implement any feature you want and send me a pull request :). If you want to suggest new features or discuss anything related to CommPy, please get in touch with me ([email protected]).
How do I use CommPy?
--------------------
Requirements/Dependencies
-------------------------
- python 3.2 or above
- numpy 1.10 or above
- scipy 0.15 or above
- matplotlib 1.4 or above
- nose 1.3 or above
- sympy 1.7 or above
Installation
------------
- To use the released version on PyPi, use pip to install as follows::
```
$ pip install scikit-commpy
```
- To work with the development branch, clone from github and install as follows::
```
$ git clone https://github.com/veeresht/CommPy.git
$ cd CommPy
$ python setup.py install
```
- The conda version is currently outdated, but v0.3 is still available using::
```
$ conda install -c https://conda.binstar.org/veeresht scikit-commpy
```
Citing CommPy
-------------
If you use CommPy for a publication, presentation or a demo, a citation would be greatly appreciated. A citation example is given below; we suggest adding the revision or version number and the date:
V. Taranalli, B. Trotobas, and contributors, "CommPy: Digital Communication with Python". [Online]. Available: github.com/veeresht/CommPy
I would also greatly appreciate your feedback if you have found CommPy useful. Just send me a mail: [email protected]
For more details on CommPy, please visit https://veeresht.info/CommPy/
<!-- source: scikit-commpy-0.8.0/README.md (PyPI sdist) -->
import numpy as np
__all__=['rcosfilter', 'rrcosfilter', 'gaussianfilter', 'rectfilter']
def rcosfilter(N, alpha, Ts, Fs):
"""
Generates a raised cosine (RC) filter (FIR) impulse response.
Parameters
----------
N : int
Length of the filter in samples.
alpha : float
Roll off factor (Valid values are [0, 1]).
Ts : float
Symbol period in seconds.
Fs : float
Sampling Rate in Hz.
Returns
-------
time_idx : 1-D ndarray (float)
Array containing the time indices, in seconds, for the impulse response.
h_rc : 1-D ndarray (float)
Impulse response of the raised cosine filter.
"""
T_delta = 1/float(Fs)
time_idx = ((np.arange(N)-N/2))*T_delta
sample_num = np.arange(N)
h_rc = np.zeros(N, dtype=float)
for x in sample_num:
t = (x-N/2)*T_delta
if t == 0.0:
h_rc[x] = 1.0
elif alpha != 0 and t == Ts/(2*alpha):
h_rc[x] = (np.pi/4)*(np.sin(np.pi*t/Ts)/(np.pi*t/Ts))
elif alpha != 0 and t == -Ts/(2*alpha):
h_rc[x] = (np.pi/4)*(np.sin(np.pi*t/Ts)/(np.pi*t/Ts))
else:
h_rc[x] = (np.sin(np.pi*t/Ts)/(np.pi*t/Ts))* \
(np.cos(np.pi*alpha*t/Ts)/(1-(((2*alpha*t)/Ts)*((2*alpha*t)/Ts))))
return time_idx, h_rc
def rrcosfilter(N, alpha, Ts, Fs):
"""
Generates a root raised cosine (RRC) filter (FIR) impulse response.
Parameters
----------
N : int
Length of the filter in samples.
alpha : float
Roll off factor (Valid values are [0, 1]).
Ts : float
Symbol period in seconds.
Fs : float
Sampling Rate in Hz.
Returns
---------
time_idx : 1-D ndarray of floats
Array containing the time indices, in seconds, for
the impulse response.
h_rrc : 1-D ndarray of floats
Impulse response of the root raised cosine filter.
"""
T_delta = 1/float(Fs)
time_idx = ((np.arange(N)-N/2))*T_delta
sample_num = np.arange(N)
h_rrc = np.zeros(N, dtype=float)
for x in sample_num:
t = (x-N/2)*T_delta
if t == 0.0:
h_rrc[x] = 1.0 - alpha + (4*alpha/np.pi)
elif alpha != 0 and t == Ts/(4*alpha):
h_rrc[x] = (alpha/np.sqrt(2))*(((1+2/np.pi)* \
(np.sin(np.pi/(4*alpha)))) + ((1-2/np.pi)*(np.cos(np.pi/(4*alpha)))))
elif alpha != 0 and t == -Ts/(4*alpha):
h_rrc[x] = (alpha/np.sqrt(2))*(((1+2/np.pi)* \
(np.sin(np.pi/(4*alpha)))) + ((1-2/np.pi)*(np.cos(np.pi/(4*alpha)))))
else:
h_rrc[x] = (np.sin(np.pi*t*(1-alpha)/Ts) + \
4*alpha*(t/Ts)*np.cos(np.pi*t*(1+alpha)/Ts))/ \
(np.pi*t*(1-(4*alpha*t/Ts)*(4*alpha*t/Ts))/Ts)
return time_idx, h_rrc
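# Illustrative usage (added sketch): a root raised cosine pulse with roll-off
# 0.35, 4 samples per symbol and a span of 8 symbols (32 taps).
def _rrcosfilter_example():
    Ts = 1.0   # symbol period in seconds
    Fs = 4.0   # sampling rate in Hz (4 samples per symbol)
    t, taps = rrcosfilter(N=32, alpha=0.35, Ts=Ts, Fs=Fs)
    assert len(t) == len(taps) == 32
    return t, taps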
def gaussianfilter(N, alpha, Ts, Fs):
"""
Generates a gaussian filter (FIR) impulse response.
Parameters
----------
N : int
Length of the filter in samples.
alpha : float
Roll off factor (Valid values are [0, 1]).
Ts : float
Symbol period in seconds.
Fs : float
Sampling Rate in Hz.
Returns
-------
time_index : 1-D ndarray of floats
Array containing the time indices for the impulse response.
h_gaussian : 1-D ndarray of floats
Impulse response of the gaussian filter.
"""
T_delta = 1/float(Fs)
time_idx = ((np.arange(N)-N/2))*T_delta
h_gaussian = (np.sqrt(np.pi)/alpha)*np.exp(-((np.pi*time_idx/alpha)*(np.pi*time_idx/alpha)))
return time_idx, h_gaussian
def rectfilter(N, Ts, Fs):
"""
Generates a rectangular filter (FIR) impulse response.
Parameters
----------
N : int
Length of the filter in samples.
Ts : float
Symbol period in seconds.
Fs : float
Sampling Rate in Hz.
Returns
-------
time_index : 1-D ndarray of floats
Array containing the time indices for the impulse response.
h_rect : 1-D ndarray of floats
Impulse response of the rectangular filter.
"""
h_rect = np.ones(N)
T_delta = 1/float(Fs)
time_idx = ((np.arange(N)-N/2))*T_delta
    return time_idx, h_rect

# source: scikit-commpy-0.8.0/commpy/filters.py (PyPI sdist)
from bisect import insort
import matplotlib.pyplot as plt
from numpy import arange, array, zeros, pi, sqrt, log2, argmin, \
hstack, repeat, tile, dot, shape, concatenate, exp, \
log, vectorize, empty, eye, kron, inf, full, abs, newaxis, minimum, clip, fromiter
from numpy.fft import fft, ifft
from numpy.linalg import qr, norm
from sympy.combinatorics.graycode import GrayCode
from commpy.utilities import bitarray2dec, dec2bitarray, signal_power
__all__ = ['PSKModem', 'QAMModem', 'ofdm_tx', 'ofdm_rx', 'mimo_ml', 'kbest', 'best_first_detector',
'bit_lvl_repr', 'max_log_approx']
class Modem:
""" Creates a custom Modem object.
Parameters
----------
constellation : array-like with a length which is a power of 2
Constellation of the custom modem
Attributes
----------
constellation : 1D-ndarray of complex
Modem constellation. If changed, the length of the new constellation must be a power of 2.
Es : float
Average energy per symbols.
m : integer
Constellation length.
    num_bits_symbol : integer
Number of bits per symbol.
Raises
------
ValueError
If the constellation is changed to an array-like with length that is not a power of 2.
"""
def __init__(self, constellation, reorder_as_gray=True):
""" Creates a custom Modem object. """
if reorder_as_gray:
m = log2(len(constellation))
gray_code_sequence = GrayCode(m).generate_gray()
gray_code_sequence_array = fromiter((int(g, 2) for g in gray_code_sequence), int, len(constellation))
self.constellation = array(constellation)[gray_code_sequence_array.argsort()]
else:
self.constellation = constellation
def modulate(self, input_bits):
""" Modulate (map) an array of bits to constellation symbols.
Parameters
----------
input_bits : 1D ndarray of ints
Inputs bits to be modulated (mapped).
Returns
-------
baseband_symbols : 1D ndarray of complex floats
Modulated complex symbols.
"""
mapfunc = vectorize(lambda i:
self._constellation[bitarray2dec(input_bits[i:i + self.num_bits_symbol])])
baseband_symbols = mapfunc(arange(0, len(input_bits), self.num_bits_symbol))
return baseband_symbols
def demodulate(self, input_symbols, demod_type, noise_var=0):
""" Demodulate (map) a set of constellation symbols to corresponding bits.
Parameters
----------
input_symbols : 1D ndarray of complex floats
Input symbols to be demodulated.
demod_type : string
'hard' for hard decision output (bits)
'soft' for soft decision output (LLRs)
noise_var : float
AWGN variance. Needs to be specified only if demod_type is 'soft'
Returns
-------
demod_bits : 1D ndarray of ints
Corresponding demodulated bits.
"""
if demod_type == 'hard':
index_list = abs(input_symbols - self._constellation[:, None]).argmin(0)
demod_bits = dec2bitarray(index_list, self.num_bits_symbol)
elif demod_type == 'soft':
demod_bits = zeros(len(input_symbols) * self.num_bits_symbol)
for i in arange(len(input_symbols)):
current_symbol = input_symbols[i]
for bit_index in arange(self.num_bits_symbol):
llr_num = 0
llr_den = 0
for bit_value, symbol in enumerate(self._constellation):
if (bit_value >> bit_index) & 1:
llr_num += exp((-abs(current_symbol - symbol) ** 2) / noise_var)
else:
llr_den += exp((-abs(current_symbol - symbol) ** 2) / noise_var)
demod_bits[i * self.num_bits_symbol + self.num_bits_symbol - 1 - bit_index] = log(llr_num / llr_den)
else:
raise ValueError('demod_type must be "hard" or "soft"')
return demod_bits
def plot_constellation(self):
""" Plot the constellation """
plt.scatter(self.constellation.real, self.constellation.imag)
for symb in self.constellation:
plt.text(symb.real + .2, symb.imag, self.demodulate(symb, 'hard'))
plt.title('Constellation')
plt.grid()
plt.show()
@property
def constellation(self):
""" Constellation of the modem. """
return self._constellation
@constellation.setter
def constellation(self, value):
# Check value input
num_bits_symbol = log2(len(value))
if num_bits_symbol != int(num_bits_symbol):
raise ValueError('Constellation length must be a power of 2.')
# Set constellation as an array
self._constellation = array(value)
# Update other attributes
self.Es = signal_power(self.constellation)
self.m = self._constellation.size
self.num_bits_symbol = int(num_bits_symbol)
class PSKModem(Modem):
""" Creates a Phase Shift Keying (PSK) Modem object.
Parameters
----------
m : int
Size of the PSK constellation.
Attributes
----------
constellation : 1D-ndarray of complex
Modem constellation. If changed, the length of the new constellation must be a power of 2.
Es : float
Average energy per symbols.
m : integer
Constellation length.
    num_bits_symbol : integer
Number of bits per symbol.
Raises
------
ValueError
If the constellation is changed to an array-like with length that is not a power of 2.
"""
def __init__(self, m):
""" Creates a Phase Shift Keying (PSK) Modem object. """
num_bits_symbol = log2(m)
if num_bits_symbol != int(num_bits_symbol):
raise ValueError('Constellation length must be a power of 2.')
super().__init__(exp(1j * arange(0, 2 * pi, 2 * pi / m)))
class QAMModem(Modem):
""" Creates a Quadrature Amplitude Modulation (QAM) Modem object.
Parameters
----------
m : int
        Size of the QAM constellation.
Attributes
----------
constellation : 1D-ndarray of complex
Modem constellation. If changed, the length of the new constellation must be a power of 2.
Es : float
Average energy per symbols.
m : integer
Constellation length.
    num_bits_symbol : integer
Number of bits per symbol.
Raises
------
ValueError
If the constellation is changed to an array-like with length that is not a power of 2.
        If the parameter m would lead to a non-square QAM during initialization.
"""
def __init__(self, m):
""" Creates a Quadrature Amplitude Modulation (QAM) Modem object.
Parameters
----------
m : int
Size of the QAM constellation. Must lead to a square QAM (ie sqrt(m) is an integer).
Raises
------
ValueError
If m would lead to an non-square QAM.
"""
num_symb_pam = sqrt(m)
if num_symb_pam != int(num_symb_pam):
raise ValueError('m must lead to a square QAM.')
pam = arange(-num_symb_pam + 1, num_symb_pam, 2)
constellation = tile(hstack((pam, pam[::-1])), int(num_symb_pam) // 2) * 1j + pam.repeat(num_symb_pam)
super().__init__(constellation)
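# Illustrative usage (added sketch): Gray-mapped 4-QAM round trip; on a
# noise-free link the hard decisions are exact and the sign of each soft
# output (LLR) matches the transmitted bit.
def _qam_modem_example():
    import numpy as np
    modem = QAMModem(4)
    bits = np.random.randint(0, 2, 50 * modem.num_bits_symbol)
    symbols = modem.modulate(bits)
    assert np.array_equal(modem.demodulate(symbols, 'hard'), bits)
    llrs = modem.demodulate(symbols, 'soft', noise_var=1.0)
    assert np.array_equal((llrs > 0).astype(int), bits)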
def ofdm_tx(x, nfft, nsc, cp_length):
    """ OFDM Transmit signal generation """
    # Indices into the FFT grid must be integers; the original float casts break
    # slicing under Python 3.
    nfft = int(nfft)
    nsc = int(nsc)
    cp_length = int(cp_length)
    ofdm_tx_signal = array([])
    for i in range(0, shape(x)[1]):
        symbols = x[:, i]
        ofdm_sym_freq = zeros(nfft, dtype=complex)
        ofdm_sym_freq[1:(nsc // 2) + 1] = symbols[nsc // 2:]
        ofdm_sym_freq[-(nsc // 2):] = symbols[0:nsc // 2]
        ofdm_sym_time = ifft(ofdm_sym_freq)
        cp = ofdm_sym_time[-cp_length:]
        ofdm_tx_signal = concatenate((ofdm_tx_signal, cp, ofdm_sym_time))
    return ofdm_tx_signal
def ofdm_rx(y, nfft, nsc, cp_length):
    """ OFDM Receive Signal Processing """
    nfft = int(nfft)
    nsc = int(nsc)
    cp_length = int(cp_length)
    num_ofdm_symbols = int(len(y) / (nfft + cp_length))
    x_hat = zeros([nsc, num_ofdm_symbols], dtype=complex)
    for i in range(0, num_ofdm_symbols):
        ofdm_symbol = y[i * nfft + (i + 1) * cp_length:(i + 1) * (nfft + cp_length)]
        symbols_freq = fft(ofdm_symbol)
        x_hat[:, i] = concatenate((symbols_freq[-(nsc // 2):], symbols_freq[1:(nsc // 2) + 1]))
    return x_hat
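# Illustrative usage (added sketch): map 4-QAM symbols onto the sub-carriers of
# two OFDM symbols and recover them exactly in the absence of a channel.
def _ofdm_roundtrip_example():
    import numpy as np
    nfft, nsc, cp_length = 64, 52, 16
    modem = QAMModem(4)
    bits = np.random.randint(0, 2, nsc * 2 * modem.num_bits_symbol)
    x = modem.modulate(bits).reshape(nsc, -1, order='F')   # one column per OFDM symbol
    y = ofdm_tx(x, nfft, nsc, cp_length)
    x_hat = ofdm_rx(y, nfft, nsc, cp_length)
    assert np.allclose(x, x_hat)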
def mimo_ml(y, h, constellation):
""" MIMO ML Detection.
    Parameters
----------
y : 1D ndarray of complex floats
Received complex symbols (shape: num_receive_antennas x 1)
h : 2D ndarray of complex floats
Channel Matrix (shape: num_receive_antennas x num_transmit_antennas)
constellation : 1D ndarray of complex floats
Constellation used to modulate the symbols
"""
_, n = h.shape
m = len(constellation)
x_ideal = empty((n, pow(m, n)), complex)
for i in range(0, n):
x_ideal[i] = repeat(tile(constellation, pow(m, i)), pow(m, n - i - 1))
min_idx = argmin(norm(y[:, None] - dot(h, x_ideal), axis=0))
x_r = x_ideal[:, min_idx]
return x_r
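# Illustrative usage (added sketch): noise-free 2x2 MIMO maximum-likelihood
# detection over a QPSK (4-QAM) constellation recovers the transmitted vector.
def _mimo_ml_example():
    import numpy as np
    constellation = QAMModem(4).constellation
    rng = np.random.RandomState(0)
    h = (rng.randn(2, 2) + 1j * rng.randn(2, 2)) / np.sqrt(2)
    x = constellation[[0, 3]]        # transmitted symbol vector
    y = h.dot(x)                     # received vector, no noise
    assert np.allclose(mimo_ml(y, h, constellation), x)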
def kbest(y, h, constellation, K, noise_var=0, output_type='hard', demode=None):
""" MIMO K-best Schnorr-Euchner Detection.
Reference: Zhan Guo and P. Nilsson, 'Algorithm and implementation of the K-best sphere decoding for MIMO detection',
IEEE Journal on Selected Areas in Communications, vol. 24, no. 3, pp. 491-503, Mar. 2006.
Parameters
----------
y : 1D ndarray
Received complex symbols (length: num_receive_antennas)
h : 2D ndarray
Channel Matrix (shape: num_receive_antennas x num_transmit_antennas)
constellation : 1D ndarray of floats
Constellation used to modulate the symbols
K : positive integer
Number of candidates kept at each step
noise_var : positive float
Noise variance.
*Default* value is 0.
output_type : str
'hard': hard output i.e. output is a binary word
'soft': soft output i.e. output is a vector of Log-Likelihood Ratios.
*Default* value is 'hard'
demode : function with prototype binary_word = demode(point)
Function that provide the binary word corresponding to a symbol vector.
Returns
-------
x : 1D ndarray of constellation points or of Log-Likelihood Ratios.
Detected vector (length: num_receive_antennas).
    Raises
------
ValueError
If h has more columns than rows.
If output_type is something else than 'hard' or 'soft'
"""
nb_tx, nb_rx = h.shape
if nb_rx > nb_tx:
raise ValueError('h has more columns than rows')
# QR decomposition
q, r = qr(h)
yt = q.conj().T.dot(y)
# Initialization
m = len(constellation)
nb_can = 1
if isinstance(constellation[0], complex):
const_type = complex
else:
const_type = float
X = empty((nb_rx, K * m), dtype=const_type) # Set of current candidates
d = tile(yt[:, None], (1, K * m)) # Corresponding distance vector
d_tot = zeros(K * m, dtype=float) # Corresponding total distance
hyp = empty(K * m, dtype=const_type) # Hypothesis vector
# Processing
for coor in range(nb_rx - 1, -1, -1):
nb_hyp = nb_can * m
# Copy best candidates m times
X[:, :nb_hyp] = tile(X[:, :nb_can], (1, m))
d[:, :nb_hyp] = tile(d[:, :nb_can], (1, m))
d_tot[:nb_hyp] = tile(d_tot[:nb_can], (1, m))
# Make hypothesis
hyp[:nb_hyp] = repeat(constellation, nb_can)
X[coor, :nb_hyp] = hyp[:nb_hyp]
d[coor, :nb_hyp] -= r[coor, coor] * hyp[:nb_hyp]
d_tot[:nb_hyp] += abs(d[coor, :nb_hyp]) ** 2
# Select best candidates
argsort = d_tot[:nb_hyp].argsort()
nb_can = min(nb_hyp, K) # Update number of candidate
# Update accordingly
X[:, :nb_can] = X[:, argsort[:nb_can]]
d[:, :nb_can] = d[:, argsort[:nb_can]]
d[:coor, :nb_can] -= r[:coor, coor, None] * hyp[argsort[:nb_can]]
d_tot[:nb_can] = d_tot[argsort[:nb_can]]
if output_type == 'hard':
return X[:, 0]
elif output_type == 'soft':
return max_log_approx(y, h, noise_var, X[:, :nb_can], demode)
else:
raise ValueError('output_type must be "hard" or "soft"')
def best_first_detector(y, h, constellation, stack_size, noise_var, demode, llr_max):
""" MIMO Best-First Detection.
Reference: G. He, X. Zhang, et Z. Liang, "Algorithm and Architecture of an Efficient MIMO Detector With Cross-Level
Parallel Tree-Search", IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 2019
Parameters
----------
y : 1D ndarray
Received complex symbols (length: num_receive_antennas)
h : 2D ndarray
Channel Matrix (shape: num_receive_antennas x num_transmit_antennas)
constellation : 1D ndarray of floats
Constellation used to modulate the symbols
stack_size : tuple of integers
Size of each stack (length: num_transmit_antennas - 1)
noise_var : positive float
Noise variance.
*Default* value is 0.
demode : function with prototype binary_word = demode(point)
Function that provide the binary word corresponding to a symbol vector.
llr_max : float
Max value for LLR clipping
Returns
-------
x : 1D ndarray of Log-Likelihood Ratios.
Detected vector (length: num_receive_antennas).
"""
class _Node:
""" Helper data model that implements __lt__ (aka '<') as required to use bisect.insort. """
def __init__(self, symb_vectors, partial_metrics):
"""
Recursive initializer that build a sequence of siblings.
Inputs are assumed to be ordered based on metric
"""
if len(partial_metrics) == 1:
# There is one node to build
self.symb_vector = symb_vectors.reshape(-1) # Insure that self.symb_vector is a 1d-ndarray
self.partial_metric = partial_metrics[0]
self.best_sibling = None
else:
# Recursive call to build several nodes
self.symb_vector = symb_vectors[:, 0].reshape(-1) # Insure that self.symb_vector is a 1d-ndarray
self.partial_metric = partial_metrics[0]
self.best_sibling = _Node(symb_vectors[:, 1:], partial_metrics[1:])
def __lt__(self, other):
return self.partial_metric < other.partial_metric
def expand(self, yt, r, constellation):
""" Build all children and return the best one. constellation must be a numpy ndarray."""
# Construct children's symbol vector
child_size = self.symb_vector.size + 1
children_symb_vectors = empty((child_size, constellation.size), constellation.dtype)
children_symb_vectors[1:] = self.symb_vector[:, newaxis]
children_symb_vectors[0] = constellation
# Compute children's partial metric and sort
children_metric = abs(yt[-child_size] - r[-child_size, -child_size:].dot(children_symb_vectors)) ** 2
children_metric += self.partial_metric
ordering = children_metric.argsort()
# Build children and return the best one
return _Node(children_symb_vectors[:, ordering], children_metric[ordering])
# Extract information from arguments
nb_tx, nb_rx = h.shape
constellation = array(constellation)
m = constellation.size
modulation_order = int(log2(m))
# QR decomposition
q, r = qr(h)
yt = q.conj().T.dot(y)
# Initialisation
map_metric = inf
map_bit_vector = None
counter_hyp_metric = full((nb_tx, modulation_order), inf)
stacks = tuple([] for _ in range(nb_tx))
# Start process by adding the best root's child in the last stack
stacks[-1].append(_Node(empty(0, constellation.dtype), array(0, float, ndmin=1)).expand(yt, r, constellation))
# While there is at least one non-empty stack (exempt the first one)
while any(stacks[1:]):
# Node processing
for idx_next_stack in range(len(stacks) - 1):
try:
idx_this_stack = idx_next_stack + 1
best_node = stacks[idx_this_stack].pop(0)
# Update search radius
if map_bit_vector is None:
radius = inf # No leaf has been reached yet so we keep all nodes
else:
bit_vector = demode(best_node.symb_vector).reshape(-1, modulation_order)
bit_vector[bit_vector == 0] = -1
# Select the counter hyp metrics that could be affected by this node. Details: eq. (14)-(16) in [1].
try:
a2 = counter_hyp_metric[idx_this_stack:][map_bit_vector[idx_this_stack:] != bit_vector].max()
except ValueError:
a2 = inf # NumPy cannot compute max on an empty matrix
radius = max(counter_hyp_metric[:idx_this_stack].max(), a2)
# Process best sibling
if best_node.best_sibling is not None and best_node.best_sibling.partial_metric <= radius:
insort(stacks[idx_this_stack], best_node.best_sibling)
# Process children
best_child = best_node.expand(yt, r, constellation)
if best_child.partial_metric <= radius:
insort(stacks[idx_next_stack], best_child)
except IndexError: # Raised when popping an empty stack
pass
# LLR update if there is a new leaf
if stacks[0]:
if stacks[0][0].partial_metric < map_metric:
minimum(counter_hyp_metric, map_metric, out=counter_hyp_metric)
map_metric = stacks[0][0].partial_metric
map_bit_vector = demode(stacks[0][0].symb_vector).reshape(-1, modulation_order)
map_bit_vector[map_bit_vector == 0] = -1
else:
minimum(counter_hyp_metric, stacks[0][0].partial_metric, out=counter_hyp_metric)
clip(counter_hyp_metric, map_metric - llr_max, map_metric + llr_max, counter_hyp_metric)
# Trimming stack according to requested max stack size
del stacks[0][0:] # there is no stack for the leafs
for idx_next_stack in range(len(stacks) - 1):
del stacks[idx_next_stack + 1][stack_size[idx_next_stack]:]
return ((map_metric - counter_hyp_metric) * map_bit_vector).reshape(-1)
def bit_lvl_repr(H, w):
""" Bit-level representation of matrix H with weights w.
parameters
----------
H : 2D ndarray (shape : nb_rx, nb_tx)
Channel Matrix.
w : 1D ndarray of complex (length : beta)
Bit level representation weights. The length must be even.
return
------
A : 2D nbarray (shape : nb_rx, nb_tx*beta)
Channel matrix adapted to the bit-level representation.
raises
------
ValueError
If beta (the length of w) is not even)
"""
beta = len(w)
if beta % 2 == 0:
m, n = H.shape
In = eye(n, n)
kr = kron(In, w)
return dot(H, kr)
else:
raise ValueError('Beta (length of w) must be even.')
def max_log_approx(y, h, noise_var, pts_list, demode):
""" Max-log demode
parameters
----------
y : 1D ndarray
Received symbol vector (length: num_receive_antennas)
h : 2D ndarray
Channel Matrix (shape: num_receive_antennas x num_transmit_antennas)
noise_var : positive float
Noise variance
pts_list : 2D ndarray of constellation points
Set of points to compute max log approximation (points are column-wise).
(shape: num_receive_antennas x num_points)
demode : function with prototype binary_word = demode(point)
Function that provide the binary word corresponding to a symbol vector.
return
------
LLR : 1D ndarray of floats
Log-Likelihood Ratio for each bit (same length as the return of decode)
"""
# Decode all pts
nb_pts = pts_list.shape[1]
bits = demode(pts_list.reshape(-1, order='F')).reshape(nb_pts, -1) # Set of binary words (one word by row)
# Prepare LLR computation
nb_bits = bits.shape[1]
LLR = empty(nb_bits)
# Loop for each bit
for k in range(nb_bits):
# Select pts based on the k-th bit in the corresponding word
pts0 = pts_list.compress(bits[:, k] == 0, axis=1)
pts1 = pts_list.compress(bits[:, k] == 1, axis=1)
# Compute the norms and add inf to handle empty set of points
norms0 = hstack((norm(y[:, None] - h.dot(pts0), axis=0) ** 2, inf))
norms1 = hstack((norm(y[:, None] - h.dot(pts1), axis=0) ** 2, inf))
# Compute LLR
LLR[k] = min(norms0) - min(norms1)
    return -LLR / (2 * noise_var)

# source: scikit-commpy-0.8.0/commpy/modulation.py (PyPI sdist)
from __future__ import division # Python 2 compatibility
import math
from fractions import Fraction
from inspect import getfullargspec
import numpy as np
from commpy.channels import MIMOFlatChannel
__all__ = ['link_performance', 'LinkModel', 'idd_decoder']
def link_performance(link_model, SNRs, send_max, err_min, send_chunk=None, code_rate=1):
"""
Estimate the BER performance of a link model with Monte Carlo simulation.
Equivalent to call link_model.link_performance(SNRs, send_max, err_min, send_chunk, code_rate).
Parameters
----------
link_model : linkModel object.
SNRs : 1D arraylike
Signal to Noise ratio in dB defined as :math:`SNR_{dB} = (E_b/N_0)_{dB} + 10 \log_{10}(R_cM_c)`
where :math:`Rc` is the code rate and :math:`Mc` the modulation rate.
send_max : int
Maximum number of bits send for each SNR.
err_min : int
link_performance send bits until it reach err_min errors (see also send_max).
send_chunk : int
Number of bits to be send at each iteration. This is also the frame length of the decoder if available
so it should be large enough regarding the code type.
*Default*: send_chunck = err_min
code_rate : float or Fraction in (0,1]
Rate of the used code.
*Default*: 1 i.e. no code.
Returns
-------
BERs : 1d ndarray
Estimated Bit Error Ratio corresponding to each SNRs
"""
if not send_chunk:
send_chunk = err_min
return link_model.link_performance(SNRs, send_max, err_min, send_chunk, code_rate)
class LinkModel:
"""
Construct a link model.
Parameters
----------
modulate : function with same prototype as Modem.modulate
channel : FlatChannel object
receive : function with prototype receive(y, H, constellation, noise_var) that return a binary array.
y : 1D ndarray
Received complex symbols (shape: num_receive_antennas x 1)
h : 2D ndarray
Channel Matrix (shape: num_receive_antennas x num_transmit_antennas)
constellation : 1D ndarray
noise_var : positive float
Noise variance
num_bits_symbols : int
constellation : array of float or complex
Es : float
Average energy per symbols.
*Default* Es=1.
decoder : function with prototype decoder(array) or decoder(y, H, constellation, noise_var, array) that return a
binary ndarray.
*Default* is no process.
rate : float or Fraction in (0,1]
Rate of the used code.
*Default*: 1 i.e. no code.
Attributes
----------
modulate : function with same prototype as Modem.modulate
channel : _FlatChannel object
receive : function with prototype receive(y, H, constellation, noise_var) that return a binary array.
y : 1D ndarray
Received complex symbols (shape: num_receive_antennas x 1)
h : 2D ndarray
Channel Matrix (shape: num_receive_antennas x num_transmit_antennas)
constellation : 1D ndarray
noise_var : positive float
Noise variance
num_bits_symbols : int
constellation : array of float or complex
Es : float
Average energy per symbols.
decoder : function with prototype decoder(binary array) that return a binary ndarray.
*Default* is no process.
rate : float
Code rate.
*Default* is 1.
"""
def __init__(self, modulate, channel, receive, num_bits_symbol, constellation, Es=1, decoder=None, rate=Fraction(1, 1)):
self.modulate = modulate
self.channel = channel
self.receive = receive
self.num_bits_symbol = num_bits_symbol
self.constellation = constellation
self.Es = Es
if type(rate) is float:
rate = Fraction(rate).limit_denominator(100)
self.rate = rate
if decoder is None:
self.decoder = lambda msg: msg
else:
self.decoder = decoder
self.full_simulation_results = None
def link_performance_full_metrics(self, SNRs, tx_max, err_min, send_chunk=None, code_rate: Fraction = Fraction(1, 1),
number_chunks_per_send=1, stop_on_surpass_error=True):
"""
Estimate the BER performance of a link model with Monte Carlo simulation.
Parameters
----------
SNRs : 1D arraylike
Signal to Noise ratio in dB defined as :math:`SNR_{dB} = (E_b/N_0)_{dB} + 10 \log_{10}(R_cM_c)`
where :math:`Rc` is the code rate and :math:`Mc` the modulation rate.
tx_max : int
Maximum number of transmissions for each SNR.
err_min : int
link_performance send bits until it reach err_min errors (see also send_max).
send_chunk : int
Number of bits to be send at each iteration. This is also the frame length of the decoder if available
so it should be large enough regarding the code type.
*Default*: send_chunck = err_min
code_rate : Fraction in (0,1]
Rate of the used code.
*Default*: 1 i.e. no code.
number_chunks_per_send : int
Number of chunks per transmission
stop_on_surpass_error : bool
Controls if during simulation of a SNR it should break and move to the next SNR when
the bit error is above the err_min parameter
Returns
-------
List[BERs, BEs, CEs, NCs]
BERs : 1d ndarray
Estimated Bit Error Ratio corresponding to each SNRs
BEs : 2d ndarray
Number of Estimated Bits with Error per transmission corresponding to each SNRs
CEs : 2d ndarray
Number of Estimated Chunks with Errors per transmission corresponding to each SNRs
NCs : 2d ndarray
Number of Chunks transmitted per transmission corresponding to each SNRs
"""
# Initialization
BERs = np.zeros_like(SNRs, dtype=float)
BEs = np.zeros((len(SNRs), tx_max), dtype=int) # Bit errors per tx
CEs = np.zeros((len(SNRs), tx_max), dtype=int) # Chunk Errors per tx
NCs = np.zeros((len(SNRs), tx_max), dtype=int) # Number of Chunks per tx
        # Set the chunk size and round it to a multiple of num_bits_symbol * nb_tx (taking the code rate into account) to avoid padding
if send_chunk is None:
send_chunk = err_min
if type(code_rate) is float:
code_rate = Fraction(code_rate).limit_denominator(100)
self.rate = code_rate
divider = (Fraction(1, self.num_bits_symbol * self.channel.nb_tx) * 1 / code_rate).denominator
send_chunk = max(divider, send_chunk // divider * divider)
receive_size = self.channel.nb_tx * self.num_bits_symbol
full_args_decoder = len(getfullargspec(self.decoder).args) > 1
# Computations
for id_SNR in range(len(SNRs)):
self.channel.set_SNR_dB(SNRs[id_SNR], float(code_rate), self.Es)
total_tx_send = 0
bit_err = np.zeros(tx_max, dtype=int)
chunk_loss = np.zeros(tx_max, dtype=int)
chunk_count = np.zeros(tx_max, dtype=int)
for id_tx in range(tx_max):
if stop_on_surpass_error and bit_err.sum() > err_min:
break
# Propagate some bits
msg = np.random.choice((0, 1), send_chunk * number_chunks_per_send)
symbs = self.modulate(msg)
channel_output = self.channel.propagate(symbs)
# Deals with MIMO channel
if isinstance(self.channel, MIMOFlatChannel):
nb_symb_vector = len(channel_output)
received_msg = np.empty(int(math.ceil(len(msg) / float(self.rate))))
for i in range(nb_symb_vector):
received_msg[receive_size * i:receive_size * (i + 1)] = \
self.receive(channel_output[i], self.channel.channel_gains[i],
self.constellation, self.channel.noise_std ** 2)
else:
received_msg = self.receive(channel_output, self.channel.channel_gains,
self.constellation, self.channel.noise_std ** 2)
# Count errors
if full_args_decoder:
decoded_bits = self.decoder(channel_output, self.channel.channel_gains,
self.constellation, self.channel.noise_std ** 2,
received_msg, self.channel.nb_tx * self.num_bits_symbol)
else:
decoded_bits = self.decoder(received_msg)
# calculate number of error frames
for i in range(number_chunks_per_send):
errors = np.bitwise_xor(msg[send_chunk * i:send_chunk * (i + 1)],
decoded_bits[send_chunk * i:send_chunk * (i + 1)].astype(int)).sum()
bit_err[id_tx] += errors
chunk_loss[id_tx] += 1 if errors > 0 else 0
chunk_count[id_tx] += number_chunks_per_send
total_tx_send += 1
BERs[id_SNR] = bit_err.sum() / (total_tx_send * send_chunk)
BEs[id_SNR] = bit_err
CEs[id_SNR] = np.where(bit_err > 0, 1, 0)
NCs[id_SNR] = chunk_count
if BEs[id_SNR].sum() < err_min:
break
self.full_simulation_results = BERs, BEs, CEs, NCs
return BERs, BEs, CEs, NCs
def link_performance(self, SNRs, send_max, err_min, send_chunk=None, code_rate=1):
"""
Estimate the BER performance of a link model with Monte Carlo simulation.
Parameters
----------
SNRs : 1D arraylike
Signal to Noise ratio in dB defined as :math:`SNR_{dB} = (E_b/N_0)_{dB} + 10 \log_{10}(R_c M_c)`
where :math:`R_c` is the code rate and :math:`M_c` the modulation rate.
send_max : int
Maximum number of bits send for each SNR.
err_min : int
link_performance sends bits until it reaches err_min errors (see also send_max).
send_chunk : int
Number of bits to be sent at each iteration. This is also the frame length of the decoder if available
so it should be large enough regarding the code type.
*Default*: send_chunk = err_min
code_rate : float or Fraction in (0,1]
Rate of the used code.
*Default*: 1 i.e. no code.
Returns
-------
BERs : 1d ndarray
Estimated Bit Error Ratio corresponding to each SNR
"""
# Initialization
BERs = np.zeros_like(SNRs, dtype=float)
# Set chunk size and round it to be a multiple of num_bits_symbol*nb_tx to avoid padding
if send_chunk is None:
send_chunk = err_min
if type(code_rate) is float:
code_rate = Fraction(code_rate).limit_denominator(100)
self.rate = code_rate
divider = (Fraction(1, self.num_bits_symbol * self.channel.nb_tx) * 1 / code_rate).denominator
send_chunk = max(divider, send_chunk // divider * divider)
receive_size = self.channel.nb_tx * self.num_bits_symbol
full_args_decoder = len(getfullargspec(self.decoder).args) > 1
# Computations
for id_SNR in range(len(SNRs)):
self.channel.set_SNR_dB(SNRs[id_SNR], float(code_rate), self.Es)
bit_send = 0
bit_err = 0
while bit_send < send_max and bit_err < err_min:
# Propagate some bits
msg = np.random.choice((0, 1), send_chunk)
symbs = self.modulate(msg)
channel_output = self.channel.propagate(symbs)
# Deals with MIMO channel
if isinstance(self.channel, MIMOFlatChannel):
nb_symb_vector = len(channel_output)
received_msg = np.empty(int(math.ceil(len(msg) / float(self.rate))))
for i in range(nb_symb_vector):
received_msg[receive_size * i:receive_size * (i + 1)] = \
self.receive(channel_output[i], self.channel.channel_gains[i],
self.constellation, self.channel.noise_std ** 2)
else:
received_msg = self.receive(channel_output, self.channel.channel_gains,
self.constellation, self.channel.noise_std ** 2)
# Count errors
if full_args_decoder:
decoded_bits = self.decoder(channel_output, self.channel.channel_gains,
self.constellation, self.channel.noise_std ** 2,
received_msg, self.channel.nb_tx * self.num_bits_symbol)
bit_err += np.bitwise_xor(msg, decoded_bits[:len(msg)].astype(int)).sum()
else:
bit_err += np.bitwise_xor(msg, self.decoder(received_msg)[:len(msg)].astype(int)).sum()
bit_send += send_chunk
BERs[id_SNR] = bit_err / bit_send
if bit_err < err_min:
break
return BERs
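# Hedged usage sketch (illustrative, not part of the library): estimate the BER of uncoded
# QPSK over a complex AWGN SISO channel. The QAMModem import and the numeric values are
# assumptions for illustration; the LinkModel argument order follows this module's
# constructor and may need adjusting for other versions.
#
#     import numpy as np
#     from commpy.channels import SISOFlatChannel
#     from commpy.modulation import QAMModem
#
#     qpsk = QAMModem(4)
#
#     def receiver(y, h, constellation, noise_var):
#         return qpsk.demodulate(y, 'hard')
#
#     channel = SISOFlatChannel(None, (1 + 0j, 0))  # complex channel, no fading
#     model = LinkModel(qpsk.modulate, channel, receiver,
#                       qpsk.num_bits_symbol, qpsk.constellation, qpsk.Es)
#     BERs = model.link_performance(np.arange(0, 10, 2), send_max=int(1e6), err_min=100)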
def idd_decoder(detector, decoder, decision, n_it):
"""
Produce a decoder function that models the specified MIMO iterative detection and decoding (IDD) process.
The returned function can be used as is to build a working LinkModel object.
Parameters
----------
detector : function with prototype detector(y, H, constellation, noise_var, a_priori) that returns an array of LLRs.
y : 1D ndarray
Received complex symbols (shape: num_receive_antennas x 1).
h : 2D ndarray
Channel Matrix (shape: num_receive_antennas x num_transmit_antennas).
constellation : 1D ndarray.
noise_var : positive float
Noise variance.
a_priori : 1D ndarray of floats
A priori as Log-Likelihood Ratios.
decoder : function with prototype decoder(LLRs) that returns an array of LLRs.
LLRs : 1D ndarray of floats
A priori as Log-Likelihood Ratios.
decision : function with prototype decision(LLRs) that returns a binary 1D array modeling the decision used to extract
the information bits from the LLRs array.
n_it : positive integer
Number of iterations of the IDD process.
Returns
-------
decode : function usable as-is to build a LinkModel object; it produces a bit array from the parameters
y : 1D ndarray
Received complex symbols (shape: num_receive_antennas x 1).
h : 2D ndarray
Channel Matrix (shape: num_receive_antennas x num_transmit_antennas).
constellation : 1D ndarray
noise_var : positive float
Noise variance.
bits_per_send : positive integer
Number of bits sent per symbol vector.
"""
def decode(y, h, constellation, noise_var, a_priori, bits_per_send):
a_priori_decoder = a_priori.copy()
nb_vect, nb_rx, nb_tx = h.shape
for iteration in range(n_it):
a_priori_detector = (decoder(a_priori_decoder) - a_priori_decoder)
for i in range(nb_vect):
a_priori_decoder[i * bits_per_send:(i + 1) * bits_per_send] = \
detector(y[i], h[i], constellation, noise_var,
a_priori_detector[i * bits_per_send:(i + 1) * bits_per_send])
a_priori_decoder -= a_priori_detector
return decision(a_priori_decoder + a_priori_detector)
return decode
from __future__ import division, print_function # Python 2 compatibility
from numpy import abs, sqrt, sum, zeros, identity, hstack, einsum, trace, kron, absolute, fromiter, array, exp, \
pi, cos
from numpy.random import randn, random, standard_normal
from scipy.linalg import sqrtm
__all__ = ['SISOFlatChannel', 'MIMOFlatChannel', 'bec', 'bsc', 'awgn']
class _FlatChannel(object):
def __init__(self):
self.noises = None
self.channel_gains = None
self.unnoisy_output = None
def generate_noises(self, dims):
"""
Generates white Gaussian noise with the right standard deviation and saves it.
Parameters
----------
dims : int or tuple of ints
Shape of the generated noise.
"""
# Check channel state
assert self.noise_std is not None, "Noise standard deviation must be set before propagation."
# Generate noises
if self.isComplex:
self.noises = (standard_normal(dims) + 1j * standard_normal(dims)) * self.noise_std * 0.5
else:
self.noises = standard_normal(dims) * self.noise_std
def set_SNR_dB(self, SNR_dB, code_rate: float = 1., Es=1):
"""
Sets the noise standard deviation based on the SNR expressed in dB.
Parameters
----------
SNR_dB : float
Signal to Noise Ratio expressed in dB.
code_rate : float in (0,1]
Rate of the used code.
Es : positive float
Average symbol energy
"""
self.noise_std = sqrt((self.isComplex + 1) * self.nb_tx * Es / (code_rate * 10 ** (SNR_dB / 10)))
def set_SNR_lin(self, SNR_lin, code_rate=1, Es=1):
"""
Sets the noise standard deviation based on the SNR expressed as a linear ratio.
Parameters
----------
SNR_lin : float
Signal to Noise Ratio as a linear ratio.
code_rate : float in (0,1]
Rate of the used code.
Es : positive float
Average symbol energy
"""
self.noise_std = sqrt((self.isComplex + 1) * self.nb_tx * Es / (code_rate * SNR_lin))
@property
def isComplex(self):
""" Read-only - True if the channel is complex, False if not."""
return self._isComplex
class SISOFlatChannel(_FlatChannel):
"""
Constructs a SISO channel with a flat fading.
The channel coefficients are normalized, i.e. the mean power of the channel gain is 1.
Parameters
----------
noise_std : float, optional
Noise standard deviation.
*Default* value is None; the value must then be set later.
fading_param : tuple of 2 floats, optional
Parameters of the fading (see attribute for details).
*Default* value is (1,0) i.e. no fading.
Attributes
----------
fading_param : tuple of 2 floats
Parameters of the fading. The complete tuple must be set each time.
Raises ValueError when set with values that would lead to a non-normalized channel.
* fading_param[0] refers to the mean of the channel gain (Line Of Sight component).
* fading_param[1] refers to the variance of the channel gain (Non Line Of Sight component).
Classical fadings:
* (1, 0): no fading.
* (0, 1): Rayleigh fading.
* Others: Rician fading.
noise_std : float
Noise standard deviation. None if the value has not been set yet.
isComplex : Boolean, Read-only
True if the channel is complex, False if not.
The value is set together with fading_param based on the type of fading_param[0].
k_factor : positive float, Read-only
Fading k-factor, the power ratio between LOS and NLOS.
nb_tx : int = 1, Read-only
Number of Tx antennas.
nb_rx : int = 1, Read-only
Number of Rx antennas.
noises : 1D ndarray
Last noise generated. None if no noise has been generated yet.
channel_gains : 1D ndarray
Last channel gains generated. None if no channel gains have been generated yet.
unnoisy_output : 1D ndarray
Last transmitted message without noise. None if no message has been propagated yet.
Raises
------
ValueError
If the fading parameters would lead to a non-normalized channel.
The condition is :math:`|param[1]| + |param[0]|^2 = 1`
"""
@property
def nb_tx(self):
""" Read-only - Number of Tx antennas, set to 1 for SISO channel."""
return 1
@property
def nb_rx(self):
""" Read-only - Number of Rx antennas, set to 1 for SISO channel."""
return 1
def __init__(self, noise_std=None, fading_param=(1, 0)):
super(SISOFlatChannel, self).__init__()
self.noise_std = noise_std
self.fading_param = fading_param
def propagate(self, msg):
"""
Propagates a message through the channel.
Parameters
----------
msg : 1D ndarray
Message to propagate.
Returns
-------
channel_output : 1D ndarray
Message after application of the fading and addition of noise.
Raises
------
TypeError
If the input message is complex but the channel is real.
AssertionError
If the noise standard deviation has not been set yet.
"""
if isinstance(msg[0], complex) and not self.isComplex:
raise TypeError('Trying to propagate a complex message in a real channel.')
nb_symb = len(msg)
# Generate noise
self.generate_noises(nb_symb)
# Generate channel
self.channel_gains = self.fading_param[0]
if self.isComplex:
self.channel_gains += (standard_normal(nb_symb) + 1j * standard_normal(nb_symb)) * sqrt(0.5 * self.fading_param[1])
else:
self.channel_gains += standard_normal(nb_symb) * sqrt(self.fading_param[1])
# Generate outputs
self.unnoisy_output = self.channel_gains * msg
return self.unnoisy_output + self.noises
@property
def fading_param(self):
""" Parameters of the fading (see class attribute for details). """
return self._fading_param
@fading_param.setter
def fading_param(self, fading_param):
if fading_param[1] + absolute(fading_param[0]) ** 2 != 1:
raise ValueError("With this parameters, the channel would add or remove energy.")
self._fading_param = fading_param
self._isComplex = isinstance(fading_param[0], complex)
@property
def k_factor(self):
""" Read-only - Fading k-factor, the power ratio between LOS and NLOS """
return absolute(self.fading_param[0]) ** 2 / absolute(self.fading_param[1])
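# Hedged usage sketch (illustrative values, not part of the library): build SISO flat
# channels, set the SNR and propagate unit-energy QPSK-like symbols.
#
#     import numpy as np
#
#     rayleigh = SISOFlatChannel(fading_param=(0j, 1))           # complex Rayleigh fading
#     rician = SISOFlatChannel(fading_param=(0.5 + 0.5j, 0.5))   # Rician, |mean|^2 + var = 1
#     rician.set_SNR_dB(10)
#     symbols = np.random.choice([1 + 1j, 1 - 1j, -1 + 1j, -1 - 1j], 100) / np.sqrt(2)
#     received = rician.propagate(symbols)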
class MIMOFlatChannel(_FlatChannel):
"""
Constructs a MIMO channel with a flat fading based on the Kronecker model.
The channel coefficients are normalized, i.e. the mean power of the channel gains is 1.
Parameters
----------
nb_tx : int >= 1
Number of Tx antennas.
nb_rx : int >= 1
Number of Rx antennas.
noise_std : float, optional
Noise standard deviation.
*Default* value is None; the value must then be set later.
fading_param : tuple of 3 floats, optional
Parameters of the fading. The complete tuple must be set each time.
*Default* value is (zeros((nb_rx, nb_tx)), identity(nb_tx), identity(nb_rx)) i.e. Rayleigh fading.
Attributes
----------
fading_param : tuple of 3 2D ndarray
Parameters of the fading.
Raises ValueError when set with values that would lead to a non-normalized channel.
* fading_param[0] refers to the mean of the channel gain (Line Of Sight component).
* fading_param[1] refers to the transmit-side spatial correlation matrix of the channel.
* fading_param[2] refers to the receive-side spatial correlation matrix of the channel.
Classical fadings:
* (zeros((nb_rx, nb_tx)), identity(nb_tx), identity(nb_rx)): Uncorrelated Rayleigh fading.
noise_std : float
Noise standard deviation. None if the value has not been set yet.
isComplex : Boolean, Read-only
True if the channel is complex, False if not.
The value is set together with fading_param based on the type of fading_param[0].
k_factor : positive float, Read-only
Fading k-factor, the power ratio between LOS and NLOS.
nb_tx : int
Number of Tx antennas.
nb_rx : int
Number of Rx antennas.
noises : 2D ndarray
Last noise generated. None if no noise has been generated yet.
noises[i] is the noise vector of size nb_rx for the i-th message vector.
channel_gains : 2D ndarray
Last channel gains generated. None if no channel gains have been generated yet.
channel_gains[i] is the channel matrix of size (nb_rx x nb_tx) for the i-th message vector.
unnoisy_output : 1D ndarray
Last transmitted message without noise. None if no message has been propagated yet.
unnoisy_output[i] is the transmitted message without noise of size nb_rx for the i-th message vector.
Raises
------
ValueError
If the fading parameters would lead to a non-normalized channel.
The condition is :math:`NLOS + LOS = nb_{tx} * nb_{rx}` where
* :math:`NLOS = tr(param[1]^T \otimes param[2])`
* :math:`LOS = \sum|param[0]|^2`
"""
def __init__(self, nb_tx, nb_rx, noise_std=None, fading_param=None):
super(MIMOFlatChannel, self).__init__()
self.nb_tx = nb_tx
self.nb_rx = nb_rx
self.noise_std = noise_std
if fading_param is None:
self.fading_param = (zeros((nb_rx, nb_tx)), identity(nb_tx), identity(nb_rx))
else:
self.fading_param = fading_param
def propagate(self, msg):
"""
Propagates a message through the channel.
Parameters
----------
msg : 1D ndarray
Message to propagate.
Returns
-------
channel_output : 2D ndarray
Message after application of the fading and addition of noise.
channel_output[i] is the i-th received symbol vector of size nb_rx.
Raises
------
TypeError
If the input message is complex but the channel is real.
AssertionError
If the noise standard deviation noise_std has not been set yet.
"""
if isinstance(msg[0], complex) and not self.isComplex:
raise TypeError('Trying to propagate a complex message in a real channel.')
(nb_vect, mod) = divmod(len(msg), self.nb_tx)
# Add padding if required
if mod:
msg = hstack((msg, zeros(self.nb_tx - mod)))
nb_vect += 1
# Reshape msg as vectors sent on each antennas
msg = msg.reshape(nb_vect, -1)
# Generate noises
self.generate_noises((nb_vect, self.nb_rx))
# Generate channel uncorrelated channel
dims = (nb_vect, self.nb_rx, self.nb_tx)
if self.isComplex:
self.channel_gains = (standard_normal(dims) + 1j * standard_normal(dims)) * sqrt(0.5)
else:
self.channel_gains = standard_normal(dims)
# Add correlation and mean
einsum('ij,ajk,lk->ail', sqrtm(self.fading_param[2]), self.channel_gains, sqrtm(self.fading_param[1]),
out=self.channel_gains, optimize='greedy')
self.channel_gains += self.fading_param[0]
# Generate outputs
self.unnoisy_output = einsum('ijk,ik->ij', self.channel_gains, msg)
return self.unnoisy_output + self.noises
def _update_corr_KBSM(self, betat, betar):
"""
Update the correlation parameters to follow the KBSM-BD-AA.
Parameters
----------
betat : positive float
Constant for the transmitter.
betar : positive float
Constant for the receiver.
Raises
------
ValueError
If betat or betar are negative.
"""
if betar < 0 or betat < 0:
raise ValueError("beta must be positif")
# Create Er and Et
Er = array([[exp(-betar * abs(m - n)) for m in range(self.nb_rx)] for n in range(self.nb_rx)])
Et = array([[exp(-betat * abs(m - n)) for m in range(self.nb_tx)] for n in range(self.nb_tx)])
# Updating of correlation matrices
self.fading_param = self.fading_param[0], self.fading_param[1] * Et, self.fading_param[2] * Er
def specular_compo(self, thetat, dt, thetar, dr):
"""
Calculate the specular components of the channel gain as in [1].
ref: [1] Lee M. Garth, Peter J. Smith, Mansoor Shafi, "Exact Symbol Error Probabilities for SVD Transmission
of BPSK Data over Fading Channels", IEEE 2005.
Parameters
----------
thetat : float
the angle of departure.
dt : positive float
the antenna spacing in wavelengths at the departure side.
thetar : float
the angle of arrival.
dr : positive float
the antenna spacing in wavelengths at the arrival side.
Returns
-------
H : 2D ndarray of shape (nb_rx, nb_tx)
the specular components of the channel gains, to be used as the mean in Rician fading.
Raises
------
ValueError
If dt or dr are negative.
"""
if dr < 0 or dt < 0:
raise ValueError("the distance must be positive ")
H = zeros((self.nb_rx, self.nb_tx), dtype=complex)
for n in range(self.nb_rx):
for m in range(self.nb_tx):
H[n, m] = exp(1j * 2 * pi * (n * dr * cos(thetar) + m * dt * cos(thetat)))
return H
@property
def fading_param(self):
""" Parameters of the fading (see class attribute for details). """
return self._fading_param
@fading_param.setter
def fading_param(self, fading_param):
NLOS_gain = trace(kron(fading_param[1].T, fading_param[2]))
LOS_gain = einsum('ij,ij->', absolute(fading_param[0]), absolute(fading_param[0]))
if absolute(NLOS_gain + LOS_gain - self.nb_tx * self.nb_rx) > 1e-3:
raise ValueError("With this parameters, the channel would add or remove energy.")
self._fading_param = fading_param
self._isComplex = isinstance(fading_param[0][0, 0], complex)
@property
def k_factor(self):
""" Read-only - Fading k-factor, the power ratio between LOS and NLOS """
NLOS_gain = trace(kron(self.fading_param[1].T, self.fading_param[2]))
LOS_gain = einsum('ij,ij->', absolute(self.fading_param[0]), absolute(self.fading_param[0]))
return LOS_gain / NLOS_gain
def uncorr_rayleigh_fading(self, dtype):
""" Set the fading parameters to an uncorrelated Rayleigh channel.
Parameters
----------
dtype : dtype
Type of the channel
"""
self.fading_param = zeros((self.nb_rx, self.nb_tx), dtype), identity(self.nb_tx), identity(self.nb_rx)
def expo_corr_rayleigh_fading(self, t, r, betat=0, betar=0):
""" Set the fading parameters to a complex correlated Rayleigh channel following the exponential model [1].
A KBSM-BD-AA can be used as in [2] to improve the model.
ref: [1] S. L. Loyka, "Channel capacity of MIMO architecture using the exponential correlation matrix", IEEE
Commun. Lett., vol. 5, n. 9, p. 369-371, Sept. 2001.
[2] S. Wu, C. Wang, E. M. Aggoune, and M. M. Alwakeel, "A novel Kronecker-based stochastic model for massive
MIMO channels", in 2015 IEEE/CIC International Conference on Communications in China (ICCC), 2015, p. 1-6
Parameters
----------
t : complex with abs(t) = 1
Correlation coefficient for the transmitter.
r : complex with abs(r) = 1
Correlation coefficient for the receiver.
betat : positive float
Constant for the transmitter.
*Default* = 0 i.e. classic model
betar : positive float
Constant for the receiver.
*Default* = 0 i.e. classic model
Raises
------
ValueError
If abs(t) != 1 or abs(r) != 1
ValueError
If betat or betar are negative.
"""
# Check inputs
if abs(t) - 1 > 1e-4:
raise ValueError('abs(t) must be one.')
if abs(r) - 1 > 1e-4:
raise ValueError('abs(r) must be one.')
# Construct the exponent matrix
expo_tx = fromiter((j - i for i in range(self.nb_tx) for j in range(self.nb_tx)), int, self.nb_tx ** 2)
expo_rx = fromiter((j - i for i in range(self.nb_rx) for j in range(self.nb_rx)), int, self.nb_rx ** 2)
# Reshape
expo_tx = expo_tx.reshape(self.nb_tx, self.nb_tx)
expo_rx = expo_rx.reshape(self.nb_rx, self.nb_rx)
# Set fading
self.fading_param = zeros((self.nb_rx, self.nb_tx), complex), t ** expo_tx, r ** expo_rx
# Update Rr and Rt
self._update_corr_KBSM(betat, betar)
def uncorr_rician_fading(self, mean, k_factor):
""" Set the fading parameters to an uncorrelated rician channel.
mean will be scaled to fit the required k-factor.
Parameters
----------
mean : ndarray (shape: nb_rx x nb_tx)
Mean of the channel gain.
k_factor : positive float
Requested k-factor (the power ratio between LOS and NLOS).
"""
nb_antennas = mean.size
NLOS_gain = nb_antennas / (k_factor + 1)
mean = mean * sqrt(k_factor * NLOS_gain / einsum('ij,ij->', absolute(mean), absolute(mean)))
self.fading_param = mean, identity(self.nb_tx) * NLOS_gain / nb_antennas, identity(self.nb_rx)
def expo_corr_rician_fading(self, mean, k_factor, t, r, betat=0, betar=0):
""" Set the fading parameters to a complex correlated rician channel following the exponential model [1].
A KBSM-BD-AA can be used as in [2] to improve the model.
ref: [1] S. L. Loyka, "Channel capacity of MIMO architecture using the exponential correlation matrix", IEEE
Commun. Lett., vol. 5, n. 9, p. 369-371, Sept. 2001.
[2] S. Wu, C. Wang, E. M. Aggoune, and M. M. Alwakeel, "A novel Kronecker-based stochastic model for massive
MIMO channels", in 2015 IEEE/CIC International Conference on Communications in China (ICCC), 2015, p. 1-6
mean and correlation matrices will be scaled to fit the required k-factor. The k-factor is also preserved if
betas are provided.
Parameters
----------
mean : ndarray (shape: nb_rx x nb_tx)
Mean of the channel gain.
k_factor : positive float
Requested k-factor (the power ratio between LOS and NLOS).
t : complex with abs(t) = 1
Correlation coefficient for the transmitter.
r : complex with abs(r) = 1
Correlation coefficient for the receiver.
betat : positive float
Constant for the transmitter.
*Default* = 0 i.e. classic model
betar : positive float
Constant for the receiver.
*Default* = 0 i.e. classic model
Raises
------
ValueError
If abs(t) != 1 or abs(r) != 1
ValueError
If betat or betar are negative.
"""
# Check inputs
if abs(t) - 1 > 1e-4:
raise ValueError('abs(t) must be one.')
if abs(r) - 1 > 1e-4:
raise ValueError('abs(r) must be one.')
# Scaling
nb_antennas = mean.size
NLOS_gain = nb_antennas / (k_factor + 1)
mean = mean * sqrt(k_factor * NLOS_gain / einsum('ij,ij->', absolute(mean), absolute(mean)))
# Construct the exponent matrix
expo_tx = fromiter((j - i for i in range(self.nb_tx) for j in range(self.nb_tx)), int, self.nb_tx ** 2)
expo_rx = fromiter((j - i for i in range(self.nb_rx) for j in range(self.nb_rx)), int, self.nb_rx ** 2)
# Reshape
expo_tx = expo_tx.reshape(self.nb_tx, self.nb_tx)
expo_rx = expo_rx.reshape(self.nb_rx, self.nb_rx)
# Set fading
self.fading_param = mean, t ** expo_tx * NLOS_gain / nb_antennas, r ** expo_rx
# Update Rr and Rt
self._update_corr_KBSM(betat, betar)
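# Hedged usage sketch (illustrative values, not part of the library): a 2x2 uncorrelated
# Rayleigh MIMO channel.
#
#     import numpy as np
#
#     mimo = MIMOFlatChannel(2, 2)          # defaults to real uncorrelated Rayleigh fading
#     mimo.uncorr_rayleigh_fading(complex)  # switch to complex Rayleigh fading
#     mimo.set_SNR_dB(15)
#     symbols = np.random.choice([1 + 1j, 1 - 1j, -1 + 1j, -1 - 1j], 64) / np.sqrt(2)
#     received = mimo.propagate(symbols)    # shape (32, 2): one received vector per row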
def bec(input_bits, p_e):
"""
Binary Erasure Channel.
Parameters
----------
input_bits : 1D ndarray containing {0, 1}
Input array of bits to the channel.
p_e : float in [0, 1]
Erasure probability of the channel.
Returns
-------
output_bits : 1D ndarray containing {0, 1}
Output bits from the channel.
"""
output_bits = input_bits.copy()
output_bits[random(len(output_bits)) <= p_e] = -1
return output_bits
def bsc(input_bits, p_t):
"""
Binary Symmetric Channel.
Parameters
----------
input_bits : 1D ndarray containing {0, 1}
Input array of bits to the channel.
p_t : float in [0, 1]
Transition/Error probability of the channel.
Returns
-------
output_bits : 1D ndarray containing {0, 1}
Output bits from the channel.
"""
output_bits = input_bits.copy()
flip_locs = (random(len(output_bits)) <= p_t)
output_bits[flip_locs] = 1 ^ output_bits[flip_locs]
return output_bits
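# Hedged usage sketch (illustrative values): pass a random bit vector through the binary
# erasure and binary symmetric channels defined above.
#
#     import numpy as np
#
#     bits = np.random.randint(0, 2, 20)
#     erased = bec(bits, 0.1)    # erased positions are marked with -1
#     flipped = bsc(bits, 0.1)   # on average, 10% of the bits are flipped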
# Kept for backward compatibility. Use SISOFlatChannel or MIMOFlatChannel for new programs.
def awgn(input_signal, snr_dB, rate=1.0):
"""
Additive White Gaussian Noise (AWGN) Channel.
Parameters
----------
input_signal : 1D ndarray of floats
Input signal to the channel.
snr_dB : float
Output SNR required in dB.
rate : float
Rate of the FEC code used, if any; otherwise 1.
Returns
-------
output_signal : 1D ndarray of floats
Output signal from the channel with the specified SNR.
"""
avg_energy = sum(abs(input_signal) * abs(input_signal)) / len(input_signal)
snr_linear = 10 ** (snr_dB / 10.0)
noise_variance = avg_energy / (2 * rate * snr_linear)
if isinstance(input_signal[0], complex):
noise = (sqrt(noise_variance) * randn(len(input_signal))) + (sqrt(noise_variance) * randn(len(input_signal))*1j)
else:
noise = sqrt(2 * noise_variance) * randn(len(input_signal))
output_signal = input_signal + noise
return output_signal
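# Hedged usage sketch (illustrative values): add white Gaussian noise to a BPSK signal so
# that the output SNR is 5 dB.
#
#     import numpy as np
#
#     tx = 2. * np.random.randint(0, 2, 1000) - 1.
#     rx = awgn(tx, 5)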
__all__ = ['pnsequence', 'zcsequence']
import numpy as np
from numpy import empty, exp, pi, arange, int8, fromiter, sum
def pnsequence(pn_order, pn_seed, pn_mask, seq_length):
"""
Generate a PN (Pseudo-Noise) sequence using a Linear Feedback Shift Register (LFSR).
Seed and mask are ordered so that:
- seed[-1] will be the first output
- the new bit, computed as sum(shift_register & mask) % 2, is inserted in shift_register[0]
Parameters
----------
pn_order : int
Number of delay elements used in the LFSR.
pn_seed : iterable providing 0's and 1's
Seed for the initialization of the LFSR delay elements.
The length of this string must be equal to 'pn_order'.
pn_mask : iterable providing 0's and 1's
Mask representing which delay elements contribute to the feedback
in the LFSR. The length of this string must be equal to 'pn_order'.
seq_length : int
Length of the PN sequence to be generated. Usually (2^pn_order - 1)
Returns
-------
pnseq : 1D ndarray of ints
PN sequence generated.
Raises
------
ValueError
If pn_order is not equal to the length of the strings pn_seed and pn_mask.
"""
# Check if pn_order is equal to the length of the strings 'pn_seed' and 'pn_mask'
if len(pn_seed) != pn_order:
raise ValueError('pn_seed has not the same length as pn_order')
if len(pn_mask) != pn_order:
raise ValueError('pn_mask has not the same length as pn_order')
# Pre-allocate memory for output
pnseq = empty(seq_length, int8)
# Convert input as array
sr = fromiter(pn_seed, int8, pn_order)
mask = fromiter(pn_mask, int8, pn_order)
for i in range(seq_length):
pnseq[i] = sr[-1]
new_bit = sum(sr & mask) % 2
sr[1:] = sr[:-1]
sr[0] = new_bit
return pnseq
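# Hedged usage sketch (illustrative polynomial): a length-7 m-sequence from a 3-stage LFSR.
# The seed '001' and feedback mask '011' below are assumptions chosen to give a
# maximal-length sequence with this implementation.
#
#     pn = pnsequence(3, '001', '011', 7)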
def zcsequence(u, seq_length, q=0):
"""
Generate a Zadoff-Chu (ZC) sequence.
Parameters
----------
u : int
Root index of the ZC sequence: u > 0.
seq_length : int
Length of the sequence to be generated. Usually a prime number:
u < seq_length, with greatest common divisor gcd(u, seq_length) = 1.
q : int
Cyclic shift of the sequence (default 0).
Returns
-------
zcseq : 1D ndarray of complex floats
ZC sequence generated.
"""
for el in [u,seq_length,q]:
if not float(el).is_integer():
raise ValueError('{} is not an integer'.format(el))
if u<=0:
raise ValueError('u is not strictly positive')
if u>=seq_length:
raise ValueError('u is not strictly smaller than seq_length')
if np.gcd(u,seq_length)!=1:
raise ValueError('the greatest common divisor of u and seq_length is not 1')
cf = seq_length%2
n = np.arange(seq_length)
zcseq = np.exp( -1j * np.pi * u * n * (n+cf+2.*q) / seq_length)
return zcseq
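# Hedged usage sketch (illustrative values): a root-25, length-63 Zadoff-Chu sequence has
# constant amplitude and a flat magnitude spectrum.
#
#     import numpy as np
#
#     zc = zcsequence(25, 63)
#     np.allclose(np.abs(zc), 1)                         # True
#     np.allclose(np.abs(np.fft.fft(zc)), np.sqrt(63))   # True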
import functools
import numpy as np
__all__ = ['dec2bitarray', 'decimal2bitarray', 'bitarray2dec', 'hamming_dist', 'euclid_dist', 'upsample',
'signal_power']
vectorized_binary_repr = np.vectorize(np.binary_repr)
def dec2bitarray(in_number, bit_width):
"""
Converts a positive integer or an array-like of positive integers to a NumPy array of the specified size containing
bits (0 and 1).
Parameters
----------
in_number : int or array-like of int
Positive integer to be converted to a bit array.
bit_width : int
Size of the output bit array.
Returns
-------
bitarray : 1D ndarray of numpy.int8
Array containing the binary representation of all the input decimal(s).
"""
if isinstance(in_number, (np.integer, int)):
return decimal2bitarray(in_number, bit_width).copy()
result = np.zeros(bit_width * len(in_number), np.int8)
for pox, number in enumerate(in_number):
result[pox * bit_width:(pox + 1) * bit_width] = decimal2bitarray(number, bit_width).copy()
return result
@functools.lru_cache(maxsize=128, typed=False)
def decimal2bitarray(number, bit_width):
"""
Converts a positive integer to a NumPy array of the specified size containing bits (0 and 1). This version is slightly
quicker than dec2bitarray but only works for one integer.
Parameters
----------
number : int
Positive integer to be converted to a bit array.
bit_width : int
Size of the output bit array.
Returns
-------
bitarray : 1D ndarray of numpy.int8
Array containing the binary representation of all the input decimal(s).
"""
result = np.zeros(bit_width, np.int8)
i = 1
pox = 0
while i <= number:
if i & number:
result[bit_width - pox - 1] = 1
i <<= 1
pox += 1
return result
def bitarray2dec(in_bitarray):
"""
Converts an input NumPy array of bits (0 and 1) to a decimal integer.
Parameters
----------
in_bitarray : 1D ndarray of ints
Input NumPy array of bits.
Returns
-------
number : int
Integer representation of input bit array.
"""
number = 0
for i in range(len(in_bitarray)):
number = number + in_bitarray[i] * pow(2, len(in_bitarray) - 1 - i)
return number
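# Hedged usage sketch (illustrative values): dec2bitarray and bitarray2dec are inverses for
# a sufficient bit width.
#
#     dec2bitarray(13, 6)                 # array([0, 0, 1, 1, 0, 1], dtype=int8)
#     bitarray2dec(dec2bitarray(13, 6))   # 13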
def hamming_dist(in_bitarray_1, in_bitarray_2):
"""
Computes the Hamming distance between two NumPy arrays of bits (0 and 1).
Parameters
----------
in_bit_array_1 : 1D ndarray of ints
NumPy array of bits.
in_bit_array_2 : 1D ndarray of ints
NumPy array of bits.
Returns
-------
distance : int
Hamming distance between input bit arrays.
"""
distance = np.bitwise_xor(in_bitarray_1, in_bitarray_2).sum()
return distance
def euclid_dist(in_array1, in_array2):
"""
Computes the squared euclidean distance between two NumPy arrays
Parameters
----------
in_array1 : 1D ndarray of floats
NumPy array of real values.
in_array2 : 1D ndarray of floats
NumPy array of real values.
Returns
-------
distance : float
Squared Euclidean distance between two input arrays.
"""
distance = ((in_array1 - in_array2) * (in_array1 - in_array2)).sum()
return distance
def upsample(x, n):
"""
Upsample the input array by a factor of n
Adds n-1 zeros between consecutive samples of x
Parameters
----------
x : 1D ndarray
Input array.
n : int
Upsampling factor
Returns
-------
y : 1D ndarray
Output upsampled array.
"""
y = np.empty(len(x) * n, dtype=complex)
y[0::n] = x
zero_array = np.zeros(len(x), dtype=complex)
for i in range(1, n):
y[i::n] = zero_array
return y
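# Hedged usage sketch (illustrative values, assuming numpy is imported as np as in this
# module): upsampling by 3 inserts two zeros between consecutive samples.
#
#     upsample(np.array([1, 2, 3]), 3)
#     # -> array([1.+0.j, 0.+0.j, 0.+0.j, 2.+0.j, 0.+0.j, 0.+0.j, 3.+0.j, 0.+0.j, 0.+0.j])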
def signal_power(signal):
"""
Compute the power of a discrete time signal.
Parameters
----------
signal : 1D ndarray
Input signal.
Returns
-------
P : float
Power of the input signal.
"""
@np.vectorize
def square_abs(s):
return abs(s) ** 2
P = np.mean(square_abs(signal))
return P
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as splg
__all__ = ['build_matrix', 'get_ldpc_code_params', 'ldpc_bp_decode', 'write_ldpc_params',
'triang_ldpc_systematic_encode']
_llr_max = 500
def build_matrix(ldpc_code_params):
"""
Build the parity check and generator matrices from parameters dictionary and add the result in this dictionary.
Generator matrix is valid only for triangular systematic LDPC codes.
Parameters
----------
ldpc_code_params: dictionary that at least contains these parameters
Parameters of the LDPC code:
n_vnodes (int) - number of variable nodes.
n_cnodes (int) - number of check nodes.
max_cnode_deg (int) - maximal degree of a check node.
cnode_adj_list (1D-ndarray of ints) - flatten array so that
cnode_adj_list.reshape((n_cnodes, max_cnode_deg)) gives for each check node the adjacent variable nodes.
cnode_deg_list (1D-ndarray of ints) - degree of each check node.
Add
---
to ldpc_code_param:
parity_check_matrix (CSC sparse matrix of int8) - parity check matrix.
generator_matrix (CSR sparse matrix) - generator matrix of the code.
"""
n_cnodes = ldpc_code_params['n_cnodes']
cnode_deg_list = ldpc_code_params['cnode_deg_list']
cnode_adj_list = ldpc_code_params['cnode_adj_list'].reshape((n_cnodes, ldpc_code_params['max_cnode_deg']))
parity_check_matrix = sp.lil_matrix((n_cnodes, ldpc_code_params['n_vnodes']), dtype=np.int8)
for cnode_idx in range(n_cnodes):
parity_check_matrix[cnode_idx, cnode_adj_list[cnode_idx, :cnode_deg_list[cnode_idx]]] = 1
parity_check_matrix = parity_check_matrix.tocsc()
systematic_part = parity_check_matrix[:, -n_cnodes:]
parity_part = parity_check_matrix[:, :-n_cnodes]
ldpc_code_params['parity_check_matrix'] = parity_check_matrix
ldpc_code_params['generator_matrix'] = splg.inv(systematic_part).dot(parity_part).tocsr()
def get_ldpc_code_params(ldpc_design_filename, compute_matrix=False):
"""
Extract parameters from an LDPC code design file and produce a parity check matrix if requested.
The file is structured as follows (examples are available in designs/ldpc/):
n_vnode n_cnode
max_vnode_deg max_cnode_deg
List of the degree of each vnode
List of the degree of each cnode
For each vnode (line by line, separated by '\t'): index of the connected cnodes
For each cnode (line by line, separated by '\t'): index of the connected vnodes
Parameters
----------
ldpc_design_filename : string
Filename of the LDPC code design file.
compute_matrix : boolean
Specify if the parity check matrix must be computed.
*Default* is False.
Returns
-------
ldpc_code_params : dictionary that at least contains these parameters
Parameters of the LDPC code:
n_vnodes (int) - number of variable nodes.
n_cnodes (int) - number of check nodes.
max_vnode_deg (int) - maximal degree of a variable node.
max_cnode_deg (int) - maximal degree of a check node.
vnode_adj_list (1D-ndarray of ints) - flatten array so that
vnode_adj_list.reshape((n_vnodes, max_vnode_deg)) gives for each variable node the adjacent check nodes.
cnode_adj_list (1D-ndarray of ints) - flatten array so that
cnode_adj_list.reshape((n_cnodes, max_cnode_deg)) gives for each check node the adjacent variable nodes.
vnode_cnode_map (1D-ndarray of ints) - flatten array providing the mapping between vnode and cnode indexes.
cnode_vnode_map (1D-ndarray of ints) - flatten array providing the mapping between vnode and cnode indexes.
vnode_deg_list (1D-ndarray of ints) - degree of each variable node.
cnode_deg_list (1D-ndarray of ints) - degree of each check node.
parity_check_matrix (CSC sparse matrix of int8) - parity check matrix if asked.
"""
with open(ldpc_design_filename) as ldpc_design_file:
[n_vnodes, n_cnodes] = [int(x) for x in ldpc_design_file.readline().split(' ')]
[max_vnode_deg, max_cnode_deg] = [int(x) for x in ldpc_design_file.readline().split(' ')]
vnode_deg_list = np.array([int(x) for x in ldpc_design_file.readline().split(' ')[:-1]], np.int32)
cnode_deg_list = np.array([int(x) for x in ldpc_design_file.readline().split(' ')[:-1]], np.int32)
cnode_adj_list = -np.ones([n_cnodes, max_cnode_deg], int)
vnode_adj_list = -np.ones([n_vnodes, max_vnode_deg], int)
for vnode_idx in range(n_vnodes):
vnode_adj_list[vnode_idx, 0:vnode_deg_list[vnode_idx]] = \
np.array([int(x)-1 for x in ldpc_design_file.readline().split('\t')])
for cnode_idx in range(n_cnodes):
cnode_adj_list[cnode_idx, 0:cnode_deg_list[cnode_idx]] = \
np.array([int(x)-1 for x in ldpc_design_file.readline().split('\t')])
cnode_vnode_map = -np.ones([n_cnodes, max_cnode_deg], int)
vnode_cnode_map = -np.ones([n_vnodes, max_vnode_deg], int)
for cnode in range(n_cnodes):
for i, vnode in enumerate(cnode_adj_list[cnode, 0:cnode_deg_list[cnode]]):
cnode_vnode_map[cnode, i] = np.where(vnode_adj_list[vnode, :] == cnode)[0]
for vnode in range(n_vnodes):
for i, cnode in enumerate(vnode_adj_list[vnode, 0:vnode_deg_list[vnode]]):
vnode_cnode_map[vnode, i] = np.where(cnode_adj_list[cnode, :] == vnode)[0]
cnode_adj_list_1d = cnode_adj_list.flatten().astype(np.int32)
vnode_adj_list_1d = vnode_adj_list.flatten().astype(np.int32)
cnode_vnode_map_1d = cnode_vnode_map.flatten().astype(np.int32)
vnode_cnode_map_1d = vnode_cnode_map.flatten().astype(np.int32)
ldpc_code_params = {}
ldpc_code_params['n_vnodes'] = n_vnodes
ldpc_code_params['n_cnodes'] = n_cnodes
ldpc_code_params['max_cnode_deg'] = max_cnode_deg
ldpc_code_params['max_vnode_deg'] = max_vnode_deg
ldpc_code_params['cnode_adj_list'] = cnode_adj_list_1d
ldpc_code_params['cnode_vnode_map'] = cnode_vnode_map_1d
ldpc_code_params['vnode_adj_list'] = vnode_adj_list_1d
ldpc_code_params['vnode_cnode_map'] = vnode_cnode_map_1d
ldpc_code_params['cnode_deg_list'] = cnode_deg_list
ldpc_code_params['vnode_deg_list'] = vnode_deg_list
if compute_matrix:
build_matrix(ldpc_code_params)
return ldpc_code_params
def ldpc_bp_decode(llr_vec, ldpc_code_params, decoder_algorithm, n_iters):
"""
LDPC Decoder using Belief Propagation (BP). If several blocks are provided, they are all decoded at once.
Parameters
----------
llr_vec : 1D array of float with a length multiple of block length.
Received codeword LLR values from the channel. They will be clipped in [-500, 500].
ldpc_code_params : dictionary that at least contains these parameters
Parameters of the LDPC code as provided by `get_ldpc_code_params`:
n_vnodes (int) - number of variable nodes.
n_cnodes (int) - number of check nodes.
max_vnode_deg (int) - maximal degree of a variable node.
max_cnode_deg (int) - maximal degree of a check node.
vnode_adj_list (1D-ndarray of ints) - flatten array so that
vnode_adj_list.reshape((n_vnodes, max_vnode_deg)) gives for each variable node the adjacent check nodes.
cnode_adj_list (1D-ndarray of ints) - flatten array so that
cnode_adj_list.reshape((n_cnodes, max_cnode_deg)) gives for each check node the adjacent variable nodes.
vnode_cnode_map (1D-ndarray of ints) - flatten array providing the mapping between vnode and cnode indexes.
cnode_vnode_map (1D-ndarray of ints) - flatten array providing the mapping between vnode and cnode indexes.
vnode_deg_list (1D-ndarray of ints) - degree of each variable node.
cnode_deg_list (1D-ndarray of ints) - degree of each check node.
decoder_algorithm: string
Specify the decoder algorithm type.
'SPA' for Sum-Product Algorithm
'MSA' for Min-Sum Algorithm
n_iters : int
Max. number of iterations of decoding to be done.
Returns
-------
dec_word : 1D array or 2D array of 0's and 1's with one block per column.
The codeword after decoding.
out_llrs : 1D array or 2D array of float with one block per column.
LLR values corresponding to the decoded output.
"""
# Clip LLRs
llr_vec.clip(-_llr_max, _llr_max, llr_vec)
# Build parity_check_matrix if required
if ldpc_code_params.get('parity_check_matrix') is None:
build_matrix(ldpc_code_params)
# Initialization
dec_word = np.signbit(llr_vec)
out_llrs = llr_vec.copy()
parity_check_matrix = ldpc_code_params['parity_check_matrix'].astype(float).tocoo()
for i_start in range(0, llr_vec.size, ldpc_code_params['n_vnodes']):
i_stop = i_start + ldpc_code_params['n_vnodes']
message_matrix = parity_check_matrix.multiply(llr_vec[i_start:i_stop])
# Main loop of Belief Propagation (BP) decoding iterations
for iter_cnt in range(n_iters):
# Compute early termination using parity check matrix
if np.all(ldpc_code_params['parity_check_matrix'].multiply(dec_word[i_start:i_stop]).sum(1) % 2 == 0):
break
# Check Node Update
if decoder_algorithm == 'SPA':
# Compute incoming messages
message_matrix.data *= .5
np.tanh(message_matrix.data, out=message_matrix.data)
# Runtime warnings are expected when llr = 0. They are suppressed here as this case is expected.
with np.errstate(divide='ignore', invalid='ignore'):
# Compute product as exponent of the sum of logarithm
log2_msg_matrix = message_matrix.astype(complex).copy()
np.log2(message_matrix.data.astype(complex), out=log2_msg_matrix.data)
msg_products = np.exp2(log2_msg_matrix.sum(1)).real
# Compute outgoing messages
message_matrix.data = 1 / message_matrix.data
message_matrix = message_matrix.multiply(msg_products)
message_matrix.data.clip(-1, 1, message_matrix.data)
np.arctanh(message_matrix.data, out=message_matrix.data)
message_matrix.data *= 2
message_matrix.data.clip(-_llr_max, _llr_max, message_matrix.data)
elif decoder_algorithm == 'MSA':
message_matrix = message_matrix.tocsr()
for row_idx in range(message_matrix.shape[0]):
begin_row = message_matrix.indptr[row_idx]
end_row = message_matrix.indptr[row_idx+1]
row_data = message_matrix.data[begin_row:end_row].copy()
indexes = np.arange(len(row_data))
for j, i in enumerate(range(begin_row, end_row)):
other_val = row_data[indexes != j]
message_matrix.data[i] = np.sign(other_val).prod() * np.abs(other_val).min()
else:
raise NameError('Please input a valid decoder_algorithm string (meaning "SPA" or "MSA").')
# Variable Node Update
msg_sum = np.array(message_matrix.sum(0)).squeeze()
message_matrix.data *= -1
message_matrix.data += parity_check_matrix.multiply(msg_sum + llr_vec[i_start:i_stop]).data
out_llrs[i_start:i_stop] = msg_sum + llr_vec[i_start:i_stop]
np.signbit(out_llrs[i_start:i_stop], out=dec_word[i_start:i_stop])
# Reformat outputs
n_blocks = llr_vec.size // ldpc_code_params['n_vnodes']
dec_word = dec_word.reshape(-1, n_blocks, order='F').squeeze().astype(np.int8)
out_llrs = out_llrs.reshape(-1, n_blocks, order='F').squeeze()
return dec_word, out_llrs
def write_ldpc_params(parity_check_matrix, file_path):
"""
Write parameters from LDPC parity check matrix to a design file.
The file is structured as follows (examples are available in designs/ldpc/):
n_vnode n_cnode
max_vnode_deg max_cnode_deg
List of the degree of each vnode
List of the degree of each cnode
For each vnode (line by line, separated by '\t'): index of the connected cnodes
For each cnode (line by line, separated by '\t'): index of the connected vnodes
Parameters
----------
parity_check_matrix : 2D-array of int
Parity check matrix to save.
file_path : str
File path of the LDPC code design file.
"""
with open(file_path, 'x') as file:
file.write('{} {}\n'.format(parity_check_matrix.shape[1], parity_check_matrix.shape[0]))
file.write('{} {}\n'.format(parity_check_matrix.sum(0).max(), parity_check_matrix.sum(1).max()))
for deg in parity_check_matrix.sum(0):
file.write('{} '.format(deg))
file.write('\n')
for deg in parity_check_matrix.sum(1):
file.write('{} '.format(deg))
file.write('\n')
for line in parity_check_matrix.T:
nodes = line.nonzero()[0]
for node in nodes[:-1]:
file.write('{}\t'.format(node + 1))
file.write('{}\n'.format(nodes[-1] + 1))
for col in parity_check_matrix:
nodes = col.nonzero()[0]
for node in nodes[:-1]:
file.write('{}\t'.format(node + 1))
file.write('{}\n'.format(nodes[-1] + 1))
file.write('\n')
def triang_ldpc_systematic_encode(message_bits, ldpc_code_params, pad=True):
"""
Encode bits using the LDPC code specified. If the generator matrix is not computed, this function will build it
and add it to the dictionary. It will also add the parity check matrix.
This function works only for LDPC codes specified by an approximately triangular parity check matrix.
Parameters
----------
message_bits : 1D-array
Message bit to encode.
ldpc_code_params : dictionary that at least contains one of these options:
Option 1: generator matrix and parity-check matrix are available.
parity_check_matrix (CSC sparse matrix of int8) - parity check matrix.
generator_matrix (2D-array or sparse matrix) - generator matrix of the code.
Option 2: generator and parity check matrices will be added as sparse matrices.
n_vnodes (int) - number of variable nodes.
n_cnodes (int) - number of check nodes.
max_cnode_deg (int) - maximal degree of a check node.
cnode_adj_list (1D-ndarray of ints) - flatten array so that
cnode_adj_list.reshape((n_cnodes, max_cnode_deg)) gives for each check node the adjacent variable nodes.
cnode_deg_list (1D-ndarray of ints) - degree of each check node.
pad : boolean
Whether to add '0' padding to the message to fit the block length.
*Default* is True.
Returns
-------
coded_message : 1D-ndarray or 2D-ndarray of int8 depending on the number of blocks
Coded message with the systematic part at the beginning.
Raises
------
ValueError
If the message length is not a multiple of block length and pad is False.
"""
if ldpc_code_params.get('generator_matrix') is None or ldpc_code_params.get('parity_check_matrix') is None:
build_matrix(ldpc_code_params)
block_length = ldpc_code_params['generator_matrix'].shape[1]
modulo = len(message_bits) % block_length
if modulo:
if pad:
message_bits = np.concatenate((message_bits, np.zeros(block_length - modulo, message_bits.dtype)))
else:
raise ValueError('Padding is disabled but message length is not a multiple of block length.')
message_bits = message_bits.reshape(block_length, -1, order='F')
parity_part = ldpc_code_params['generator_matrix'].dot(message_bits) % 2
return np.vstack((message_bits, parity_part)).squeeze().astype(np.int8)
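# Hedged end-to-end sketch (the design file name below is hypothetical; substitute any
# approximately triangular design shipped in designs/ldpc/): encode with the triangular
# systematic encoder, form noiseless BPSK-style LLRs and decode with the sum-product
# algorithm.
#
#     import numpy as np
#
#     params = get_ldpc_code_params('designs/ldpc/some_design.txt', compute_matrix=True)
#     k = params['n_vnodes'] - params['n_cnodes']
#     message = np.random.randint(0, 2, k).astype(np.int8)
#     codeword = triang_ldpc_systematic_encode(message, params)
#     llrs = 1. - 2. * codeword.astype(float)       # +1 for bit 0, -1 for bit 1
#     decoded, out_llrs = ldpc_bp_decode(llrs, params, 'SPA', 10)
#     np.array_equal(decoded[:k], message)          # True in this noiseless case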
from __future__ import division
import functools
import math
from warnings import warn
import matplotlib.colors as mcolors
import matplotlib.patches as mpatches
import matplotlib.path as mpath
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import PatchCollection
from commpy.utilities import dec2bitarray, bitarray2dec, hamming_dist, euclid_dist
__all__ = ['Trellis', 'conv_encode', 'viterbi_decode']
class Trellis:
"""
Class defining a Trellis corresponding to a k/n - rate convolutional code.
This follow the classical representation. See [1] for instance.
Input and output are represented as little endian e.g. output = decimal(output[0], output[1] ...).
Parameters
----------
memory : 1D ndarray of ints
Number of memory elements per input of the convolutional encoder.
g_matrix : 2D ndarray of ints (decimal representation)
Generator matrix G(D) of the convolutional encoder. Each element of G(D) represents a polynomial.
Coef [i,j] is the influence of input i on output j.
feedback : 2D ndarray of ints (decimal representation), optional
Feedback matrix F(D) of the convolutional encoder. Each element of F(D) represents a polynomial.
Coef [i,j] is the feedback influence of input i on input j.
*Default* implies no feedback.
The backwards compatibility version is triggered if feedback is an int.
code_type : {'default', 'rsc'}, optional
Use 'rsc' to generate a recursive systematic convolutional code.
If 'rsc' is specified, then the first 'k x k' sub-matrix of
G(D) must represent an identity matrix along with a non-zero
feedback polynomial.
*Default* is 'default'.
polynomial_format : {'MSB', 'LSB', 'Matlab'}, optional
Defines how to interpret g_matrix and feedback. In MSB format, we have 1+D <-> 3 <-> 011.
In LSB format, which is used in Matlab, we have 1+D <-> 6 <-> 110.
*Default* is 'MSB' format.
Attributes
----------
k : int
Size of the smallest block of input bits that can be encoded using
the convolutional code.
n : int
Size of the smallest block of output bits generated using
the convolutional code.
total_memory : int
Total number of delay elements needed to implement the convolutional
encoder.
number_states : int
Number of states in the convolutional code trellis.
number_inputs : int
Number of branches from each state in the convolutional code trellis.
next_state_table : 2D ndarray of ints
Table representing the state transition matrix of the
convolutional code trellis. Rows represent current states and
columns represent current inputs in decimal. Elements represent the
corresponding next states in decimal.
output_table : 2D ndarray of ints
Table representing the output matrix of the convolutional code trellis.
Rows represent current states and columns represent current inputs in
decimal. Elements represent corresponding outputs in decimal.
Raises
------
ValueError
polynomial_format is not 'MSB', 'LSB' or 'Matlab'.
Examples
--------
>>> from numpy import array
>>> import commpy.channelcoding.convcode as cc
>>> memory = array([2])
>>> g_matrix = array([[5, 7]]) # G(D) = [1+D^2, 1+D+D^2]
>>> trellis = cc.Trellis(memory, g_matrix)
>>> print(trellis.k)
1
>>> print(trellis.n)
2
>>> print(trellis.total_memory)
2
>>> print(trellis.number_states)
4
>>> print(trellis.number_inputs)
2
>>> print(trellis.next_state_table)
[[0 2]
[0 2]
[1 3]
[1 3]]
>>> print(trellis.output_table)
[[0 3]
[3 0]
[1 2]
[2 1]]
References
----------
[1] S. Benedetto, R. Garello and G. Montorsi, "A search for good convolutional codes to be used in the
construction of turbo codes", IEEE Transactions on Communications, vol. 46, n. 9, p. 1101-1105, Sept. 1998
"""
def __init__(self, memory, g_matrix, feedback=None, code_type='default', polynomial_format='MSB'):
[self.k, self.n] = g_matrix.shape
self.code_type = code_type
self.total_memory = memory.sum()
self.number_states = pow(2, self.total_memory)
self.number_inputs = pow(2, self.k)
self.next_state_table = np.zeros([self.number_states,
self.number_inputs], 'int')
self.output_table = np.zeros([self.number_states,
self.number_inputs], 'int')
if isinstance(feedback, int):
warn('Trellis will only accept feedback as a matrix in the future. '
'Using the backwards compatibility version that may contain bugs for k > 1 or with LSB format.',
DeprecationWarning)
if code_type == 'rsc':
for i in range(self.k):
g_matrix[i][i] = feedback
# Compute the entries in the next state table and the output table
for current_state in range(self.number_states):
for current_input in range(self.number_inputs):
outbits = np.zeros(self.n, 'int')
# Compute the values in the output_table
for r in range(self.n):
output_generator_array = np.zeros(self.k, 'int')
shift_register = dec2bitarray(current_state,
self.total_memory)
for l in range(self.k):
# Convert the number representing a polynomial into a
# bit array
generator_array = dec2bitarray(g_matrix[l][r],
memory[l] + 1)
# Loop over M delay elements of the shift register
# to compute their contribution to the r-th output
for i in range(memory[l]):
outbits[r] = (outbits[r] + \
(shift_register[i + l] * generator_array[i + 1])) % 2
output_generator_array[l] = generator_array[0]
if l == 0:
feedback_array = (dec2bitarray(feedback, memory[l] + 1)[1:] * shift_register[0:memory[l]]).sum()
shift_register[1:memory[l]] = \
shift_register[0:memory[l] - 1]
shift_register[0] = (dec2bitarray(current_input,
self.k)[0] + feedback_array) % 2
else:
feedback_array = (dec2bitarray(feedback, memory[l] + 1) *
shift_register[
l + memory[l - 1] - 1:l + memory[l - 1] + memory[l] - 1]).sum()
shift_register[l + memory[l - 1]:l + memory[l - 1] + memory[l] - 1] = \
shift_register[l + memory[l - 1] - 1:l + memory[l - 1] + memory[l] - 2]
shift_register[l + memory[l - 1] - 1] = \
(dec2bitarray(current_input, self.k)[l] + feedback_array) % 2
# Compute the contribution of the current_input to output
outbits[r] = (outbits[r] + \
(np.sum(dec2bitarray(current_input, self.k) * \
output_generator_array + feedback_array) % 2)) % 2
# Update the output_table using the computed output value
self.output_table[current_state][current_input] = \
bitarray2dec(outbits)
# Update the next_state_table using the new state of
# the shift register
self.next_state_table[current_state][current_input] = \
bitarray2dec(shift_register)
else:
if polynomial_format == 'MSB':
bit_order = -1
elif polynomial_format in ('LSB', 'Matlab'):
bit_order = 1
else:
raise ValueError('polynomial_format must be "LSB", "MSB" or "Matlab"')
if feedback is None:
feedback = np.identity(self.k, int)
if polynomial_format in ('LSB', 'Matlab'):
feedback *= 2**memory.max()
max_values_lign = memory.max() + 1 # Max number of values on a delay line
# feedback_array[i] holds the i-th bit corresponding to each feedback polynomial.
feedback_array = np.zeros((max_values_lign, self.k, self.k), np.int8)
for i in range(self.k):
for j in range(self.k):
binary_view = dec2bitarray(feedback[i, j], max_values_lign)[::bit_order]
feedback_array[:max_values_lign, i, j] = binary_view[-max_values_lign-2:]
# g_matrix_array[i] holds the i-th bit corresponding to each g_matrix polynomial.
g_matrix_array = np.zeros((max_values_lign, self.k, self.n), np.int8)
for i in range(self.k):
for j in range(self.n):
binary_view = dec2bitarray(g_matrix[i, j], max_values_lign)[::bit_order]
g_matrix_array[:max_values_lign, i, j] = binary_view[-max_values_lign-2:]
# shift_regs holds on each column the state of a shift register.
# The first row is the input of each shift reg.
shift_regs = np.empty((max_values_lign, self.k), np.int8)
# Compute the entries in the next state table and the output table
for current_state in range(self.number_states):
for current_input in range(self.number_inputs):
current_state_array = dec2bitarray(current_state, self.total_memory)
# Set the first row as the input.
shift_regs[0] = dec2bitarray(current_input, self.k)
# Set the other rows based on the current_state
idx = 0
for idx_mem, mem in enumerate(memory):
shift_regs[1:mem+1, idx_mem] = current_state_array[idx:idx + mem]
idx += mem
# Compute the output table
outputs_array = np.einsum('ik,ikl->l', shift_regs, g_matrix_array) % 2
self.output_table[current_state, current_input] = bitarray2dec(outputs_array)
# Update the first line based on the feedback polynomial
np.einsum('ik,ilk->l', shift_regs, feedback_array, out=shift_regs[0])
shift_regs %= 2
# Update current state array and compute next state table
idx = 0
for idx_mem, mem in enumerate(memory):
current_state_array[idx:idx + mem] = shift_regs[:mem, idx_mem]
idx += mem
self.next_state_table[current_state, current_input] = bitarray2dec(current_state_array)
def _generate_grid(self, trellis_length):
""" Private method """
grid = np.mgrid[0.12:0.22*trellis_length:(trellis_length+1)*(0+1j),
0.1:0.5+self.number_states*0.1:self.number_states*(0+1j)].reshape(2, -1)
return grid
def _generate_states(self, trellis_length, grid, state_order, state_radius, font):
""" Private method """
state_patches = []
for state_count in range(self.number_states * trellis_length):
state_patch = mpatches.Circle(grid[:,state_count], state_radius,
color="#003399", ec="#cccccc")
state_patches.append(state_patch)
plt.text(grid[0, state_count], grid[1, state_count]-0.02,
str(state_order[state_count % self.number_states]),
ha="center", family=font, size=20, color="#ffffff")
return state_patches
def _generate_edges(self, trellis_length, grid, state_order, state_radius, edge_colors):
""" Private method """
edge_patches = []
for current_time_index in range(trellis_length-1):
grid_subset = grid[:,self.number_states * current_time_index:]
for state_count_1 in range(self.number_states):
input_count = 0
for state_count_2 in range(self.number_states):
dx = grid_subset[0, state_count_2+self.number_states] - grid_subset[0,state_count_1] - 2*state_radius
dy = grid_subset[1, state_count_2+self.number_states] - grid_subset[1,state_count_1]
if np.count_nonzero(self.next_state_table[state_order[state_count_1],:] == state_order[state_count_2]):
found_index = np.where(self.next_state_table[state_order[state_count_1]] ==
state_order[state_count_2])
edge_patch = mpatches.FancyArrow(grid_subset[0,state_count_1]+state_radius,
grid_subset[1,state_count_1], dx, dy, width=0.005,
length_includes_head = True, color = edge_colors[found_index[0][0]-1])
edge_patches.append(edge_patch)
input_count = input_count + 1
return edge_patches
def _generate_labels(self, grid, state_order, state_radius, font):
""" Private method """
for state_count in range(self.number_states):
for input_count in range(self.number_inputs):
edge_label = str(input_count) + "/" + str(
self.output_table[state_order[state_count], input_count])
plt.text(grid[0, state_count]-1.5*state_radius,
grid[1, state_count]+state_radius*(1-input_count-0.7),
edge_label, ha="center", family=font, size=14)
def visualize(self, trellis_length = 2, state_order = None,
state_radius = 0.04, edge_colors = None, save_path = None):
""" Plot the trellis diagram.
Parameters
----------
trellis_length : int, optional
Specifies the number of time steps in the trellis diagram.
Default value is 2.
state_order : list of ints, optional
Specifies the order in the which the states of the trellis
are to be displayed starting from the top in the plot.
Default order is [0,...,number_states-1]
state_radius : float, optional
Radius of each state (circle) in the plot.
Default value is 0.04
edge_colors : list of hex color codes, optional
A list of length equal to the number_inputs,
containing color codes that represent the edge corresponding
to the input.
save_path : str or None
If not None, save the figure to the file specified by its path.
*Default* is no saving.
"""
if edge_colors is None:
edge_colors = [mcolors.hsv_to_rgb((i/self.number_inputs, 1, 1)) for i in range(self.number_inputs)]
if state_order is None:
state_order = list(range(self.number_states))
font = "sans-serif"
fig = plt.figure(figsize=(12, 6), dpi=150)
ax = plt.axes([0,0,1,1])
trellis_patches = []
state_order.reverse()
trellis_grid = self._generate_grid(trellis_length)
state_patches = self._generate_states(trellis_length, trellis_grid,
state_order, state_radius, font)
edge_patches = self._generate_edges(trellis_length, trellis_grid,
state_order, state_radius,
edge_colors)
self._generate_labels(trellis_grid, state_order, state_radius, font)
trellis_patches.extend(state_patches)
trellis_patches.extend(edge_patches)
collection = PatchCollection(trellis_patches, match_original=True)
ax.add_collection(collection)
ax.set_xticks([])
ax.set_yticks([])
plt.legend(edge_patches, [str(i) + "-input" for i in range(self.number_inputs)])
plt.show()
if save_path is not None:
plt.savefig(save_path)
    def visualize_fsm(self, state_order=None, state_radius=0.04, edge_colors=None, save_path=None):
        """ Plot the FSM corresponding to the trellis.
This method is not intended to display large FSMs and its use is advisable only for simple trellises.
Parameters
----------
state_order : list of ints, optional
            Specifies the order in which the states of the trellis are to be displayed, starting from the top of the
            plot.
*Default* order is [0,...,number_states-1]
state_radius : float, optional
Radius of each state (circle) in the plot.
*Default* value is 0.04
edge_colors : list of hex color codes, optional
A list of length equal to the number_inputs, containing color codes that represent the edge corresponding to
the input.
save_path : str or None
If not None, save the figure to the file specified by its path.
*Default* is no saving.
"""
# Default arguments
if edge_colors is None:
edge_colors = [mcolors.hsv_to_rgb((i/self.number_inputs, 1, 1)) for i in range(self.number_inputs)]
if state_order is None:
state_order = list(range(self.number_states))
# Init the figure
ax = plt.axes((0, 0, 1, 1))
# Plot states
radius = state_radius * self.number_states
angles = 2 * np.pi / self.number_states * np.arange(self.number_states)
positions = [(radius * math.cos(angle), radius * math.sin(angle)) for angle in angles]
state_patches = []
arrows = []
for idx, state in enumerate(state_order):
state_patches.append(mpatches.Circle(positions[idx], state_radius, color="#003399", ec="#cccccc"))
plt.text(positions[idx][0], positions[idx][1], str(state), ha='center', va='center', size=20)
# Plot transition
for input in range(self.number_inputs):
next_state = self.next_state_table[state, input]
next_idx = (state_order == next_state).nonzero()[0][0]
output = self.output_table[state, input]
# Transition arrow
if next_state == state:
# Positions
arrow_start_x = positions[idx][0] + state_radius * math.cos(angles[idx] + math.pi / 6)
arrow_start_y = positions[idx][1] + state_radius * math.sin(angles[idx] + math.pi / 6)
arrow_end_x = positions[idx][0] + state_radius * math.cos(angles[idx] - math.pi / 6)
arrow_end_y = positions[idx][1] + state_radius * math.sin(angles[idx] - math.pi / 6)
arrow_mid_x = positions[idx][0] + state_radius * 2 * math.cos(angles[idx])
arrow_mid_y = positions[idx][1] + state_radius * 2 * math.sin(angles[idx])
# Add text
plt.text(arrow_mid_x, arrow_mid_y, '({})'.format(output),
ha='center', va='center', backgroundcolor=edge_colors[input])
else:
# Positions
dx = positions[next_idx][0] - positions[idx][0]
dy = positions[next_idx][1] - positions[idx][1]
relative_angle = math.atan(dy / dx) + np.where(dx > 0, 0, math.pi)
arrow_start_x = positions[idx][0] + state_radius * math.cos(relative_angle + math.pi * 0.05)
arrow_start_y = positions[idx][1] + state_radius * math.sin(relative_angle + math.pi * 0.05)
arrow_end_x = positions[next_idx][0] - state_radius * math.cos(relative_angle - math.pi * 0.05)
arrow_end_y = positions[next_idx][1] - state_radius * math.sin(relative_angle - math.pi * 0.05)
arrow_mid_x = (arrow_start_x + arrow_end_x) / 2 + \
radius * 0.25 * math.cos((angles[idx] + angles[next_idx]) / 2) * np.sign(dx)
arrow_mid_y = (arrow_start_y + arrow_end_y) / 2 + \
radius * 0.25 * math.sin((angles[idx] + angles[next_idx]) / 2) * np.sign(dx)
text_x = arrow_mid_x + 0.01 * math.cos((angles[idx] + angles[next_idx]) / 2)
text_y = arrow_mid_y + 0.01 * math.sin((angles[idx] + angles[next_idx]) / 2)
# Add text
plt.text(text_x, text_y, '({})'.format(output),
ha='center', va='center', backgroundcolor=edge_colors[input])
# Path creation
codes = (mpath.Path.MOVETO, mpath.Path.CURVE3, mpath.Path.CURVE3)
verts = ((arrow_start_x, arrow_start_y),
(arrow_mid_x, arrow_mid_y),
(arrow_end_x, arrow_end_y))
path = mpath.Path(verts, codes)
# Plot arrow
arrow = mpatches.FancyArrowPatch(path=path, mutation_scale=20, color=edge_colors[input])
ax.add_artist(arrow)
arrows.append(arrow)
# Format and plot
ax.set_xlim(radius * -2, radius * 2)
ax.set_ylim(radius * -2, radius * 2)
ax.add_collection(PatchCollection(state_patches, True))
plt.legend(arrows, [str(i) + "-input" for i in range(self.number_inputs)], loc='lower right')
plt.text(0, 1.5 * radius, 'Finite State Machine (output on transition)', ha='center', size=18)
plt.show()
if save_path is not None:
plt.savefig(save_path)
def conv_encode(message_bits, trellis, termination = 'term', puncture_matrix=None):
"""
Encode bits using a convolutional code.
Parameters
----------
message_bits : 1D ndarray containing {0, 1}
Stream of bits to be convolutionally encoded.
trellis: pre-initialized Trellis structure.
termination: {'cont', 'term'}, optional
Create ('term') or not ('cont') termination bits.
puncture_matrix: 2D ndarray containing {0, 1}, optional
Matrix used for the puncturing algorithm
Returns
-------
coded_bits : 1D ndarray containing {0, 1}
Encoded bit stream.
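    Examples
    --------
    A minimal sketch (the memory and generator polynomials are illustrative):
    >>> import numpy as np
    >>> import commpy.channelcoding as cc
    >>> trellis = cc.Trellis(np.array([2]), np.array([[5, 7]]))
    >>> coded_bits = cc.conv_encode(np.array([1, 0, 1, 1]), trellis)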
"""
k = trellis.k
n = trellis.n
total_memory = trellis.total_memory
rate = float(k)/n
code_type = trellis.code_type
if puncture_matrix is None:
puncture_matrix = np.ones((trellis.k, trellis.n))
number_message_bits = np.size(message_bits)
if termination == 'cont':
inbits = message_bits
number_inbits = number_message_bits
number_outbits = int(number_inbits/rate)
else:
# Initialize an array to contain the message bits plus the truncation zeros
if code_type == 'rsc':
inbits = message_bits
number_inbits = number_message_bits
number_outbits = int((number_inbits + k * total_memory)/rate)
else:
number_inbits = number_message_bits + total_memory + total_memory % k
inbits = np.zeros(number_inbits, 'int')
# Pad the input bits with M zeros (L-th terminated truncation)
inbits[0:number_message_bits] = message_bits
number_outbits = int(number_inbits/rate)
outbits = np.zeros(number_outbits, 'int')
if puncture_matrix is not None:
p_outbits = np.zeros(number_outbits, 'int')
else:
p_outbits = np.zeros(int(number_outbits*
puncture_matrix[0:].sum()/np.size(puncture_matrix, 1)), 'int')
next_state_table = trellis.next_state_table
output_table = trellis.output_table
# Encoding process - Each iteration of the loop represents one clock cycle
current_state = 0
j = 0
for i in range(int(number_inbits/k)): # Loop through all input bits
current_input = bitarray2dec(inbits[i*k:(i+1)*k])
current_output = output_table[current_state][current_input]
outbits[j*n:(j+1)*n] = dec2bitarray(current_output, n)
current_state = next_state_table[current_state][current_input]
j += 1
if code_type == 'rsc' and termination == 'term':
term_bits = dec2bitarray(current_state, trellis.total_memory)
term_bits = term_bits[::-1]
for i in range(trellis.total_memory):
current_input = bitarray2dec(term_bits[i*k:(i+1)*k])
current_output = output_table[current_state][current_input]
outbits[j*n:(j+1)*n] = dec2bitarray(current_output, n)
current_state = next_state_table[current_state][current_input]
j += 1
j = 0
for i in range(number_outbits):
if puncture_matrix[0][i % np.size(puncture_matrix, 1)] == 1:
p_outbits[j] = outbits[i]
j = j + 1
return p_outbits
def _where_c(inarray, rows, cols, search_value, index_array):
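    # Fill index_array with the (row, col) positions where inarray equals
    # search_value and return how many matches were found.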
number_found = 0
res = np.where(inarray == search_value)
i_s, j_s = res
for i, j in zip(i_s, j_s):
if inarray[i, j] == search_value:
index_array[number_found, 0] = i
index_array[number_found, 1] = j
number_found += 1
return number_found
@functools.lru_cache(maxsize=128, typed=False)
def _compute_branch_metrics(decoding_type, _r_codeword: tuple, _i_codeword_array: tuple):
r_codeword = np.array(_r_codeword)
i_codeword_array = np.array(_i_codeword_array)
if decoding_type == 'hard':
return hamming_dist(r_codeword.astype(int), i_codeword_array.astype(int))
elif decoding_type == 'soft':
neg_LL_0 = np.log(np.exp(r_codeword) + 1) # negative log-likelihood to have received a 0
neg_LL_1 = neg_LL_0 - r_codeword # negative log-likelihood to have received a 1
return np.where(i_codeword_array, neg_LL_1, neg_LL_0).sum()
elif decoding_type == 'unquantized':
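        # map the ideal codeword bits {0, 1} to bipolar symbols {-1, +1} and
        # use the Euclidean distance to the received (real-valued) samples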
i_codeword_array = 2 * i_codeword_array - 1
return euclid_dist(r_codeword, i_codeword_array)
def _acs_traceback(r_codeword, trellis, decoding_type,
path_metrics, paths, decoded_symbols,
decoded_bits, tb_count, t, count,
tb_depth, current_number_states):
k = trellis.k
n = trellis.n
number_states = trellis.number_states
number_inputs = trellis.number_inputs
branch_metric = 0.0
next_state_table = trellis.next_state_table
output_table = trellis.output_table
pmetrics = np.empty(number_inputs)
index_array = np.empty([number_states, 2], 'int')
# Loop over all the current states (Time instant: t)
for state_num in range(current_number_states):
# Using the next state table find the previous states and inputs
# leading into the current state (Trellis)
number_found = _where_c(next_state_table, number_states, number_inputs, state_num, index_array)
# Loop over all the previous states (Time instant: t-1)
for i in range(number_found):
previous_state = index_array[i, 0]
previous_input = index_array[i, 1]
# Using the output table, find the ideal codeword
i_codeword = output_table[previous_state, previous_input]
i_codeword_array = dec2bitarray(i_codeword, n)
# Compute Branch Metrics
branch_metric = _compute_branch_metrics(decoding_type, tuple(r_codeword), tuple(i_codeword_array))
# ADD operation: Add the branch metric to the
# accumulated path metric and store it in the temporary array
pmetrics[i] = path_metrics[previous_state, 0] + branch_metric
# COMPARE and SELECT operations
# Compare and Select the minimum accumulated path metric
path_metrics[state_num, 1] = pmetrics.min()
# Store the previous state corresponding to the minimum
# accumulated path metric
min_idx = pmetrics.argmin()
paths[state_num, tb_count] = index_array[min_idx, 0]
# Store the previous input corresponding to the minimum
# accumulated path metric
decoded_symbols[state_num, tb_count] = index_array[min_idx, 1]
if t >= tb_depth - 1:
current_state = path_metrics[:,1].argmin()
# Traceback Loop
for j in reversed(range(1, tb_depth)):
dec_symbol = decoded_symbols[current_state, j]
previous_state = paths[current_state, j]
decoded_bitarray = dec2bitarray(dec_symbol, k)
decoded_bits[t - tb_depth + 1 + (j - 1) * k + count:t - tb_depth + 1 + j * k + count] = decoded_bitarray
current_state = previous_state
paths[:,0:tb_depth-1] = paths[:,1:]
decoded_symbols[:,0:tb_depth-1] = decoded_symbols[:,1:]
def viterbi_decode(coded_bits, trellis, tb_depth=None, decoding_type='hard'):
"""
Decodes a stream of convolutionally encoded bits using the Viterbi Algorithm.
Parameters
----------
coded_bits : 1D ndarray
Stream of convolutionally encoded bits which are to be decoded.
    trellis : Trellis object
        Trellis representing the convolutional code.
tb_depth : int
Traceback depth.
*Default* is 5 times the number of memories in the code.
decoding_type : str {'hard', 'soft', 'unquantized'}
The type of decoding to be used.
'hard' option is used for hard inputs (bits) to the decoder, e.g., BSC channel.
'soft' option is used for soft inputs (LLRs) to the decoder. LLRs are clipped in [-500, 500].
'unquantized' option is used for soft inputs (real numbers) to the decoder, e.g., BAWGN channel.
Returns
-------
decoded_bits : 1D ndarray
Decoded bit stream.
Raises
------
ValueError
If decoding_type is something else than 'hard', 'soft' or 'unquantized'.
References
----------
.. [1] Todd K. Moon. Error Correction Coding: Mathematical Methods and
Algorithms. John Wiley and Sons, 2005.
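    Examples
    --------
    A minimal sketch (memory and generator polynomials are illustrative):
    >>> import numpy as np
    >>> import commpy.channelcoding as cc
    >>> trellis = cc.Trellis(np.array([2]), np.array([[5, 7]]))
    >>> coded_bits = cc.conv_encode(np.array([1, 0, 1, 1]), trellis)
    >>> decoded_bits = cc.viterbi_decode(coded_bits, trellis, tb_depth=15)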
"""
# k = Rows in G(D), n = columns in G(D)
k = trellis.k
n = trellis.n
rate = k/n
total_memory = trellis.total_memory
# Number of message bits after decoding
L = int(len(coded_bits)*rate)
if tb_depth is None:
tb_depth = min(5 * total_memory, L)
path_metrics = np.full((trellis.number_states, 2), np.inf)
path_metrics[0][0] = 0
paths = np.empty((trellis.number_states, tb_depth), 'int')
paths[0][0] = 0
decoded_symbols = np.zeros([trellis.number_states, tb_depth], 'int')
decoded_bits = np.empty(int(math.ceil((L + tb_depth) / k) * k), 'int')
r_codeword = np.zeros(n, 'int')
tb_count = 1
count = 0
current_number_states = trellis.number_states
if decoding_type == 'soft':
coded_bits = coded_bits.clip(-500, 500)
for t in range(1, int((L+total_memory)/k)):
# Get the received codeword corresponding to t
if t <= L // k:
r_codeword = coded_bits[(t-1)*n:t*n]
# Pad with '0'
else:
if decoding_type == 'hard':
r_codeword[:] = 0
elif decoding_type == 'soft':
r_codeword[:] = 0
elif decoding_type == 'unquantized':
r_codeword[:] = -1
else:
                raise ValueError('The available decoding types are "hard", "soft" and "unquantized"')
_acs_traceback(r_codeword, trellis, decoding_type, path_metrics, paths,
decoded_symbols, decoded_bits, tb_count, t, count, tb_depth,
current_number_states)
if t >= tb_depth - 1:
tb_count = tb_depth - 1
count = count + k - 1
else:
tb_count = tb_count + 1
# Path metrics (at t-1) = Path metrics (at t)
path_metrics[:, 0] = path_metrics[:, 1]
return decoded_bits[:L]
def puncturing(message: np.ndarray, punct_vec: np.ndarray) -> np.ndarray:
"""
    Apply the puncturing procedure.
Parameters
----------
message : 1D ndarray
Input message {0,1} bit array.
punct_vec : 1D ndarray
Puncturing vector {0,1} bit array.
Returns
-------
punctured : 1D ndarray
Output punctured vector {0,1} bit array.
"""
shift = 0
N = len(punct_vec)
punctured = []
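    # keep only those bits whose position in the (cyclically reused)
    # puncturing vector is marked with a 1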
for idx, item in enumerate(message):
if punct_vec[idx-shift*N] == 1:
punctured.append(item)
if idx%N == 0:
shift = shift + 1
return np.array(punctured)
def depuncturing(punctured: np.ndarray, punct_vec: np.ndarray, shouldbe: int) -> np.ndarray:
"""
    Apply the depuncturing (zero-insertion) procedure.
Parameters
----------
punctured : 1D ndarray
Input punctured message {0,1} bit array.
punct_vec : 1D ndarray
Puncturing vector {0,1} bit array.
shouldbe : int
Length of the initial message (before puncturing).
Returns
-------
depunctured : 1D ndarray
Output vector {0,1} bit array.
"""
shift = 0
shift2 = 0
N = len(punct_vec)
depunctured = np.zeros((shouldbe,))
for idx, item in enumerate(depunctured):
if punct_vec[idx - shift*N] == 1:
depunctured[idx] = float(punctured[idx-shift2])
else:
shift2 = shift2 + 1
if idx%N == 0:
shift = shift + 1
return depunctured | /scikit-commpy-0.8.0.tar.gz/scikit-commpy-0.8.0/commpy/channelcoding/convcode.py | 0.91895 | 0.631651 | convcode.py | pypi |
from math import gcd
from numpy import array, zeros, arange, convolve, ndarray, concatenate
from commpy.utilities import dec2bitarray, bitarray2dec
__all__ = ['GF', 'polydivide', 'polymultiply', 'poly_to_string']
class GF:
"""
    Defines a binary Galois field of order m, containing x,
    where x can be a single element or an array of elements within the field.
    Parameters
    ----------
    x : int or 1D ndarray
        Represents the Galois field element(s).
m : int
Specifies the order of the Galois Field.
Returns
-------
x : int
A Galois Field GF(2\ :sup:`m`) object.
Examples
--------
>>> from numpy import arange
>>> from gfields import GF
>>> x = arange(16)
>>> m = 4
>>> x = GF(x, m)
    >>> print(x.elements)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
    >>> print(x.prim_poly)
19
"""
# Initialization
def __init__(self, x, m):
self.m = m
primpoly_array = array([0, 3, 7, 11, 19, 37, 67, 137, 285, 529, 1033,
2053, 4179, 8219, 17475, 32771, 69643])
self.prim_poly = primpoly_array[self.m]
if type(x) is int and x >= 0 and x < pow(2, m):
self.elements = array([x])
elif type(x) is ndarray and len(x) >= 1:
self.elements = x.astype(int)
# Overloading addition operator for Galois Field
def __add__(self, x):
if len(self.elements) == len(x.elements):
return GF(self.elements ^ x.elements, self.m)
else:
raise ValueError("The arguments should have the same number of elements")
# Overloading multiplication operator for Galois Field
def __mul__(self, x):
if len(x.elements) == len(self.elements):
prod_elements = arange(len(self.elements))
for i in range(len(self.elements)):
prod_elements[i] = polymultiply(self.elements[i], x.elements[i], self.m, self.prim_poly)
return GF(prod_elements, self.m)
else:
raise ValueError("Two sets of elements cannot be multiplied")
def power_to_tuple(self):
"""
Convert Galois field elements from power form to tuple form representation.
"""
y = zeros(len(self.elements))
for idx, i in enumerate(self.elements):
if 2**i < 2**self.m:
y[idx] = 2**i
else:
y[idx] = polydivide(2**i, self.prim_poly)
return GF(y, self.m)
def tuple_to_power(self):
"""
Convert Galois field elements from tuple form to power form representation.
"""
y = zeros(len(self.elements))
for idx, i in enumerate(self.elements):
if i != 0:
init_state = 1
cur_state = 1
power = 0
while cur_state != i:
cur_state = ((cur_state << 1) & (2**self.m-1)) ^ (-((cur_state & 2**(self.m-1)) >> (self.m - 1)) &
(self.prim_poly & (2**self.m-1)))
power+=1
y[idx] = power
else:
y[idx] = 0
return GF(y, self.m)
def order(self):
"""
Compute the orders of the Galois field elements.
"""
orders = zeros(len(self.elements))
power_gf = self.tuple_to_power()
for idx, i in enumerate(power_gf.elements):
orders[idx] = (2**self.m - 1)/(gcd(i, 2**self.m-1))
return orders
def cosets(self):
"""
Compute the cyclotomic cosets of the Galois field.
"""
coset_list = []
x = self.tuple_to_power().elements
mark_list = zeros(len(x))
coset_count = 1
for idx in range(len(x)):
if mark_list[idx] == 0:
a = x[idx]
mark_list[idx] = coset_count
i = 1
while (a*(2**i) % (2**self.m-1)) != a:
for idx2 in range(len(x)):
if (mark_list[idx2] == 0) and (x[idx2] == a*(2**i)%(2**self.m-1)):
mark_list[idx2] = coset_count
i+=1
coset_count+=1
for counts in range(1, coset_count):
coset_list.append(GF(self.elements[mark_list==counts], self.m))
return coset_list
def minpolys(self):
"""
Compute the minimal polynomials for all elements of the Galois field.
"""
minpol_list = array([])
full_gf = GF(arange(2**self.m), self.m)
full_cosets = full_gf.cosets()
for x in self.elements:
for i in range(len(full_cosets)):
if x in full_cosets[i].elements:
t = array([1, full_cosets[i].elements[0]])[::-1]
for root in full_cosets[i].elements[1:]:
t2 = concatenate((zeros(len(t)-1), array([1, root]), zeros(len(t)-1)))
prod_poly = array([])
for n in range(len(t2)-len(t)+1):
root_sum = 0
for k in range(len(t)):
root_sum = root_sum ^ polymultiply(int(t[k]), int(t2[n+k]), self.m, self.prim_poly)
prod_poly = concatenate((prod_poly, array([root_sum])))
t = prod_poly[::-1]
minpol_list = concatenate((minpol_list, array([bitarray2dec(t[::-1])])))
return minpol_list.astype(int)
# Divide two polynomials and returns the remainder
def polydivide(x, y):
r = y
while len(bin(r)) >= len(bin(y)):
shift_count = len(bin(x)) - len(bin(y))
if shift_count > 0:
d = y << shift_count
else:
d = y
x = x ^ d
r = x
return r
def polymultiply(x, y, m, prim_poly):
x_array = dec2bitarray(x, m)
y_array = dec2bitarray(y, m)
prod = bitarray2dec(convolve(x_array, y_array) % 2)
return polydivide(prod, prim_poly)
def poly_to_string(x):
i = 0
polystr = ""
while x != 0:
y = x%2
x = x >> 1
if y == 1:
polystr = polystr + "x^" + str(i) + " + "
i+=1
return polystr[:-2] | /scikit-commpy-0.8.0.tar.gz/scikit-commpy-0.8.0/commpy/channelcoding/gfields.py | 0.770292 | 0.576989 | gfields.py | pypi |
# Channel codes basics
## Main idea
The main idea of channel codes can be formulated in the following theses:
- **noise immunity** of the signal should be increased;
- **redundant bits** are added for *error detection* and *error correction*;
- some special algorithms (<u>coding schemes</u>) are used for this.
<img src="https://raw.githubusercontent.com/veeresht/CommPy/master/commpy/channelcoding/doc/assets/FECmainidea1.png" width="800" />
How "far apart" a given coding scheme places the code words determines how strongly it protects the signal from noise [1, p.23].
<img src="https://habrastorage.org/webt/n7/o4/bs/n7o4bsf7_htlv10gsatc-yojbrq.png" width="800" />
In the case of binary codes, the minimum Hamming distance between all existing code words is usually denoted **dmin**:
<img src="https://raw.githubusercontent.com/veeresht/CommPy/master/commpy/channelcoding/doc/assets/FECexamp2.png" alt="examp2" width="400"/>
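The minimum distance of a small code can be computed directly; a toy sketch (the three code words below are made up):
```python
import numpy as np
from itertools import combinations

# a made-up toy codebook with three 5-bit code words
codewords = np.array([[0, 0, 0, 0, 0],
                      [1, 1, 1, 0, 0],
                      [1, 1, 1, 1, 1]])

# dmin is the smallest Hamming distance over all pairs of code words
dmin = min(int(np.sum(a != b)) for a, b in combinations(codewords, 2))
print(dmin)  # 2 -> such a code can detect dmin - 1 = 1 error
```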
## Classification
Some classification is needed to discuss the various implementations of the encoding and decoding algorithms.
First, the channel codes:
- can only [*detect*](https://en.wikipedia.org/wiki/Cyclic_redundancy_check) the presence of errors
- and they can also [*correct* errors](https://en.wikipedia.org/wiki/Error_correction_code).
Secondly, codes can be classified as **block** and **continuous**:
![](https://raw.githubusercontent.com/veeresht/CommPy/master/commpy/channelcoding/doc/assets/BlockCont.png)
## Net bit rate
The redundancy of channel coding schemes reduces the useful bit rate; this is the price paid for the increased noise immunity.
[**Net bit rate**](https://en.wikipedia.org/wiki/Bit_rate#Information_rate) concept is usually used:
<img src="https://raw.githubusercontent.com/veeresht/CommPy/master/commpy/channelcoding/doc/assets/nebitrate.png" alt="net" width="500"/>
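For instance (the numbers below are illustrative), a rate-1/2 code on a 10 Mbit/s gross link leaves half of the bits for payload:
```python
gross_bit_rate = 10e6        # bit/s on the channel
code_rate = 1 / 2            # k/n of the channel code
net_bit_rate = gross_bit_rate * code_rate
print(net_bit_rate)          # 5000000.0 bit/s of useful (information) bits
```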
To change the code rate (k/n) of a block code, the dimensions of the generator matrix can be changed:
![blockcoderate](https://raw.githubusercontent.com/veeresht/CommPy/master/commpy/channelcoding/doc/assets/coderateblock.png)
To change the code rate of a continuous code, e.g. a convolutional code, the **puncturing** procedure is frequently used:
![punct](https://raw.githubusercontent.com/veeresht/CommPy/master/commpy/channelcoding/doc/assets/punct.png)
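A small sketch of the idea (the pattern below is one possible way to raise a rate-1/2 mother code to rate 2/3; the bit values are made up):
```python
import numpy as np

coded = np.array([1, 0, 1, 1, 0, 0, 1, 0])  # rate-1/2 mother-code output
punct_vec = np.array([1, 1, 0, 1])          # 1 = keep the bit, 0 = drop it

# keep only the positions where the (cyclically repeated) pattern is 1
mask = np.resize(punct_vec, coded.size).astype(bool)
punctured = coded[mask]
print(punctured)  # [1 0 1 0 0 0] -> 8 coded bits reduced to 6, rate 1/2 -> 2/3
```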
## Example
Let us consider the implementation of **convolutional codes** as an example:
<img src="https://habrastorage.org/webt/v3/v5/w2/v3v5w2gbwk34nzk_2qt25baoebq.png" width="500"/>
*Main modeling routines: random message generation, channel encoding, baseband modulation, additive noise (e.g. AWGN), baseband demodulation, channel decoding, BER calculation.*
```python
import numpy as np
import commpy.channelcoding.convcode as cc
import commpy.modulation as modulation
def BER_calc(a, b):
num_ber = np.sum(np.abs(a - b))
ber = np.mean(np.abs(a - b))
return int(num_ber), ber
N = 100000 # number of message bits per frame
message_bits = np.random.randint(0, 2, N) # message
M = 4 # modulation order (QPSK)
k = np.log2(M) # number of bits per modulation symbol
modem = modulation.PSKModem(M) # M-PSK modem initialization
```
The [following](https://en.wikipedia.org/wiki/File:Conv_code_177_133.png) convolutional code will be used:
![](https://upload.wikimedia.org/wikipedia/commons/thumb/b/b3/Conv_code_177_133.png/800px-Conv_code_177_133.png)
*Shift-register for the (7, [171, 133]) convolutional code polynomial.*
Convolutional encoder parameters:
```python
generator_matrix = np.array([[5, 7]]) # generator branches
trellis = cc.Trellis(np.array([M]), generator_matrix) # Trellis structure
rate = 1/2 # code rate
L = 7 # constraint length
m = np.array([L-1]) # number of delay elements
```
Viterbi decoder parameters:
```python
tb_depth = 5*(m.sum() + 1) # traceback depth
```
Two options of the Viterbi decoder will be tested:
- *hard* (hard inputs)
- *unquantized* (soft inputs)
Additionally, the uncoded case will be considered.
Simulation loop:
```python
EbNo = 5 # energy per bit to noise power spectral density ratio (in dB)
snrdB = EbNo + 10*np.log10(k*rate) # Signal-to-Noise ratio (in dB)
noiseVar = 10**(-snrdB/10) # noise variance (power)
N_c = 10 # number of trials
BER_soft = np.zeros(N_c)
BER_hard = np.zeros(N_c)
BER_uncoded = np.zeros(N_c)
for cntr in range(N_c):
message_bits = np.random.randint(0, 2, N) # message
coded_bits = cc.conv_encode(message_bits, trellis) # encoding
modulated = modem.modulate(coded_bits) # modulation
modulated_uncoded = modem.modulate(message_bits) # modulation (uncoded case)
Es = np.mean(np.abs(modulated)**2) # symbol energy
No = Es/((10**(EbNo/10))*np.log2(M)) # noise spectrum density
noisy = modulated + np.sqrt(No/2)*\
(np.random.randn(modulated.shape[0])+\
1j*np.random.randn(modulated.shape[0])) # AWGN
noisy_uncoded = modulated_uncoded + np.sqrt(No/2)*\
(np.random.randn(modulated_uncoded.shape[0])+\
1j*np.random.randn(modulated_uncoded.shape[0])) # AWGN (uncoded case)
demodulated_soft = modem.demodulate(noisy, demod_type='soft', noise_var=noiseVar) # demodulation (soft output)
demodulated_hard = modem.demodulate(noisy, demod_type='hard') # demodulation (hard output)
demodulated_uncoded = modem.demodulate(noisy_uncoded, demod_type='hard') # demodulation (uncoded case)
decoded_soft = cc.viterbi_decode(demodulated_soft, trellis, tb_depth, decoding_type='unquantized') # decoding (soft decision)
decoded_hard = cc.viterbi_decode(demodulated_hard, trellis, tb_depth, decoding_type='hard') # decoding (hard decision)
NumErr, BER_soft[cntr] = BER_calc(message_bits, decoded_soft[:message_bits.size]) # bit-error ratio (soft decision)
NumErr, BER_hard[cntr] = BER_calc(message_bits, decoded_hard[:message_bits.size]) # bit-error ratio (hard decision)
NumErr, BER_uncoded[cntr] = BER_calc(message_bits, demodulated_uncoded[:message_bits.size]) # bit-error ratio (uncoded case)
mean_BER_soft = BER_soft.mean() # averaged bit-error ratio (soft decision)
mean_BER_hard = BER_hard.mean() # averaged bit-error ratio (hard decision)
mean_BER_uncoded = BER_uncoded.mean() # averaged bit-error ratio (uncoded case)
print("Soft decision:\n{}\n".format(mean_BER_soft))
print("Hard decision:\n{}\n".format(mean_BER_hard))
print("Uncoded message:\n{}\n".format(mean_BER_uncoded))
```
Outputs:
```python
Soft decision:
2.8000000000000003e-05
Hard decision:
0.0007809999999999999
Uncoded message:
0.009064
```
### Reference
[1] Moon, Todd K. *Error Correction Coding: Mathematical Methods and Algorithms*. John Wiley and Sons (2005).
| /scikit-commpy-0.8.0.tar.gz/scikit-commpy-0.8.0/commpy/channelcoding/README.md | 0.565059 | 0.913484 | README.md | pypi |
import collections
import encoder
from estimator import LRWrapper, XgBoostWrapper
from utils import common
__author__ = 'jiyue'
from sklearn.metrics import confusion_matrix
from sklearn.metrics import recall_score, precision_score, accuracy_score, roc_auc_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split, cross_val_predict, StratifiedKFold, \
KFold
from sklearn.metrics import recall_score, precision_score, accuracy_score, roc_auc_score
from sklearn.dummy import DummyClassifier
from sklearn.metrics import roc_curve, auc, roc_auc_score, accuracy_score
from sklearn.linear_model import LogisticRegression
from abc import ABCMeta, abstractmethod
import pandas as pd
class Bootstrap(object):
__metaclass__ = ABCMeta
def __init__(self):
self.X_src = None
self.y_src = None
self.X_featured = None
@abstractmethod
def load_data(self,
                  file_path, # path to the training or prediction file
                  fea_list, # feature list file
                  target, # target column
split_mode='ratio',
in_format='dataframe',
in_postfix='csv'
):
pass
@abstractmethod
def go_binning(self, event_identify, binning_spec_with, binned_other_value,
binned_width=5, binning_mode='ef'):
pass
@abstractmethod
def do_train(self, params, model='lr'):
pass
@abstractmethod
def train_score_card(self, feature_list, target, feature_weights_file_path, feature_data_file, output_score_file,
sep=','):
pass
@abstractmethod
def predict_score_card(self, target):
pass
@abstractmethod
def model_evaluation(self, score_col_name, target, event_identify, ks_bin_num=100):
pass
def go_bootstrap(self,
file_path,
split_mode,
in_format,
in_postfix,
fea_list,
target,
event_identify,
binning_mode,
binning_spec_with,
binned_other_value,
binned_num,
feature_method,
model,
score_col_name,
ks_bin_num):
self.load_data(file_path, fea_list, target, split_mode, in_format, in_postfix)
self.go_binning(event_identify, binning_mode, binning_spec_with, binned_other_value,
binned_num)
self.train_score_card(target, event_identify, feature_method, model)
self.predict_score_card(target)
self.model_evaluation(score_col_name, target, event_identify, ks_bin_num)
class MainBootstrap(Bootstrap):
def __init__(self):
super(MainBootstrap, self).__init__()
self.binned_X = None
self.woe = None
self.df_binned_dummy_x = None
self.weight_of_feature = None
self.fea_list = None
self.binned_range = None
self.x_train = None
self.x_test = None
self.y_train = None
self.y_test = None
self.estimator = None
def load_data(self,
file_path,
fea_list=None,
target='label',
split_mode='ratio',
in_format='dataframe',
in_postfix='csv'
):
data_frame = pd.DataFrame()
if in_format == 'dataframe' and in_postfix == 'csv':
data_frame = pd.read_csv(file_path)
elif in_format == 'dataframe' and in_postfix == 'xls':
data_frame = pd.read_excel(file_path)
else:
data0, data1 = common.load_svm_format_file(file_path)
self.y_src = data_frame[target]
if fea_list:
            self.X_src = data_frame.drop([target], axis=1).loc[:, fea_list]
else:
self.X_src = data_frame.drop([target], axis=1)
self.fea_list = self.X_src.columns.values.tolist()
def _do_featuring(self):
df_binned_x = pd.DataFrame(data=self.binned_X, columns=self.X_src.columns.values)
for col in self.X_src.columns.values.tolist():
df_binned_x[col] = df_binned_x[col].astype('category')
self.df_binned_dummy_x = pd.get_dummies(df_binned_x)
def go_binning(self, event_identify, binning_spec_with, binned_other_value,
binned_width=5, exclude_col=None, binning_mode='ef'):
woe_encoder = encoder.WoeEncoder(binning_mode=binning_mode, bin_width=binned_width, exclude_col=exclude_col)
woe_encoder.fit_transform(self.X_src.values, self.y_src.values)
self.binned_X = woe_encoder._X_binned
self.woe = woe_encoder._woe
self.binned_range = woe_encoder._binned_range
self._do_featuring()
def do_train(self, params, model='lr'):
self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(self.df_binned_dummy_x.values,
self.y_src, test_size=0.3)
if model == 'lr':
self.estimator = LRWrapper(params)
else:
self.estimator = XgBoostWrapper(params)
        dummy_classifier = DummyClassifier(random_state=0, strategy='uniform') # baseline model
dummy_classifier.fit(self.x_train, self.y_train)
self.estimator.do_train(self.x_train, self.y_train)
def do_predict(self):
if not self.estimator:
            raise Exception(u'The model must be trained before prediction')
pred, pred_proba = self.estimator.do_predict(self.x_test)
return pred, pred_proba
def get_column_name(self, column):
column_arr = column.split('_')
num = str(column_arr[len(column_arr) - 1])
return column[:-len(num) - 1]
def output_features_weight(self, path='/home/jiyue/Desktop/output_encoding'):
columns = self.df_binned_dummy_x.columns.values
with open(path, 'w') as f:
index = 0
f.write('feature\tweight\twoe\n')
for fea_idx, features_list in enumerate(self.binned_range):
for binned_index, feature_item_range in enumerate(features_list):
col_name = self.get_column_name(columns[index])
print(col_name + '$' + str(feature_item_range[0])
+ '~' + str(feature_item_range[1])
+ '\t' + str(self.estimator.weight_of_features[index])
+ '\t' + str(self.woe[binned_index][fea_idx])
+ '\t' + str(fea_idx)
+ '\t' + str(binned_index)
+ '\n')
f.write(col_name + '$' + str(feature_item_range[0])
+ '~' + str(feature_item_range[1])
+ '\t' + str(self.estimator.weight_of_features[index])
+ '\t' + str(self.woe[binned_index][fea_idx])
+ '\n')
index += 1
def train_score_card(self, feature_list, target,
feature_weights_file_path, feature_data_file, output_score_file,
sep=','):
df = pd.read_csv(feature_data_file, sep=sep)
with open(output_score_file, 'w') as f:
feature_list.append('label')
feature_list.append('prob')
feature_list.append('zengxin_score')
feature_list_str = ' '.join(feature_list)
f.write(feature_list_str + '\n')
for index, item in df.iterrows():
sample = collections.OrderedDict()
for feature in feature_list:
if feature in ('prob', 'zengxin_score'):
continue
sample[feature] = item[feature]
common.cal_user_query_score(feature_weights_file_path, f, sample)
def model_evaluation(self, score_col_name, target, event_identify, ks_bin_num=100):
pass
def predict_score_card(self, target):
pass | /scikit-credit-0.0.23.tar.gz/scikit-credit-0.0.23/scikit_credit/framework/bootstrap.py | 0.599954 | 0.198919 | bootstrap.py | pypi |
import math
from sklearn.base import TransformerMixin
from sklearn.utils.multiclass import type_of_target
import numpy as np
from scipy import stats
import pandas as pd
__author__ = 'jiyue'
class WoeEncoder(TransformerMixin):
def __init__(self,
binning_mode='ew',
bin_width=5,
bin_cols=None,
woe_min=-20,
woe_max=20,
binned_spec_width=None,
exclude_col=None
):
self._binning_mode = binning_mode
self._bin_width = bin_width
self._bin_cols = bin_cols
self._WOE_MIN = woe_min
self._WOE_MAX = woe_max
self._features_woe = None
self._features_iv = None
self._X_binned = None
self._woe = None
self._binned_range = list()
self._binned_spec_width = binned_spec_width
def _check_target_type(self, y):
type_y = type_of_target(y)
if type_y not in ['binary']:
raise ValueError('y must be binary variable')
def _count_binary(self, x, event=1):
event_count = (x == event).sum()
non_event_count = x.shape[-1] - event_count
return event_count, non_event_count
def _compute_woe_iv(self, x, y, event=1):
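        # WOE(bin) = ln(P(bin | event) / P(bin | non-event)) and
        # IV = sum over bins of (P(bin | event) - P(bin | non-event)) * WOE(bin)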
self._check_target_type(y)
event_count, non_event_count = self._count_binary(y, event)
x_labels = np.unique(x)
woe_dict = dict()
iv = 0
for x1 in x_labels:
y1 = y[np.where(x == x1)[0]]
event_count_infeature, non_event_count_infeature = self._count_binary(y1, event)
rate_event = 1.0 * event_count_infeature / event_count
rate_non_event = 1.0 * non_event_count_infeature / non_event_count
if rate_event == 0:
woe1 = self._WOE_MIN
elif rate_non_event == 0:
woe1 = self._WOE_MAX
else:
woe1 = math.log(rate_event / rate_non_event)
woe_dict[x1] = woe1
iv += (rate_event - rate_non_event) * woe1
return woe_dict, iv
def _woe_compute(self, X, y, event=1):
self._check_target_type(y)
self._do_binning(X)
tmp_features_woe = list()
tmp_features_iv = list()
for i in range(self._X_binned.shape[-1]):
x = self._X_binned[:, i]
woe_dict, iv = self._compute_woe_iv(x, y, event)
tmp_features_woe.append(woe_dict)
tmp_features_iv.append(iv)
self._features_woe = np.array(tmp_features_woe)
self._features_iv = np.array(tmp_features_iv)
def eq_freq_binning(self, X, index_X):
res = np.array([1] * X.shape[-1], dtype=int)
bining_range = list()
bin_index = 0
q = 100 / self._bin_width
binned_categories = pd.qcut(X, q, duplicates='drop')
interval_list = binned_categories.categories.values.tolist()
for interval in interval_list:
left_point = round(interval.left, 3)
right_point = round(interval.right, 3)
X1 = X[np.where((X >= left_point) & (X < right_point))]
mask = np.in1d(X, X1)
res[mask] = bin_index + 1
bin_index += 1
bining_range.append([left_point, right_point])
return res, bining_range
def eq_width_binning2(self, X, index_X):
res = np.array([1] * X.shape[-1], dtype=int)
bining_range = list()
bin_index = 0
binned_categories = pd.cut(X, bins=self._bin_width)
interval_list = binned_categories.categories.values.tolist()
for interval in interval_list:
left_point = int(math.ceil(interval.left))
right_point = int(math.ceil(interval.right))
X1 = X[np.where((X >= left_point) & (X < right_point))]
mask = np.in1d(X, X1)
res[mask] = bin_index + 1
bin_index += 1
bining_range.append([left_point, right_point])
return res, bining_range
    # equal-width binning is implemented here
def eq_width_binning(self, X, index_X):
res = np.array([1] * X.shape[-1], dtype=int)
bining_range = list()
bin_index = 0
if not self._binned_spec_width:
percentile = 100 / self._bin_width
else:
percentile = 100 / self._binned_spec_width[index_X]
i = 0
flag = False
X_target = X
while True and i < self._bin_width:
left_point = stats.scoreatpercentile(X_target, i * percentile)
right_point = stats.scoreatpercentile(X_target, (i + 1) * percentile)
i += 1
if left_point == right_point and left_point == 0:
flag = False
continue
if not flag:
X_target = X[np.where(X >= right_point)]
flag = True
i = 0
X1 = X[np.where((X >= left_point) & (X < right_point))]
mask = np.in1d(X, X1)
res[mask] = bin_index + 1
bin_index += 1
                print(np.unique(res))
bining_range.append([left_point, right_point])
continue
X1 = X[np.where((X >= left_point) & (X < right_point))]
mask = np.in1d(X, X1)
res[mask] = bin_index + 1
bin_index += 1
            print(np.unique(res))
bining_range.append([left_point, right_point])
return res, bining_range
def _do_smooth(self, X, i):
        if self._binning_mode == 'ew':  # equal-width binning
            return self.eq_width_binning2(X, i)
        else:  # equal-frequency binning
return self.eq_freq_binning(X, i)
def _do_binning(self, X):
tmp = list()
for i in range(X.shape[-1]):
x = X[:, i]
x_discrete, bining_range = self._do_smooth(x, i)
tmp.append(x_discrete)
self._binned_range.append(bining_range)
self._X_binned = np.array(tmp).T
def _convert_to_woe(self, X_binned, woe_arr):
if X_binned.shape[-1] != woe_arr.shape[-1]:
raise ValueError('dimension is not consistence')
self._woe = np.copy(X_binned).astype(float)
idx = 0
for woe_dict in woe_arr:
for k in woe_dict.keys():
woe = woe_dict[k]
self._woe[:, idx][np.where(self._woe[:, idx] == k)[0]] = woe * 1.0
idx += 1
return self._woe
def fit(self, X, y):
self._woe_compute(X, y)
return self
def transform(self, X):
return self._convert_to_woe(self._X_binned, self._features_woe) | /scikit-credit-0.0.23.tar.gz/scikit-credit-0.0.23/scikit_credit/encoder/risk_encoder.py | 0.576423 | 0.204322 | risk_encoder.py | pypi |
__author__ = 'jiyue'
import pandas as pd
import math
from sklearn.datasets import dump_svmlight_file, load_svmlight_file
from sklearn.externals.joblib import Memory
import numpy as np
mem = Memory("~/.svmmem")
def compute_missing_pct(dataframe, dtype):
    return dataframe.select_dtypes(include=[dtype]).describe().T \
        .assign(missing_pct=dataframe.apply(lambda x: (len(x) - x.count()) / float(len(x))))
def dump_to_svm_format_file(data_frame, label='label', file_name='svm-output.libsvm'):
dummy = pd.get_dummies(data_frame)
y = data_frame[label]
mat = dummy.as_matrix()
dump_svmlight_file(mat, y, file_name)
def load_svm_format_file(path):
data = load_svmlight_file(path)
return data[0], data[1]
def cal_sigmod(x):
one = 1.0
return one / (one + math.exp(-x))
def cal_predict_prob(weights):
inner = 0.0
for i in weights:
inner += i
return cal_sigmod(inner)
def cal_aplus_score(pred):
BASE_SCORE = 600
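    # scorecard-style scaling (assumed intent): about 20 points per doubling of
    # the good:bad odds, anchored so that odds of 60:1 map to the base score 600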
init_score = BASE_SCORE - 20 / math.log(2) * math.log(60) + 20 / math.log(2) * math.log(
(1 - pred) / (pred + 0.000001))
return int(init_score)
def cal_weight(encoding_file_path, header=False):
res = dict()
with open(encoding_file_path, 'r') as f:
list_all_data = f.readlines()
for idx, item in enumerate(list_all_data):
if not header and idx == 0:
continue
sign = item.split('\t')[0]
weight = item.split('\t')[1]
feature_name = sign.split('$')[0]
feature_binning_range = sign.split('$')[1]
if feature_name not in res:
res[feature_name] = dict()
res[feature_name]['bins'] = list()
res[feature_name]['weight'] = list()
res[feature_name]['bins'].append(
(feature_binning_range.split('~')[0], feature_binning_range.split('~')[1]))
res[feature_name]['weight'].append(weight)
return res
def cal_user_query_score(encoding_file_path, output_file_handler, sample, estimator=None):
weight_list = list()
mapping_res = cal_weight(encoding_file_path)
for feature_name, feature_item in mapping_res.iteritems():
for fea_key, fea_value in sample.iteritems():
if feature_name == fea_key:
bins_range = mapping_res[feature_name]['bins']
for index, item_range in enumerate(bins_range):
if fea_value >= float(item_range[0]) and fea_value < float(item_range[1]):
weight_list.append(float(mapping_res[feature_name]['weight'][index]))
if not estimator:
prob = cal_predict_prob(weight_list)
else:
columns = list()
data = list()
for col_name, col_value in sample.iteritems():
columns.append(col_name)
data.append(col_value)
prob = estimator.do_predict(np.array(data))[1]
score = cal_aplus_score(prob)
for k, v in sample.iteritems():
print str(k) + ':' + str(v) + ' '
output_file_handler.write(str(v) + ' ')
output_file_handler.write(str(round(prob, 3)) + ' ')
output_file_handler.write(str(score) + '\n')
print score
return score | /scikit-credit-0.0.23.tar.gz/scikit-credit-0.0.23/scikit_credit/utils/common.py | 0.44746 | 0.253024 | common.py | pypi |
# Changelog of Scikit-Criteria
<!-- BODY -->
## Version 0.8.3
- Fixed a bug detected in `EntropyWeighted`; it now works as the literature
  specifies.
## Version 0.8.2
- We bring back Python 3.7 because it is the version used in google.colab.
- Bugfixes in `plot.frontier` and `dominance.eq`.
## Version 0.8
- **New** The `skcriteria.cmp` package utilities to compare rankings.
- **New** The new package `skcriteria.datasets` includes two datasets (a toy
  one and a real one) to quickly start your experiments.
- **New** DecisionMatrix now can be sliced with a syntax similar to that of
  `pandas.DataFrame`; see the sketch after this list.
- `dm["c0"]` cut the *c0* criteria.
- `dm[["c0", "c2"]` cut the criteria *c0* and *c2*.
- `dm.loc["a0"]` cut the alternative *a0*.
- `dm.loc[["a0", "a1"]]` cut the alternatives *a0* and *a1*.
- `dm.iloc[0:3]` cuts from the first to the third alternative.
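  A minimal sketch, assuming a matrix built with `skcriteria.mkdm` (the data
  below is illustrative):
  ```python
  import skcriteria as skc
  dm = skc.mkdm(
      matrix=[[1, 2, 3], [4, 5, 6]],
      objectives=[max, max, min],
      alternatives=["a0", "a1"],
      criteria=["c0", "c1", "c2"],
  )
  dm[["c0", "c2"]]  # criteria c0 and c2
  dm.loc["a0"]      # alternative a0
  dm.iloc[0:2]      # the first two alternatives
  ```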
- **New** imputation methods for replacing missing data with substituted
values. These methods are in the module `skcriteria.preprocessing.impute`.
- **New** results object now has a `to_series` method.
- **Changed Behaviour**: The ranks and kernels `equals` are now called
`values_equals`. The new `aequals` support tolerances to compare
numpy arrays internally stored in `extra_`, and the `equals` method is
equivalent to `aequals(rtol=0, atol=0)`.
- We detected a bad behavior in ELECTRE2, so we decided to raise a `FutureWarning` when the
  class is instantiated. In the version after 0.8, a new implementation of ELECTRE2 will be
  provided.
- Multiple `__repr__` implementations were improved to follow the
  [Python recommendation](https://docs.python.org/3/library/functions.html#repr)
- `Critic` weighter was renamed to `CRITIC` (all capitals) to be consistent
with the literature. The old class is still there but is deprecated.
- All the functions and classes of `skcriteria.preprocessing.distance` was
moved to `skcriteria.preprocessing.scalers`.
- The `StdWeighter` now uses the **sample** standard deviation.
From the numerical point of view, this does not generate any change,
since the deviations are scaled by the sum. Computationally speaking there
may be some difference from the ~5th decimal digit onwards.
- Two method of the `Objective` enum was deprecated and replaced:
- `Objective.construct_from_alias()` `->` `Objective.from_alias()` (*classmethod*)
- `Objective.to_string()` `->` `Objective.to_symbol()`
The deprecated methods will be removed in version *1.0*.
- Add a dominance plot `DecisionMatrix.plot.dominance()`.
- `WeightedSumModel` raises a `ValueError` when some value *< 0*.
- Moved internal modules
- `skcriteria.core.methods.SKCTransformerABC` `->`
`skcriteria.preprocessing.SKCTransformerABC`
- `skcriteria.core.methods.SKCMatrixAndWeightTransformerABC` `->`
`skcriteria.preprocessing.SKCMatrixAndWeightTransformerABC`
## Version 0.7
- **New method**: `ELECTRE2`.
- **New preprocessing strategy:** A new way to transform from minimization to
maximization criteria: `NegateMinimize()` which reverses the sign of the
values of the criteria to be minimized (useful for not breaking distance
  relations in methods like *TOPSIS*). Additionally, the previous
  `MinimizeToMaximize()` transformer was renamed to `InvertMinimize()`.
- Now the `RankingResult` supports repeated/tied rankings, and some methods were
implemented to deal with these cases.
- `RankingResult.has_ties_` to see if there are tied values.
- `RankingResult.ties_` to see how often values are repeated.
  - `RankingResult.untied_rank_` to get a ranking with no repeated values.
- `KernelResult` now implements several new properties:
- `kernel_alternatives_` to know which alternatives are in the kernel.
- `kernel_size_` to know the number of alternatives in the kernel.
- `kernel_where_` was replaced by `kernelwhere_` to standardize the api.
## Version 0.6
- Support for Python 3.10.
- All the objects of the project are now immutable by design, and can only
  be mutated through the `object.copy()` method.
- Dominance analysis tools (`DecisionMatrix.dominance`).
- The method `DecisionMatrix.describe()` was deprecated and will be removed
in version *1.0*.
- New statistics functionalities `DecisionMatrix.stats` accessor.
- The accessors are now cached in the `DecisionMatrix`.
- Tutorial for dominance and satisfaction analysis.
- TOPSIS now support hyper-parameters to select different metrics.
- Generalize the idea of accessors in scikit-criteria through a common
framework (`skcriteria.utils.accabc` module).
- New deprecation mechanism through the `skcriteria.utils.decorators.deprecated` decorator.
## Version 0.5
In this version scikit-criteria was rewritten from scratch. Among other things:
- The model implementation API was simplified.
- The `Data` object was removed in favor of `DecisionMatrix` which implements many more useful features for MCDA.
- Plots were completely re-implemented using [Seaborn](http://seaborn.pydata.org/).
- Coverage was increased to 100%.
- Pipelines concept was added (Thanks to [Scikit-learn](https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html)).
- New documentation. The quick start is totally rewritten!
**Full Changelog**: https://github.com/quatrope/scikit-criteria/commits/0.5
## Version 0.2
First OO stable version.
## Version 0.1
Only functions.
| /scikit-criteria-0.8.3.tar.gz/scikit-criteria-0.8.3/CHANGELOG.md | 0.871803 | 0.680574 | CHANGELOG.md | pypi |
# =============================================================================
# DOCS
# =============================================================================
"""The Module implements utilities to build a composite decision-maker."""
# =============================================================================
# IMPORTS
# =============================================================================
from .core import SKCMethodABC
from .utils import Bunch, unique_names
# =============================================================================
# CLASS
# =============================================================================
class SKCPipeline(SKCMethodABC):
"""Pipeline of transforms with a final decision-maker.
Sequentially apply a list of transforms and a final decisionmaker.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement `transform` method.
The final decision-maker only needs to implement `evaluate`.
The purpose of the pipeline is to assemble several steps that can be
applied together while setting different parameters.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing evaluate/transform)
that are chained, in the order in which they are chained, with the last
        object a decision-maker.
See Also
--------
skcriteria.pipeline.mkpipe : Convenience function for simplified
pipeline construction.
"""
_skcriteria_dm_type = "pipeline"
_skcriteria_parameters = ["steps"]
def __init__(self, steps):
steps = list(steps)
self._validate_steps(steps)
self._steps = steps
# INTERNALS ===============================================================
def _validate_steps(self, steps):
for name, step in steps[:-1]:
if not isinstance(name, str):
raise TypeError("step names must be instance of str")
if not (hasattr(step, "transform") and callable(step.transform)):
raise TypeError(
f"step '{name}' must implement 'transform()' method"
)
name, dmaker = steps[-1]
if not isinstance(name, str):
raise TypeError("step names must be instance of str")
if not (hasattr(dmaker, "evaluate") and callable(dmaker.evaluate)):
raise TypeError(
f"step '{name}' must implement 'evaluate()' method"
)
# PROPERTIES ==============================================================
@property
def steps(self):
"""List of steps of the pipeline."""
return list(self._steps)
@property
def named_steps(self):
"""Dictionary-like object, with the following attributes.
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
"""
return Bunch("steps", dict(self.steps))
# DUNDERS =================================================================
def __len__(self):
"""Return the length of the Pipeline."""
return len(self._steps)
def __getitem__(self, ind):
"""Return a sub-pipeline or a single step in the pipeline.
Indexing with an integer will return an step; using a slice
returns another Pipeline instance which copies a slice of this
Pipeline. This copy is shallow: modifying steps in the sub-pipeline
will affect the larger pipeline and vice-versa.
However, replacing a value in `step` will not affect a copy.
"""
if isinstance(ind, slice):
if ind.step not in (1, None):
cname = type(self).__name__
raise ValueError(f"{cname} slicing only supports a step of 1")
return self.__class__(self.steps[ind])
elif isinstance(ind, int):
return self.steps[ind][-1]
elif isinstance(ind, str):
return self.named_steps[ind]
raise KeyError(ind)
# API =====================================================================
def evaluate(self, dm):
"""Run the all the transformers and the decision maker.
Parameters
----------
dm: :py:class:`skcriteria.data.DecisionMatrix`
Decision matrix on which the result will be calculated.
Returns
-------
r : Result
            Whatever the last step (the decision-maker) returns from its
            `evaluate` method.
"""
dm = self.transform(dm)
_, dmaker = self.steps[-1]
result = dmaker.evaluate(dm)
return result
def transform(self, dm):
"""Run the all the transformers.
Parameters
----------
dm: :py:class:`skcriteria.data.DecisionMatrix`
Decision matrix on which the transformations will be applied.
Returns
-------
dm: :py:class:`skcriteria.data.DecisionMatrix`
Transformed decision matrix.
"""
for _, step in self.steps[:-1]:
dm = step.transform(dm)
return dm
# =============================================================================
# FACTORY
# =============================================================================
def mkpipe(*steps):
"""Construct a Pipeline from the given transformers and decision-maker.
This is a shorthand for the SKCPipeline constructor; it does not require,
and does not permit, naming the estimators. Instead, their names will
be set to the lowercase of their types automatically.
Parameters
----------
*steps: list of transformers and decision-maker object
List of the scikit-criteria transformers and decision-maker
that are chained together.
Returns
-------
p : SKCPipeline
Returns a scikit-criteria :class:`SKCPipeline` object.
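    Examples
    --------
    A minimal sketch, assuming ``dm`` is an already built ``DecisionMatrix``;
    the scaler and decision-maker below are only illustrative:
    >>> from skcriteria.preprocessing.scalers import SumScaler
    >>> from skcriteria.madm.simple import WeightedSumModel
    >>> pipe = mkpipe(SumScaler(target="both"), WeightedSumModel())
    >>> result = pipe.evaluate(dm)  # doctest: +SKIP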
"""
names = [type(step).__name__.lower() for step in steps]
named_steps = unique_names(names=names, elements=steps)
return SKCPipeline(named_steps) | /scikit-criteria-0.8.3.tar.gz/scikit-criteria-0.8.3/skcriteria/pipeline.py | 0.932029 | 0.570212 | pipeline.py | pypi |
# =============================================================================
# DOCS
# =============================================================================
"""Some simple and compensatory methods."""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
from ._madm_base import RankResult, SKCDecisionMakerABC
from ..core import Objective
from ..utils import doc_inherit, rank
# =============================================================================
# SAM
# =============================================================================
def wsm(matrix, weights):
"""Execute weighted sum model without any validation."""
    # calculate the ranking by inner product
rank_mtx = np.inner(matrix, weights)
score = np.squeeze(rank_mtx)
return rank.rank_values(score, reverse=True), score
class WeightedSumModel(SKCDecisionMakerABC):
r"""The weighted sum model.
WSM is the best known and simplest multi-criteria decision analysis for
evaluating a number of alternatives in terms of a number of decision
criteria. It is very important to state here that it is applicable only
when all the data are expressed in exactly the same unit. If this is not
the case, then the final result is equivalent to "adding apples and
oranges". To avoid this problem a previous normalization step is necessary.
In general, suppose that a given MCDA problem is defined on :math:`m`
alternatives and :math:`n` decision criteria. Furthermore, let us assume
that all the criteria are benefit criteria, that is, the higher the values
are, the better it is. Next suppose that :math:`w_j` denotes the relative
weight of importance of the criterion :math:`C_j` and :math:`a_{ij}` is
the performance value of alternative :math:`A_i` when it is evaluated in
terms of criterion :math:`C_j`. Then, the total (i.e., when all the
criteria are considered simultaneously) importance of alternative
:math:`A_i`, denoted as :math:`A_{i}^{WSM-score}`, is defined as follows:
.. math::
A_{i}^{WSM-score} = \sum_{j=1}^{n} w_j a_{ij},\ for\ i = 1,2,3,...,m
For the maximization case, the best alternative is the one that yields
the maximum total performance value.
Raises
------
ValueError:
If some objective is for minimization.
References
----------
:cite:p:`fishburn1967letter`, :cite:p:`enwiki:1033561221`,
:cite:p:`tzeng2011multiple`
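    Examples
    --------
    A minimal sketch with illustrative data (the matrix is built with
    :py:func:`skcriteria.mkdm`):
    >>> import skcriteria as skc
    >>> dm = skc.mkdm([[1, 2], [4, 3]], [max, max], weights=[0.6, 0.4])
    >>> WeightedSumModel().evaluate(dm)  # doctest: +SKIP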
"""
_skcriteria_parameters = []
@doc_inherit(SKCDecisionMakerABC._evaluate_data)
def _evaluate_data(self, matrix, weights, objectives, **kwargs):
if Objective.MIN.value in objectives:
raise ValueError(
"WeightedSumModel can't operate with minimize objective"
)
if np.any(matrix < 0):
raise ValueError("WeightedSumModel can't operate with values < 0")
rank, score = wsm(matrix, weights)
return rank, {"score": score}
@doc_inherit(SKCDecisionMakerABC._make_result)
def _make_result(self, alternatives, values, extra):
return RankResult(
"WeightedSumModel",
alternatives=alternatives,
values=values,
extra=extra,
)
# =============================================================================
# WPROD
# =============================================================================
def wpm(matrix, weights):
"""Execute weighted product model without any validation."""
    # instead of multiplying the values, sum their logarithms (avoids overflow/underflow)
lmtx = np.log10(matrix)
    # apply the weights to the log-matrix
rank_mtx = np.multiply(lmtx, weights)
score = np.sum(rank_mtx, axis=1)
return rank.rank_values(score, reverse=True), score
class WeightedProductModel(SKCDecisionMakerABC):
r"""The weighted product model.
WPM is a popular multi-criteria decision
analysis method. It is similar to the weighted sum model.
The main difference is that instead of addition in the main mathematical
operation now there is multiplication.
In general, suppose that a given MCDA problem is defined on :math:`m`
alternatives and :math:`n` decision criteria. Furthermore, let us assume
that all the criteria are benefit criteria, that is, the higher the values
are, the better it is. Next suppose that :math:`w_j` denotes the relative
weight of importance of the criterion :math:`C_j` and :math:`a_{ij}` is
the performance value of alternative :math:`A_i` when it is evaluated in
terms of criterion :math:`C_j`. Then, the total (i.e., when all the
criteria are considered simultaneously) importance of alternative
:math:`A_i`, denoted as :math:`A_{i}^{WPM-score}`, is defined as follows:
.. math::
A_{i}^{WPM-score} = \prod_{j=1}^{n} a_{ij}^{w_j},\ for\ i = 1,2,3,...,m
To avoid underflow, instead the multiplication of the values we add the
logarithms of the values; so :math:`A_{i}^{WPM-score}`,
is finally defined as:
.. math::
A_{i}^{WPM-score} = \sum_{j=1}^{n} w_j \log(a_{ij}),\
for\ i = 1,2,3,...,m
For the maximization case, the best alternative is the one that yields
the maximum total performance value.
Raises
------
ValueError:
If some objective is for minimization or some value in the matrix
is <= 0.
References
----------
:cite:p:`bridgman1922dimensional`
:cite:p:`miller1963executive`
"""
_skcriteria_parameters = []
@doc_inherit(SKCDecisionMakerABC._evaluate_data)
def _evaluate_data(self, matrix, weights, objectives, **kwargs):
if Objective.MIN.value in objectives:
raise ValueError(
"WeightedProductModel can't operate with minimize objective"
)
if np.any(matrix <= 0):
raise ValueError(
"WeightedProductModel can't operate with values <= 0"
)
rank, score = wpm(matrix, weights)
return rank, {"score": score}
@doc_inherit(SKCDecisionMakerABC._make_result)
def _make_result(self, alternatives, values, extra):
return RankResult(
"WeightedProductModel",
alternatives=alternatives,
values=values,
extra=extra,
) | /scikit-criteria-0.8.3.tar.gz/scikit-criteria-0.8.3/skcriteria/madm/simple.py | 0.870253 | 0.68115 | simple.py | pypi |
# =============================================================================
# DOCS
# =============================================================================
"""Core functionalities to create madm decision-maker classes."""
# =============================================================================
# imports
# =============================================================================
import abc
from collections import Counter
import numpy as np
import pandas as pd
from ..core import SKCMethodABC
from ..utils import Bunch, deprecated, doc_inherit
# =============================================================================
# DM BASE
# =============================================================================
class SKCDecisionMakerABC(SKCMethodABC):
"""Abstract class for all decisor based methods in scikit-criteria."""
_skcriteria_abstract_class = True
_skcriteria_dm_type = "decision_maker"
@abc.abstractmethod
def _evaluate_data(self, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def _make_result(self, alternatives, values, extra):
raise NotImplementedError()
def evaluate(self, dm):
"""Validate the dm and calculate and evaluate the alternatives.
Parameters
----------
dm: :py:class:`skcriteria.data.DecisionMatrix`
Decision matrix on which the ranking will be calculated.
Returns
-------
:py:class:`skcriteria.data.RankResult`
Ranking.
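        Examples
        --------
        A minimal illustrative sketch using one concrete decision maker
        (``WeightedSumModel``); the data is arbitrary and the output is
        omitted:
        .. code-block:: pycon
            >>> import skcriteria as skc
            >>> from skcriteria.madm.simple import WeightedSumModel
            >>> dm = skc.mkdm([[1, 2], [3, 4]], [max, max])  # arbitrary data
            >>> WeightedSumModel().evaluate(dm)  # returns a RankResult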
"""
data = dm.to_dict()
result_data, extra = self._evaluate_data(**data)
alternatives = data["alternatives"]
result = self._make_result(
alternatives=alternatives, values=result_data, extra=extra
)
return result
# =============================================================================
# RESULTS
# =============================================================================
class ResultABC(metaclass=abc.ABCMeta):
"""Base class to implement different types of results.
Any evaluation of the DecisionMatrix is expected to result in an object
that extends the functionalities of this class.
Parameters
----------
method: str
Name of the method that generated the result.
alternatives: array-like
Names of the alternatives evaluated.
values: array-like
Values assigned to each alternative by the method, where the i-th
        value refers to the valuation of the i-th alternative.
extra: dict-like
Extra information provided by the method regarding the evaluation of
the alternatives.
"""
_skcriteria_result_series = None
def __init_subclass__(cls):
"""Validate if the subclass are well formed."""
result_column = cls._skcriteria_result_series
if result_column is None:
raise TypeError(f"{cls} must redefine '_skcriteria_result_series'")
def __init__(self, method, alternatives, values, extra):
self._validate_result(values)
self._method = str(method)
self._extra = Bunch("extra", extra)
self._result_series = pd.Series(
values,
index=pd.Index(alternatives, name="Alternatives", copy=True),
name=self._skcriteria_result_series,
copy=True,
)
@abc.abstractmethod
def _validate_result(self, values):
"""Validate that the values are the expected by the result type."""
raise NotImplementedError()
@property
def values(self):
"""Values assigned to each alternative by the method.
        The i-th value refers to the valuation of the i-th alternative.
"""
return self._result_series.to_numpy(copy=True)
@property
def method(self):
"""Name of the method that generated the result."""
return self._method
@property
def alternatives(self):
"""Names of the alternatives evaluated."""
return self._result_series.index.to_numpy(copy=True)
@property
def extra_(self):
"""Additional information about the result.
Note
----
``e_`` is an alias for this property
"""
return self._extra
e_ = extra_
# UTILS ===================================================================
def to_series(self):
"""The result as `pandas.Series`."""
series = self._result_series.copy(deep=True)
series.index = self._result_series.index.copy(deep=True)
return series
# CMP =====================================================================
@property
def shape(self):
"""Tuple with (number_of_alternatives, ).
rank.shape <==> np.shape(rank)
"""
return np.shape(self._result_series)
def __len__(self):
"""Return the number ot alternatives.
rank.__len__() <==> len(rank).
"""
return len(self._result_series)
def values_equals(self, other):
"""Check if the alternatives and ranking are the same.
        This check ignores the ``method`` name and the ``extra_`` parameters.
"""
return (self is other) or (
isinstance(other, type(self))
and self._result_series.equals(other._result_series)
)
def aequals(self, other, rtol=1e-05, atol=1e-08, equal_nan=False):
"""Return True if the result are equal within a tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
NaNs are treated as equal if they are in the same place and if
``equal_nan=True``. Infs are treated as equal if they are in the same
place and of the same sign in both arrays.
        The comparison proceeds as follows:
        - If ``other`` is the same object, return ``True``.
        - If ``other`` is not an instance of the same result class, or the
          alternatives or values differ, return ``False``.
        - Next, check the ``extra_`` values using the provided tolerance
          (:py:func:`numpy.allclose` is used for NumPy arrays).
Parameters
----------
other : Result
Other result to compare.
rtol : float
The relative tolerance parameter
(see Notes in :py:func:`numpy.allclose`).
atol : float
The absolute tolerance parameter
(see Notes in :py:func:`numpy.allclose`).
equal_nan : bool
            Whether to compare NaN's as equal. If True, NaN's in this result
            will be considered equal to NaN's in `other`.
Returns
-------
        aequals : :py:class:`bool`
            Returns True if the two results are equal within the given
            tolerance; False otherwise.
See Also
--------
equals, :py:func:`numpy.isclose`, :py:func:`numpy.all`,
:py:func:`numpy.any`, :py:func:`numpy.equal`,
:py:func:`numpy.allclose`.
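        Examples
        --------
        A small illustrative sketch with arbitrary numbers (it assumes
        ``RankResult`` is re-exported by ``skcriteria.madm``):
        .. code-block:: pycon
            >>> import numpy as np
            >>> from skcriteria.madm import RankResult  # assumed re-export
            >>> r1 = RankResult(
            ...     "m", ["a0", "a1"], [1, 2], {"score": np.array([0.5, 0.25])}
            ... )
            >>> r2 = RankResult(
            ...     "m", ["a0", "a1"], [1, 2],
            ...     {"score": np.array([0.5, 0.25 + 1e-10])},
            ... )
            >>> r1.aequals(r2)  # within the default tolerance
            True
            >>> r1.equals(r2)  # strict comparison
            False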
"""
if self is other:
return True
is_veq = self.values_equals(other) and set(self._extra) == set(
other._extra
)
keys = set(self._extra)
while is_veq and keys:
k = keys.pop()
sv = self._extra[k]
ov = other._extra[k]
if isinstance(ov, np.ndarray):
is_veq = is_veq and np.allclose(
sv,
ov,
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
)
else:
is_veq = is_veq and sv == ov
return is_veq
def equals(self, other):
"""Return True if the results are equal.
        This method calls ``aequals`` without tolerance.
Parameters
----------
        other : Result
            Other result to compare.
Returns
-------
        equals : :py:class:`bool`
            Returns True if the two results are equal.
See Also
--------
aequals, :py:func:`numpy.isclose`, :py:func:`numpy.all`,
:py:func:`numpy.any`, :py:func:`numpy.equal`,
:py:func:`numpy.allclose`.
"""
return self.aequals(other, 0, 0, False)
def __eq__(self, other):
"""x.__eq__(y) <==> x == y."""
return self.equals(other)
def __ne__(self, other):
"""x.__eq__(y) <==> x == y."""
return not self == other
# REPR ====================================================================
def __repr__(self):
"""result.__repr__() <==> repr(result)."""
kwargs = {"show_dimensions": False}
# retrieve the original string
df = self._result_series.to_frame().T
original_string = df.to_string(**kwargs)
# add dimension
string = f"{original_string}\n[Method: {self.method}]"
return string
def _repr_html_(self):
"""Return a html representation for a particular result.
Mainly for IPython notebook.
"""
df = self._result_series.to_frame().T
original_html = df.style._repr_html_()
rtype = self._skcriteria_result_series.lower()
# add metadata
html = (
f"<div class='skcresult-{rtype} skcresult'>\n"
f"{original_html}"
f"<em class='skcresult-method'>Method: {self.method}</em>\n"
"</div>"
)
return html
@doc_inherit(ResultABC, warn_class=False)
class RankResult(ResultABC):
"""Ranking of alternatives.
This type of results is used by methods that generate a ranking of
alternatives.
"""
_skcriteria_result_series = "Rank"
@doc_inherit(ResultABC._validate_result)
def _validate_result(self, values):
cleaned_values = np.unique(values)
length = len(cleaned_values)
expected = np.arange(length) + 1
if not np.array_equal(np.sort(cleaned_values), expected):
raise ValueError(f"The data {values} doesn't look like a ranking")
@property
def has_ties_(self):
"""Return True if two alternatives shares the same ranking."""
values = self.values
return len(np.unique(values)) != len(values)
@property
def ties_(self):
"""Counter object that counts how many times each value appears."""
return Counter(self.values)
@property
def rank_(self):
"""Alias for ``values``."""
return self.values
@property
def untied_rank_(self):
"""Ranking whitout ties.
if the ranking has ties this property assigns unique and consecutive
values in the ranking. This method only assigns the values using the
command ``numpy.argsort(rank_) + 1``.
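        Example
        -------
        A minimal illustrative sketch using this class directly (the data is
        arbitrary):
        .. code-block:: pycon
            >>> rr = RankResult("dummy", ["a0", "a1", "a2"], [1, 2, 2], {})
            >>> rr.has_ties_
            True
            >>> rr.untied_rank_
            array([1, 2, 3])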
"""
if self.has_ties_:
return np.argsort(self.rank_) + 1
return self.rank_
def to_series(self, *, untied=False):
"""The result as `pandas.Series`."""
if untied:
return pd.Series(
self.untied_rank_,
index=self._result_series.index.copy(deep=True),
copy=True,
name="Untied rank",
)
return super().to_series()
@doc_inherit(ResultABC, warn_class=False)
class KernelResult(ResultABC):
"""Separates the alternatives between good (kernel) and bad.
This type of results is used by methods that select which alternatives
are good and bad. The good alternatives are called "kernel"
"""
_skcriteria_result_series = "Kernel"
@doc_inherit(ResultABC._validate_result)
def _validate_result(self, values):
if np.asarray(values).dtype != bool:
raise ValueError(f"The data {values} doesn't look like a kernel")
@property
def kernel_(self):
"""Alias for ``values``."""
return self.values
@property
def kernel_size_(self):
"""How many alternatives has the kernel."""
return np.sum(self.kernel_)
@property
def kernel_where_(self):
"""Indexes of the alternatives that are part of the kernel."""
return np.where(self.kernel_)[0]
@property
@deprecated(
reason=("Use ``kernel_where_`` instead"),
version=0.7,
)
def kernelwhere_(self):
"""Indexes of the alternatives that are part of the kernel."""
return self.kernel_where_
@property
def kernel_alternatives_(self):
"""Return the names of alternatives in the kernel."""
return self._result_series.index[self._result_series].to_numpy(
copy=True
) | /scikit-criteria-0.8.3.tar.gz/scikit-criteria-0.8.3/skcriteria/madm/_madm_base.py | 0.936825 | 0.621828 | _madm_base.py | pypi |
# =============================================================================
# DOCS
# =============================================================================
"""Methods based on a similarity between alternatives."""
# =============================================================================
# IMPORTS
# =============================================================================
import warnings
import numpy as np
from scipy.spatial import distance
from ._madm_base import RankResult, SKCDecisionMakerABC
from ..core import Objective
from ..utils import doc_inherit, rank
# =============================================================================
# TOPSIS
# =============================================================================
def topsis(matrix, objectives, weights, metric="euclidean", **kwargs):
"""Execute TOPSIS without any validation."""
# apply weights
wmtx = np.multiply(matrix, weights)
# extract mins and maxes
mins = np.min(wmtx, axis=0)
maxs = np.max(wmtx, axis=0)
# create the ideal and the anti ideal arrays
where_max = np.equal(objectives, Objective.MAX.value)
ideal = np.where(where_max, maxs, mins)
anti_ideal = np.where(where_max, mins, maxs)
# calculate distances
    # reshape the 1-D ideal/anti-ideal vectors to (1, n), as cdist expects 2-D
    d_better = distance.cdist(
        wmtx, ideal[np.newaxis], metric=metric, out=None, **kwargs
    ).flatten()
    d_worst = distance.cdist(
        wmtx, anti_ideal[np.newaxis], metric=metric, out=None, **kwargs
    ).flatten()
# relative closeness
similarity = d_worst / (d_better + d_worst)
# compute the rank and return the result
return (
rank.rank_values(similarity, reverse=True),
ideal,
anti_ideal,
similarity,
)
class TOPSIS(SKCDecisionMakerABC):
"""The Technique for Order of Preference by Similarity to Ideal Solution.
    TOPSIS is based on the concept that the chosen alternative should have
    the shortest distance to the ideal solution and the longest distance to
    the anti-ideal (worst) solution, measured with the configured metric.
An assumption of TOPSIS is that the criteria are monotonically increasing
or decreasing, and also allow trade-offs between criteria, where a poor
result in one criterion can be negated by a good result in another
criterion.
Parameters
----------
metric : str or callable, optional
The distance metric to use. If a string, the distance function
can be ``braycurtis``, ``canberra``, ``chebyshev``, ``cityblock``,
``correlation``, ``cosine``, ``dice``, ``euclidean``, ``hamming``,
``jaccard``, ``jensenshannon``, ``kulsinski``, ``mahalanobis``,
``matching``, ``minkowski``, ``rogerstanimoto``, ``russellrao``,
``seuclidean``, ``sokalmichener``, ``sokalsneath``,
``sqeuclidean``, ``wminkowski``, ``yule``.
Warnings
--------
UserWarning:
If some objective is to minimize.
References
----------
:cite:p:`hwang1981methods`
:cite:p:`enwiki:1034743168`
:cite:p:`tzeng2011multiple`
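    Examples
    --------
    A minimal illustrative sketch (the data is arbitrary and the output is
    omitted):
    .. code-block:: pycon
        >>> import skcriteria as skc
        >>> from skcriteria.madm.similarity import TOPSIS
        >>> dm = skc.mkdm(  # arbitrary, illustrative data
        ...     matrix=[[1, 2, 3], [4, 5, 6]],
        ...     objectives=[max, max, max],
        ...     weights=[0.5, 0.25, 0.25],
        ... )
        >>> result = TOPSIS(metric="euclidean").evaluate(dm)
        >>> result.e_["similarity"]  # relative closeness of each alternative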
"""
_skcriteria_parameters = ["metric"]
def __init__(self, *, metric="euclidean"):
if not callable(metric) and metric not in distance._METRICS_NAMES:
metrics = ", ".join(f"'{m}'" for m in distance._METRICS_NAMES)
raise ValueError(
f"Invalid metric '{metric}'. Plese choose from: {metrics}"
)
self._metric = metric
@property
def metric(self):
"""Which distance metric will be used."""
return self._metric
@doc_inherit(SKCDecisionMakerABC._evaluate_data)
def _evaluate_data(self, matrix, objectives, weights, **kwargs):
if Objective.MIN.value in objectives:
warnings.warn(
"Although TOPSIS can operate with minimization objectives, "
"this is not recommended. Consider reversing the weights "
"for these cases."
)
rank, ideal, anti_ideal, similarity = topsis(
matrix,
objectives,
weights,
metric=self.metric,
)
return rank, {
"ideal": ideal,
"anti_ideal": anti_ideal,
"similarity": similarity,
}
@doc_inherit(SKCDecisionMakerABC._make_result)
def _make_result(self, alternatives, values, extra):
return RankResult(
"TOPSIS", alternatives=alternatives, values=values, extra=extra
) | /scikit-criteria-0.8.3.tar.gz/scikit-criteria-0.8.3/skcriteria/madm/similarity.py | 0.92222 | 0.490053 | similarity.py | pypi |
# =============================================================================
# DOCS
# =============================================================================
"""SIMUS (Sequential Interactive Model for Urban Systems) Method."""
# =============================================================================
# IMPORTS
# =============================================================================
import warnings
import numpy as np
from ._madm_base import RankResult, SKCDecisionMakerABC
from ..core import Objective
from ..preprocessing.scalers import scale_by_sum
from ..utils import doc_inherit, lp, rank
# =============================================================================
# INTERNAL FUNCTIONS
# =============================================================================
# STAGES ======================================================================
def _make_and_run_stage(transposed_matrix, b, senses, z_index, solver):
# retrieve the problem class
problem = (
lp.Minimize if senses[z_index] == Objective.MIN.value else lp.Maximize
)
# create the variables
xs = [
lp.Float(f"x{idx}", low=0) for idx in range(transposed_matrix.shape[1])
]
# create the objective function based on the criteria of row "z_index"
stage_z_coefficients = transposed_matrix[z_index]
stage_z = sum(
coefficients * x for coefficients, x in zip(stage_z_coefficients, xs)
)
# create the stage
stage = problem(z=stage_z, solver=solver)
    # the constraints are the other rows, except the row of z_index
for idx in range(transposed_matrix.shape[0]):
if idx == z_index:
continue
coefficients = transposed_matrix[idx]
# the two parts of the comparison
left = sum(c * x for c, x in zip(coefficients, xs))
right = b[idx]
        # >= if the objective is to minimize, <= if it is to maximize
constraint = (
(left >= right)
if senses[idx] == Objective.MIN.value
else (left <= right)
)
stage.subject_to(constraint)
stage_result = stage.solve()
return stage_result
def _solve_stages(transposed_matrix, b, objectives, solver):
    # solve one linear program per criterion (objective)
stages = []
for idx in range(transposed_matrix.shape[0]):
stage = _make_and_run_stage(
transposed_matrix=transposed_matrix,
b=b,
senses=objectives,
z_index=idx,
solver=solver,
)
stages.append(stage)
# create the results mtx
arr_result = np.vstack([r.lp_values for r in stages])
with np.errstate(invalid="ignore"):
stages_result = scale_by_sum(arr_result, axis=1)
# replace nan for 0
stages_result[np.isnan(stages_result)] = 0
return stages, stages_result
# FIRST METHOD ===============================================================
def _first_method(*, stages_results):
# project sum value
sp = np.sum(stages_results, axis=0)
# times that $v_{ij} > 0$ ($q$)
q = np.sum(stages_results > 0, axis=0).astype(float)
# participation factor
fp = q / len(stages_results)
# first method points
vp = sp * fp
return vp
# SECOND METHOD ==============================================================
def _calculate_dominance_by_criteria(crit):
shape = len(crit), 1
crit_B = np.tile(crit, shape)
crit_A = crit_B.T
dominance = crit_A - crit_B
dominance[dominance < 0] = 0
return dominance
def _second_method(*, stages_results):
# dominances by criteria
dominance_by_criteria = []
for crit in stages_results:
dominance = _calculate_dominance_by_criteria(crit)
dominance_by_criteria.append(dominance)
# dominance
dominance = np.sum(dominance_by_criteria, axis=0)
# domination
tita_j_p = np.sum(dominance, axis=1)
# subordination
tita_j_d = np.sum(dominance, axis=0)
# second method score
score = tita_j_p - tita_j_d
return score, tita_j_p, tita_j_d, dominance, tuple(dominance_by_criteria)
# SIMUS =======================================================================
def simus(matrix, objectives, b=None, rank_by=1, solver="pulp"):
"""Execute SIMUS without any validation."""
transposed_matrix = matrix.T
# check the b array and complete the missing values
b = np.asarray(b)
if None in b:
mins = np.min(transposed_matrix, axis=1)
maxs = np.max(transposed_matrix, axis=1)
auto_b = np.where(objectives == Objective.MIN.value, mins, maxs)
b = np.where(b != None, b, auto_b) # noqa
# create and execute the stages
stages, stages_results = _solve_stages(
transposed_matrix=transposed_matrix,
b=b,
objectives=objectives,
solver=solver,
)
# first method
method_1_score = _first_method(stages_results=stages_results)
# second method
(
method_2_score,
tita_j_p,
tita_j_d,
dominance,
dominance_by_criteria,
) = _second_method(stages_results=stages_results)
# calculate ranking
score = [method_1_score, method_2_score][rank_by - 1]
ranking = rank.rank_values(score, reverse=True)
return (
ranking,
stages,
stages_results,
method_1_score,
method_2_score,
tita_j_p,
tita_j_d,
dominance,
dominance_by_criteria,
)
class SIMUS(SKCDecisionMakerABC):
r"""SIMUS (Sequential Interactive Model for Urban Systems).
SIMUS developed by Nolberto Munier (2011) is a tool to aid decision-making
problems with multiple objectives. The method solves successive scenarios
formulated as linear programs. For each scenario, the decision-maker must
choose the criterion to be considered objective while the remaining
restrictions constitute the constrains system that the projects are subject
to. In each case, if there is a feasible solution that is optimum, it is
recorded in a matrix of efficient results. Then, from this matrix two
rankings allow the decision maker to compare results obtained by different
procedures. The first ranking is obtained through a linear weighting of
each column by a factor - equivalent of establishing a weight - and that
measures the participation of the corresponding project. In the second
ranking, the method uses dominance and subordinate relationships between
projects, concepts from the French school of MCDM.
Parameters
----------
rank_by : 1 or 2 (default=1)
        Which of the two methods is used to calculate the ranking.
        Both methods are always executed.
solver : str, (default="pulp")
Which solver to use to solve the underlying linear programs. The full
        list is available in `pulp.listSolvers(True)`. "pulp" or None uses
        the default solver selected by "PuLP".
Warnings
--------
UserWarning:
If the method detect different weights by criteria.
Raises
------
ValueError:
If the length of b does not match the number of criteria.
See
---
`PuLP Documentation <https://coin-or.github.io/pulp/>`_
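    Examples
    --------
    A minimal illustrative sketch (it assumes a working "PuLP" solver is
    installed; the data is arbitrary and the output is omitted):
    .. code-block:: pycon
        >>> import skcriteria as skc
        >>> from skcriteria.madm.simus import SIMUS
        >>> dm = skc.mkdm(  # arbitrary, illustrative data
        ...     matrix=[[250, 120], [130, 200], [350, 340]],
        ...     objectives=[max, max],
        ... )
        >>> SIMUS(rank_by=1).evaluate(dm, b=[None, 500])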
"""
_skcriteria_parameters = ["rank_by", "solver"]
def __init__(self, *, rank_by=1, solver="pulp"):
if not (
isinstance(solver, lp.pulp.LpSolver)
or lp.is_solver_available(solver)
):
raise ValueError(f"solver {solver} not available")
self._solver = solver
if rank_by not in (1, 2):
raise ValueError("'rank_by' must be 1 or 2")
self._rank_by = rank_by
@property
def solver(self):
"""Solver used by PuLP."""
return self._solver
@property
def rank_by(self):
"""Which of the two ranking provided by SIMUS is used."""
return self._rank_by
@doc_inherit(SKCDecisionMakerABC._evaluate_data)
def _evaluate_data(self, matrix, objectives, b, weights, **kwargs):
if len(np.unique(weights)) > 1:
warnings.warn("SIMUS not take into account the weights")
if b is not None and len(objectives) != len(b):
raise ValueError("'b' must be the same leght as criteria or None")
(
ranking,
stages,
stages_results,
method_1_score,
method_2_score,
tita_j_p,
tita_j_d,
dominance,
dominance_by_criteria,
) = simus(
matrix,
objectives,
b=b,
rank_by=self.rank_by,
solver=self.solver,
)
return ranking, {
"rank_by": self._rank_by,
"b": np.copy(b),
"stages": stages,
"stages_results": stages_results,
"method_1_score": method_1_score,
"method_2_score": method_2_score,
"tita_j_p": tita_j_p,
"tita_j_d": tita_j_d,
"dominance": dominance,
"dominance_by_criteria": dominance_by_criteria,
}
@doc_inherit(SKCDecisionMakerABC._make_result)
def _make_result(self, alternatives, values, extra):
return RankResult(
"SIMUS", alternatives=alternatives, values=values, extra=extra
)
def evaluate(self, dm, *, b=None):
"""Validate the decision matrix and calculate a ranking.
Parameters
----------
dm: :py:class:`skcriteria.data.DecisionMatrix`
Decision matrix on which the ranking will be calculated.
b: :py:class:`numpy.ndarray`
            Right-hand-side values of the LP problems. By default, SIMUS
            automatically assigns the right-side vector (b) of the
            constraints of the linear programs.
If the criteria are to maximize, then the constraint is <=;
and if the column minimizes the constraint is >=.
The b/right side value limits of the constraint are chosen
automatically based on the minimum or maximum value of the
criteria/column if the constraint is <= or >= respectively.
The user provides "b" in some criteria and lets SIMUS choose
automatically others. For example, if you want to limit the two
constraints of the dm with 4 criteria by the value 100, b must be
`[None, 100, 100, None]` where None will be chosen automatically
by SIMUS.
Returns
-------
:py:class:`skcriteria.data.RankResult`
Ranking.
"""
data = dm.to_dict()
b = b if b is None else np.asarray(b)
rank, extra = self._evaluate_data(b=b, **data)
alternatives = data["alternatives"]
result = self._make_result(
alternatives=alternatives, values=rank, extra=extra
)
return result | /scikit-criteria-0.8.3.tar.gz/scikit-criteria-0.8.3/skcriteria/madm/simus.py | 0.863478 | 0.540742 | simus.py | pypi |
# =============================================================================
# DOCS
# =============================================================================
"""Implementation of functionalities for convert minimization criteria into \
maximization ones."""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
from ._preprocessing_base import SKCTransformerABC
from ..core import Objective
from ..utils import deprecated, doc_inherit
# =============================================================================
# Base Class
# =============================================================================
class SKCObjectivesInverterABC(SKCTransformerABC):
"""Abstract class capable of invert objectives.
    This abstract class requires redefining ``_invert``, instead of
``_transform_data``.
"""
_skcriteria_abstract_class = True
def _invert(self, matrix, minimize_mask):
"""Invert the minimization objectives.
Parameters
----------
matrix: :py:class:`numpy.ndarray`
            The decision matrix whose minimization criteria will be inverted.
minimize_mask: :py:class:`numpy.ndarray`
Mask with the same size as the columns in the matrix. True values
indicate that this column is a criterion to be minimized.
Returns
-------
:py:class:`numpy.ndarray`
A new matrix with the minimization objectives inverted.
"""
raise NotImplementedError()
@doc_inherit(SKCTransformerABC._transform_data)
def _transform_data(self, matrix, objectives, dtypes, **kwargs):
# check where we need to transform
minimize_mask = np.equal(objectives, Objective.MIN.value)
# execute the transformation
inv_mtx = self._invert(matrix, minimize_mask)
# new objective array
inv_objectives = np.full(
len(objectives), Objective.MAX.value, dtype=int
)
# we are trying to preserve the original dtype as much as possible
# only the minimize criteria are changed.
inv_dtypes = np.where(minimize_mask, inv_mtx.dtype, dtypes)
kwargs.update(
matrix=inv_mtx, objectives=inv_objectives, dtypes=inv_dtypes
)
return kwargs
# =============================================================================
# -x
# =============================================================================
class NegateMinimize(SKCObjectivesInverterABC):
r"""Transform all minimization criteria into maximization ones.
    The transformation is made by negating the values of
    the minimization criteria: :math:`\min{C} \equiv \max{-C}`.
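    Examples
    --------
    A minimal illustrative sketch (arbitrary data, output omitted):
    .. code-block:: pycon
        >>> import skcriteria as skc
        >>> from skcriteria.preprocessing.invert_objectives import (
        ...     NegateMinimize,
        ... )
        >>> dm = skc.mkdm([[1, 2], [4, 3]], [max, min])  # arbitrary data
        >>> NegateMinimize().transform(dm)  # the 'min' criterion becomes max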
"""
_skcriteria_parameters = []
@doc_inherit(SKCObjectivesInverterABC._invert)
def _invert(self, matrix, minimize_mask):
inv_mtx = np.array(matrix, dtype=float)
inverted_values = -inv_mtx[:, minimize_mask]
inv_mtx[:, minimize_mask] = inverted_values
return inv_mtx
# =============================================================================
# 1/x
# =============================================================================
class InvertMinimize(SKCObjectivesInverterABC):
r"""Transform all minimization criteria into maximization ones.
The transformations are made by calculating the inverse value of
the minimization criteria. :math:`\min{C} \equiv \max{\frac{1}{C}}`
Notes
-----
All the dtypes of the decision matrix are preserved except the inverted
    ones, which are converted to ``numpy.float64``.
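    Examples
    --------
    A minimal illustrative sketch (arbitrary data, output omitted):
    .. code-block:: pycon
        >>> import skcriteria as skc
        >>> from skcriteria.preprocessing.invert_objectives import (
        ...     InvertMinimize,
        ... )
        >>> dm = skc.mkdm([[1, 2], [4, 8]], [max, min])  # arbitrary data
        >>> InvertMinimize().transform(dm)  # second criterion becomes 1/x, max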
"""
_skcriteria_parameters = []
@doc_inherit(SKCObjectivesInverterABC._invert)
def _invert(self, matrix, minimize_mask):
inv_mtx = np.array(matrix, dtype=float)
inverted_values = 1.0 / inv_mtx[:, minimize_mask]
inv_mtx[:, minimize_mask] = inverted_values
return inv_mtx
# =============================================================================
# DEPRECATED
# =============================================================================
@deprecated(
reason=(
"Use ``skcriteria.preprocessing.invert_objectives.InvertMinimize`` "
"instead"
),
version=0.7,
)
class MinimizeToMaximize(InvertMinimize):
r"""Transform all minimization criteria into maximization ones.
The transformations are made by calculating the inverse value of
the minimization criteria. :math:`\min{C} \equiv \max{\frac{1}{C}}`
Notes
-----
All the dtypes of the decision matrix are preserved except the inverted
    ones, which are converted to ``numpy.float64``.
""" | /scikit-criteria-0.8.3.tar.gz/scikit-criteria-0.8.3/skcriteria/preprocessing/invert_objectives.py | 0.9226 | 0.600891 | invert_objectives.py | pypi |
# =============================================================================
# DOCS
# =============================================================================
"""Normalization through the distance to distance function."""
# =============================================================================
# IMPORTS
# =============================================================================
import abc
from collections.abc import Collection
import numpy as np
from ._preprocessing_base import SKCTransformerABC
from ..core import DecisionMatrix
from ..utils import doc_inherit
# =============================================================================
# BASE CLASS
# =============================================================================
class SKCByCriteriaFilterABC(SKCTransformerABC):
"""Abstract class capable of filtering alternatives based on criteria \
values.
    This abstract class requires redefining ``_coerce_filters`` and
``_make_mask``, instead of ``_transform_data``.
Parameters
----------
criteria_filters: dict
It is a dictionary in which the key is the name of a criterion, and
the value is the filter condition.
ignore_missing_criteria: bool, default: False
        If True, criteria named in the filters but missing from the decision
        matrix are ignored instead of raising an error.
"""
_skcriteria_parameters = ["criteria_filters", "ignore_missing_criteria"]
_skcriteria_abstract_class = True
def __init__(self, criteria_filters, *, ignore_missing_criteria=False):
if not len(criteria_filters):
raise ValueError("Must provide at least one filter")
self._criteria, self._filters = self._coerce_filters(criteria_filters)
self._ignore_missing_criteria = bool(ignore_missing_criteria)
@property
def criteria_filters(self):
"""Conditions on which the alternatives will be evaluated.
It is a dictionary in which the key is the name of a
criterion, and the value is the filter condition.
"""
return dict(zip(self._criteria, self._filters))
@property
def ignore_missing_criteria(self):
"""If the value is True the filter ignores the lack of a required \
criterion.
If the value is False, the lack of a criterion causes the filter to
fail.
"""
return self._ignore_missing_criteria
@abc.abstractmethod
def _coerce_filters(self, filters):
"""Validate the filters.
Parameters
----------
filters: dict-like
It is a dictionary in which the key is the name of a
criterion, and the value is the filter condition.
Returns
-------
(criteria, filters): tuple of two elements.
The tuple contains two iterables:
1. The first is the list of criteria.
2. The second is the filters.
"""
raise NotImplementedError()
@abc.abstractmethod
def _make_mask(self, matrix, criteria, criteria_to_use, criteria_filters):
raise NotImplementedError()
@doc_inherit(SKCTransformerABC._transform_data)
def _transform_data(self, matrix, criteria, alternatives, **kwargs):
# determine which criteria defined in the filter are in the DM
criteria_to_use, criteria_filters = [], []
for crit, flt in zip(self._criteria, self._filters):
if crit not in criteria and not self._ignore_missing_criteria:
raise ValueError(f"Missing criteria: {crit}")
elif crit in criteria:
criteria_to_use.append(crit)
criteria_filters.append(flt)
if criteria_to_use:
mask = self._make_mask(
matrix=matrix,
criteria=criteria,
criteria_to_use=criteria_to_use,
criteria_filters=criteria_filters,
)
filtered_matrix = matrix[mask]
filtered_alternatives = alternatives[mask]
else:
filtered_matrix = matrix
filtered_alternatives = alternatives
kwargs.update(
matrix=filtered_matrix,
criteria=criteria,
alternatives=filtered_alternatives,
dtypes=None,
)
return kwargs
# =============================================================================
# GENERIC FILTER
# =============================================================================
@doc_inherit(SKCByCriteriaFilterABC, warn_class=False)
class Filter(SKCByCriteriaFilterABC):
"""Function based filter.
    This class accepts as a filter any arbitrary function that receives a
    criterion (a 1-D array with its values) and returns a mask of the same
    size as the number of alternatives in the decision matrix.
Examples
--------
.. code-block:: pycon
        >>> import skcriteria as skc
        >>> from skcriteria.preprocessing import filters
>>> dm = skc.mkdm(
... matrix=[
... [7, 5, 35],
... [5, 4, 26],
... [5, 6, 28],
... [1, 7, 30],
... [5, 8, 30]
... ],
... objectives=[max, max, min],
... alternatives=["PE", "JN", "AA", "MM", "FN"],
... criteria=["ROE", "CAP", "RI"],
... )
>>> tfm = filters.Filter({
... "ROE": lambda e: e > 1,
... "RI": lambda e: e >= 28,
... })
>>> tfm.transform(dm)
ROE[▲ 2.0] CAP[▲ 4.0] RI[▼ 1.0]
PE 7 5 35
AA 5 6 28
FN 5 8 30
[3 Alternatives x 3 Criteria]
"""
def _coerce_filters(self, filters):
criteria, criteria_filters = [], []
for filter_name, filter_value in filters.items():
if not isinstance(filter_name, str):
raise ValueError("All filter keys must be instance of 'str'")
if not callable(filter_value):
raise ValueError("All filter values must be callable")
criteria.append(filter_name)
criteria_filters.append(filter_value)
return tuple(criteria), tuple(criteria_filters)
def _make_mask(self, matrix, criteria, criteria_to_use, criteria_filters):
mask_list = []
for crit_name, crit_filter in zip(criteria_to_use, criteria_filters):
crit_idx = np.in1d(criteria, crit_name, assume_unique=False)
crit_array = matrix[:, crit_idx].flatten()
crit_mask = np.apply_along_axis(
crit_filter, axis=0, arr=crit_array
)
mask_list.append(crit_mask)
mask = np.all(np.column_stack(mask_list), axis=1)
return mask
# =============================================================================
# ARITHMETIC FILTER
# =============================================================================
@doc_inherit(SKCByCriteriaFilterABC, warn_class=False)
class SKCArithmeticFilterABC(SKCByCriteriaFilterABC):
"""Provide a common behavior to make filters based on the same comparator.
    This abstract class requires redefining the ``_filter`` method, which
    will be applied to each criterion separately.
This class is designed to implement in general arithmetic comparisons of
"==", "!=", ">", ">=", "<", "<=" taking advantage of the functions
provided by numpy (e.g. ``np.greater_equal()``).
Notes
-----
The filter implemented with this class are slightly faster than
function-based filters.
"""
_skcriteria_abstract_class = True
@abc.abstractmethod
def _filter(self, arr, cond):
raise NotImplementedError()
def _coerce_filters(self, filters):
criteria, criteria_filters = [], []
for filter_name, filter_value in filters.items():
if not isinstance(filter_name, str):
raise ValueError("All filter keys must be instance of 'str'")
if not isinstance(filter_value, (int, float, complex, np.number)):
raise ValueError(
"All filter values must be some kind of number"
)
criteria.append(filter_name)
criteria_filters.append(filter_value)
return tuple(criteria), tuple(criteria_filters)
def _make_mask(self, matrix, criteria, criteria_to_use, criteria_filters):
idxs = np.in1d(criteria, criteria_to_use)
matrix = matrix[:, idxs]
mask = np.all(self._filter(matrix, criteria_filters), axis=1)
return mask
@doc_inherit(SKCArithmeticFilterABC, warn_class=False)
class FilterGT(SKCArithmeticFilterABC):
"""Keeps the alternatives for which the criteria value are greater than a \
value.
Examples
--------
.. code-block:: pycon
        >>> import skcriteria as skc
        >>> from skcriteria.preprocessing import filters
>>> dm = skc.mkdm(
... matrix=[
... [7, 5, 35],
... [5, 4, 26],
... [5, 6, 28],
... [1, 7, 30],
... [5, 8, 30]
... ],
... objectives=[max, max, min],
... alternatives=["PE", "JN", "AA", "MM", "FN"],
... criteria=["ROE", "CAP", "RI"],
... )
>>> tfm = filters.FilterGT({"ROE": 1, "RI": 27})
>>> tfm.transform(dm)
ROE[▲ 2.0] CAP[▲ 4.0] RI[▼ 1.0]
PE 7 5 35
AA 5 6 28
FN 5 8 30
[3 Alternatives x 3 Criteria]
"""
_filter = np.greater
@doc_inherit(SKCArithmeticFilterABC, warn_class=False)
class FilterGE(SKCArithmeticFilterABC):
"""Keeps the alternatives for which the criteria value are greater or \
equal than a value.
Examples
--------
.. code-block:: pycon
        >>> import skcriteria as skc
        >>> from skcriteria.preprocessing import filters
>>> dm = skc.mkdm(
... matrix=[
... [7, 5, 35],
... [5, 4, 26],
... [5, 6, 28],
... [1, 7, 30],
... [5, 8, 30]
... ],
... objectives=[max, max, min],
... alternatives=["PE", "JN", "AA", "MM", "FN"],
... criteria=["ROE", "CAP", "RI"],
... )
>>> tfm = filters.FilterGE({"ROE": 1, "RI": 27})
>>> tfm.transform(dm)
ROE[▲ 2.0] CAP[▲ 4.0] RI[▼ 1.0]
PE 7 5 35
AA 5 6 28
MM 1 7 30
FN 5 8 30
[4 Alternatives x 3 Criteria]
"""
_filter = np.greater_equal
@doc_inherit(SKCArithmeticFilterABC, warn_class=False)
class FilterLT(SKCArithmeticFilterABC):
"""Keeps the alternatives for which the criteria value are less than a \
value.
Examples
--------
.. code-block:: pycon
        >>> import skcriteria as skc
        >>> from skcriteria.preprocessing import filters
>>> dm = skc.mkdm(
... matrix=[
... [7, 5, 35],
... [5, 4, 26],
... [5, 6, 28],
... [1, 7, 30],
... [5, 8, 30]
... ],
... objectives=[max, max, min],
... alternatives=["PE", "JN", "AA", "MM", "FN"],
... criteria=["ROE", "CAP", "RI"],
... )
>>> tfm = filters.FilterLT({"RI": 28})
>>> tfm.transform(dm)
ROE[▲ 2.0] CAP[▲ 4.0] RI[▼ 1.0]
JN 5 4 26
[1 Alternatives x 3 Criteria]
"""
_filter = np.less
@doc_inherit(SKCArithmeticFilterABC, warn_class=False)
class FilterLE(SKCArithmeticFilterABC):
"""Keeps the alternatives for which the criteria value are less or equal \
than a value.
Examples
--------
.. code-block:: pycon
        >>> import skcriteria as skc
        >>> from skcriteria.preprocessing import filters
>>> dm = skc.mkdm(
... matrix=[
... [7, 5, 35],
... [5, 4, 26],
... [5, 6, 28],
... [1, 7, 30],
... [5, 8, 30]
... ],
... objectives=[max, max, min],
... alternatives=["PE", "JN", "AA", "MM", "FN"],
... criteria=["ROE", "CAP", "RI"],
... )
>>> tfm = filters.FilterLE({"RI": 28})
>>> tfm.transform(dm)
ROE[▲ 2.0] CAP[▲ 4.0] RI[▼ 1.0]
JN 5 4 26
AA 5 6 28
[2 Alternatives x 3 Criteria]
"""
_filter = np.less_equal
@doc_inherit(SKCArithmeticFilterABC, warn_class=False)
class FilterEQ(SKCArithmeticFilterABC):
"""Keeps the alternatives for which the criteria value are equal than a \
value.
Examples
--------
.. code-block:: pycon
        >>> import skcriteria as skc
        >>> from skcriteria.preprocessing import filters
>>> dm = skc.mkdm(
... matrix=[
... [7, 5, 35],
... [5, 4, 26],
... [5, 6, 28],
... [1, 7, 30],
... [5, 8, 30]
... ],
... objectives=[max, max, min],
... alternatives=["PE", "JN", "AA", "MM", "FN"],
... criteria=["ROE", "CAP", "RI"],
... )
>>> tfm = filters.FilterEQ({"CAP": 7, "RI": 30})
>>> tfm.transform(dm)
ROE[▲ 2.0] CAP[▲ 4.0] RI[▼ 1.0]
MM 1 7 30
[1 Alternatives x 3 Criteria]
"""
_filter = np.equal
@doc_inherit(SKCArithmeticFilterABC, warn_class=False)
class FilterNE(SKCArithmeticFilterABC):
"""Keeps the alternatives for which the criteria value are not equal than \
a value.
Examples
--------
.. code-block:: pycon
        >>> import skcriteria as skc
        >>> from skcriteria.preprocessing import filters
>>> dm = skc.mkdm(
... matrix=[
... [7, 5, 35],
... [5, 4, 26],
... [5, 6, 28],
... [1, 7, 30],
... [5, 8, 30]
... ],
... objectives=[max, max, min],
... alternatives=["PE", "JN", "AA", "MM", "FN"],
... criteria=["ROE", "CAP", "RI"],
... )
>>> tfm = filters.FilterNE({"CAP": 7, "RI": 30})
>>> tfm.transform(dm)
ROE[▲ 2.0] CAP[▲ 4.0] RI[▼ 1.0]
PE 7 5 35
JN 5 4 26
AA 5 6 28
[3 Alternatives x 3 Criteria]
"""
_filter = np.not_equal
# =============================================================================
# SET FILTERS
# =============================================================================
@doc_inherit(SKCByCriteriaFilterABC, warn_class=False)
class SKCSetFilterABC(SKCByCriteriaFilterABC):
"""Provide a common behavior to make filters based on set operations.
    This abstract class requires redefining the ``_set_filter`` method, which
    will be applied to each criterion separately.
This class is designed to implement in general set comparison like
"inclusion" and "exclusion".
"""
_skcriteria_abstract_class = True
@abc.abstractmethod
def _set_filter(self, arr, cond):
raise NotImplementedError()
def _coerce_filters(self, filters):
criteria, criteria_filters = [], []
for filter_name, filter_value in filters.items():
if not isinstance(filter_name, str):
raise ValueError("All filter keys must be instance of 'str'")
if not (
isinstance(filter_value, Collection) and len(filter_value)
):
raise ValueError(
"All filter values must be iterable with length > 1"
)
criteria.append(filter_name)
criteria_filters.append(np.asarray(filter_value))
return criteria, criteria_filters
def _make_mask(self, matrix, criteria, criteria_to_use, criteria_filters):
mask_list = []
for fname, fset in zip(criteria_to_use, criteria_filters):
crit_idx = np.in1d(criteria, fname, assume_unique=False)
crit_array = matrix[:, crit_idx].flatten()
crit_mask = self._set_filter(crit_array, fset)
mask_list.append(crit_mask)
mask = np.all(np.column_stack(mask_list), axis=1)
return mask
@doc_inherit(SKCSetFilterABC, warn_class=False)
class FilterIn(SKCSetFilterABC):
"""Keeps the alternatives for which the criteria value are included in a \
set of values.
Examples
--------
.. code-block:: pycon
        >>> import skcriteria as skc
        >>> from skcriteria.preprocessing import filters
>>> dm = skc.mkdm(
... matrix=[
... [7, 5, 35],
... [5, 4, 26],
... [5, 6, 28],
... [1, 7, 30],
... [5, 8, 30]
... ],
... objectives=[max, max, min],
... alternatives=["PE", "JN", "AA", "MM", "FN"],
... criteria=["ROE", "CAP", "RI"],
... )
>>> tfm = filters.FilterIn({"ROE": [7, 1], "RI": [30, 35]})
>>> tfm.transform(dm)
ROE[▲ 2.0] CAP[▲ 4.0] RI[▼ 1.0]
PE 7 5 35
MM 1 7 30
[2 Alternatives x 3 Criteria]
"""
def _set_filter(self, arr, cond):
return np.isin(arr, cond)
@doc_inherit(SKCSetFilterABC, warn_class=False)
class FilterNotIn(SKCSetFilterABC):
"""Keeps the alternatives for which the criteria value are not included \
in a set of values.
Examples
--------
.. code-block:: pycon
        >>> import skcriteria as skc
        >>> from skcriteria.preprocessing import filters
>>> dm = skc.mkdm(
... matrix=[
... [7, 5, 35],
... [5, 4, 26],
... [5, 6, 28],
... [1, 7, 30],
... [5, 8, 30]
... ],
... objectives=[max, max, min],
... alternatives=["PE", "JN", "AA", "MM", "FN"],
... criteria=["ROE", "CAP", "RI"],
... )
>>> tfm = filters.FilterNotIn({"ROE": [7, 1], "RI": [30, 35]})
>>> tfm.transform(dm)
ROE[▲ 2.0] CAP[▲ 4.0] RI[▼ 1.0]
JN 5 4 26
AA 5 6 28
[2 Alternatives x 3 Criteria]
"""
def _set_filter(self, arr, cond):
return np.isin(arr, cond, invert=True)
# =============================================================================
# DOMINANCE
# =============================================================================
class FilterNonDominated(SKCTransformerABC):
"""Keeps the non dominated or non strictly-dominated alternatives.
In order to evaluate the dominance of an alternative *a0* over an
alternative *a1*, the algorithm evaluates that *a0* is better in at
least one criterion and that *a1* is not better in any criterion than
*a0*. In the case that ``strict = True`` it also evaluates that there
are no equal criteria.
Parameters
----------
strict: bool, default ``False``
If ``True``, strictly dominated alternatives are removed, otherwise all
dominated alternatives are removed.
Examples
--------
.. code-block:: pycon
        >>> import skcriteria as skc
        >>> from skcriteria.preprocessing import filters
>>> dm = skc.mkdm(
... matrix=[
... [7, 5, 35],
... [5, 4, 26],
... [5, 6, 28],
... [1, 7, 30],
... [5, 8, 30]
... ],
... objectives=[max, max, min],
... alternatives=["PE", "JN", "AA", "MM", "FN"],
... criteria=["ROE", "CAP", "RI"],
... )
>>> tfm = filters.FilterNonDominated(strict=False)
>>> tfm.transform(dm)
ROE[▲ 1.0] CAP[▲ 1.0] RI[▼ 1.0]
PE 7 5 35
JN 5 4 26
AA 5 6 28
FN 5 8 30
[4 Alternatives x 3 Criteria]
"""
_skcriteria_parameters = ["strict"]
def __init__(self, *, strict=False):
self._strict = bool(strict)
@property
def strict(self):
"""If the filter must remove the dominated or strictly-dominated \
alternatives."""
return self._strict
@doc_inherit(SKCTransformerABC._transform_data)
def _transform_data(self, matrix, alternatives, dominated_mask, **kwargs):
filtered_matrix = matrix[~dominated_mask]
filtered_alternatives = alternatives[~dominated_mask]
kwargs.update(
matrix=filtered_matrix,
alternatives=filtered_alternatives,
)
return kwargs
@doc_inherit(SKCTransformerABC.transform)
def transform(self, dm):
data = dm.to_dict()
dominated_mask = dm.dominance.dominated(strict=self._strict).to_numpy()
transformed_data = self._transform_data(
dominated_mask=dominated_mask, **data
)
transformed_dm = DecisionMatrix.from_mcda_data(**transformed_data)
return transformed_dm | /scikit-criteria-0.8.3.tar.gz/scikit-criteria-0.8.3/skcriteria/preprocessing/filters.py | 0.933203 | 0.509764 | filters.py | pypi |
# =============================================================================
# DOCS
# =============================================================================
"""Functionalities for remove negatives from criteria.
In addition to the main functionality, an MCDA agnostic function is offered
to push negatives values on an array along an arbitrary axis.
"""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
from ._preprocessing_base import SKCMatrixAndWeightTransformerABC
from ..utils import doc_inherit
# =============================================================================
# FUNCTIONS
# =============================================================================
def push_negatives(arr, axis=None):
    r"""Increment the array until all the values are >= 0.
    If the array has negative values, this function increments the values
    so that the whole array becomes non-negative along an axis.
    .. math::
        \overline{X}_{ij} =
        \begin{cases}
            X_{ij} - \min{X_{ij}} & \text{if } \min{X_{ij}} < 0\\
            X_{ij} & \text{otherwise}
        \end{cases}
Parameters
----------
arr: :py:class:`numpy.ndarray` like.
A array with values
axis : :py:class:`int` optional
Axis along which to operate. By default, flattened input is used.
Returns
-------
:py:class:`numpy.ndarray`
array with all values >= 0.
Examples
--------
.. code-block:: pycon
        >>> from skcriteria.preprocessing.push_negatives import push_negatives
>>> mtx = [[1, 2], [3, 4]]
>>> mtx_lt0 = [[-1, 2], [3, 4]] # has a negative value
        >>> push_negatives(mtx)  # an array without negatives is not affected
array([[1, 2],
[3, 4]])
# all the array is incremented by 1 to eliminate the negative
>>> push_negatives(mtx_lt0)
array([[0, 3],
[4, 5]])
# by column only the first one (with the negative value) is affected
>>> push_negatives(mtx_lt0, axis=0)
array([[0, 2],
[4, 4]])
# by row only the first row (with the negative value) is affected
>>> push_negatives(mtx_lt0, axis=1)
array([[0, 3],
[3, 4]])
"""
arr = np.asarray(arr)
mins = np.min(arr, axis=axis, keepdims=True)
delta = (mins < 0) * mins
return arr - delta
class PushNegatives(SKCMatrixAndWeightTransformerABC):
r"""Increment the matrix/weights until all the valuer are sean >= 0.
If the matrix/weights has negative values this function increment the
values proportionally to made all the matrix/weights positive along an
axis.
.. math::
\overline{X}_{ij} =
\begin{cases}
X_{ij} + min_{X_{ij}} & \text{if } X_{ij} < 0\\
X_{ij} & \text{otherwise}
\end{cases}
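    Examples
    --------
    A minimal illustrative sketch (arbitrary data, output omitted):
    .. code-block:: pycon
        >>> import skcriteria as skc
        >>> from skcriteria.preprocessing.push_negatives import PushNegatives
        >>> dm = skc.mkdm([[-1, 2], [3, 4]], [max, max])  # arbitrary data
        >>> PushNegatives(target="matrix").transform(dm)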
"""
@doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights)
def _transform_weights(self, weights):
return push_negatives(weights, axis=None)
@doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix)
def _transform_matrix(self, matrix):
return push_negatives(matrix, axis=0) | /scikit-criteria-0.8.3.tar.gz/scikit-criteria-0.8.3/skcriteria/preprocessing/push_negatives.py | 0.936037 | 0.643203 | push_negatives.py | pypi |
# =============================================================================
# DOCS
# =============================================================================
"""Core functionalities to create transformers."""
# =============================================================================
# IMPORTS
# =============================================================================
import abc
from ..core import DecisionMatrix, SKCMethodABC
from ..utils import doc_inherit
# =============================================================================
# SKCTransformer ABC
# =============================================================================
class SKCTransformerABC(SKCMethodABC):
"""Abstract class for all transformer in scikit-criteria."""
_skcriteria_dm_type = "transformer"
_skcriteria_abstract_class = True
@abc.abstractmethod
def _transform_data(self, **kwargs):
"""Apply the transformation logic to the decision matrix parameters.
Parameters
----------
kwargs:
The decision matrix as separated parameters.
Returns
-------
:py:class:`dict`
A dictionary with all the values of the decision matrix
transformed.
"""
raise NotImplementedError()
def transform(self, dm):
"""Perform transformation on `dm`.
Parameters
----------
dm: :py:class:`skcriteria.data.DecisionMatrix`
The decision matrix to transform.
Returns
-------
:py:class:`skcriteria.data.DecisionMatrix`
Transformed decision matrix.
"""
data = dm.to_dict()
transformed_data = self._transform_data(**data)
transformed_dm = DecisionMatrix.from_mcda_data(**transformed_data)
return transformed_dm
# =============================================================================
# MATRIX & WEIGHTS TRANSFORMER
# =============================================================================
class SKCMatrixAndWeightTransformerABC(SKCTransformerABC):
"""Transform weights and matrix together or independently.
    Transformers implementing this abstract class can be configured to
    transform `weights`, `matrix` or `both`, so only that part of the
    DecisionMatrix is altered.
    This abstract class requires redefining ``_transform_weights`` and
    ``_transform_matrix``, instead of ``_transform_data``.
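    Examples
    --------
    A minimal illustrative sketch using one concrete subclass of this class
    (``PushNegatives``); the data is arbitrary and the output is omitted:
    .. code-block:: pycon
        >>> import skcriteria as skc
        >>> from skcriteria.preprocessing.push_negatives import PushNegatives
        >>> dm = skc.mkdm([[-1, 2], [3, 4]], [max, max], weights=[-1, 1])
        >>> PushNegatives(target="both").transform(dm)  # shifts matrix & weights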
"""
_skcriteria_abstract_class = True
_skcriteria_parameters = ["target"]
_TARGET_WEIGHTS = "weights"
_TARGET_MATRIX = "matrix"
_TARGET_BOTH = "both"
def __init__(self, target):
if target not in (
self._TARGET_MATRIX,
self._TARGET_WEIGHTS,
self._TARGET_BOTH,
):
raise ValueError(
f"'target' can only be '{self._TARGET_WEIGHTS}', "
f"'{self._TARGET_MATRIX}' or '{self._TARGET_BOTH}', "
f"found '{target}'"
)
self._target = target
@property
def target(self):
"""Determine which part of the DecisionMatrix will be transformed."""
return self._target
@abc.abstractmethod
def _transform_weights(self, weights):
"""Execute the transform method over the weights.
Parameters
----------
weights: :py:class:`numpy.ndarray`
The weights to transform.
Returns
-------
:py:class:`numpy.ndarray`
The transformed weights.
"""
raise NotImplementedError()
@abc.abstractmethod
def _transform_matrix(self, matrix):
"""Execute the transform method over the matrix.
Parameters
----------
matrix: :py:class:`numpy.ndarray`
The decision matrix to transform
Returns
-------
:py:class:`numpy.ndarray`
The transformed matrix.
"""
raise NotImplementedError()
@doc_inherit(SKCTransformerABC._transform_data)
def _transform_data(self, matrix, weights, **kwargs):
transformed_mtx = matrix
transformed_weights = weights
if self._target in (self._TARGET_MATRIX, self._TARGET_BOTH):
transformed_mtx = self._transform_matrix(matrix)
if self._target in (self._TARGET_WEIGHTS, self._TARGET_BOTH):
transformed_weights = self._transform_weights(weights)
kwargs.update(
matrix=transformed_mtx, weights=transformed_weights, dtypes=None
)
return kwargs | /scikit-criteria-0.8.3.tar.gz/scikit-criteria-0.8.3/skcriteria/preprocessing/_preprocessing_base.py | 0.931905 | 0.496643 | _preprocessing_base.py | pypi |
# =============================================================================
# DOCS
# =============================================================================
"""Data abstraction layer.
This module defines the DecisionMatrix object, which internally encompasses
the alternative matrix, weights and objectives (MIN, MAX) of the criteria.
"""
# =============================================================================
# IMPORTS
# =============================================================================
import functools
from collections import abc
import numpy as np
import pandas as pd
from pandas.io.formats import format as pd_fmt
from .dominance import DecisionMatrixDominanceAccessor
from .objectives import Objective
from .plot import DecisionMatrixPlotter
from .stats import DecisionMatrixStatsAccessor
from ..utils import deprecated, df_temporal_header, doc_inherit
# =============================================================================
# SLICERS ARRAY
# =============================================================================
class _ACArray(np.ndarray, abc.Mapping):
"""Immutable Array to provide access to the alternative and criteria \
values.
    The behavior is the same as a numpy.ndarray, but if the key it receives
    is a value contained in the array, it uses an external function
    to access the series of that criterion/alternative.
    Besides this, it has the typical methods of a dictionary.
"""
def __new__(cls, input_array, skc_slicer):
obj = np.asarray(input_array).view(cls)
obj._skc_slicer = skc_slicer
return obj
@doc_inherit(np.ndarray.__getitem__)
def __getitem__(self, k):
try:
if k in self:
return self._skc_slicer(k)
return super().__getitem__(k)
except IndexError:
raise IndexError(k)
def __setitem__(self, k, v):
"""Raise an AttributeError, this object are read-only."""
raise AttributeError("_SlicerArray are read-only")
@doc_inherit(abc.Mapping.items)
def items(self):
return ((e, self[e]) for e in self)
@doc_inherit(abc.Mapping.keys)
def keys(self):
return iter(self)
@doc_inherit(abc.Mapping.values)
def values(self):
return (self[e] for e in self)
class _Loc:
"""Locator abstraction.
    This class ensures that the correct objectives and weights are applied to
the sliced ``DecisionMatrix``.
"""
def __init__(self, name, real_loc, objectives, weights):
self._name = name
self._real_loc = real_loc
self._objectives = objectives
self._weights = weights
@property
def name(self):
"""The name of the locator."""
return self._name
def __getitem__(self, slc):
"""dm[slc] <==> dm.__getitem__(slc)."""
df = self._real_loc.__getitem__(slc)
if isinstance(df, pd.Series):
df = df.to_frame().T
dtypes = self._real_loc.obj.dtypes
dtypes = dtypes[dtypes.index.isin(df.columns)]
df = df.astype(dtypes)
objectives = self._objectives
objectives = objectives[objectives.index.isin(df.columns)].to_numpy()
weights = self._weights
weights = weights[weights.index.isin(df.columns)].to_numpy()
return DecisionMatrix(df, objectives, weights)
# =============================================================================
# DECISION MATRIX
# =============================================================================
class DecisionMatrix:
"""Representation of all data needed in the MCDA analysis.
This object gathers everything necessary to represent a data set used
in MCDA:
- An alternative matrix where each row is an alternative and each
      column is a different criterion.
- An optimization objective (Minimize, Maximize) for each criterion.
- A weight for each criterion.
- An independent type of data for each criterion
DecisionMatrix has two main forms of construction:
    1. Use the default constructor of the DecisionMatrix class, providing a
       :py:class:`pandas.DataFrame` where the index is the alternatives
       and the columns are the criteria; an iterable with the objectives with
       the same number of elements as the dataframe has columns/criteria;
       and an iterable with the weights, also with the same number of
       elements as criteria.
.. code-block:: pycon
>>> import pandas as pd
>>> from skcriteria import DecisionMatrix, mkdm
>>> data_df = pd.DataFrame(
... [[1, 2, 3], [4, 5, 6]],
... index=["A0", "A1"],
... columns=["C0", "C1", "C2"]
... )
>>> objectives = [min, max, min]
>>> weights = [1, 1, 1]
>>> dm = DecisionMatrix(data_df, objectives, weights)
>>> dm
C0[▼ 1.0] C1[▲ 1.0] C2[▲ 1.0]
A0 1 2 3
A1 4 5 6
[2 Alternatives x 3 Criteria]
2. Use the classmethod `DecisionMatrix.from_mcda_data` which requests the
data in a more natural way for this type of analysis
(the weights, the criteria / alternative names, and the data types
are optional)
>>> DecisionMatrix.from_mcda_data(
... [[1, 2, 3], [4, 5, 6]],
... [min, max, min],
... [1, 1, 1])
C0[▼ 1.0] C1[▲ 1.0] C2[▲ 1.0]
A0 1 2 3
A1 4 5 6
[2 Alternatives x 3 Criteria]
For simplicity a function is offered at the module level analogous to
``from_mcda_data`` called ``mkdm`` (make decision matrix).
Parameters
----------
data_df: :py:class:`pandas.DatFrame`
Dataframe where the index is the alternatives and the columns
are the criteria.
objectives: :py:class:`numpy.ndarray`
        An iterable with the objectives (sense of optimality) of every
        criterion (you can use any alias defined in Objective), with
        the same length as the number of columns/criteria of data_df.
weights: :py:class:`numpy.ndarray`
An iterable with the weights also with the same amount of elements
as criteria.
"""
def __init__(self, data_df, objectives, weights):
self._data_df = (
data_df.copy(deep=True)
if isinstance(data_df, pd.DataFrame)
else pd.DataFrame(data_df, copy=True)
)
self._objectives = np.asarray(objectives, dtype=object)
self._weights = np.asanyarray(weights, dtype=float)
if not (
len(self._data_df.columns)
== len(self._weights)
== len(self._objectives)
):
            raise ValueError(
                "The number of weights and objectives must be equal to the "
                "number of criteria (number of columns in data_df)"
)
# CUSTOM CONSTRUCTORS =====================================================
@classmethod
def from_mcda_data(
cls,
matrix,
objectives,
weights=None,
alternatives=None,
criteria=None,
dtypes=None,
):
"""Create a new DecisionMatrix object.
        This method receives the parts into which the matrix of alternatives
        is conceptually divided.
Parameters
----------
matrix: Iterable
            The matrix of alternatives, where every row is an alternative
            and every column is a criterion.
objectives: Iterable
The array with the sense of optimality of every
criteria. You can use any alias provided by the objective class.
        weights: Iterable or None (default ``None``)
            Optional weights of the criteria. If ``None``, all the criteria
            are weighted with 1.
        alternatives: Iterable or None (default ``None``)
            Optional names of the alternatives. If ``None``, the
            alternatives are named "A[n]", where n is the row number of
            `matrix` starting at 0.
        criteria: Iterable or None (default ``None``)
            Optional names of the criteria. If ``None``, the criteria are
            named "C[m]", where m is the column number of `matrix` starting
            at 0.
        dtypes: Iterable or None (default ``None``)
            Optional types of the criteria. If ``None``, the types are inferred
automatically by pandas.
Returns
-------
:py:class:`DecisionMatrix`
A new decision matrix.
Example
-------
>>> DecisionMatrix.from_mcda_data(
... [[1, 2, 3], [4, 5, 6]],
... [min, max, min],
... [1, 1, 1])
        C0[▼ 1.0] C1[▲ 1.0] C2[▼ 1.0]
A0 1 2 3
A1 4 5 6
[2 Alternatives x 3 Criteria]
For simplicity a function is offered at the module level analogous to
``from_mcda_data`` called ``mkdm`` (make decision matrix).
Notes
-----
        This functionality generates more sensible defaults than using the
        constructor of the DecisionMatrix class, but it is slower.
"""
# first we need the number of alternatives and criteria
try:
a_number, c_number = np.shape(matrix)
except ValueError:
matrix_ndim = np.ndim(matrix)
raise ValueError(
f"'matrix' must have 2 dimensions, found {matrix_ndim} instead"
)
alternatives = np.asarray(
[f"A{idx}" for idx in range(a_number)]
if alternatives is None
else alternatives
)
if len(alternatives) != a_number:
raise ValueError(f"'alternatives' must have {a_number} elements")
criteria = np.asarray(
[f"C{idx}" for idx in range(c_number)]
if criteria is None
else criteria
)
if len(criteria) != c_number:
raise ValueError(f"'criteria' must have {c_number} elements")
weights = np.asarray(np.ones(c_number) if weights is None else weights)
data_df = pd.DataFrame(matrix, index=alternatives, columns=criteria)
if dtypes is not None and len(dtypes) != c_number:
raise ValueError(f"'dtypes' must have {c_number} elements")
elif dtypes is not None:
dtypes = {c: dt for c, dt in zip(criteria, dtypes)}
data_df = data_df.astype(dtypes)
return cls(data_df=data_df, objectives=objectives, weights=weights)
# MCDA ====================================================================
    # These properties are useful for interactively accessing the
    # underlying data. Except for alternatives and criteria, all other
    # properties expose the data as dataframes or series.
@property
def alternatives(self):
"""Names of the alternatives.
From this array you can also access the values of the alternatives as
``pandas.Series``.
"""
arr = self._data_df.index.to_numpy(copy=True)
slicer = self._data_df.loc.__getitem__
return _ACArray(arr, slicer)
@property
def criteria(self):
"""Names of the criteria.
From this array you can also access the values of the criteria as
``pandas.Series``.
"""
arr = self._data_df.columns.to_numpy(copy=True)
slicer = self._data_df.__getitem__
return _ACArray(arr, slicer)
@property
def weights(self):
"""Weights of the criteria."""
return pd.Series(
self._weights,
dtype=float,
index=self._data_df.columns.copy(deep=True),
name="Weights",
copy=True,
)
@property
def objectives(self):
"""Objectives of the criteria as ``Objective`` instances."""
return pd.Series(
[Objective.from_alias(a) for a in self._objectives],
index=self._data_df.columns,
name="Objectives",
copy=True,
)
@property
def minwhere(self):
"""Mask with value True if the criterion is to be minimized."""
mask = self.objectives == Objective.MIN
mask.name = "minwhere"
return mask
@property
def maxwhere(self):
"""Mask with value True if the criterion is to be maximized."""
mask = self.objectives == Objective.MAX
mask.name = "maxwhere"
return mask
# READ ONLY PROPERTIES ====================================================
@property
def iobjectives(self):
"""Objectives of the criteria as ``int``.
- Minimize = Objective.MIN.value
- Maximize = Objective.MAX.value
"""
return pd.Series(
[o.value for o in self.objectives],
dtype=np.int8,
index=self._data_df.columns.copy(deep=True),
copy=True,
)
@property
def matrix(self):
"""Alternatives matrix as pandas DataFrame.
The matrix excludes weights and objectives.
If you want to create a DataFrame with objectives and weights, use
``DecisionMatrix.to_dataframe()``
"""
mtx = self._data_df.copy(deep=True)
mtx.index = self._data_df.index.copy(deep=True)
mtx.index.name = "Alternatives"
mtx.columns = self._data_df.columns.copy(deep=True)
mtx.columns.name = "Criteria"
return mtx
@property
def dtypes(self):
"""Dtypes of the criteria."""
series = self._data_df.dtypes.copy(deep=True)
series.index = self._data_df.dtypes.index.copy(deep=True)
return series
    # ACCESSORS (YES, WE USE CACHED PROPERTIES, IT'S THE EASIEST WAY) ========
@property
@functools.lru_cache(maxsize=None)
def plot(self):
"""Plot accessor."""
return DecisionMatrixPlotter(self)
@property
@functools.lru_cache(maxsize=None)
def stats(self):
"""Descriptive statistics accessor."""
return DecisionMatrixStatsAccessor(self)
@property
@functools.lru_cache(maxsize=None)
def dominance(self):
"""Dominance information accessor."""
return DecisionMatrixDominanceAccessor(self)
# UTILITIES ===============================================================
def copy(self, **kwargs):
"""Return a deep copy of the current DecisionMatrix.
This method is also useful for manually modifying the values of the
DecisionMatrix object.
Parameters
----------
kwargs :
The same parameters supported by ``from_mcda_data()``. The values
provided replace the existing ones in the object to be copied.
Returns
-------
:py:class:`DecisionMatrix`
A new decision matrix.
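        Example
        -------
        A minimal illustrative sketch (assumes ``dm`` was built with
        ``mkdm([[1, 2, 3], [4, 5, 6]], [min, max, min])``):
        >>> dm.copy(weights=[1, 2, 3]).weights.tolist()
        [1.0, 2.0, 3.0]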
"""
dmdict = self.to_dict()
dmdict.update(kwargs)
return self.from_mcda_data(**dmdict)
def to_dataframe(self):
"""Convert the entire DecisionMatrix into a dataframe.
        The objectives and weights are added as rows before the alternatives.
Returns
-------
:py:class:`pd.DataFrame`
A Decision matrix as pandas DataFrame.
Example
-------
.. code-block:: pycon
            >>> dm = DecisionMatrix.from_mcda_data(
            ...     [[1, 2, 3], [4, 5, 6]],
            ...     [min, max, min],
            ...     [1, 1, 1])
            >>> dm
            C0[▼ 1.0] C1[▲ 1.0] C2[▼ 1.0]
            A0 1 2 3
            A1 4 5 6
            [2 Alternatives x 3 Criteria]
>>> dm.to_dataframe()
C0 C1 C2
objectives MIN MAX MIN
weights 1.0 1.0 1.0
A0 1 2 3
A1 4 5 6
"""
data = np.vstack((self.objectives, self.weights, self.matrix))
index = np.hstack((["objectives", "weights"], self.alternatives))
df = pd.DataFrame(data, index=index, columns=self.criteria, copy=True)
return df
def to_dict(self):
"""Return a dict representation of the data.
        All the values are represented as numpy arrays.
"""
return {
"matrix": self.matrix.to_numpy(),
"objectives": self.iobjectives.to_numpy(),
"weights": self.weights.to_numpy(),
"dtypes": self.dtypes.to_numpy(),
"alternatives": np.asarray(self.alternatives),
"criteria": np.asarray(self.criteria),
}
@deprecated(
        reason=(
            "Use ``DecisionMatrix.stats()``, "
            "``DecisionMatrix.stats('describe')`` or "
            "``DecisionMatrix.stats.describe()`` instead."
),
version=0.6,
)
def describe(self, **kwargs):
"""Generate descriptive statistics.
Descriptive statistics include those that summarize the central
tendency, dispersion and shape of a dataset's distribution,
excluding ``NaN`` values.
Parameters
----------
Same parameters as ``pandas.DataFrame.describe()``.
Returns
-------
``pandas.DataFrame``
Summary statistics of DecisionMatrix provided.
"""
return self._data_df.describe(**kwargs)
# CMP =====================================================================
@property
def shape(self):
"""Return a tuple with (number_of_alternatives, number_of_criteria).
dm.shape <==> np.shape(dm)
"""
return np.shape(self._data_df)
    def __len__(self):
        """Return the number of alternatives.
dm.__len__() <==> len(dm).
"""
return len(self._data_df)
    def equals(self, other):
        """Return True if the decision matrices are equal.
        This method calls ``DecisionMatrix.aequals`` without tolerance.
Parameters
----------
other : :py:class:`skcriteria.DecisionMatrix`
Other instance to compare.
Returns
-------
        equals : :py:class:`bool`
            Returns True if the two decision matrices are equal.
See Also
--------
aequals, :py:func:`numpy.isclose`, :py:func:`numpy.all`,
:py:func:`numpy.any`, :py:func:`numpy.equal`,
:py:func:`numpy.allclose`.
"""
return self.aequals(other, 0, 0, False)
    def aequals(self, other, rtol=1e-05, atol=1e-08, equal_nan=False):
        """Return True if the decision matrices are equal within a tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
NaNs are treated as equal if they are in the same place and if
``equal_nan=True``. Infs are treated as equal if they are in the same
place and of the same sign in both arrays.
        The comparison proceeds as follows:
        - If ``other`` is the same object, return ``True``.
        - If ``other`` is not an instance of 'DecisionMatrix', or has a
          different shape, 'criteria', 'alternatives' or 'objectives',
          return ``False``.
- Next check the 'weights' and the matrix itself using the provided
tolerance.
Parameters
----------
other : :py:class:`skcriteria.DecisionMatrix`
Other instance to compare.
rtol : float
The relative tolerance parameter
(see Notes in :py:func:`numpy.allclose`).
atol : float
The absolute tolerance parameter
(see Notes in :py:func:`numpy.allclose`).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in dm will be
considered equal to NaN's in `other` in the output array.
Returns
-------
        aequals : :py:class:`bool`
Returns True if the two dm are equal within the given
tolerance; False otherwise.
See Also
--------
equals, :py:func:`numpy.isclose`, :py:func:`numpy.all`,
:py:func:`numpy.any`, :py:func:`numpy.equal`,
:py:func:`numpy.allclose`.
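        Example
        -------
        A small illustrative sketch (assumes ``mkdm`` is imported from
        ``skcriteria``; the values are hypothetical):
        >>> dm_a = mkdm([[1.0, 2.0]], [min, max])
        >>> dm_b = mkdm([[1.0 + 1e-10, 2.0]], [min, max])
        >>> dm_a.equals(dm_b)
        False
        >>> dm_a.aequals(dm_b)
        True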
"""
return (self is other) or (
isinstance(other, DecisionMatrix)
and np.shape(self) == np.shape(other)
and np.array_equal(self.criteria, other.criteria)
and np.array_equal(self.alternatives, other.alternatives)
and np.array_equal(self.objectives, other.objectives)
and np.allclose(
self.weights,
other.weights,
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
)
and np.allclose(
self.matrix,
other.matrix,
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
)
)
# SLICES ==================================================================
def __getitem__(self, slc):
"""dm[slc] <==> dm.__getitem__(slc)."""
df = self._data_df.__getitem__(slc)
if isinstance(df, pd.Series):
df = df.to_frame()
dtypes = self._data_df.dtypes
dtypes = dtypes[dtypes.index.isin(df.columns)]
df = df.astype(dtypes)
objectives = self.objectives
objectives = objectives[objectives.index.isin(df.columns)].to_numpy()
weights = self.weights
weights = weights[weights.index.isin(df.columns)].to_numpy()
return DecisionMatrix(df, objectives, weights)
@property
def loc(self):
"""Access a group of alternatives and criteria by label(s) or a \
boolean array.
``.loc[]`` is primarily alternative label based, but may also be used
with a boolean array.
        Unlike DataFrames, ``loc`` of ``DecisionMatrix`` always returns an
instance of ``DecisionMatrix``.
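        Example
        -------
        A minimal sketch (``dm`` as in the class docstring example):
        >>> dm.loc[["A0"]].shape
        (1, 3)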
"""
return _Loc("loc", self._data_df.loc, self.objectives, self.weights)
@property
def iloc(self):
"""Purely integer-location based indexing for selection by position.
``.iloc[]`` is primarily integer position based (from ``0`` to
``length-1`` of the axis), but may also be used with a boolean
array.
        Unlike DataFrames, ``iloc`` of ``DecisionMatrix`` always returns an
instance of ``DecisionMatrix``.
"""
return _Loc("iloc", self._data_df.iloc, self.objectives, self.weights)
# REPR ====================================================================
def _get_cow_headers(
self, only=None, fmt="{criteria}[{objective}{weight}]"
    ):
        """Column names with COW (Criteria, Objective, Weight)."""
criteria = self._data_df.columns.to_series()
objectives = self.objectives
weights = self.weights
if only:
mask = self._data_df.columns.isin(only)
criteria = criteria[mask][only]
objectives = objectives[mask][only]
weights = weights[mask][only]
weights = pd_fmt.format_array(weights, None)
headers = []
for crit, obj, weight in zip(criteria, objectives, weights):
header = fmt.format(
criteria=crit, objective=obj.to_symbol(), weight=weight
)
headers.append(header)
return np.array(headers)
    def _get_axc_dimensions(self):
        """Dimension footer with AxC (Alternatives x Criteria)."""
a_number, c_number = self.shape
dimensions = f"{a_number} Alternatives x {c_number} Criteria"
return dimensions
def __repr__(self):
"""dm.__repr__() <==> repr(dm)."""
header = self._get_cow_headers()
dimensions = self._get_axc_dimensions()
with df_temporal_header(self._data_df, header) as df:
with pd.option_context("display.show_dimensions", False):
original_string = repr(df)
# add dimension
string = f"{original_string}\n[{dimensions}]"
return string
def _repr_html_(self):
"""Return a html representation for a particular DecisionMatrix.
Mainly for IPython notebook.
"""
header = self._get_cow_headers()
dimensions = self._get_axc_dimensions()
# retrieve the original string
with df_temporal_header(self._data_df, header) as df:
with pd.option_context("display.show_dimensions", False):
original_html = df._repr_html_()
# add dimension
html = (
"<div class='decisionmatrix'>\n"
f"{original_html}"
f"<em class='decisionmatrix-dim'>{dimensions}</em>\n"
"</div>"
)
return html
# =============================================================================
# factory
# =============================================================================
@functools.wraps(DecisionMatrix.from_mcda_data)
def mkdm(*args, **kwargs):
"""Alias for DecisionMatrix.from_mcda_data."""
    return DecisionMatrix.from_mcda_data(*args, **kwargs)
# =============================================================================
# DOCS
# =============================================================================
"""Stats helper for the DecisionMatrix object."""
# =============================================================================
# IMPORTS
# =============================================================================
from ..utils import AccessorABC
# =============================================================================
# STATS ACCESSOR
# =============================================================================
class DecisionMatrixStatsAccessor(AccessorABC):
"""Calculate basic statistics of the decision matrix.
Kind of statistic to produce:
- 'corr' : Compute pairwise correlation of columns, excluding
NA/null values.
- 'cov' : Compute pairwise covariance of columns, excluding NA/null
values.
- 'describe' : Generate descriptive statistics.
- 'kurtosis' : Return unbiased kurtosis over requested axis.
- 'mad' : Return the mean absolute deviation of the values over the
requested axis.
- 'max' : Return the maximum of the values over the requested axis.
- 'mean' : Return the mean of the values over the requested axis.
- 'median' : Return the median of the values over the requested
axis.
- 'min' : Return the minimum of the values over the requested axis.
- 'pct_change' : Percentage change between the current and a prior
element.
- 'quantile' : Return values at the given quantile over requested
axis.
- 'sem' : Return unbiased standard error of the mean over requested
axis.
- 'skew' : Return unbiased skew over requested axis.
- 'std' : Return sample standard deviation over requested axis.
- 'var' : Return unbiased variance over requested axis.
"""
    # The list of methods that can be accessed on the underlying dataframe.
_DF_WHITELIST = (
"corr",
"cov",
"describe",
"kurtosis",
"max",
"mean",
"median",
"min",
"pct_change",
"quantile",
"sem",
"skew",
"std",
"var",
)
_default_kind = "describe"
def __init__(self, dm):
self._dm = dm
def __getattr__(self, a):
"""x.__getattr__(a) <==> x.a <==> getattr(x, "a")."""
if a not in self._DF_WHITELIST:
raise AttributeError(a)
return getattr(self._dm._data_df, a)
def __dir__(self):
"""x.__dir__() <==> dir(x)."""
return super().__dir__() + [
e for e in dir(self._dm._data_df) if e in self._DF_WHITELIST
]
def mad(self, axis=0, skipna=True):
"""Return the mean absolute deviation of the values over a given axis.
Parameters
----------
axis : int
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
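        Example
        -------
        A minimal sketch (assumes ``dm = mkdm([[1, 2], [3, 4]], [max, max])``):
        >>> dm.stats.mad().tolist()
        [1.0, 1.0]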
"""
df = self._dm._data_df
        return (df - df.mean(axis=axis)).abs().mean(axis=axis, skipna=skipna)
# =============================================================================
# DOCS
# =============================================================================
"""Definition of the objectives (MIN, MAX) for the criteria."""
# =============================================================================
# IMPORTS
# =============================================================================
import enum
import numpy as np
from ..utils import deprecated
# =============================================================================
# CONSTANTS
# =============================================================================
class Objective(enum.Enum):
"""Representation of criteria objectives (Minimize, Maximize)."""
#: Internal representation of minimize criteria
MIN = -1
#: Internal representation of maximize criteria
MAX = 1
# INTERNALS ===============================================================
_MIN_STR = "\u25bc" # ▼
_MAX_STR = "\u25b2" # ▲
    #: Other ways to name the maximization criteria.
_MAX_ALIASES = frozenset(
[
MAX,
_MAX_STR,
max,
np.max,
np.nanmax,
np.amax,
"max",
"maximize",
"+",
">",
]
)
    #: Other ways to name the minimization criteria.
_MIN_ALIASES = frozenset(
[
MIN,
_MIN_STR,
min,
np.min,
np.nanmin,
np.amin,
"min",
"minimize",
"-",
"<",
]
)
# CUSTOM CONSTRUCTOR ======================================================
@classmethod
    def from_alias(cls, alias):
        """Return an objective instance based on some given alias."""
if isinstance(alias, cls):
return alias
if isinstance(alias, str):
alias = alias.lower()
if alias in cls._MAX_ALIASES.value:
return cls.MAX
if alias in cls._MIN_ALIASES.value:
return cls.MIN
raise ValueError(f"Invalid criteria objective {alias}")
# METHODS =================================================================
    def __str__(self):
        """Convert the objective to a string."""
return self.name
def to_symbol(self):
"""Return the printable symbol representation of the objective."""
if self.value in Objective._MIN_ALIASES.value:
return Objective._MIN_STR.value
if self.value in Objective._MAX_ALIASES.value:
return Objective._MAX_STR.value
# DEPRECATED ==============================================================
@classmethod
@deprecated(reason="Use ``Objective.from_alias()`` instead.", version=0.8)
def construct_from_alias(cls, alias):
"""Return an objective instance based on some given alias."""
return cls.from_alias(alias)
@deprecated(reason="Use ``MAX/MIN.to_symbol()`` instead.", version=0.8)
def to_string(self):
"""Return the printable representation of the objective."""
        return self.to_symbol()
# =============================================================================
# DOCS
# =============================================================================
"""Core functionalities of scikit-criteria."""
# =============================================================================
# IMPORTS
# =============================================================================
import abc
import copy
import inspect
# =============================================================================
# BASE DECISION MAKER CLASS
# =============================================================================
class SKCMethodABC(metaclass=abc.ABCMeta):
    """Base class for all classes in scikit-criteria.
Notes
-----
All estimators should specify:
- ``_skcriteria_dm_type``: The type of the decision maker.
    - ``_skcriteria_parameters``: Available parameters.
- ``_skcriteria_abstract_class``: If the class is abstract.
If the class is *abstract* the user can ignore the other two attributes.
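    Example
    -------
    A minimal sketch of a conforming subclass (hypothetical names, not part
    of the library):
    >>> class FakeRanker(SKCMethodABC):
    ...     _skcriteria_dm_type = "ranker"
    ...     _skcriteria_parameters = ["p"]
    ...     def __init__(self, p=1):
    ...         self._p = p
    ...     @property
    ...     def p(self):
    ...         return self._p
    >>> FakeRanker(p=2)
    <FakeRanker [p=2]>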
"""
_skcriteria_abstract_class = True
def __init_subclass__(cls):
"""Validate if the subclass are well formed."""
is_abstract = vars(cls).get("_skcriteria_abstract_class", False)
if is_abstract:
return
decisor_type = getattr(cls, "_skcriteria_dm_type", None)
if decisor_type is None:
raise TypeError(f"{cls} must redefine '_skcriteria_dm_type'")
cls._skcriteria_dm_type = str(decisor_type)
params = getattr(cls, "_skcriteria_parameters", None)
if params is None:
raise TypeError(f"{cls} must redefine '_skcriteria_parameters'")
params = frozenset(params)
signature = inspect.signature(cls.__init__)
has_kwargs = any(
p.kind == inspect.Parameter.VAR_KEYWORD
for p in signature.parameters.values()
)
params_not_in_signature = params.difference(signature.parameters)
if params_not_in_signature and not has_kwargs:
            raise TypeError(
                f"{cls} defines the parameters {params_not_in_signature} "
                "which are not found as parameters in the __init__ method."
)
cls._skcriteria_parameters = params
def __repr__(self):
"""x.__repr__() <==> repr(x)."""
cls_name = type(self).__name__
parameters = []
if self._skcriteria_parameters:
for pname in sorted(self._skcriteria_parameters):
pvalue = getattr(self, pname)
parameters.append(f"{pname}={repr(pvalue)}")
str_parameters = ", ".join(parameters)
return f"<{cls_name} [{str_parameters}]>"
def get_parameters(self):
"""Return the parameters of the method as dictionary."""
the_parameters = {}
for parameter_name in self._skcriteria_parameters:
parameter_value = getattr(self, parameter_name)
the_parameters[parameter_name] = copy.deepcopy(parameter_value)
return the_parameters
    def copy(self, **kwargs):
        """Return a deep copy of the current object.
This method is also useful for manually modifying the values of the
object.
Parameters
----------
kwargs :
The same parameters supported by object constructor. The values
provided replace the existing ones in the object to be copied.
Returns
-------
A new object.
"""
asdict = self.get_parameters()
asdict.update(kwargs)
cls = type(self)
        return cls(**asdict)
# =============================================================================
# DOCS
# =============================================================================
"""Dominance helper for the DecisionMatrix object."""
# =============================================================================
# IMPORTS
# =============================================================================
import functools
import itertools as it
from collections import OrderedDict
import numpy as np
import pandas as pd
from ..utils import AccessorABC, rank
# =============================================================================
# DOMINANCE ACCESSOR
# =============================================================================
class DecisionMatrixDominanceAccessor(AccessorABC):
    """Calculate dominance relations among the alternatives of the matrix."""
_default_kind = "dominance"
def __init__(self, dm):
self._dm = dm
@property
@functools.lru_cache(maxsize=None)
def _dominance_cache(self):
"""Cache of dominance.
        Computing the dominance is an O(n choose 2) operation, so let's use
        a cache.
"""
dm = self._dm
reverse = dm.minwhere
dominance_cache, alts_numpy = {}, {}
for a0, a1 in it.combinations(dm.alternatives, 2):
for aname in (a0, a1):
if aname not in alts_numpy:
alts_numpy[aname] = dm.alternatives[aname]
dominance_cache[(a0, a1)] = rank.dominance(
alts_numpy[a0], alts_numpy[a1], reverse=reverse
)
return dominance_cache
def _cache_read(self, a0, a1):
"""Return the entry of the cache.
        The entry returned is the one that relates the alternatives a0 and a1.
Since the cache can store the entry with the key (a0, a1) or (a1, a0),
a second value is returned that is True if it was necessary to invert
the alternatives.
"""
key = a0, a1
cache = self._dominance_cache
entry, key_reverted = (
(cache[key], False) if key in cache else (cache[key[::-1]], True)
)
return entry, key_reverted
# FRAME ALT VS ALT ========================================================
def _create_frame(self, compute_cell, iname, cname):
"""Create a data frame comparing two alternatives.
The value of each cell is calculated with the "compute_cell"
function.
"""
alternatives = self._dm.alternatives
rows = []
for a0 in alternatives:
row = OrderedDict()
for a1 in alternatives:
row[a1] = compute_cell(a0, a1)
rows.append(row)
df = pd.DataFrame(rows, index=alternatives)
df.index.name = iname
df.columns.name = cname
return df
def bt(self):
"""Compare on how many criteria one alternative is better than another.
*bt* = better-than.
Returns
-------
pandas.DataFrame:
Where the value of each cell identifies on how many criteria the
row alternative is better than the column alternative.
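        Example
        -------
        A small sketch with a hypothetical matrix (assumes ``mkdm`` is
        imported from ``skcriteria``):
        >>> dm = mkdm([[1, 10], [2, 5]], [max, max])
        >>> int(dm.dominance.bt().loc["A0", "A1"])
        1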
"""
def compute_cell(a0, a1):
if a0 == a1:
return 0
centry, ckreverted = self._cache_read(a0, a1)
return centry.aDb if not ckreverted else centry.bDa
return self._create_frame(
compute_cell, iname="Better than", cname="Worse than"
)
def eq(self):
"""Compare on how many criteria two alternatives are equal.
Returns
-------
pandas.DataFrame:
Where the value of each cell identifies how many criteria the row
and column alternatives are equal.
"""
criteria_len = len(self._dm.criteria)
def compute_cell(a0, a1):
if a0 == a1:
return criteria_len
centry, _ = self._cache_read(a0, a1)
return centry.eq
return self._create_frame(
compute_cell, iname="Equals to", cname="Equals to"
)
def dominance(self, *, strict=False):
"""Compare if one alternative dominates or strictly dominates another \
alternative.
In order to evaluate the dominance of an alternative *a0* over an
alternative *a1*, the algorithm evaluates that *a0* is better in at
least one criterion and that *a1* is not better in any criterion than
*a0*. In the case that ``strict = True`` it also evaluates that there
are no equal criteria.
Parameters
----------
strict: bool, default ``False``
If True, strict dominance is evaluated.
Returns
-------
pandas.DataFrame:
Where the value of each cell is True if the row alternative
dominates the column alternative.
"""
def compute_cell(a0, a1):
if a0 == a1:
return False
centry, ckreverted = self._cache_read(a0, a1)
performance_a0, performance_a1 = (
(centry.aDb, centry.bDa)
if not ckreverted
else (centry.bDa, centry.aDb)
)
if strict and centry.eq:
return False
return performance_a0 > 0 and performance_a1 == 0
iname, cname = (
("Strict dominators", "Strictly dominated")
if strict
else ("Dominators", "Dominated")
)
dom = self._create_frame(compute_cell, iname=iname, cname=cname)
return dom
# COMPARISONS =============================================================
def compare(self, a0, a1):
"""Compare two alternatives.
It creates a summary data frame containing the comparison of the two
alternatives on a per-criteria basis, indicating which of the two is
the best value, or if they are equal. In addition, it presents a
"Performance" column with the count for each case.
Parameters
----------
a0, a1: str
Names of the alternatives to compare.
Returns
-------
pandas.DataFrame:
Comparison of the two alternatives by criteria.
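        Example
        -------
        A minimal sketch (assumes ``dm = mkdm([[1, 10], [2, 5]], [max, max])``):
        >>> dm.dominance.compare("A0", "A1").shape
        (3, 3)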
"""
# read the cache and extract the values
centry, ckreverted = self._cache_read(a0, a1)
performance_a0, performance_a1 = (
(centry.aDb, centry.bDa)
if not ckreverted
else (centry.bDa, centry.aDb)
)
where_aDb, where_bDa = (
(centry.aDb_where, centry.bDa_where)
if not ckreverted
else (centry.bDa_where, centry.aDb_where)
)
eq, eq_where = centry.eq, centry.eq_where
criteria = self._dm.criteria
alt_index = pd.MultiIndex.from_tuples(
[
("Alternatives", a0),
("Alternatives", a1),
("Equals", ""),
]
)
crit_index = pd.MultiIndex.from_product([["Criteria"], criteria])
df = pd.DataFrame(
[
pd.Series(where_aDb, name=alt_index[0], index=crit_index),
pd.Series(where_bDa, name=alt_index[1], index=crit_index),
pd.Series(eq_where, name=alt_index[2], index=crit_index),
]
)
df = df.assign(
Performance=[performance_a0, performance_a1, eq],
)
return df
# The dominated ===========================================================
def dominated(self, *, strict=False):
"""Which alternative is dominated or strictly dominated by at least \
one other alternative.
Parameters
----------
strict: bool, default ``False``
If True, strict dominance is evaluated.
Returns
-------
pandas.Series:
Where the index indicates the name of the alternative, and if the
            value is True, it indicates that this alternative is dominated
by at least one other alternative.
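        Example
        -------
        A minimal sketch (assumes ``dm = mkdm([[1, 10], [2, 20]], [max, max])``,
        where A1 dominates A0):
        >>> dm.dominance.dominated().tolist()
        [True, False]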
"""
dom = self.dominance(strict=strict).any()
dom.name = dom.index.name
dom.index.name = "Alternatives"
return dom
@functools.lru_cache(maxsize=None)
def dominators_of(self, a, *, strict=False):
"""Array of alternatives that dominate or strictly-dominate the \
alternative provided by parameters.
Parameters
----------
a : str
On what alternative to look for the dominators.
strict: bool, default ``False``
If True, strict dominance is evaluated.
Returns
-------
numpy.ndarray:
List of alternatives that dominate ``a``.
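        Example
        -------
        A minimal sketch (same hypothetical ``dm`` as in ``dominated()``,
        where A1 dominates A0):
        >>> dm.dominance.dominators_of("A0").tolist()
        ['A1']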
"""
dominance_a = self.dominance(strict=strict)[a]
if ~dominance_a.any():
return np.array([], dtype=str)
dominators = dominance_a.index[dominance_a]
for dominator in dominators:
dominators_dominators = self.dominators_of(
dominator, strict=strict
)
dominators = np.concatenate((dominators, dominators_dominators))
return dominators
    def has_loops(self, *, strict=False):
        """Return True if the matrix contains dominance loops.
        A loop exists if there are alternatives `a0`, `a1` and `a2` such
        that "a0 ≻ a1 ≻ a2 ≻ a0" if ``strict=True``, or "a0 ≽ a1 ≽ a2 ≽ a0"
        if ``strict=False``.
Parameters
----------
strict: bool, default ``False``
If True, strict dominance is evaluated.
Returns
-------
bool:
If True a loop exists.
Notes
-----
If the result of this method is True, the ``dominators_of()`` method
raises a ``RecursionError`` for at least one alternative.
"""
        # let's put the dominated alternatives last so our while loop will
        # be shorter by extracting from the tail
alternatives = list(self.dominated(strict=strict).sort_values().index)
try:
while alternatives:
                # take the last alternative (the dominated ones are at the tail)
alt = alternatives.pop()
                # now get all the alternatives that dominate it
dominators = self.dominators_of(alt, strict=strict)
                # the dominating alternatives have already been processed,
                # so remove all of them from the pending alternatives
alternatives = [a for a in alternatives if a not in dominators]
except RecursionError:
return True
        return False
# =============================================================================
# DOCS
# =============================================================================
"""The :mod:`skcriteria.datasets` module includes utilities to load \
datasets."""
# =============================================================================
# IMPORTS
# =============================================================================
import json
import os
import pathlib
from skcriteria.core.data import mkdm
from .. import core
# =============================================================================
# CONSTANTS
# =============================================================================
_PATH = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
# =============================================================================
# FUNCTIONS
# =============================================================================
def load_simple_stock_selection():
"""Simple stock selection decision matrix.
This matrix was designed primarily for teaching and evaluating the behavior
of an experiment.
Among the data we can find: two maximization criteria (ROE, CAP),
one minimization criterion (RI), dominated alternatives (FX), and
    one alternative with an outlier value in a criterion (ROE of MM = 1).
    Although the criteria and alternatives are original to the authors of
    Scikit-Criteria, the numerical values were extracted at some point from
    a source which we have since forgotten.
Description:
    In order to decide which stocks to buy, a company studied 6 candidate
    investments: PE, JN, AA, FX, MM and GN. The finance department decided
    to consider the following criteria for selection:
1. ROE (Max): Return % for each monetary unit invested.
2. CAP (Max): Years of market capitalization.
3. RI (Min): Risk of the stock.
"""
dm = core.mkdm(
matrix=[
[7, 5, 35],
[5, 4, 26],
[5, 6, 28],
[3, 4, 36],
[1, 7, 30],
[5, 8, 30],
],
objectives=[max, max, min],
weights=[2, 4, 1],
alternatives=["PE", "JN", "AA", "FX", "MM", "GN"],
criteria=["ROE", "CAP", "RI"],
)
return dm
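    # Usage sketch (not executed at import time; the shape follows from the
    # matrix defined above):
    #
    #     >>> dm = load_simple_stock_selection()
    #     >>> dm.shape
    #     (6, 3)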
def load_van2021evaluation(windows_size=7):
    r"""Dataset extracted from historical time series of cryptocurrencies.
This dataset is extracted from::
Van Heerden, N., Cabral, J. y Luczywo, N. (2021). Evaluación de la
importancia de criterios para la selección de criptomonedas.
XXXIV ENDIO - XXXII EPIO Virtual 2021, Argentina.
The nine available alternatives are based on the ranking of the 20
cryptocurrencies with the largest market capitalization calculated on the
    basis of circulating supply, according to information from
    "Cryptocurrency Historical Prices", retrieved on July 21st, 2021. From
    there, only the coins with complete data between October 9th, 2018 and July
6th of 2021, excluding stable-coins, since they maintain a stable price and
therefore do not carry associated yields; the alternatives that met these
requirements turned out to be: Cardano (ADA), Binance coin (BNB),
Bitcoin (BTC), Dogecoin (DOGE), Ethereum (ETH), Chainlink (LINK),
Litecoin (LTC), Stellar (XLM) and Ripple (XRP).
Two decision matrices were created for two sizes of overlapping moving
windows: 7 and 15 days. Six criteria were defined on these windows that
seek to represent returns and risks:
- ``xRv`` - average Window return (:math:`\bar{x}RV`) - Maximize: is the
average of the differences between the closing price of the
cryptocurrency on the last day and the first day of each window, divided
by the price on the first day.
- ``sRV`` - window return deviation (:math:`sRV`) - Minimize: is the
standard deviation of window return. The greater the deviation, the
returns within the windows have higher variance and are unstable.
- ``xVV`` - average of the volume of the window (:math:`\bar{x}VV`) -
Maximize: it is the average of the summations of the transaction amount
of the cryptocurrency in dollars in each window, representing a liquidity
measure of the asset.
- ``sVV`` - window volume deviation (:math:`sVV`) - Minimize: it is the
deviation of the window volumes. The greater the deviation, the volumes
within the windows have higher variance and are unstable.
- ``xR2`` - mean of the correlation coefficient (:math:`\bar{x}R^2`) -
Maximize: it is the mean of the :math:`R^2` of the fit of the linear
trends with respect to the data. It is a measure that defines how well it
explains that linear trend to the data within the window.
- ``xm`` - mean of the slope (:math:`\bar{x}m`) - Maximize: it is the mean
of the slope of the linear trend between the closing prices in dollars
and the volumes traded in dollars of the cryptocurrency within each
window.
Parameters
----------
    windows_size: 7 or 15, default 7
        Whether the decision matrix should be based on 7- or 15-day
        overlapping moving windows.
References
----------
:cite:p:`van2021evaluation`
:cite:p:`van2021epio_evaluation`
:cite:p:`rajkumar_2021`
"""
paths = {
7: _PATH / "van2021evaluation" / "windows_size_7.json",
15: _PATH / "van2021evaluation" / "windows_size_15.json",
}
path = paths.get(windows_size)
if path is None:
raise ValueError(
f"Windows size must be '7' or '15'. Found {windows_size!r}"
)
with open(path) as fp:
data = json.load(fp)
    return mkdm(**data)
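    # Usage sketch (alternative/criteria counts taken from the description
    # in the docstring above):
    #
    #     >>> dm = load_van2021evaluation(windows_size=15)
    #     >>> dm.shape
    #     (9, 6)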
# =============================================================================
# DOCS
# =============================================================================
"""Container object exposing keys as attributes."""
# =============================================================================
# IMPORTS
# =============================================================================
import copy
from collections.abc import Mapping
# =============================================================================
# DOC INHERITANCE
# =============================================================================
class Bunch(Mapping):
"""Container object exposing keys as attributes.
Concept based on the sklearn.utils.Bunch.
Bunch objects are sometimes used as an output for functions and methods.
They extend dictionaries by enabling values to be accessed by key,
`bunch["value_key"]`, or by an attribute, `bunch.value_key`.
Examples
--------
    >>> b = Bunch("data", {"a": 1, "b": 2})
    >>> b["b"]
    2
    >>> b.b
    2
    >>> "a" in b
    True
    >>> len(b)
    2
"""
def __init__(self, name, data):
self._name = str(name)
self._data = data
def __getitem__(self, k):
"""x.__getitem__(y) <==> x[y]."""
return self._data[k]
def __getattr__(self, a):
"""x.__getattr__(y) <==> x.y."""
try:
return self._data[a]
except KeyError:
raise AttributeError(a)
def __copy__(self):
"""x.__copy__() <==> copy.copy(x)."""
cls = type(self)
return cls(str(self._name), data=self._data)
    def __deepcopy__(self, memo):
        """x.__deepcopy__(memo) <==> copy.deepcopy(x)."""
# extract the class
cls = type(self)
# make the copy but without the data
clone = cls(name=str(self._name), data=None)
# store in the memo that clone is copy of self
# https://docs.python.org/3/library/copy.html
memo[id(self)] = clone
# now we copy the data
clone._data = copy.deepcopy(self._data, memo)
return clone
def __iter__(self):
"""x.__iter__() <==> iter(x)."""
return iter(self._data)
def __len__(self):
"""x.__len__() <==> len(x)."""
return len(self._data)
def __repr__(self):
"""x.__repr__() <==> repr(x)."""
content = repr(set(self._data)) if self._data else "{}"
return f"<{self._name} {content}>"
def __dir__(self):
"""x.__dir__() <==> dir(x)."""
        return super().__dir__() + list(self._data)
# =============================================================================
# DOCS
# =============================================================================
"""Functions for calculate and compare ranks (ordinal series)."""
# =============================================================================
# IMPORTS
# =============================================================================
from collections import namedtuple
import numpy as np
from scipy import stats
# =============================================================================
# RANKER
# =============================================================================
def rank_values(arr, reverse=False):
"""Evaluate an array and return a 1 based ranking.
Parameters
----------
    arr : :py:class:`numpy.ndarray`
        An array of values.
    reverse : :py:class:`bool` default *False*
        By default (*False*) the lower values are ranked first (as with lap
        times in a race or golf scores); if *True*, the highest values are
        ranked first.
Returns
-------
:py:class:`numpy.ndarray`
        Array of rankings; the i-th element holds the ranking of the i-th
        element of the input array.
Examples
--------
.. code-block:: pycon
        >>> from skcriteria.utils.rank import rank_values
>>> # the fastest (the lowest value) goes first
>>> time_laps = [0.59, 1.2, 0.3]
>>> rank_values(time_laps)
array([2, 3, 1])
>>> # highest is better
>>> scores = [140, 200, 98]
>>> rank_values(scores, reverse=True)
array([2, 1, 3])
"""
if reverse:
arr = np.multiply(arr, -1)
return stats.rankdata(arr, "dense").astype(int)
# =============================================================================
# DOMINANCE
# =============================================================================
_Dominance = namedtuple(
"dominance",
["eq", "aDb", "bDa", "eq_where", "aDb_where", "bDa_where"],
)
def dominance(array_a, array_b, reverse=False):
"""Calculate the dominance or general dominance between two arrays.
Parameters
----------
array_a:
The first array to compare.
array_b:
The second array to compare.
reverse: bool (default=False)
        array_a[i] ≻ array_b[i] if array_a[i] > array_b[i] when reverse
        is False; otherwise array_a[i] ≻ array_b[i] if array_a[i] < array_b[i].
        reverse can also be an array of booleans of the same shape as
        array_a and array_b to reverse every item independently.
        In other words, reverse=True assumes the data is a minimization
        problem.
Returns
-------
dominance: _Dominance
        Named tuple with 6 fields:
- eq: How many values are equals in both arrays.
- aDb: How many values of array_a dominate those of the same
position in array_b.
- bDa: How many values of array_b dominate those of the same
position in array_a.
- eq_where: Where the values of array_a are equals those of the same
position in array_b.
- aDb_where: Where the values of array_a dominates those of the same
position in array_b.
- bDa_where: Where the values of array_b dominates those of the same
position in array_a.
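    Example
    -------
    A small illustrative example (element-wise comparison, maximization by
    default):
    .. code-block:: pycon
        >>> from skcriteria.utils.rank import dominance
        >>> d = dominance([1, 2, 3], [3, 2, 1])
        >>> int(d.eq), int(d.aDb), int(d.bDa)
        (1, 1, 1)
        >>> d.aDb_where.tolist()
        [False, False, True]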
"""
if np.shape(array_a) != np.shape(array_b):
raise ValueError("array_a and array_b must be of the same shape")
if isinstance(reverse, bool):
reverse = np.full(np.shape(array_a), reverse)
elif np.shape(array_a) != np.shape(reverse):
raise ValueError(
"reverse must be a bool or an iterable of the same "
"shape than the arrays"
)
array_a = np.asarray(array_a)
array_b = np.asarray(array_b)
eq_where = array_a == array_b
aDb_where = np.where(
reverse,
array_a < array_b,
array_a > array_b,
)
    bDa_where = ~(aDb_where | eq_where)  # a does not dominate b and a != b
return _Dominance(
# resume
eq=np.sum(eq_where),
aDb=np.sum(aDb_where),
bDa=np.sum(bDa_where),
# locations
eq_where=eq_where,
aDb_where=aDb_where,
bDa_where=bDa_where,
    )
from __future__ import print_function
from string import Template
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
from pycuda.compiler import SourceModule
import numpy as np
import skcuda.misc as misc
A = 3
B = 4
C = 5
N = A * B * C
# Define a 3D array:
# x_orig = np.arange(0, N, 1, np.float64)
x_orig = np.asarray(np.random.rand(N), np.float64)
x = x_orig.reshape((A, B, C))
# These functions demonstrate how to convert a linear index into subscripts:
a = lambda i: i // (B * C)
b = lambda i: np.mod(i, B * C) // C
c = lambda i: np.mod(np.mod(i, B * C), C)
# Check that x[ind(i)] is equivalent to x.flat[i]:
subscript = lambda i: (a(i), b(i), c(i))
for i in range(x.size):
assert x.flat[i] == x[subscript(i)]
# Check that x[i,j,k] is equivalent to x.flat[index(i,j,k)]:
index = lambda i, j, k: i * B * C + j * C + k
for i in range(A):
for j in range(B):
for k in range(C):
assert x[i, j, k] == x.flat[index(i, j, k)]
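# Note: the row-major linearization verified above is
#     flat_index = i*B*C + j*C + k
# e.g. with A, B, C = 3, 4, 5 the element x[1, 2, 3] sits at flat index
# 1*20 + 2*5 + 3 = 33, which is exactly what the INDEX macro in the kernel
# below computes.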
func_mod_template = Template("""
// Macro for converting subscripts to linear index:
#define INDEX(a, b, c) a*${B}*${C}+b*${C}+c
__global__ void func(double *x, unsigned int N) {
// Obtain the linear index corresponding to the current thread:
unsigned int idx = blockIdx.y*${max_threads_per_block}*${max_blocks_per_grid}+
blockIdx.x*${max_threads_per_block}+threadIdx.x;
// Convert the linear index to subscripts:
unsigned int a = idx/(${B}*${C});
unsigned int b = (idx%(${B}*${C}))/${C};
unsigned int c = (idx%(${B}*${C}))%${C};
// Use the subscripts to access the array:
if (idx < N) {
if (b == 0)
x[INDEX(a,b,c)] = 100;
}
}
""")
max_threads_per_block, max_block_dim, max_grid_dim = misc.get_dev_attrs(pycuda.autoinit.device)
block_dim, grid_dim = misc.select_block_grid_sizes(pycuda.autoinit.device, x.shape)
max_blocks_per_grid = max(max_grid_dim)
func_mod = \
SourceModule(func_mod_template.substitute(max_threads_per_block=max_threads_per_block,
max_blocks_per_grid=max_blocks_per_grid,
A=A, B=B, C=C))
func = func_mod.get_function('func')
x_gpu = gpuarray.to_gpu(x)
func(x_gpu.gpudata, np.uint32(x_gpu.size),
block=block_dim,
grid=grid_dim)
x_np = x.copy()
x_np[:, 0, :] = 100
print('Success status: ', np.allclose(x_np, x_gpu.get()))
from __future__ import print_function
from string import Template
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
from pycuda.compiler import SourceModule
import numpy as np
import skcuda.misc as misc
A = 3
B = 4
N = A * B
# Define a 2D array:
# x_orig = np.arange(0, N, 1, np.float64)
x_orig = np.asarray(np.random.rand(N), np.float64)
x = x_orig.reshape((A, B))
# These functions demonstrate how to convert a linear index into subscripts:
a = lambda i: i // B
b = lambda i: np.mod(i, B)
# Check that x[subscript(i)] is equivalent to x.flat[i]:
subscript = lambda i: (a(i), b(i))
for i in range(x.size):
assert x.flat[i] == x[subscript(i)]
# Check that x[i, j] is equivalent to x.flat[index(i, j)]:
index = lambda i, j: i * B + j
for i in range(A):
for j in range(B):
assert x[i, j] == x.flat[index(i, j)]
func_mod_template = Template("""
// Macro for converting subscripts to linear index:
#define INDEX(a, b) a*${B}+b
__global__ void func(double *x, unsigned int N) {
// Obtain the linear index corresponding to the current thread:
unsigned int idx = blockIdx.y*${max_threads_per_block}*${max_blocks_per_grid}+
blockIdx.x*${max_threads_per_block}+threadIdx.x;
// Convert the linear index to subscripts:
unsigned int a = idx/${B};
unsigned int b = idx%${B};
// Use the subscripts to access the array:
if (idx < N) {
if (b == 0)
x[INDEX(a,b)] = 100;
}
}
""")
max_threads_per_block, max_block_dim, max_grid_dim = misc.get_dev_attrs(pycuda.autoinit.device)
block_dim, grid_dim = misc.select_block_grid_sizes(pycuda.autoinit.device, x.shape)
max_blocks_per_grid = max(max_grid_dim)
func_mod = \
SourceModule(func_mod_template.substitute(max_threads_per_block=max_threads_per_block,
max_blocks_per_grid=max_blocks_per_grid,
A=A, B=B))
func = func_mod.get_function('func')
x_gpu = gpuarray.to_gpu(x)
func(x_gpu.gpudata, np.uint32(x_gpu.size),
block=block_dim,
grid=grid_dim)
x_np = x.copy()
x_np[:, 0] = 100
print('Success status: %r' % np.allclose(x_np, x_gpu.get()))
from __future__ import print_function
from string import Template
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
from pycuda.compiler import SourceModule
import numpy as np
import skcuda.misc as misc
A = 3
B = 4
C = 5
D = 6
N = A * B * C * D
# Define a 3D array:
# x_orig = np.arange(0, N, 1, np.float64)
x_orig = np.asarray(np.random.rand(N), np.float64)
x = x_orig.reshape((A, B, C, D))
# These functions demonstrate how to convert a linear index into subscripts:
a = lambda i: i // (B * C * D)
b = lambda i: np.mod(i, B * C * D) // (C * D)
c = lambda i: np.mod(np.mod(i, B * C * D), C * D) // D
d = lambda i: np.mod(np.mod(np.mod(i, B * C * D), C * D), D)
# Check that x[subscript(i)] is equivalent to x.flat[i]:
subscript = lambda i: (a(i), b(i), c(i), d(i))
for i in range(x.size):
assert x.flat[i] == x[subscript(i)]
# Check that x[i,j,k,l] is equivalent to x.flat[index(i,j,k,l)]:
index = lambda i, j, k, l: i * B * C * D + j * C * D + k * D + l
for i in range(A):
for j in range(B):
for k in range(C):
for l in range(D):
assert x[i, j, k, l] == x.flat[index(i, j, k, l)]
func_mod_template = Template("""
// Macro for converting subscripts to linear index:
#define INDEX(a, b, c, d) a*${B}*${C}*${D}+b*${C}*${D}+c*${D}+d
__global__ void func(double *x, unsigned int N) {
// Obtain the linear index corresponding to the current thread:
unsigned int idx = blockIdx.y*${max_threads_per_block}*${max_blocks_per_grid}+
blockIdx.x*${max_threads_per_block}+threadIdx.x;
// Convert the linear index to subscripts:
unsigned int a = idx/(${B}*${C}*${D});
unsigned int b = (idx%(${B}*${C}*${D}))/(${C}*${D});
unsigned int c = ((idx%(${B}*${C}*${D}))%(${C}*${D}))/${D};
unsigned int d = ((idx%(${B}*${C}*${D}))%(${C}*${D}))%${D};
// Use the subscripts to access the array:
if (idx < N) {
if (c == 0)
x[INDEX(a,b,c,d)] = 100;
}
}
""")
max_threads_per_block, max_block_dim, max_grid_dim = misc.get_dev_attrs(pycuda.autoinit.device)
block_dim, grid_dim = misc.select_block_grid_sizes(pycuda.autoinit.device, x.shape)
max_blocks_per_grid = max(max_grid_dim)
func_mod = \
SourceModule(func_mod_template.substitute(max_threads_per_block=max_threads_per_block,
max_blocks_per_grid=max_blocks_per_grid,
A=A, B=B, C=C, D=D))
func = func_mod.get_function('func')
x_gpu = gpuarray.to_gpu(x)
func(x_gpu.gpudata, np.uint32(x_gpu.size),
block=block_dim,
grid=grid_dim)
x_np = x.copy()
x_np[:, :, 0, :] = 100
print('Success status: ', np.allclose(x_np, x_gpu.get()))
import numpy as np
import scipy.linalg
import skcuda.magma as magma
import time
import importlib
importlib.reload(magma)
typedict = {'s': np.float32, 'd': np.float64, 'c': np.complex64, 'z': np.complex128}
def test_cpu_gpu(N, t='z'):
"""
N : dimension
    t : type character, one of 's', 'd', 'c', 'z' (default 'z', complex128)
"""
assert t in typedict.keys()
dtype = typedict[t]
if t in ['s', 'd']:
M_gpu = np.random.random((N,N))
elif t in ['c', 'z']:
M_gpu = np.random.random((N,N))+1j*np.random.random((N,N))
M_gpu = M_gpu.astype(dtype)
M_cpu = M_gpu.copy()
# GPU (skcuda + Magma)
# Set up output buffers:
if t in ['s', 'd']:
wr = np.zeros((N,), dtype) # eigenvalues
wi = np.zeros((N,), dtype) # eigenvalues
elif t in ['c', 'z']:
w = np.zeros((N,), dtype) # eigenvalues
vl = np.zeros((N, N), dtype)
vr = np.zeros((N, N), dtype)
# Set up workspace:
if t == 's':
nb = magma.magma_get_sgeqrf_nb(N,N)
if t == 'd':
nb = magma.magma_get_dgeqrf_nb(N,N)
if t == 'c':
nb = magma.magma_get_cgeqrf_nb(N,N)
if t == 'z':
nb = magma.magma_get_zgeqrf_nb(N,N)
lwork = N*(1 + 2*nb)
work = np.zeros((lwork,), dtype)
if t in ['c', 'z']:
rwork= np.zeros((2*N,), dtype)
# Compute:
gpu_time = time.time();
if t == 's':
status = magma.magma_sgeev('N', 'V', N, M_gpu.ctypes.data, N,
wr.ctypes.data, wi.ctypes.data,
vl.ctypes.data, N, vr.ctypes.data, N,
work.ctypes.data, lwork)
if t == 'd':
status = magma.magma_dgeev('N', 'V', N, M_gpu.ctypes.data, N,
wr.ctypes.data, wi.ctypes.data,
vl.ctypes.data, N, vr.ctypes.data, N,
work.ctypes.data, lwork)
if t == 'c':
status = magma.magma_cgeev('N', 'V', N, M_gpu.ctypes.data, N,
w.ctypes.data, vl.ctypes.data, N, vr.ctypes.data, N,
work.ctypes.data, lwork, rwork.ctypes.data)
if t == 'z':
status = magma.magma_zgeev('N', 'V', N, M_gpu.ctypes.data, N,
w.ctypes.data, vl.ctypes.data, N, vr.ctypes.data, N,
work.ctypes.data, lwork, rwork.ctypes.data)
gpu_time = time.time() - gpu_time;
# CPU
cpu_time = time.time()
W, V = scipy.linalg.eig(M_cpu)
cpu_time = time.time() - cpu_time
# Compare
if t in ['s', 'd']:
W_gpu = wr + 1j*wi
elif t in ['c', 'z']:
W_gpu = w
W_gpu.sort()
W.sort()
status = np.allclose(W[:int(N/4)], W_gpu[:int(N/4)], 1e-3)
return gpu_time, cpu_time, status
if __name__=='__main__':
magma.magma_init()
N=1000
print("%10a %10a %10a %10a" % ('type', "GPU", "CPU", "Equal?"))
for t in ['z', 'c', 's', 'd']:
gpu_time, cpu_time, status = test_cpu_gpu(N, t=t)
print("%10a %10.3g, %10.3g, %10s" % (t, gpu_time, cpu_time, status))
    magma.magma_finalize()
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import pycuda.elementwise as el
from pycuda.tools import context_dependent_memoize
import pycuda.tools as tools
import numpy as np
from . import cufft
from .cufft import CUFFT_COMPATIBILITY_NATIVE, \
CUFFT_COMPATIBILITY_FFTW_PADDING, \
CUFFT_COMPATIBILITY_FFTW_ASYMMETRIC, \
CUFFT_COMPATIBILITY_FFTW_ALL
from . import cudart
from . import misc
class Plan:
"""
CUFFT plan class.
This class represents an FFT plan for CUFFT.
Parameters
----------
shape : tuple of ints
Transform shape. May contain more than 3 elements.
in_dtype : { numpy.float32, numpy.float64, numpy.complex64, numpy.complex128 }
Type of input data.
out_dtype : { numpy.float32, numpy.float64, numpy.complex64, numpy.complex128 }
Type of output data.
batch : int
Number of FFTs to configure in parallel (default is 1).
stream : pycuda.driver.Stream
Stream with which to associate the plan. If no stream is specified,
the default stream is used.
mode : int
FFTW compatibility mode. Ignored in CUDA 9.2 and later.
inembed : numpy.array with dtype=numpy.int32
number of elements in each dimension of the input array
istride : int
distance between two successive input elements in the least significant
(innermost) dimension
idist : int
distance between the first element of two consective batches in the
input data
onembed : numpy.array with dtype=numpy.int32
number of elements in each dimension of the output array
ostride : int
distance between two successive output elements in the least significant
(innermost) dimension
odist : int
distance between the first element of two consective batches in the
output data
auto_allocate : bool
indicates whether the caller intends to allocate and manage the work area
"""
def __init__(self, shape, in_dtype, out_dtype, batch=1, stream=None,
mode=0x01, inembed=None, istride=1, idist=0, onembed=None,
ostride=1, odist=0, auto_allocate=True):
if np.isscalar(shape):
self.shape = (shape, )
else:
self.shape = shape
self.in_dtype = in_dtype
self.out_dtype = out_dtype
if batch <= 0:
raise ValueError('batch size must be greater than 0')
self.batch = batch
# Determine type of transformation:
if in_dtype == np.float32 and out_dtype == np.complex64:
self.fft_type = cufft.CUFFT_R2C
self.fft_func = cufft.cufftExecR2C
elif in_dtype == np.complex64 and out_dtype == np.float32:
self.fft_type = cufft.CUFFT_C2R
self.fft_func = cufft.cufftExecC2R
elif in_dtype == np.complex64 and out_dtype == np.complex64:
self.fft_type = cufft.CUFFT_C2C
self.fft_func = cufft.cufftExecC2C
elif in_dtype == np.float64 and out_dtype == np.complex128:
self.fft_type = cufft.CUFFT_D2Z
self.fft_func = cufft.cufftExecD2Z
elif in_dtype == np.complex128 and out_dtype == np.float64:
self.fft_type = cufft.CUFFT_Z2D
self.fft_func = cufft.cufftExecZ2D
elif in_dtype == np.complex128 and out_dtype == np.complex128:
self.fft_type = cufft.CUFFT_Z2Z
self.fft_func = cufft.cufftExecZ2Z
else:
raise ValueError('unsupported input/output type combination')
# Check for double precision support:
capability = misc.get_compute_capability(misc.get_current_device())
if capability < 1.3 and \
(misc.isdoubletype(in_dtype) or misc.isdoubletype(out_dtype)):
raise RuntimeError('double precision requires compute capability '
'>= 1.3 (you have %g)' % capability)
if inembed is not None:
inembed = inembed.ctypes.data
if onembed is not None:
onembed = onembed.ctypes.data
# Set up plan:
if len(self.shape) <= 0:
raise ValueError('invalid transform size')
n = np.asarray(self.shape, np.int32)
self.handle = cufft.cufftCreate()
# Set FFTW compatibility mode:
if cufft._cufft_version <= 9010:
cufft.cufftSetCompatibilityMode(self.handle, mode)
# Set auto-allocate mode
cufft.cufftSetAutoAllocation(self.handle, auto_allocate)
self.worksize = cufft.cufftMakePlanMany(
self.handle, len(self.shape), n.ctypes.data, inembed, istride, idist,
onembed, ostride, odist, self.fft_type, self.batch)
# Associate stream with plan:
        if stream is not None:
cufft.cufftSetStream(self.handle, stream.handle)
def set_work_area(self, work_area):
"""
Associate a caller-managed work area with the plan.
Parameters
----------
work_area : pycuda.gpuarray.GPUArray
"""
cufft.cufftSetWorkArea(self.handle, int(work_area.gpudata))
def __del__(self):
# Don't complain if handle destruction fails because the plan
# may have already been cleaned up:
try:
cufft.cufftDestroy(self.handle)
except:
pass
@context_dependent_memoize
def _get_scale_kernel(dtype):
ctype = tools.dtype_to_ctype(dtype)
return el.ElementwiseKernel(
"{ctype} scale, {ctype} *x".format(ctype=ctype),
"x[i] /= scale")
def _fft(x_gpu, y_gpu, plan, direction, scale=None):
"""
Fast Fourier Transform.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Input array.
y_gpu : pycuda.gpuarray.GPUArray
Output array.
plan : Plan
FFT plan.
direction : { cufft.CUFFT_FORWARD, cufft.CUFFT_INVERSE }
        Transform direction. Only used for complex-to-complex (C2C/Z2Z) transforms.
Optional Parameters
-------------------
scale : int or float
Scale the values in the output array by dividing them by this value.
Notes
-----
This function should not be called directly.
"""
if (x_gpu.gpudata == y_gpu.gpudata) and \
plan.fft_type not in [cufft.CUFFT_C2C, cufft.CUFFT_Z2Z]:
raise ValueError('can only compute in-place transform of complex data')
if direction == cufft.CUFFT_FORWARD and \
plan.in_dtype in np.sctypes['complex'] and \
plan.out_dtype in np.sctypes['float']:
raise ValueError('cannot compute forward complex -> real transform')
if direction == cufft.CUFFT_INVERSE and \
plan.in_dtype in np.sctypes['float'] and \
plan.out_dtype in np.sctypes['complex']:
raise ValueError('cannot compute inverse real -> complex transform')
if plan.fft_type in [cufft.CUFFT_C2C, cufft.CUFFT_Z2Z]:
plan.fft_func(plan.handle, int(x_gpu.gpudata), int(y_gpu.gpudata),
direction)
else:
plan.fft_func(plan.handle, int(x_gpu.gpudata),
int(y_gpu.gpudata))
    # Scale the result by dividing it by the specified scale factor:
if scale is not None:
func = _get_scale_kernel(y_gpu.dtype)
func(y_gpu.dtype.type(scale), y_gpu)
def fft(x_gpu, y_gpu, plan, scale=False):
"""
Fast Fourier Transform.
Compute the FFT of some data in device memory using the
specified plan.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Input array.
y_gpu : pycuda.gpuarray.GPUArray
FFT of input array.
plan : Plan
FFT plan.
scale : bool, optional
If True, scale the computed FFT by the number of elements in
the input array.
Examples
--------
>>> import pycuda.autoinit
>>> import pycuda.gpuarray as gpuarray
>>> import numpy as np
>>> from skcuda.fft import fft, Plan
>>> N = 128
>>> x = np.asarray(np.random.rand(N), np.float32)
>>> xf = np.fft.fft(x)
>>> x_gpu = gpuarray.to_gpu(x)
    >>> xf_gpu = gpuarray.empty(N//2+1, np.complex64)
>>> plan = Plan(x.shape, np.float32, np.complex64)
>>> fft(x_gpu, xf_gpu, plan)
    >>> np.allclose(xf[0:N//2+1], xf_gpu.get(), atol=1e-6)
True
Returns
-------
y_gpu : pycuda.gpuarray.GPUArray
Computed FFT.
Notes
-----
For real to complex transformations, this function computes
N/2+1 non-redundant coefficients of a length-N input signal.
"""
    if scale:
_fft(x_gpu, y_gpu, plan, cufft.CUFFT_FORWARD, x_gpu.size/plan.batch)
else:
_fft(x_gpu, y_gpu, plan, cufft.CUFFT_FORWARD)
def ifft(x_gpu, y_gpu, plan, scale=False):
"""
Inverse Fast Fourier Transform.
Compute the inverse FFT of some data in device memory using the
specified plan.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Input array.
y_gpu : pycuda.gpuarray.GPUArray
Inverse FFT of input array.
plan : Plan
FFT plan.
scale : bool, optional
If True, scale the computed inverse FFT by the number of
elements in the output array.
Examples
--------
>>> import pycuda.autoinit
>>> import pycuda.gpuarray as gpuarray
>>> import numpy as np
>>> from skcuda.fft import fft, Plan
>>> N = 128
>>> x = np.asarray(np.random.rand(N), np.float32)
>>> xf = np.asarray(np.fft.fft(x), np.complex64)
    >>> xf_gpu = gpuarray.to_gpu(xf[0:N//2+1])
>>> x_gpu = gpuarray.empty(N, np.float32)
>>> plan = Plan(N, np.complex64, np.float32)
>>> ifft(xf_gpu, x_gpu, plan, True)
>>> np.allclose(x, x_gpu.get(), atol=1e-6)
True
Notes
-----
For complex to real transformations, this function assumes the
    input contains N/2+1 non-redundant FFT coefficients of a signal of
length N.
"""
    if scale:
_fft(x_gpu, y_gpu, plan, cufft.CUFFT_INVERSE, y_gpu.size/plan.batch)
else:
_fft(x_gpu, y_gpu, plan, cufft.CUFFT_INVERSE)
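
# Illustrative sketch (not part of the original module): using the `batch`
# parameter of Plan to run several independent 1D real-to-complex transforms
# in a single call. Assumes a CUDA context has already been created (e.g. via
# pycuda.autoinit) and that the input batches are stored contiguously.
def _batched_fft_example(n=128, batch=4):
    """Compute `batch` independent length-`n` R2C FFTs with one plan."""
    import pycuda.gpuarray as gpuarray
    x = np.random.rand(batch, n).astype(np.float32)
    x_gpu = gpuarray.to_gpu(x)
    xf_gpu = gpuarray.empty((batch, n//2+1), np.complex64)
    plan = Plan(n, np.float32, np.complex64, batch=batch)
    fft(x_gpu, xf_gpu, plan)
    # Each row of xf_gpu should match numpy's rfft of the corresponding row:
    return np.allclose(np.fft.rfft(x, axis=1), xf_gpu.get(), atol=1e-4)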
if __name__ == "__main__":
import doctest
    doctest.testmod()

# End of skcuda/fft.py
import atexit
import ctypes.util
import platform
from string import Template
import sys
import warnings
import numpy as np
from . import cuda
# Load library:
_version_list = [10.1, 10.0, 9.2, 9.1, 9.0, 8.0, 7.5, 7.0, 6.5, 6.0, 5.5, 5.0, 4.0]
if 'linux' in sys.platform:
_libcusparse_libname_list = ['libcusparse.so'] + \
['libcusparse.so.%s' % v for v in _version_list]
elif sys.platform == 'darwin':
_libcusparse_libname_list = ['libcusparse.dylib']
elif sys.platform == 'win32':
if platform.machine().endswith('64'):
_libcusparse_libname_list = ['cusparse.dll'] + \
['cusparse64_%s.dll' % (int(v) if v >= 10 else int(10*v))for v in _version_list]
else:
_libcusparse_libname_list = ['cusparse.dll'] + \
['cusparse32_%s.dll' % (int(v) if v >= 10 else int(10*v))for v in _version_list]
else:
raise RuntimeError('unsupported platform')
# Print understandable error message when library cannot be found:
_libcusparse = None
for _libcusparse_libname in _libcusparse_libname_list:
try:
if sys.platform == 'win32':
_libcusparse = ctypes.windll.LoadLibrary(_libcusparse_libname)
else:
_libcusparse = ctypes.cdll.LoadLibrary(_libcusparse_libname)
except OSError:
pass
else:
break
if _libcusparse is None:
    raise OSError('CUDA sparse library not found')
class cusparseError(Exception):
"""CUSPARSE error"""
pass
class cusparseStatusNotInitialized(cusparseError):
"""CUSPARSE library not initialized"""
pass
class cusparseStatusAllocFailed(cusparseError):
"""CUSPARSE resource allocation failed"""
pass
class cusparseStatusInvalidValue(cusparseError):
"""Unsupported value passed to the function"""
pass
class cusparseStatusArchMismatch(cusparseError):
"""Function requires a feature absent from the device architecture"""
pass
class cusparseStatusMappingError(cusparseError):
"""An access to GPU memory space failed"""
pass
class cusparseStatusExecutionFailed(cusparseError):
"""GPU program failed to execute"""
pass
class cusparseStatusInternalError(cusparseError):
"""An internal CUSPARSE operation failed"""
pass
class cusparseStatusMatrixTypeNotSupported(cusparseError):
"""The matrix type is not supported by this function"""
pass
cusparseExceptions = {
1: cusparseStatusNotInitialized,
2: cusparseStatusAllocFailed,
3: cusparseStatusInvalidValue,
4: cusparseStatusArchMismatch,
5: cusparseStatusMappingError,
6: cusparseStatusExecutionFailed,
7: cusparseStatusInternalError,
8: cusparseStatusMatrixTypeNotSupported,
}
# Matrix types:
CUSPARSE_MATRIX_TYPE_GENERAL = 0
CUSPARSE_MATRIX_TYPE_SYMMETRIC = 1
CUSPARSE_MATRIX_TYPE_HERMITIAN = 2
CUSPARSE_MATRIX_TYPE_TRIANGULAR = 3
CUSPARSE_FILL_MODE_LOWER = 0
CUSPARSE_FILL_MODE_UPPER = 1
# Whether or not a matrix's diagonal entries are unity:
CUSPARSE_DIAG_TYPE_NON_UNIT = 0
CUSPARSE_DIAG_TYPE_UNIT = 1
# Matrix index bases:
CUSPARSE_INDEX_BASE_ZERO = 0
CUSPARSE_INDEX_BASE_ONE = 1
# Operation types:
CUSPARSE_OPERATION_NON_TRANSPOSE = 0
CUSPARSE_OPERATION_TRANSPOSE = 1
CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE = 2
# Whether to parse the elements of a dense matrix row-wise or column-wise.
CUSPARSE_DIRECTION_ROW = 0
CUSPARSE_DIRECTION_COLUMN = 1
# Helper functions:
class cusparseMatDescr(ctypes.Structure):
_fields_ = [
('MatrixType', ctypes.c_int),
('FillMode', ctypes.c_int),
('DiagType', ctypes.c_int),
('IndexBase', ctypes.c_int)
]
def cusparseCheckStatus(status):
"""
Raise CUSPARSE exception
Raise an exception corresponding to the specified CUSPARSE error
code.
Parameters
----------
status : int
CUSPARSE error code.
See Also
--------
cusparseExceptions
"""
if status != 0:
try:
raise cusparseExceptions[status]
except KeyError:
raise cusparseError
_libcusparse.cusparseCreate.restype = int
_libcusparse.cusparseCreate.argtypes = [ctypes.c_void_p]
def cusparseCreate():
"""
Initialize CUSPARSE.
Initializes CUSPARSE and creates a handle to a structure holding
the CUSPARSE library context.
Returns
-------
handle : int
CUSPARSE library context.
"""
handle = ctypes.c_int()
status = _libcusparse.cusparseCreate(ctypes.byref(handle))
cusparseCheckStatus(status)
return handle.value
_libcusparse.cusparseDestroy.restype = int
_libcusparse.cusparseDestroy.argtypes = [ctypes.c_int]
def cusparseDestroy(handle):
"""
Release CUSPARSE resources.
Releases hardware resources used by CUSPARSE
Parameters
----------
handle : int
CUSPARSE library context.
"""
status = _libcusparse.cusparseDestroy(handle)
cusparseCheckStatus(status)
_libcusparse.cusparseGetVersion.restype = int
_libcusparse.cusparseGetVersion.argtypes = [ctypes.c_int,
ctypes.c_void_p]
def cusparseGetVersion(handle):
"""
Return CUSPARSE library version.
Returns the version number of the CUSPARSE library.
Parameters
----------
handle : int
CUSPARSE library context.
Returns
-------
version : int
CUSPARSE library version number.
"""
version = ctypes.c_int()
status = _libcusparse.cusparseGetVersion(handle,
ctypes.byref(version))
cusparseCheckStatus(status)
return version.value
_libcusparse.cusparseSetStream.restype = int
_libcusparse.cusparseSetStream.argtypes = [ctypes.c_int,
ctypes.c_int]
def cusparseSetStream(handle, id):
"""
Sets the CUSPARSE stream in which kernels will run.
Parameters
----------
handle : int
CUSPARSE library context.
id : int
Stream ID.
"""
status = _libcusparse.cusparseSetStream(handle, id)
cusparseCheckStatus(status)
_libcusparse.cusparseCreateMatDescr.restype = int
_libcusparse.cusparseCreateMatDescr.argtypes = [ctypes.POINTER(cusparseMatDescr)]
def cusparseCreateMatDescr():
"""
Initialize a sparse matrix descriptor.
Initializes the `MatrixType` and `IndexBase` fields of the matrix
descriptor to the default values `CUSPARSE_MATRIX_TYPE_GENERAL`
and `CUSPARSE_INDEX_BASE_ZERO`.
Returns
-------
desc : cusparseMatDescr
Matrix descriptor.
"""
    desc = cusparseMatDescr()
status = _libcusparse.cusparseCreateMatDescr(ctypes.byref(desc))
cusparseCheckStatus(status)
return desc
_libcusparse.cusparseDestroyMatDescr.restype = int
_libcusparse.cusparseDestroyMatDescr.argtypes = [cusparseMatDescr]
def cusparseDestroyMatDescr(desc):
"""
Releases the memory allocated for the matrix descriptor.
Parameters
----------
desc : cusparseMatDescr
Matrix descriptor.
"""
status = _libcusparse.cusparseDestroyMatDescr(desc)
cusparseCheckStatus(status)
_libcusparse.cusparseSetMatType.restype = int
_libcusparse.cusparseSetMatType.argtypes = [cusparseMatDescr,
ctypes.c_int]
def cusparseSetMatType(desc, type):
"""
Sets the matrix type of the specified matrix.
Parameters
----------
desc : cusparseMatDescr
Matrix descriptor.
type : int
Matrix type.
"""
status = _libcusparse.cusparseSetMatType(desc, type)
cusparseCheckStatus(status)
_libcusparse.cusparseGetMatType.restype = int
_libcusparse.cusparseGetMatType.argtypes = [cusparseMatDescr]
def cusparseGetMatType(desc):
"""
Gets the matrix type of the specified matrix.
Parameters
----------
desc : cusparseMatDescr
Matrix descriptor.
Returns
-------
type : int
Matrix type.
"""
return _libcusparse.cusparseGetMatType(desc)
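
# Illustrative sketch (not part of the original module): the intended call
# sequence for the handle and matrix-descriptor helpers defined above. Whether
# these low-level wrappers succeed depends on the installed CUSPARSE version;
# the sketch is only meant to show how the pieces fit together.
def _cusparse_descr_example():
    handle = cusparseCreate()
    try:
        version = cusparseGetVersion(handle)
        desc = cusparseCreateMatDescr()
        cusparseSetMatType(desc, CUSPARSE_MATRIX_TYPE_GENERAL)
        mat_type = cusparseGetMatType(desc)
        cusparseDestroyMatDescr(desc)
        return version, mat_type
    finally:
        cusparseDestroy(handle)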
# Format conversion functions:
_libcusparse.cusparseSnnz.restype = int
_libcusparse.cusparseSnnz.argtypes = [ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
cusparseMatDescr,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p]
def cusparseSnnz(handle, dirA, m, n, descrA, A, lda,
nnzPerRowColumn, nnzTotalDevHostPtr):
"""
Compute number of non-zero elements per row, column, or dense matrix.
Parameters
----------
handle : int
CUSPARSE library context.
dirA : int
Data direction of elements.
m : int
Rows in A.
n : int
Columns in A.
descrA : cusparseMatDescr
Matrix descriptor.
A : pycuda.gpuarray.GPUArray
Dense matrix of dimensions (lda, n).
lda : int
Leading dimension of A.
Returns
-------
nnzPerRowColumn : pycuda.gpuarray.GPUArray
Array of length m or n containing the number of
non-zero elements per row or column, respectively.
nnzTotalDevHostPtr : pycuda.gpuarray.GPUArray
Total number of non-zero elements in device or host memory.
"""
# Unfinished:
nnzPerRowColumn = gpuarray.empty()
nnzTotalDevHostPtr = gpuarray.empty()
status = _libcusparse.cusparseSnnz(handle, dirA, m, n,
descrA, int(A), lda,
int(nnzPerRowColumn), int(nnzTotalDevHostPtr))
cusparseCheckStatus(status)
    return nnzPerRowColumn, nnzTotalDevHostPtr
_libcusparse.cusparseSdense2csr.restype = int
_libcusparse.cusparseSdense2csr.argtypes = [ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
cusparseMatDescr,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p]
def cusparseSdense2csr(handle, m, n, descrA, A, lda,
nnzPerRow, csrValA, csrRowPtrA, csrColIndA):
# Unfinished
    pass

# End of skcuda/cusparse.py
import ctypes
import operator
import re
import sys
# Load library:
_linux_version_list = [10.1, 10.0, 9.2, 9.1, 9.0, 8.0, 7.5, 7.0, 6.5, 6.0, 5.5, 5.0, 4.0]
_win32_version_list = [10, 100, 92, 91, 90, 80, 75, 70, 65, 60, 55, 50, 40]
if 'linux' in sys.platform:
_libcufft_libname_list = ['libcufft.so'] + \
['libcufft.so.%s' % v for v in _linux_version_list]
elif sys.platform == 'darwin':
_libcufft_libname_list = ['libcufft.dylib']
elif sys.platform == 'win32':
if sys.maxsize > 2**32:
_libcufft_libname_list = ['cufft.dll'] + \
['cufft64_%s.dll' % v for v in _win32_version_list]
else:
_libcufft_libname_list = ['cufft.dll'] + \
['cufft32_%s.dll' % v for v in _win32_version_list]
else:
raise RuntimeError('unsupported platform')
# Print understandable error message when library cannot be found:
_libcufft = None
for _libcufft_libname in _libcufft_libname_list:
try:
if sys.platform == 'win32':
_libcufft = ctypes.windll.LoadLibrary(_libcufft_libname)
else:
_libcufft = ctypes.cdll.LoadLibrary(_libcufft_libname)
except OSError:
pass
else:
break
if _libcufft == None:
raise OSError('cufft library not found')
# General CUFFT error:
class cufftError(Exception):
"""CUFFT error"""
pass
# Exceptions corresponding to different CUFFT errors:
class cufftInvalidPlan(cufftError):
"""CUFFT was passed an invalid plan handle."""
pass
class cufftAllocFailed(cufftError):
"""CUFFT failed to allocate GPU memory."""
pass
class cufftInvalidType(cufftError):
"""The user requested an unsupported type."""
pass
class cufftInvalidValue(cufftError):
"""The user specified a bad memory pointer."""
pass
class cufftInternalError(cufftError):
"""Internal driver error."""
pass
class cufftExecFailed(cufftError):
"""CUFFT failed to execute an FFT on the GPU."""
pass
class cufftSetupFailed(cufftError):
"""The CUFFT library failed to initialize."""
pass
class cufftInvalidSize(cufftError):
"""The user specified an unsupported FFT size."""
pass
class cufftUnalignedData(cufftError):
"""Input or output does not satisfy texture alignment requirements."""
pass
cufftExceptions = {
0x1: cufftInvalidPlan,
0x2: cufftAllocFailed,
0x3: cufftInvalidType,
0x4: cufftInvalidValue,
0x5: cufftInternalError,
0x6: cufftExecFailed,
0x7: cufftSetupFailed,
0x8: cufftInvalidSize,
0x9: cufftUnalignedData
}
class _types:
"""Some alias types."""
plan = ctypes.c_int
stream = ctypes.c_void_p
worksize = ctypes.c_size_t
def cufftCheckStatus(status):
"""Raise an exception if the specified CUBLAS status is an error."""
if status != 0:
try:
e = cufftExceptions[status]
except KeyError:
raise cufftError
else:
raise e
_libcufft.cufftGetVersion.restype = int
_libcufft.cufftGetVersion.argtypes = [ctypes.c_void_p]
def cufftGetVersion():
"""
Get CUFFT version.
"""
version = ctypes.c_int()
result = _libcufft.cufftGetVersion(ctypes.byref(version))
cufftCheckStatus(result)
return version.value
_cufft_version = int(cufftGetVersion())
class _cufft_version_req(object):
"""
Decorator to replace function with a placeholder that raises an exception
if a specified condition on the installed CUFFT version `v` is not satisfied.
"""
def __init__(self, v, op):
self.op_str = op
if op == '>':
self.op = operator.gt
elif op == '>=':
self.op = operator.ge
elif op == '==':
self.op = operator.eq
elif op == '<':
self.op = operator.lt
elif op == '<=':
self.op = operator.le
else:
raise ValueError('unrecognized comparison operator')
self.vs = str(v)
if isinstance(v, int):
self.vi = str(v)
if len(self.vi) != 4:
raise ValueError('integer version number must be 4 digits')
else:
major, minor = re.search(r'(\d+)\.(\d+)', self.vs).groups()
self.vi = major.ljust(len(major)+1, '0')+minor.ljust(2, '0')
def __call__(self,f):
def f_new(*args,**kwargs):
raise NotImplementedError('CUFFT '+self.op_str+' '+self.vs+' required')
f_new.__doc__ = f.__doc__
if self.op(_cufft_version, int(self.vi)):
return f
else:
return f_new
# Data transformation types:
CUFFT_R2C = 0x2a
CUFFT_C2R = 0x2c
CUFFT_C2C = 0x29
CUFFT_D2Z = 0x6a
CUFFT_Z2D = 0x6c
CUFFT_Z2Z = 0x69
# Transformation directions:
CUFFT_FORWARD = -1
CUFFT_INVERSE = 1
# FFTW compatibility modes:
CUFFT_COMPATIBILITY_NATIVE = 0x00
CUFFT_COMPATIBILITY_FFTW_PADDING = 0x01
CUFFT_COMPATIBILITY_FFTW_ASYMMETRIC = 0x02
CUFFT_COMPATIBILITY_FFTW_ALL = 0x03
# FFT functions implemented by CUFFT:
_libcufft.cufftPlan1d.restype = int
_libcufft.cufftPlan1d.argtypes = [ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int]
def cufftPlan1d(nx, fft_type, batch):
"""
Create 1D FFT plan configuration.
References
----------
`cufftPlan1d <http://docs.nvidia.com/cuda/cufft/#function-cufftplan1d>`_
"""
plan = _types.plan()
status = _libcufft.cufftPlan1d(ctypes.byref(plan), nx, fft_type, batch)
cufftCheckStatus(status)
return plan
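
# Illustrative sketch (not part of the original module): the basic plan
# lifecycle using the ctypes wrappers directly (most users should prefer the
# high-level skcuda.fft interface). cufftExecR2C and cufftDestroy are defined
# further below in this module; the sketch assumes a CUDA context has already
# been created, e.g. via pycuda.autoinit.
def _cufft_plan1d_example(n=64):
    import numpy as np
    import pycuda.gpuarray as gpuarray
    x = np.random.rand(n).astype(np.float32)
    x_gpu = gpuarray.to_gpu(x)
    xf_gpu = gpuarray.empty(n//2 + 1, np.complex64)
    plan = cufftPlan1d(n, CUFFT_R2C, 1)
    try:
        cufftExecR2C(plan, int(x_gpu.gpudata), int(xf_gpu.gpudata))
    finally:
        cufftDestroy(plan)
    return np.allclose(np.fft.rfft(x), xf_gpu.get(), atol=1e-4)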
_libcufft.cufftPlan2d.restype = int
_libcufft.cufftPlan2d.argtypes = [ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int]
def cufftPlan2d(nx, ny, fft_type):
"""
Create 2D FFT plan configuration.
References
----------
`cufftPlan2d <http://docs.nvidia.com/cuda/cufft/#function-cufftplan2d>`_
"""
plan = _types.plan()
status = _libcufft.cufftPlan2d(ctypes.byref(plan), nx, ny,
fft_type)
cufftCheckStatus(status)
return plan
_libcufft.cufftPlan3d.restype = int
_libcufft.cufftPlan3d.argtypes = [ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int]
def cufftPlan3d(nx, ny, nz, fft_type):
"""
Create 3D FFT plan configuration.
References
----------
`cufftPlan3d <http://docs.nvidia.com/cuda/cufft/#function-cufftplan3d>`_
"""
plan = _types.plan()
status = _libcufft.cufftPlan3d(ctypes.byref(plan), nx, ny, nz,
fft_type)
cufftCheckStatus(status)
return plan
_libcufft.cufftPlanMany.restype = int
_libcufft.cufftPlanMany.argtypes = [ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int]
def cufftPlanMany(rank, n,
inembed, istride, idist,
onembed, ostride, odist, fft_type, batch):
"""
Create batched FFT plan configuration.
References
----------
`cufftPlanMany <http://docs.nvidia.com/cuda/cufft/#function-cufftplanmany>`_
"""
plan = _types.plan()
status = _libcufft.cufftPlanMany(ctypes.byref(plan), rank, n,
inembed, istride, idist,
onembed, ostride, odist,
fft_type, batch)
cufftCheckStatus(status)
return plan
_libcufft.cufftDestroy.restype = int
_libcufft.cufftDestroy.argtypes = [_types.plan]
def cufftDestroy(plan):
"""
Destroy FFT plan.
References
----------
`cufftDestroy <http://docs.nvidia.com/cuda/cufft/#function-cufftdestroy>`_
"""
status = _libcufft.cufftDestroy(plan)
cufftCheckStatus(status)
if _cufft_version <= 9010:
_libcufft.cufftSetCompatibilityMode.restype = int
_libcufft.cufftSetCompatibilityMode.argtypes = [_types.plan,
ctypes.c_int]
@_cufft_version_req(9.1, '<=')
def cufftSetCompatibilityMode(plan, mode):
"""
Set FFTW compatibility mode.
References
----------
`cufftSetCompatibilityMode <http://docs.nvidia.com/cuda/cufft/#function-cufftsetcompatibilitymode>`_
"""
status = _libcufft.cufftSetCompatibilityMode(plan, mode)
cufftCheckStatus(status)
_libcufft.cufftExecC2C.restype = int
_libcufft.cufftExecC2C.argtypes = [_types.plan,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int]
def cufftExecC2C(plan, idata, odata, direction):
"""Execute single precision complex-to-complex transform plan as
specified by `direction`.
References
----------
`cufftExecC2C <http://docs.nvidia.com/cuda/cufft/#function-cufftexecc2c-cufftexecz2z>`_
"""
status = _libcufft.cufftExecC2C(plan, idata, odata,
direction)
cufftCheckStatus(status)
_libcufft.cufftExecR2C.restype = int
_libcufft.cufftExecR2C.argtypes = [_types.plan,
ctypes.c_void_p,
ctypes.c_void_p]
def cufftExecR2C(plan, idata, odata):
"""
Execute single precision real-to-complex forward transform plan.
References
----------
`cufftExecR2C <http://docs.nvidia.com/cuda/cufft/#function-cufftexecr2c-cufftexecd2z>`_
"""
status = _libcufft.cufftExecR2C(plan, idata, odata)
cufftCheckStatus(status)
_libcufft.cufftExecC2R.restype = int
_libcufft.cufftExecC2R.argtypes = [_types.plan,
ctypes.c_void_p,
ctypes.c_void_p]
def cufftExecC2R(plan, idata, odata):
"""
Execute single precision complex-to-real reverse transform plan.
References
----------
`cufftExecC2R <http://docs.nvidia.com/cuda/cufft/#function-cufftexecc2r-cufftexecz2d>`_
"""
status = _libcufft.cufftExecC2R(plan, idata, odata)
cufftCheckStatus(status)
_libcufft.cufftExecZ2Z.restype = int
_libcufft.cufftExecZ2Z.argtypes = [_types.plan,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int]
def cufftExecZ2Z(plan, idata, odata, direction):
"""
Execute double precision complex-to-complex transform plan as
specified by `direction`.
References
----------
`cufftExecZ2Z <http://docs.nvidia.com/cuda/cufft/#function-cufftexecc2c-cufftexecz2z>`_
"""
status = _libcufft.cufftExecZ2Z(plan, idata, odata,
direction)
cufftCheckStatus(status)
_libcufft.cufftExecD2Z.restype = int
_libcufft.cufftExecD2Z.argtypes = [_types.plan,
ctypes.c_void_p,
ctypes.c_void_p]
def cufftExecD2Z(plan, idata, odata):
"""
Execute double precision real-to-complex forward transform plan.
References
----------
`cufftExecD2Z <http://docs.nvidia.com/cuda/cufft/#function-cufftexecr2c-cufftexecd2z>`_
"""
status = _libcufft.cufftExecD2Z(plan, idata, odata)
cufftCheckStatus(status)
_libcufft.cufftExecZ2D.restype = int
_libcufft.cufftExecZ2D.argtypes = [_types.plan,
ctypes.c_void_p,
ctypes.c_void_p]
def cufftExecZ2D(plan, idata, odata):
"""
Execute double precision complex-to-real transform plan.
References
----------
`cufftExecZ2D <http://docs.nvidia.com/cuda/cufft/#function-cufftexecc2r-cufftexecz2d>`_
"""
status = _libcufft.cufftExecZ2D(plan, idata, odata)
cufftCheckStatus(status)
_libcufft.cufftSetStream.restype = int
_libcufft.cufftSetStream.argtypes = [_types.plan,
_types.stream]
def cufftSetStream(plan, stream):
"""
Associate a CUDA stream with a CUFFT plan.
References
----------
`cufftSetStream <http://docs.nvidia.com/cuda/cufft/#function-cufftsetstream>`_
"""
status = _libcufft.cufftSetStream(plan, stream)
cufftCheckStatus(status)
_libcufft.cufftEstimate1d.restype = int
_libcufft.cufftEstimate1d.argtypes = [ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p]
def cufftEstimate1d(nx, fft_type, batch=1):
"""
Return estimated work area for 1D FFT.
References
----------
`cufftEstimate1d <http://docs.nvidia.com/cuda/cufft/#function-cufftestimate1d>`_
"""
worksize = _types.worksize()
status = _libcufft.cufftEstimate1d(nx, fft_type, batch,
ctypes.byref(worksize))
cufftCheckStatus(status)
return worksize.value
_libcufft.cufftEstimate2d.restype = int
_libcufft.cufftEstimate2d.argtypes = [ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p]
def cufftEstimate2d(nx, ny, fft_type):
"""
Return estimated work area for 2D FFT.
References
----------
`cufftEstimate2d <http://docs.nvidia.com/cuda/cufft/#function-cufftestimate2d>`_
"""
worksize = _types.worksize()
status = _libcufft.cufftEstimate2d(nx, ny, fft_type,
ctypes.byref(worksize))
cufftCheckStatus(status)
return worksize.value
_libcufft.cufftEstimate3d.restype = int
_libcufft.cufftEstimate3d.argtypes = [ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p]
def cufftEstimate3d(nx, ny, nz, fft_type):
"""
Return estimated work area for 3D FFT.
References
----------
`cufftEstimate3d <http://docs.nvidia.com/cuda/cufft/#function-cufftestimate3d>`_
"""
worksize = _types.worksize()
status = _libcufft.cufftEstimate3d(nx, ny, nz, fft_type,
ctypes.byref(worksize))
cufftCheckStatus(status)
return worksize.value
_libcufft.cufftEstimateMany.restype = int
_libcufft.cufftEstimateMany.argtypes = [ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p]
def cufftEstimateMany(rank, n,
inembed, istride, idist,
onembed, ostride, odist, fft_type, batch):
"""
Return estimated work area for batched FFT.
References
----------
`cufftEstimateMany <http://docs.nvidia.com/cuda/cufft/#function-cufftestimatemany>`_
"""
worksize = _types.worksize()
status = _libcufft.cufftEstimateMany(rank, n,
inembed, istride, idist,
onembed, ostride, odist,
fft_type, batch,
ctypes.byref(worksize))
cufftCheckStatus(status)
return worksize.value
_libcufft.cufftGetSize1d.restype = int
_libcufft.cufftGetSize1d.argtypes = [_types.plan,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p]
def cufftGetSize1d(plan, nx, fft_type, batch=1):
"""
Return more accurate estimate of work area size required for 1D FFT,
taking into account any plan settings that may have been made.
References
----------
`cufftGetSize1d <http://docs.nvidia.com/cuda/cufft/#function-cufftgetsize1d>`_
"""
worksize = _types.worksize()
status = _libcufft.cufftGetSize1d(plan, nx, fft_type, batch,
ctypes.byref(worksize))
cufftCheckStatus(status)
return worksize.value
_libcufft.cufftGetSize2d.restype = int
_libcufft.cufftGetSize2d.argtypes = [_types.plan,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p]
def cufftGetSize2d(plan, nx, ny, fft_type):
"""
Return more accurate estimate of work area size required for 2D FFT,
taking into account any plan settings that may have been made.
References
----------
`cufftGetSize2d <http://docs.nvidia.com/cuda/cufft/#function-cufftgetsize2d>`_
"""
worksize = _types.worksize()
status = _libcufft.cufftGetSize2d(plan, nx, ny, fft_type,
ctypes.byref(worksize))
cufftCheckStatus(status)
return worksize.value
_libcufft.cufftGetSize3d.restype = int
_libcufft.cufftGetSize3d.argtypes = [_types.plan,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p]
def cufftGetSize3d(plan, nx, ny, nz, fft_type):
"""
Return more accurate estimate of work area size required for 3D FFT,
taking into account any plan settings that may have been made.
References
----------
`cufftGetSize3d <http://docs.nvidia.com/cuda/cufft/#function-cufftgetsize3d>`_
"""
worksize = _types.worksize()
status = _libcufft.cufftGetSize3d(plan, nx, ny, nz, fft_type,
ctypes.byref(worksize))
cufftCheckStatus(status)
return worksize.value
_libcufft.cufftGetSizeMany.restype = int
_libcufft.cufftGetSizeMany.argtypes = [_types.plan,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p]
def cufftGetSizeMany(plan, rank, n,
inembed, istride, idist,
onembed, ostride, odist, fft_type, batch):
"""
Return more accurate estimate of work area size required for batched FFT,
taking into account any plan settings that may have been made.
References
----------
`cufftGetSizeMany <http://docs.nvidia.com/cuda/cufft/#function-cufftgetsizemany>`_
"""
worksize = _types.worksize()
status = _libcufft.cufftGetSizeMany(plan, rank, n,
inembed, istride, idist,
onembed, ostride, odist,
fft_type, batch,
ctypes.byref(worksize))
cufftCheckStatus(status)
return worksize.value
_libcufft.cufftGetSize.restype = int
_libcufft.cufftGetSize.argtypes = [_types.plan,
ctypes.c_void_p]
def cufftGetSize(plan):
"""
Return actual size of work area for FFT described in plan.
References
----------
`cufftGetSize <http://docs.nvidia.com/cuda/cufft/#function-cufftgetsize>`_
"""
worksize = _types.worksize()
status = _libcufft.cufftGetSize(plan, ctypes.byref(worksize))
cufftCheckStatus(status)
return worksize.value
_libcufft.cufftCreate.restype = int
_libcufft.cufftCreate.argtypes = [ctypes.c_void_p]
def cufftCreate():
"""
Create FFT plan handle.
References
----------
`cufftCreate <http://docs.nvidia.com/cuda/cufft/#function-cufftcreate>`_
"""
plan = _types.plan()
status = _libcufft.cufftCreate(ctypes.byref(plan))
cufftCheckStatus(status)
return plan
_libcufft.cufftMakePlan1d.restype = int
_libcufft.cufftMakePlan1d.argtypes = [_types.plan,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p]
def cufftMakePlan1d(plan, nx, fft_type, batch):
"""
Create 1D FFT plan configuration.
References
----------
`cufftMakePlan1d <http://docs.nvidia.com/cuda/cufft/#function-cufftmakeplan1d>`_
"""
worksize = _types.worksize()
status = _libcufft.cufftMakePlan1d(plan, nx, fft_type, batch,
ctypes.byref(worksize))
cufftCheckStatus(status)
return worksize.value
_libcufft.cufftMakePlan2d.restype = int
_libcufft.cufftMakePlan2d.argtypes = [_types.plan,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p]
def cufftMakePlan2d(plan, nx, ny, fft_type):
"""
Create 2D FFT plan configuration.
References
----------
`cufftMakePlan2d <http://docs.nvidia.com/cuda/cufft/#function-cufftmakeplan2d>`_
"""
worksize = _types.worksize()
status = _libcufft.cufftMakePlan2d(plan, nx, ny, fft_type,
ctypes.byref(worksize))
cufftCheckStatus(status)
return worksize.value
_libcufft.cufftMakePlan3d.restype = int
_libcufft.cufftMakePlan3d.argtypes = [_types.plan,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p]
def cufftMakePlan3d(plan, nx, ny, nz, fft_type):
"""
Create 3D FFT plan configuration.
References
----------
`cufftMakePlan3d <http://docs.nvidia.com/cuda/cufft/#function-cufftmakeplan3d>`_
"""
worksize = _types.worksize()
status = _libcufft.cufftMakePlan3d(plan, nx, ny, nz, fft_type,
ctypes.byref(worksize))
cufftCheckStatus(status)
return worksize.value
_libcufft.cufftMakePlanMany.restype = int
_libcufft.cufftMakePlanMany.argtypes = [_types.plan,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p]
def cufftMakePlanMany(plan, rank, n,
inembed, istride, idist,
onembed, ostride, odist, fft_type, batch):
"""
Create batched FFT plan configuration.
References
----------
`cufftMakePlanMany <http://docs.nvidia.com/cuda/cufft/#function-cufftmakeplanmany>`_
"""
worksize = _types.worksize()
status = _libcufft.cufftMakePlanMany(plan, rank, n,
inembed, istride, idist,
onembed, ostride, odist,
fft_type, batch,
ctypes.byref(worksize))
cufftCheckStatus(status)
return worksize.value
_libcufft.cufftSetAutoAllocation.restype = int
_libcufft.cufftSetAutoAllocation.argtypes = [_types.plan,
ctypes.c_int]
def cufftSetAutoAllocation(plan, auto_allocate):
"""
Indicate whether the caller intends to allocate and manage work areas for
plans that have been generated.
References
----------
`cufftSetAutoAllocation <http://docs.nvidia.com/cuda/cufft/#function-cufftsetautoallocation>`_
"""
status = _libcufft.cufftSetAutoAllocation(plan, auto_allocate)
cufftCheckStatus(status)
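
# Illustrative sketch (not part of the original module): manual work-area
# management with the ctypes wrappers, mirroring what skcuda.fft.Plan does
# when auto_allocate=False. cufftSetWorkArea is defined just below; the sketch
# assumes an initialized CUDA context.
def _manual_workarea_example(n=256):
    import numpy as np
    import pycuda.driver as drv
    import pycuda.gpuarray as gpuarray
    plan = cufftCreate()
    try:
        # Disable automatic work-area allocation before creating the plan:
        cufftSetAutoAllocation(plan, 0)
        n_arr = np.asarray([n], np.int32)
        worksize = cufftMakePlanMany(plan, 1, n_arr.ctypes.data,
                                     None, 1, 0, None, 1, 0, CUFFT_C2C, 1)
        # Allocate and attach the work area ourselves:
        work_area = drv.mem_alloc(max(worksize, 1))
        cufftSetWorkArea(plan, int(work_area))
        x_gpu = gpuarray.to_gpu(np.random.rand(n).astype(np.complex64))
        y_gpu = gpuarray.empty(n, np.complex64)
        cufftExecC2C(plan, int(x_gpu.gpudata), int(y_gpu.gpudata), CUFFT_FORWARD)
        return np.allclose(np.fft.fft(x_gpu.get()), y_gpu.get(), atol=1e-3)
    finally:
        cufftDestroy(plan)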
_libcufft.cufftSetWorkArea.restype = int
_libcufft.cufftSetWorkArea.argtypes = [_types.plan,
ctypes.c_void_p]
def cufftSetWorkArea(plan, work_area):
"""
Override the work area pointer associated with a plan.
References
----------
    `cufftSetWorkArea <http://docs.nvidia.com/cuda/cufft/#function-cufftsetworkarea>`_
"""
status = _libcufft.cufftSetWorkArea(plan, work_area)
    cufftCheckStatus(status)

# End of skcuda/cufft.py
from __future__ import absolute_import, division
import atexit
import numbers
from string import Template
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import pycuda.elementwise as elementwise
import pycuda.reduction as reduction
import pycuda.scan as scan
import pycuda.tools as tools
from pycuda.tools import context_dependent_memoize, dtype_to_ctype
from pycuda.compiler import SourceModule
from pytools import memoize
import numpy as np
from . import cuda
from . import cublas
import sys
if sys.version_info < (3,):
range = xrange
try:
from . import cula
_has_cula = True
except (ImportError, OSError):
_has_cula = False
try:
from . import cusolver
_has_cusolver = True
except (ImportError, OSError):
_has_cusolver = False
try:
from . import magma
_has_magma = True
except (ImportError, OSError):
_has_magma = False
isdoubletype = lambda x : True if x == np.float64 or \
x == np.complex128 else False
isdoubletype.__doc__ = """
Check whether a type has double precision.
Parameters
----------
t : numpy float type
Type to test.
Returns
-------
result : bool
Result.
"""
iscomplextype = lambda x : True if x == np.complex64 or \
x == np.complex128 else False
iscomplextype.__doc__ = """
Check whether a type is complex.
Parameters
----------
t : numpy float type
Type to test.
Returns
-------
result : bool
Result.
"""
def init_device(n=0):
"""
Initialize a GPU device.
Initialize a specified GPU device rather than the default device
found by `pycuda.autoinit`.
Parameters
----------
n : int
Device number.
Returns
-------
dev : pycuda.driver.Device
Initialized device.
"""
drv.init()
dev = drv.Device(n)
return dev
def init_context(dev):
"""
Create a context that will be cleaned up properly.
Create a context on the specified device and register its pop()
method with atexit.
Parameters
----------
dev : pycuda.driver.Device
GPU device.
Returns
-------
ctx : pycuda.driver.Context
Created context.
"""
ctx = dev.make_context()
atexit.register(ctx.pop)
return ctx
def done_context(ctx):
"""
Detach from a context cleanly.
Detach from a context and remove its pop() from atexit.
Parameters
----------
ctx : pycuda.driver.Context
Context from which to detach.
"""
for i in range(len(atexit._exithandlers)):
if atexit._exithandlers[i][0] == ctx.pop:
del atexit._exithandlers[i]
break
ctx.detach()
global _global_cublas_handle
_global_cublas_handle = None
global _global_cusolver_handle
_global_cusolver_handle = None
global _global_cublas_allocator
_global_cublas_allocator = None
def init(allocator=drv.mem_alloc):
"""
Initialize libraries used by scikit-cuda.
Initialize the CUBLAS, CULA, CUSOLVER, and MAGMA libraries used by
high-level functions provided by scikit-cuda.
Parameters
----------
    allocator : callable, optional
        An allocator used internally by some of the high-level functions.
Notes
-----
This function does not initialize PyCUDA; it uses whatever device
and context were initialized in the current host thread.
"""
# CUBLAS uses whatever device is being used by the host thread:
global _global_cublas_handle, _global_cublas_allocator
if not _global_cublas_handle:
from . import cublas # nest to avoid requiring cublas e.g. for FFT
_global_cublas_handle = cublas.cublasCreate()
if _global_cublas_allocator is None:
_global_cublas_allocator = allocator
# Initializing MAGMA after CUSOLVER causes some functions in the latter to
# fail with internal errors:
if _has_magma:
magma.magma_init()
global _global_cusolver_handle
if not _global_cusolver_handle:
from . import cusolver
_global_cusolver_handle = cusolver.cusolverDnCreate()
# culaSelectDevice() need not (and, in fact, cannot) be called
# here because the host thread has already been bound to a GPU
# device:
if _has_cula:
cula.culaInitialize()
def shutdown():
"""
Shutdown libraries used by scikit-cuda.
Shutdown the CUBLAS, CULA, CUSOLVER, and MAGMA libraries used by
    high-level functions provided by scikit-cuda.
Notes
-----
This function does not shutdown PyCUDA.
"""
global _global_cublas_handle
if _global_cublas_handle:
from . import cublas # nest to avoid requiring cublas e.g. for FFT
cublas.cublasDestroy(_global_cublas_handle)
_global_cublas_handle = None
global _global_cusolver_handle
if _global_cusolver_handle:
from . import cusolver
cusolver.cusolverDnDestroy(_global_cusolver_handle)
_global_cusolver_handle = None
if _has_magma:
magma.magma_finalize()
if _has_cula:
cula.culaShutdown()
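
# Illustrative sketch (not part of the original module): the usual
# initialization order. PyCUDA must create a device/context first (here via
# pycuda.autoinit); init() then sets up the CUBLAS/CUSOLVER (and, if
# available, CULA/MAGMA) handles used by the high-level routines, and
# shutdown() releases them.
def _init_shutdown_example():
    import pycuda.autoinit  # noqa: F401 -- creates a context on the default device
    init()
    try:
        pass  # ...call high-level scikit-cuda routines here...
    finally:
        shutdown()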
def get_compute_capability(dev):
"""
Get the compute capability of the specified device.
Retrieve the compute capability of the specified CUDA device and
return it as a floating point value.
Parameters
----------
d : pycuda.driver.Device
Device object to examine.
Returns
-------
c : float
Compute capability.
"""
    return float('.'.join([str(i) for i in dev.compute_capability()]))
def get_current_device():
"""
Get the device in use by the current context.
Returns
-------
d : pycuda.driver.Device
Device in use by current context.
"""
return drv.Device(cuda.cudaGetDevice())
@memoize
def get_dev_attrs(dev):
"""
Get select CUDA device attributes.
Retrieve select attributes of the specified CUDA device that
relate to maximum thread block and grid sizes.
Parameters
----------
d : pycuda.driver.Device
Device object to examine.
Returns
-------
attrs : list
List containing [MAX_THREADS_PER_BLOCK,
(MAX_BLOCK_DIM_X, MAX_BLOCK_DIM_Y, MAX_BLOCK_DIM_Z),
(MAX_GRID_DIM_X, MAX_GRID_DIM_Y, MAX_GRID_DIM_Z)]
"""
attrs = dev.get_attributes()
return [attrs[drv.device_attribute.MAX_THREADS_PER_BLOCK],
(attrs[drv.device_attribute.MAX_BLOCK_DIM_X],
attrs[drv.device_attribute.MAX_BLOCK_DIM_Y],
attrs[drv.device_attribute.MAX_BLOCK_DIM_Z]),
(attrs[drv.device_attribute.MAX_GRID_DIM_X],
attrs[drv.device_attribute.MAX_GRID_DIM_Y],
attrs[drv.device_attribute.MAX_GRID_DIM_Z])]
iceil = lambda n: int(np.ceil(n))
@memoize
def select_block_grid_sizes(dev, data_shape, threads_per_block=None):
"""
Determine CUDA block and grid dimensions given device constraints.
Determine the CUDA block and grid dimensions allowed by a GPU
device that are sufficient for processing every element of an
array in a separate thread.
Parameters
----------
d : pycuda.driver.Device
Device object to be used.
data_shape : tuple
        Shape of input data array. May have any number of dimensions.
threads_per_block : int, optional
Number of threads to execute in each block. If this is None,
the maximum number of threads per block allowed by device `d`
is used.
Returns
-------
block_dim : tuple
X, Y, and Z dimensions of minimal required thread block.
grid_dim : tuple
X and Y dimensions of minimal required block grid.
Notes
-----
    Using the scheme in this function, all of the threads in the grid can be enumerated
    as `i = blockIdx.y*blockDim.x*gridDim.x+
    blockIdx.x*blockDim.x+threadIdx.x`,
    where `blockDim.x` is the returned number of threads per block and
    `gridDim.x` is the returned number of blocks along the grid's x dimension.
For 2D shapes, the subscripts of the element `data[a, b]` where `data.shape == (A, B)`
can be computed as
`a = i/B`
`b = mod(i,B)`.
For 3D shapes, the subscripts of the element `data[a, b, c]` where
`data.shape == (A, B, C)` can be computed as
`a = i/(B*C)`
`b = mod(i, B*C)/C`
`c = mod(mod(i, B*C), C)`.
For 4D shapes, the subscripts of the element `data[a, b, c, d]`
where `data.shape == (A, B, C, D)` can be computed as
`a = i/(B*C*D)`
`b = mod(i, B*C*D)/(C*D)`
    `c = mod(mod(i, B*C*D), C*D)/D`
    `d = mod(mod(mod(i, B*C*D), C*D), D)`
It is advisable that the number of threads per block be a multiple
of the warp size to fully utilize a device's computing resources.
"""
# Sanity checks:
if np.isscalar(data_shape):
data_shape = (data_shape,)
    # Number of elements to process; we need to cast the result of
    # np.prod to a Python int to prevent PyCUDA's kernel execution
    # framework from getting confused when it is passed a numpy integer:
N = int(np.prod(data_shape))
# Get device constraints:
max_threads_per_block, max_block_dim, max_grid_dim = get_dev_attrs(dev)
if threads_per_block is not None:
if threads_per_block > max_threads_per_block:
raise ValueError('threads per block exceeds device maximum')
else:
max_threads_per_block = threads_per_block
# Actual number of thread blocks needed:
blocks_needed = iceil(N/float(max_threads_per_block))
if blocks_needed <= max_grid_dim[0]:
return (max_threads_per_block, 1, 1), (blocks_needed, 1, 1)
elif blocks_needed > max_grid_dim[0] and \
blocks_needed <= max_grid_dim[0]*max_grid_dim[1]:
return (max_threads_per_block, 1, 1), \
(max_grid_dim[0], iceil(blocks_needed/float(max_grid_dim[0])), 1)
elif blocks_needed > max_grid_dim[0]*max_grid_dim[1] and \
blocks_needed <= max_grid_dim[0]*max_grid_dim[1]*max_grid_dim[2]:
return (max_threads_per_block, 1, 1), \
(max_grid_dim[0], max_grid_dim[1],
iceil(blocks_needed/float(max_grid_dim[0]*max_grid_dim[1])))
else:
raise ValueError('array size too large')
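
# Illustrative sketch (not part of the original module): feeding the block and
# grid dimensions computed above into a kernel launch, using the linear thread
# index formula from the Notes (the formula covers grids that use only the x
# and y dimensions). Assumes an initialized CUDA context.
def _block_grid_example(shape=(1000, 50)):
    dev = get_current_device()
    block_dim, grid_dim = select_block_grid_sizes(dev, shape)
    mod = SourceModule("""
    __global__ void fill_ones(float *x, unsigned int N) {
        unsigned int i = blockIdx.y*blockDim.x*gridDim.x +
                         blockIdx.x*blockDim.x + threadIdx.x;
        if (i < N)
            x[i] = 1.0f;
    }
    """)
    fill_ones = mod.get_function('fill_ones')
    x_gpu = gpuarray.zeros(shape, np.float32)
    fill_ones(x_gpu, np.uint32(x_gpu.size),
              block=block_dim, grid=grid_dim)
    return np.allclose(x_gpu.get(), 1.0)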
def zeros(shape, dtype, order='C', allocator=drv.mem_alloc):
"""
Return an array of the given shape and dtype filled with zeros.
Parameters
----------
shape : tuple
Array shape.
dtype : data-type
Data type for the array.
order : {'C', 'F'}, optional
Create array using row-major or column-major format.
allocator : callable, optional
Returns an object that represents the memory allocated for
the requested array.
Returns
-------
out : pycuda.gpuarray.GPUArray
Array of zeros with the given shape, dtype, and order.
Notes
-----
This function exists to work around the following numpy bug that
prevents pycuda.gpuarray.zeros() from working properly with
complex types in pycuda 2011.1.2:
http://projects.scipy.org/numpy/ticket/1898
"""
out = gpuarray.GPUArray(shape, dtype, allocator, order=order)
z = np.zeros((), dtype)
out.fill(z)
return out
def zeros_like(a):
"""
Return an array of zeros with the same shape and type as a given
array.
Parameters
----------
a : array_like
The shape and data type of `a` determine the corresponding
attributes of the returned array.
Returns
-------
out : pycuda.gpuarray.GPUArray
Array of zeros with the shape, dtype, and strides of `a`.
"""
out = gpuarray.GPUArray(a.shape, a.dtype, drv.mem_alloc,
strides=a.strides)
z = np.zeros((), a.dtype)
out.fill(z)
return out
def ones(shape, dtype, order='C', allocator=drv.mem_alloc):
"""
Return an array of the given shape and dtype filled with ones.
Parameters
----------
shape : tuple
Array shape.
dtype : data-type
Data type for the array.
order : {'C', 'F'}, optional
Create array using row-major or column-major format.
allocator : callable, optional
Returns an object that represents the memory allocated for
the requested array.
Returns
-------
out : pycuda.gpuarray.GPUArray
Array of ones with the given shape, dtype, and order.
"""
out = gpuarray.GPUArray(shape, dtype, allocator, order=order)
o = np.ones((), dtype)
out.fill(o)
return out
def ones_like(a):
"""
Return an array of ones with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data type of `a` determine the corresponding
attributes of the returned array.
Returns
-------
out : pycuda.gpuarray.GPUArray
        Array of ones with the shape, dtype, and strides of `a`.
"""
out = gpuarray.GPUArray(a.shape, a.dtype,
a.allocator, strides=a.strides)
o = np.ones((), a.dtype)
out.fill(o)
return out
def inf(shape, dtype, order='C', allocator=drv.mem_alloc):
"""
Return an array of the given shape and dtype filled with infs.
Parameters
----------
shape : tuple
Array shape.
dtype : data-type
Data type for the array.
order : {'C', 'F'}, optional
Create array using row-major or column-major format.
allocator : callable, optional
Returns an object that represents the memory allocated for
the requested array.
Returns
-------
out : pycuda.gpuarray.GPUArray
Array of infs with the given shape, dtype, and order.
"""
out = gpuarray.GPUArray(shape, dtype, allocator, order=order)
i = np.array(np.inf, dtype)
out.fill(i)
return out
def maxabs(x_gpu):
"""
Get maximum absolute value.
Find maximum absolute value in the specified array.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Input array.
Returns
-------
m_gpu : pycuda.gpuarray.GPUArray
Array containing maximum absolute value in `x_gpu`.
Examples
--------
>>> import pycuda.autoinit
>>> import pycuda.gpuarray as gpuarray
    >>> import numpy as np
    >>> import misc
    >>> x_gpu = gpuarray.to_gpu(np.array([-1, 2, -3], np.float32))
>>> m_gpu = misc.maxabs(x_gpu)
>>> np.allclose(m_gpu.get(), 3.0)
True
"""
try:
func = maxabs.cache[x_gpu.dtype]
except KeyError:
ctype = tools.dtype_to_ctype(x_gpu.dtype)
use_double = int(x_gpu.dtype in [np.float64, np.complex128])
ret_type = np.float64 if use_double else np.float32
func = reduction.ReductionKernel(ret_type, neutral="0",
reduce_expr="max(a,b)",
map_expr="abs(x[i])",
arguments="{ctype} *x".format(ctype=ctype))
maxabs.cache[x_gpu.dtype] = func
return func(x_gpu)
maxabs.cache = {}
def cumsum(x_gpu):
"""
Cumulative sum.
Return the cumulative sum of the elements in the specified array.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Input array.
Returns
-------
c_gpu : pycuda.gpuarray.GPUArray
Output array containing cumulative sum of `x_gpu`.
Notes
-----
Higher dimensional arrays are implicitly flattened row-wise by this function.
Examples
--------
>>> import pycuda.autoinit
>>> import pycuda.gpuarray as gpuarray
    >>> import numpy as np
    >>> import misc
    >>> x_gpu = gpuarray.to_gpu(np.random.rand(5).astype(np.float32))
>>> c_gpu = misc.cumsum(x_gpu)
>>> np.allclose(c_gpu.get(), np.cumsum(x_gpu.get()))
True
"""
try:
func = cumsum.cache[x_gpu.dtype]
except KeyError:
func = scan.InclusiveScanKernel(x_gpu.dtype, 'a+b',
preamble='#include <pycuda-complex.hpp>')
cumsum.cache[x_gpu.dtype] = func
return func(x_gpu)
cumsum.cache = {}
def diff(x_gpu):
"""
Calculate the discrete difference.
Calculates the first order difference between the successive
entries of a vector.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Input vector.
Returns
-------
y_gpu : pycuda.gpuarray.GPUArray
Discrete difference.
Examples
--------
>>> import pycuda.driver as drv
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import misc
>>> x = np.asarray(np.random.rand(5), np.float32)
>>> x_gpu = gpuarray.to_gpu(x)
>>> y_gpu = misc.diff(x_gpu)
>>> np.allclose(np.diff(x), y_gpu.get())
True
"""
y_gpu = gpuarray.empty(len(x_gpu)-1, x_gpu.dtype)
try:
func = diff.cache[x_gpu.dtype]
except KeyError:
ctype = tools.dtype_to_ctype(x_gpu.dtype)
func = elementwise.ElementwiseKernel("{ctype} *a, {ctype} *b".format(ctype=ctype),
"b[i] = a[i+1]-a[i]")
diff.cache[x_gpu.dtype] = func
func(x_gpu, y_gpu)
return y_gpu
diff.cache = {}
# List of available numerical types provided by numpy:
num_types = [np.typeDict[t] for t in \
np.typecodes['AllInteger']+np.typecodes['AllFloat']]
# Numbers of bytes occupied by each numerical type:
num_nbytes = dict((np.dtype(t),t(1).nbytes) for t in num_types)
def set_realloc(x_gpu, data):
"""
Transfer data into a GPUArray instance.
Copies the contents of a numpy array into a GPUArray instance. If
the array has a different type or dimensions than the instance,
the GPU memory used by the instance is reallocated and the
instance updated appropriately.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
GPUArray instance to modify.
data : numpy.ndarray
Array of data to transfer to the GPU.
Examples
--------
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import misc
>>> x = np.asarray(np.random.rand(5), np.float32)
>>> x_gpu = gpuarray.to_gpu(x)
>>> x = np.asarray(np.random.rand(10, 1), np.float64)
    >>> misc.set_realloc(x_gpu, x)
>>> np.allclose(x, x_gpu.get())
True
"""
# Only reallocate if absolutely necessary:
if x_gpu.shape != data.shape or x_gpu.size != data.size or \
x_gpu.strides != data.strides or x_gpu.dtype != data.dtype:
# Free old memory:
x_gpu.gpudata.free()
# Allocate new memory:
nbytes = num_nbytes[data.dtype]
x_gpu.gpudata = drv.mem_alloc(nbytes*data.size)
# Set array attributes:
x_gpu.shape = data.shape
x_gpu.size = data.size
x_gpu.strides = data.strides
x_gpu.dtype = data.dtype
# Update the GPU memory:
x_gpu.set(data)
def get_by_index(src_gpu, ind):
"""
Get values in a GPUArray by index.
Parameters
----------
src_gpu : pycuda.gpuarray.GPUArray
GPUArray instance from which to extract values.
ind : pycuda.gpuarray.GPUArray or numpy.ndarray
Array of element indices to set. Must have an integer dtype.
Returns
-------
res_gpu : pycuda.gpuarray.GPUArray
GPUArray with length of `ind` and dtype of `src_gpu` containing
selected values.
Examples
--------
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import misc
>>> src = np.random.rand(5).astype(np.float32)
>>> src_gpu = gpuarray.to_gpu(src)
>>> ind = gpuarray.to_gpu(np.array([0, 2, 4]))
>>> res_gpu = misc.get_by_index(src_gpu, ind)
>>> np.allclose(res_gpu.get(), src[[0, 2, 4]])
True
Notes
-----
Only supports 1D index arrays.
    May not be efficient for certain index patterns because of the
    inability to coalesce memory operations.
"""
# Only support 1D index arrays:
assert len(np.shape(ind)) == 1
assert issubclass(ind.dtype.type, numbers.Integral)
N = len(ind)
if not isinstance(ind, gpuarray.GPUArray):
ind = gpuarray.to_gpu(ind)
dest_gpu = gpuarray.empty(N, dtype=src_gpu.dtype)
# Manually handle empty index array because it will cause the kernel to
# fail if processed:
if N == 0:
return dest_gpu
try:
func = get_by_index.cache[(src_gpu.dtype, ind.dtype)]
except KeyError:
data_ctype = tools.dtype_to_ctype(src_gpu.dtype)
ind_ctype = tools.dtype_to_ctype(ind.dtype)
v = "{data_ctype} *dest, {ind_ctype} *ind, {data_ctype} *src".format(data_ctype=data_ctype, ind_ctype=ind_ctype)
func = elementwise.ElementwiseKernel(v, "dest[i] = src[ind[i]]")
get_by_index.cache[(src_gpu.dtype, ind.dtype)] = func
func(dest_gpu, ind, src_gpu, range=slice(0, N, 1))
return dest_gpu
get_by_index.cache = {}
def set_by_index(dest_gpu, ind, src_gpu, ind_which='dest'):
"""
Set values in a GPUArray by index.
Parameters
----------
dest_gpu : pycuda.gpuarray.GPUArray
GPUArray instance to modify.
ind : pycuda.gpuarray.GPUArray or numpy.ndarray
1D array of element indices to set. Must have an integer dtype.
src_gpu : pycuda.gpuarray.GPUArray
GPUArray instance from which to set values.
ind_which : str
If set to 'dest', set the elements in `dest_gpu` with indices `ind`
to the successive values in `src_gpu`; the lengths of `ind` and
`src_gpu` must be equal. If set to 'src', set the
successive values in `dest_gpu` to the values in `src_gpu` with indices
`ind`; the lengths of `ind` and `dest_gpu` must be equal.
Examples
--------
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import misc
>>> dest_gpu = gpuarray.to_gpu(np.arange(5, dtype=np.float32))
>>> ind = gpuarray.to_gpu(np.array([0, 2, 4]))
>>> src_gpu = gpuarray.to_gpu(np.array([1, 1, 1], dtype=np.float32))
>>> misc.set_by_index(dest_gpu, ind, src_gpu, 'dest')
>>> np.allclose(dest_gpu.get(), np.array([1, 1, 1, 3, 1], dtype=np.float32))
True
>>> dest_gpu = gpuarray.to_gpu(np.zeros(3, dtype=np.float32))
>>> ind = gpuarray.to_gpu(np.array([0, 2, 4]))
>>> src_gpu = gpuarray.to_gpu(np.arange(5, dtype=np.float32))
    >>> misc.set_by_index(dest_gpu, ind, src_gpu, 'src')
>>> np.allclose(dest_gpu.get(), np.array([0, 2, 4], dtype=np.float32))
True
Notes
-----
Only supports 1D index arrays.
    May not be efficient for certain index patterns because of the
    inability to coalesce memory operations.
"""
# Only support 1D index arrays:
assert len(np.shape(ind)) == 1
assert dest_gpu.dtype == src_gpu.dtype
assert issubclass(ind.dtype.type, numbers.Integral)
N = len(ind)
# Manually handle empty index array because it will cause the kernel to
# fail if processed:
if N == 0:
return
if ind_which == 'dest':
assert N == len(src_gpu)
elif ind_which == 'src':
assert N == len(dest_gpu)
else:
raise ValueError('invalid value for `ind_which`')
if not isinstance(ind, gpuarray.GPUArray):
ind = gpuarray.to_gpu(ind)
try:
func = set_by_index.cache[(dest_gpu.dtype, ind.dtype, ind_which)]
except KeyError:
data_ctype = tools.dtype_to_ctype(dest_gpu.dtype)
ind_ctype = tools.dtype_to_ctype(ind.dtype)
v = "{data_ctype} *dest, {ind_ctype} *ind, {data_ctype} *src".format(data_ctype=data_ctype, ind_ctype=ind_ctype)
if ind_which == 'dest':
func = elementwise.ElementwiseKernel(v, "dest[ind[i]] = src[i]")
else:
func = elementwise.ElementwiseKernel(v, "dest[i] = src[ind[i]]")
set_by_index.cache[(dest_gpu.dtype, ind.dtype, ind_which)] = func
func(dest_gpu, ind, src_gpu, range=slice(0, N, 1))
set_by_index.cache = {}
@context_dependent_memoize
def _get_binaryop_vecmat_kernel(dtype, binary_op):
template = Template("""
#include <pycuda-complex.hpp>
__global__ void opColVecToMat(const ${type} *mat, const ${type} *vec, ${type} *out,
const int n, const int m){
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tidx = blockIdx.x * blockDim.x + threadIdx.x;
const int tidy = blockIdx.y * blockDim.y + threadIdx.y;
extern __shared__ ${type} shared_vec[];
if ((ty == 0) & (tidx < n))
shared_vec[tx] = vec[tidx];
__syncthreads();
if ((tidy < m) & (tidx < n)) {
out[tidx*m+tidy] = mat[tidx*m+tidy] ${binary_op} shared_vec[tx];
}
}
__global__ void opRowVecToMat(const ${type}* mat, const ${type}* vec, ${type}* out,
const int n, const int m){
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tidx = blockIdx.x * blockDim.x + threadIdx.x;
const int tidy = blockIdx.y * blockDim.y + threadIdx.y;
extern __shared__ ${type} shared_vec[];
if ((tx == 0) & (tidy < m))
shared_vec[ty] = vec[tidy];
__syncthreads();
if ((tidy < m) & (tidx < n)) {
out[tidx*m+tidy] = mat[tidx*m+tidy] ${binary_op} shared_vec[ty];
}
}""")
cache_dir=None
ctype = dtype_to_ctype(dtype)
tmpl = template.substitute(type=ctype, binary_op=binary_op)
mod = SourceModule(tmpl)
add_row_vec_kernel = mod.get_function('opRowVecToMat')
add_col_vec_kernel = mod.get_function('opColVecToMat')
return add_row_vec_kernel, add_col_vec_kernel
def binaryop_matvec(binary_op, x_gpu, a_gpu, axis=None, out=None, stream=None):
"""
Applies a binary operation to a vector and each column/row of a matrix.
The numpy broadcasting rules apply so this would yield the same result
as `x_gpu.get()` op `a_gpu.get()` in host-code.
Parameters
----------
    binary_op : string, ['+', '-', '/', '*', '%']
        The operator to apply.
x_gpu : pycuda.gpuarray.GPUArray
Matrix to which to add the vector.
a_gpu : pycuda.gpuarray.GPUArray
Vector to add to `x_gpu`.
axis : int (optional)
The axis onto which the vector is added. By default this is
determined automatically by using the first axis with the correct
dimensionality.
out : pycuda.gpuarray.GPUArray (optional)
Optional destination matrix.
stream : pycuda.driver.Stream (optional)
Optional Stream in which to perform this calculation.
Returns
-------
out : pycuda.gpuarray.GPUArray
        Result of applying `binary_op` elementwise to `x_gpu` and `a_gpu`.
"""
if axis is None:
if len(a_gpu.shape) == 1:
if a_gpu.shape[0] == x_gpu.shape[1]:
axis = 1
else:
raise ValueError(
"operands could not be broadcast together "
"with shapes %s %s" % (x_gpu.shape, a_gpu.shape))
elif a_gpu.shape[1] == x_gpu.shape[1]: # numpy matches inner axes first
axis = 1
elif a_gpu.shape[0] == x_gpu.shape[0]:
axis = 0
else:
raise ValueError(
"operands could not be broadcast together "
"with shapes %s %s" % (x_gpu.shape, a_gpu.shape))
else:
if axis < 0:
axis += 2
if axis > 1:
raise ValueError('invalid axis')
if binary_op not in ['+', '-', '/', '*', '%']:
raise ValueError('invalid operator')
row_kernel, col_kernel = _get_binaryop_vecmat_kernel(x_gpu.dtype, binary_op)
n, m = np.int32(x_gpu.shape[0]), np.int32(x_gpu.shape[1])
block = (24, 24, 1)
gridx = int(n // block[0] + 1 * (n % block[0] != 0))
gridy = int(m // block[1] + 1 * (m % block[1] != 0))
grid = (gridx, gridy, 1)
if out is None:
alloc = _global_cublas_allocator
out = gpuarray.empty_like(x_gpu)
else:
assert out.dtype == x_gpu.dtype
assert out.shape == x_gpu.shape
if x_gpu.flags.c_contiguous:
if axis == 0:
col_kernel(x_gpu, a_gpu, out, n, m,
block=block, grid=grid, stream=stream,
shared=24*x_gpu.dtype.itemsize)
elif axis == 1:
row_kernel(x_gpu, a_gpu, out, n, m,
block=block, grid=grid, stream=stream,
shared=24*x_gpu.dtype.itemsize)
else:
if axis == 0:
row_kernel(x_gpu, a_gpu, out, m, n,
block=block, grid=grid, stream=stream,
shared=24*x_gpu.dtype.itemsize)
elif axis == 1:
col_kernel(x_gpu, a_gpu, out, m, n,
block=block, grid=grid, stream=stream,
shared=24*x_gpu.dtype.itemsize)
return out
import operator
def binaryop_2d(c_op, py_op, commutative, x_gpu, y_gpu):
if x_gpu.flags.c_contiguous != y_gpu.flags.c_contiguous:
raise ValueError('unsupported combination of input order')
if x_gpu.shape == y_gpu.shape:
return py_op(x_gpu, y_gpu)
elif x_gpu.size == 1:
return py_op(x_gpu.get().reshape(()), y_gpu)
elif y_gpu.size == 1:
return py_op(x_gpu, y_gpu.get().reshape(()))
if len(x_gpu.shape) == 2:
m, n = x_gpu.shape
if y_gpu.shape == (n,):
return binaryop_matvec(c_op, x_gpu, y_gpu, axis=1)
elif y_gpu.shape == (1, n):
return binaryop_matvec(c_op, x_gpu, y_gpu[0], axis=1)
elif y_gpu.shape == (m, 1):
return binaryop_matvec(c_op, x_gpu, y_gpu.ravel(), axis=0)
if len(y_gpu.shape) == 2 and commutative:
m, n = y_gpu.shape
if x_gpu.shape == (n,):
return binaryop_matvec(c_op, y_gpu, x_gpu, axis=1)
elif x_gpu.shape == (1, n):
return binaryop_matvec(c_op, y_gpu, x_gpu[0], axis=1)
elif x_gpu.shape == (m, 1):
return binaryop_matvec(c_op, y_gpu, x_gpu.ravel(), axis=0)
raise TypeError("unsupported combination of shapes")
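# Note on the 'commutative' flag used by binaryop_2d: when the vector operand is on
# the left (e.g. vector + matrix), the only way to reuse binaryop_matvec is to swap
# the operands, which is valid for '+' and '*' but not for '-' or '/'. subtract() and
# divide() below therefore pass commutative=False, so left-vector shapes fall through
# to the TypeError above.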
def add(x_gpu, y_gpu):
"""
Adds two scalars, vectors, or matrices.
The numpy broadcasting rules apply so this would yield the same result
as `x_gpu.get()` + `y_gpu.get()` in host code.
Parameters
----------
x_gpu, y_gpu : pycuda.gpuarray.GPUArray
The arrays to be added.
Returns
-------
out : pycuda.gpuarray.GPUArray
Equivalent to `x_gpu.get()` + `y_gpu.get()`.
Notes
-----
The `out` and `stream` options are not supported because `GPUArray.__add__`
doesn't provide them.
"""
return binaryop_2d("+", operator.add, True, x_gpu, y_gpu)
def subtract(x_gpu, y_gpu):
"""
Subtracts two scalars, vectors, or matrices with broadcasting.
The numpy broadcasting rules apply so this would yield the same result
as `x_gpu.get()` - `y_gpu.get()` in host code.
Parameters
----------
x_gpu, y_gpu : pycuda.gpuarray.GPUArray
The arrays to be subtracted.
Returns
-------
out : pycuda.gpuarray.GPUArray
Equivalent to `x_gpu.get()` - `y_gpu.get()`.
Notes
-----
The `out` and `stream` options are not supported because `GPUArray.__sub__`
doesn't provide them.
"""
return binaryop_2d("-", operator.sub, False, x_gpu, y_gpu)
def multiply(x_gpu, y_gpu):
"""
Multiplies two scalars, vectors, or matrices with broadcasting.
The numpy broadcasting rules apply so this would yield the same result
as `x_gpu.get()` * `y_gpu.get()` in host code.
Parameters
----------
x_gpu, y_gpu : pycuda.gpuarray.GPUArray
The arrays to be multiplied.
Returns
-------
out : pycuda.gpuarray.GPUArray
Equivalent to `x_gpu.get()` * `y_gpu.get()`.
Notes
-----
The `out` and `stream` options are not supported because `GPUArray.__mul__`
doesn't provide them.
"""
return binaryop_2d("*", operator.mul, True, x_gpu, y_gpu)
def divide(x_gpu, y_gpu):
"""
Divides two scalars, vectors, or matrices with broadcasting.
The numpy broadcasting rules apply so this would yield the same result
as `x_gpu.get()` / `y_gpu.get()` in host code.
Parameters
----------
x_gpu, y_gpu : pycuda.gpuarray.GPUArray
The arrays to be divided.
Returns
-------
out : pycuda.gpuarray.GPUArray
Equivalent to `x_gpu.get()` / `y_gpu.get()`.
Notes
-----
The `out` and `stream` options are not supported because `GPUArray.__div__`
doesn't provide them.
"""
return binaryop_2d("/", operator.truediv, False, x_gpu, y_gpu)
def add_matvec(x_gpu, a_gpu, axis=None, out=None, stream=None):
"""
Adds a vector to each column/row of the matrix.
The numpy broadcasting rules apply so this would yield the same result
as `x_gpu.get()` + `a_gpu.get()` in host-code.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Matrix to which to add the vector.
a_gpu : pycuda.gpuarray.GPUArray
Vector to add to `x_gpu`.
axis : int (optional)
The axis onto which the vector is added. By default this is
determined automatically by using the first axis with the correct
dimensionality.
out : pycuda.gpuarray.GPUArray (optional)
Optional destination matrix.
stream : pycuda.driver.Stream (optional)
Optional Stream in which to perform this calculation.
Returns
-------
out : pycuda.gpuarray.GPUArray
Result of `x_gpu` + `a_gpu`
"""
return binaryop_matvec('+', x_gpu, a_gpu, axis, out, stream)
def div_matvec(x_gpu, a_gpu, axis=None, out=None, stream=None):
"""
Divides each column/row of a matrix by a vector.
The numpy broadcasting rules apply so this would yield the same result
as `x_gpu.get()` / `a_gpu.get()` in host-code.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Matrix to divide by the vector `a_gpu`.
a_gpu : pycuda.gpuarray.GPUArray
The matrix `x_gpu` will be divided by this vector.
axis : int (optional)
The axis on which division occurs. By default this is
determined automatically by using the first axis with the correct
dimensionality.
out : pycuda.gpuarray.GPUArray (optional)
Optional destination matrix.
stream : pycuda.driver.Stream (optional)
Optional Stream in which to perform this calculation.
Returns
-------
out : pycuda.gpuarray.GPUArray
result of `x_gpu` / `a_gpu`
"""
return binaryop_matvec('/', x_gpu, a_gpu, axis, out, stream)
def mult_matvec(x_gpu, a_gpu, axis=None, out=None, stream=None):
"""
Multiplies a vector elementwise with each column/row of the matrix.
The numpy broadcasting rules apply so this would yield the same result
as `x_gpu.get()` * `a_gpu.get()` in host-code.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Matrix to multiply by the vector `a_gpu`.
a_gpu : pycuda.gpuarray.GPUArray
The matrix `x_gpu` will be multiplied by this vector.
axis : int (optional)
The axis on which multiplication occurs. By default this is
determined automatically by using the first axis with the correct
dimensionality.
out : pycuda.gpuarray.GPUArray (optional)
Optional destination matrix.
stream : pycuda.driver.Stream (optional)
Optional Stream in which to perform this calculation.
Returns
-------
out : pycuda.gpuarray.GPUArray
result of `x_gpu` * `a_gpu`
"""
return binaryop_matvec('*', x_gpu, a_gpu, axis, out, stream)
def _sum_axis(x_gpu, axis=None, out=None, calc_mean=False, ddof=0,
keepdims=False):
global _global_cublas_allocator
assert isinstance(ddof, numbers.Integral)
if axis is None or len(x_gpu.shape) <= 1:
out_shape = (1,)*len(x_gpu.shape) if keepdims else ()
        if not calc_mean:
return gpuarray.sum(x_gpu).reshape(out_shape)
else:
return gpuarray.sum(x_gpu).reshape(out_shape) / (x_gpu.dtype.type(x_gpu.size-ddof))
if axis < 0:
axis += 2
if axis > 1:
raise ValueError('invalid axis')
if x_gpu.flags.c_contiguous:
n, m = x_gpu.shape[1], x_gpu.shape[0]
lda = x_gpu.shape[1]
trans = "n" if axis == 0 else "t"
sum_axis, out_axis = (m, n) if axis == 0 else (n, m)
else:
n, m = x_gpu.shape[0], x_gpu.shape[1]
lda = x_gpu.shape[0]
trans = "t" if axis == 0 else "n"
sum_axis, out_axis = (n, m) if axis == 0 else (m, n)
if calc_mean:
alpha = (1.0 / (sum_axis-ddof))
else:
alpha = 1.0
if (x_gpu.dtype == np.complex64):
gemv = cublas.cublasCgemv
elif (x_gpu.dtype == np.float32):
gemv = cublas.cublasSgemv
elif (x_gpu.dtype == np.complex128):
gemv = cublas.cublasZgemv
elif (x_gpu.dtype == np.float64):
gemv = cublas.cublasDgemv
alloc = _global_cublas_allocator
ons = ones((sum_axis, ), x_gpu.dtype, allocator=alloc)
if keepdims:
out_shape = (1, out_axis) if axis == 0 else (out_axis, 1)
else:
out_shape = (out_axis,)
if out is None:
out = gpuarray.empty(out_shape, x_gpu.dtype, alloc)
else:
assert out.dtype == x_gpu.dtype
assert out.size >= out_axis
gemv(_global_cublas_handle, trans, n, m,
alpha, x_gpu.gpudata, lda,
ons.gpudata, 1, 0.0, out.gpudata, 1)
return out
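# Implementation note for _sum_axis: the row/column reduction is phrased as a
# matrix-vector product with a vector of ones, sum_j A[i, j] = (A @ ones)[i], so the
# whole reduction is a single CUBLAS ?gemv call; passing alpha = 1/(N - ddof) turns
# that same call into a mean. Host-side equivalent for reference:
#
#   a = np.random.rand(3, 4)
#   np.allclose(a.sum(axis=1), a.dot(np.ones(4)))    # -> True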
def sum(x_gpu, axis=None, out=None, keepdims=False):
"""
Compute the sum along the specified axis.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Array containing numbers whose sum is desired.
axis : int (optional)
Axis along which the sums are computed. The default is to
compute the sum of the flattened array.
out : pycuda.gpuarray.GPUArray (optional)
Output array in which to place the result.
keepdims : bool (optional, default False)
If True, the axes which are reduced are left in the result as
dimensions with size one.
Returns
-------
out : pycuda.gpuarray.GPUArray
sum of elements, or sums of elements along the desired axis.
"""
return _sum_axis(x_gpu, axis, out=out, keepdims=keepdims)
def mean(x_gpu, axis=None, out=None, keepdims=False):
"""
Compute the arithmetic means along the specified axis.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Array containing numbers whose mean is desired.
axis : int (optional)
Axis along which the means are computed. The default is to
compute the mean of the flattened array.
out : pycuda.gpuarray.GPUArray (optional)
Output array in which to place the result.
keepdims : bool (optional, default False)
If True, the axes which are reduced are left in the result as
dimensions with size one.
Returns
-------
out : pycuda.gpuarray.GPUArray
mean of elements, or means of elements along the desired axis.
"""
return _sum_axis(x_gpu, axis, calc_mean=True, out=out, keepdims=keepdims)
def var(x_gpu, ddof=0, axis=None, stream=None, keepdims=False):
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by default,
otherwise over the specified axis.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Array containing numbers whose variance is desired.
ddof : int (optional)
"Delta Degrees of Freedom": the divisor used in computing the
variance is ``N - ddof``, where ``N`` is the number of elements.
Setting ``ddof = 1`` is equivalent to applying Bessel's
correction.
axis : int (optional)
Axis along which the variance are computed. The default is to
compute the variance of the flattened array.
stream : pycuda.driver.Stream (optional)
Optional CUDA stream in which to perform this calculation
keepdims : bool (optional, default False)
If True, the axes which are reduced are left in the result as
dimensions with size one.
Returns
-------
out : pycuda.gpuarray.GPUArray
variance of elements, or variances of elements along the desired axis.
"""
def _inplace_pow(x_gpu, p, stream):
func = elementwise.get_pow_kernel(x_gpu.dtype)
func.prepared_async_call(x_gpu._grid, x_gpu._block, stream,
p, x_gpu.gpudata, x_gpu.gpudata, x_gpu.mem_size)
if axis is None:
m = mean(x_gpu).get()
out = x_gpu - m
out **= 2
out = _sum_axis(out, axis=None, calc_mean=True,
ddof=ddof, out=None, keepdims=keepdims)
else:
if axis < 0:
axis += 2
m = mean(x_gpu, axis=axis)
out = add_matvec(x_gpu, -m, axis=1-axis, stream=stream)
_inplace_pow(out, 2, stream)
out = _sum_axis(out, axis=axis, calc_mean=True,
ddof=ddof, out=None, keepdims=keepdims)
return out
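# Implementation note for var(): this is the straightforward two-pass formula,
# mean((x - mean(x))**2) with an (N - ddof) divisor, not a single-pass (Welford-style)
# update. The axis=1-axis argument in the add_matvec call above broadcasts the mean
# vector back along the reduced axis before squaring. Host-side reference:
#
#   x = np.random.rand(5, 3)
#   np.allclose(x.var(axis=0), ((x - x.mean(axis=0)) ** 2).mean(axis=0))   # -> True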
def std(x_gpu, ddof=0, axis=None, stream=None, keepdims=False):
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation of the array elements, a measure of the
spread of a distribution. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Array containing numbers whose std is desired.
ddof : int (optional)
"Delta Degrees of Freedom": the divisor used in computing the
variance is ``N - ddof``, where ``N`` is the number of elements.
Setting ``ddof = 1`` is equivalent to applying Bessel's
correction.
axis : int (optional)
Axis along which the std are computed. The default is to
compute the std of the flattened array.
stream : pycuda.driver.Stream (optional)
Optional CUDA stream in which to perform this calculation
keepdims : bool (optional, default False)
If True, the axes which are reduced are left in the result as
dimensions with size one.
Returns
-------
out : pycuda.gpuarray.GPUArray or float
std of elements, or stds of elements along the desired axis.
"""
def _inplace_pow(x_gpu, p, stream):
func = elementwise.get_pow_kernel(x_gpu.dtype)
func.prepared_async_call(x_gpu._grid, x_gpu._block, stream,
p, x_gpu.gpudata, x_gpu.gpudata, x_gpu.mem_size)
if axis is None:
return var(x_gpu, ddof=ddof, stream=stream, keepdims=keepdims) ** 0.5
else:
out = var(x_gpu, ddof=ddof, axis=axis, stream=stream, keepdims=keepdims)
_inplace_pow(out, 0.5, stream)
return out
@context_dependent_memoize
def _get_minmax_kernel(dtype, min_or_max):
template = Template("""
#include <pycuda-complex.hpp>
__global__ void minmax_column_kernel(${type}* mat, ${type}* target,
unsigned int *idx_target,
unsigned int width,
unsigned int height) {
__shared__ ${type} max_vals[32];
__shared__ unsigned int max_idxs[32];
${type} cur_max = ${init_value};
unsigned int cur_idx = 0;
${type} val = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
val = mat[blockIdx.x + i * width];
if (val ${cmp_op} cur_max) {
cur_max = val;
cur_idx = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_idxs[threadIdx.x] = cur_idx;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = ${init_value};
cur_idx = 0;
for (unsigned int i = 0; i < 32; i++)
if (max_vals[i] ${cmp_op} cur_max) {
cur_max = max_vals[i];
cur_idx = max_idxs[i];
}
target[blockIdx.x] = cur_max;
idx_target[blockIdx.x] = cur_idx;
}
}
__global__ void minmax_row_kernel(${type}* mat, ${type}* target,
unsigned int* idx_target,
unsigned int width,
unsigned int height) {
__shared__ ${type} max_vals[32];
__shared__ unsigned int max_idxs[32];
${type} cur_max = ${init_value};
unsigned int cur_idx = 0;
${type} val = 0;
for (unsigned int i = threadIdx.x; i < width; i += 32) {
val = mat[blockIdx.x * width + i];
if (val ${cmp_op} cur_max) {
cur_max = val;
cur_idx = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_idxs[threadIdx.x] = cur_idx;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = ${init_value};
cur_idx = 0;
for (unsigned int i = 0; i < 32; i++)
if (max_vals[i] ${cmp_op} cur_max) {
cur_max = max_vals[i];
cur_idx = max_idxs[i];
}
target[blockIdx.x] = cur_max;
idx_target[blockIdx.x] = cur_idx;
}
}
""")
cache_dir=None
ctype = dtype_to_ctype(dtype)
if min_or_max=='max':
iv = str(np.finfo(dtype).min)
tmpl = template.substitute(type=ctype, cmp_op='>', init_value=iv)
elif min_or_max=='min':
iv = str(np.finfo(dtype).max)
tmpl = template.substitute(type=ctype, cmp_op='<', init_value=iv)
else:
raise ValueError('invalid argument')
mod = SourceModule(tmpl)
minmax_col_kernel = mod.get_function('minmax_column_kernel')
minmax_row_kernel = mod.get_function('minmax_row_kernel')
return minmax_col_kernel, minmax_row_kernel
def _minmax_impl(a_gpu, axis, min_or_max, stream=None, keepdims=False):
''' Returns both max and argmax (min/argmin) along an axis.'''
assert len(a_gpu.shape) < 3
if iscomplextype(a_gpu.dtype):
raise ValueError("Cannot compute min/max of complex values")
if axis is None or len(a_gpu.shape) <= 1: ## Note: PyCUDA doesn't have an overall argmax/argmin!
out_shape = (1,) * len(a_gpu.shape)
if min_or_max == 'max':
return gpuarray.max(a_gpu).reshape(out_shape), None
else:
return gpuarray.min(a_gpu).reshape(out_shape), None
else:
if axis < 0:
axis += 2
assert axis in (0, 1)
global _global_cublas_allocator
alloc = _global_cublas_allocator
n, m = a_gpu.shape if a_gpu.flags.c_contiguous else (a_gpu.shape[1], a_gpu.shape[0])
col_kernel, row_kernel = _get_minmax_kernel(a_gpu.dtype, min_or_max)
if (axis == 0 and a_gpu.flags.c_contiguous) or (axis == 1 and a_gpu.flags.f_contiguous):
if keepdims:
out_shape = (1, m) if axis == 0 else (m, 1)
else:
out_shape = (m,)
target = gpuarray.empty(out_shape, dtype=a_gpu.dtype, allocator=alloc)
idx = gpuarray.empty(out_shape, dtype=np.uint32, allocator=alloc)
col_kernel(a_gpu, target, idx, np.uint32(m), np.uint32(n),
block=(32, 1, 1), grid=(m, 1, 1), stream=stream)
else:
if keepdims:
out_shape = (1, n) if axis == 0 else (n, 1)
else:
out_shape = (n,)
            target = gpuarray.empty(out_shape, dtype=a_gpu.dtype, allocator=alloc)
idx = gpuarray.empty(out_shape, dtype=np.uint32, allocator=alloc)
row_kernel(a_gpu, target, idx, np.uint32(m), np.uint32(n),
block=(32, 1, 1), grid=(n, 1, 1), stream=stream)
return target, idx
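# Implementation note for _minmax_impl: each block of 32 threads reduces one column
# (or row). Every thread strides through its column keeping a running extremum and its
# index, the 32 partial results are written to shared memory, and thread 0 then folds
# them into the final value/index pair for that column; hence the grid has one block
# per output element and the per-block shared arrays have a fixed size of 32.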
def max(a_gpu, axis=None, keepdims=False):
'''
Return the maximum of an array or maximum along an axis.
Parameters
----------
a_gpu : pycuda.gpuarray.GPUArray
Input array
axis : int (optional)
Axis along which the maxima are computed. The default is to
compute the maximum of the flattened array.
keepdims : bool (optional, default False)
If True, the axes which are reduced are left in the result as
dimensions with size one.
Returns
-------
out : pycuda.gpuarray.GPUArray or float
maximum of elements, or maxima of elements along the desired axis.
'''
return _minmax_impl(a_gpu, axis, "max", keepdims=keepdims)[0]
def min(a_gpu, axis=None, keepdims=False):
'''
Return the minimum of an array or minimum along an axis.
Parameters
----------
a_gpu : pycuda.gpuarray.GPUArray
Input array
axis : int (optional)
Axis along which the minima are computed. The default is to
compute the minimum of the flattened array.
keepdims : bool (optional, default False)
If True, the axes which are reduced are left in the result as
dimensions with size one.
Returns
-------
out : pycuda.gpuarray.GPUArray or float
minimum of elements, or minima of elements along the desired axis.
'''
return _minmax_impl(a_gpu, axis, "min", keepdims=keepdims)[0]
def argmax(a_gpu, axis, keepdims=False):
'''
Indices of the maximum values along an axis.
Parameters
----------
a_gpu : pycuda.gpuarray.GPUArray
Input array
axis : int
Axis along which the maxima are computed.
keepdims : bool (optional, default False)
If True, the axes which are reduced are left in the result as
dimensions with size one.
Returns
-------
out : pycuda.gpuarray.GPUArray
Array of indices into the array.
'''
if axis is None:
raise NotImplementedError("Can't compute global argmax")
return _minmax_impl(a_gpu, axis, "max", keepdims=keepdims)[1]
def argmin(a_gpu, axis, keepdims=False):
'''
Indices of the minimum values along an axis.
Parameters
----------
a_gpu : pycuda.gpuarray.GPUArray
Input array
axis : int
Axis along which the minima are computed.
keepdims : bool (optional, default False)
If True, the axes which are reduced are left in the result as
dimensions with size one.
Returns
-------
out : pycuda.gpuarray.GPUArray
Array of indices into the array.
'''
if axis is None:
raise NotImplementedError("Can't compute global argmax")
return _minmax_impl(a_gpu, axis, "min", keepdims=keepdims)[1]
if __name__ == "__main__":
import doctest
    doctest.testmod()

# ==== end of skcuda/misc.py ====
from __future__ import absolute_import, division
from pprint import pprint
from string import Template
from pycuda.tools import context_dependent_memoize
from pycuda.compiler import SourceModule
from pycuda.reduction import ReductionKernel
from pycuda import curandom
from pycuda import cumath
import pycuda.gpuarray as gpuarray
import pycuda.driver as drv
import pycuda.elementwise as el
import pycuda.tools as tools
import numpy as np
from . import cublas
from . import misc
from . import linalg
rand = curandom.MRG32k3aRandomNumberGenerator()
import sys
if sys.version_info < (3,):
range = xrange
class LinAlgError(Exception):
"""Randomized Linear Algebra Error."""
pass
try:
from . import cula
_has_cula = True
except (ImportError, OSError):
_has_cula = False
from .misc import init, add_matvec, div_matvec, mult_matvec
from .linalg import hermitian, transpose
# Get installation location of C headers:
from . import install_headers
def rsvd(a_gpu, k=None, p=0, q=0, method="standard", handle=None):
"""
Randomized Singular Value Decomposition.
Randomized algorithm for computing the approximate low-rank singular value
decomposition of a rectangular (m, n) matrix `a` with target rank `k << n`.
    The input matrix `a` is factored as `a = U * diag(s) * Vt`. The left singular
    vectors are the columns of the real or complex unitary matrix `U`. The right
    singular vectors are the columns of the real or complex unitary matrix `V`.
    The singular values `s` are non-negative real numbers.
    The parameter `p` is an oversampling parameter used to improve the
    approximation. A value between 2 and 10 is recommended.
    The parameter `q` specifies the number of normalized power iterations
    (subspace iterations) used to reduce the approximation error. This is
    recommended if the singular values decay slowly; in practice 1 or 2
    iterations achieve good results, at the cost of additional computation.
    If k > (n/1.5), a partial or truncated SVD might be faster.
Parameters
----------
a_gpu : pycuda.gpuarray.GPUArray
Real/complex input matrix `a` with dimensions `(m, n)`.
k : int
`k` is the target rank of the low-rank decomposition, k << min(m,n).
p : int
        `p` sets the oversampling parameter (default `p=0`).
    q : int
        `q` sets the number of power iterations (default `q=0`).
method : `{'standard', 'fast'}`
'standard' : Standard algorithm as described in [1, 2]
'fast' : Version II algorithm as described in [2]
handle : int
CUBLAS context. If no context is specified, the default handle from
`skcuda.misc._global_cublas_handle` is used.
Returns
-------
    u_gpu : pycuda.gpuarray
        Left singular vectors, array of shape `(m, k)`.
    s_gpu : pycuda.gpuarray
        Singular values, 1-d array of length `k`.
    vt_gpu : pycuda.gpuarray
        Right singular vectors (transposed), array of shape `(k, n)`.
Notes
-----
Double precision is only supported if the standard version of the
CULA Dense toolkit is installed.
This function destroys the contents of the input matrix.
Arrays are assumed to be stored in column-major order, i.e., order='F'.
    Input matrices of shape `(m, n)` with `n > m` are not supported yet.
References
----------
N. Halko, P. Martinsson, and J. Tropp.
"Finding structure with randomness: probabilistic
algorithms for constructing approximate matrix
decompositions" (2009).
(available at `arXiv <http://arxiv.org/abs/0909.4061>`_).
S. Voronin and P.Martinsson.
"RSVDPACK: Subroutines for computing partial singular value
decompositions via randomized sampling on single core, multi core,
and GPU architectures" (2015).
(available at `arXiv <http://arxiv.org/abs/1502.05366>`_).
Examples
--------
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> from skcuda import linalg, rlinalg
>>> linalg.init()
>>> rlinalg.init()
>>> #Randomized SVD decomposition of the square matrix `a` with single precision.
    >>> #Note: There is no gain in using rsvd if k > int(n/1.5)
>>> a = np.array(np.random.randn(5, 5), np.float32, order='F')
>>> a_gpu = gpuarray.to_gpu(a)
>>> U, s, Vt = rlinalg.rsvd(a_gpu, k=5, method='standard')
>>> np.allclose(a, np.dot(U.get(), np.dot(np.diag(s.get()), Vt.get())), 1e-4)
True
>>> #Low-rank SVD decomposition with target rank k=2
>>> a = np.array(np.random.randn(5, 5), np.float32, order='F')
>>> a_gpu = gpuarray.to_gpu(a)
>>> U, s, Vt = rlinalg.rsvd(a_gpu, k=2, method='standard')
"""
#*************************************************************************
#*** Author: N. Benjamin Erichson <[email protected]> ***
#*** <September, 2015> ***
#*** License: BSD 3 clause ***
#*************************************************************************
if not _has_cula:
raise NotImplementedError('CULA not installed')
if handle is None:
handle = misc._global_cublas_handle
alloc = misc._global_cublas_allocator
    # The free version of CULA only supports single precision floating point arithmetic
data_type = a_gpu.dtype.type
real_type = np.float32
if data_type == np.complex64:
cula_func_gesvd = cula.culaDeviceCgesvd
cublas_func_gemm = cublas.cublasCgemm
copy_func = cublas.cublasCcopy
alpha = np.complex64(1.0)
beta = np.complex64(0.0)
TRANS_type = 'C'
isreal = False
elif data_type == np.float32:
cula_func_gesvd = cula.culaDeviceSgesvd
cublas_func_gemm = cublas.cublasSgemm
copy_func = cublas.cublasScopy
alpha = np.float32(1.0)
beta = np.float32(0.0)
TRANS_type = 'T'
isreal = True
else:
if cula._libcula_toolkit == 'standard':
if data_type == np.complex128:
cula_func_gesvd = cula.culaDeviceZgesvd
cublas_func_gemm = cublas.cublasZgemm
copy_func = cublas.cublasZcopy
alpha = np.complex128(1.0)
beta = np.complex128(0.0)
TRANS_type = 'C'
isreal = False
elif data_type == np.float64:
cula_func_gesvd = cula.culaDeviceDgesvd
cublas_func_gemm = cublas.cublasDgemm
copy_func = cublas.cublasDcopy
alpha = np.float64(1.0)
beta = np.float64(0.0)
TRANS_type = 'T'
isreal = True
else:
raise ValueError('unsupported type')
real_type = np.float64
else:
raise ValueError('double precision not supported')
#CUDA assumes that arrays are stored in column-major order
m, n = np.array(a_gpu.shape, int)
if n>m : raise ValueError('input matrix of shape (m,n), where n>m is not supported')
#Set k
    if k is None: raise ValueError('k must be provided')
    if k > n or k < 1: raise ValueError('k must satisfy 0 < k <= n')
kt = k
k = k + p
if k > n: k=n
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Generate a random sampling matrix O
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if isreal==False:
Oimag_gpu = gpuarray.empty((n,k), real_type, order="F", allocator=alloc)
Oreal_gpu = gpuarray.empty((n,k), real_type, order="F", allocator=alloc)
O_gpu = gpuarray.empty((n,k), data_type, order="F", allocator=alloc)
rand.fill_uniform(Oimag_gpu)
rand.fill_uniform(Oreal_gpu)
O_gpu = Oreal_gpu + 1j * Oimag_gpu
O_gpu = O_gpu.T * 2 - 1 #Scale to [-1,1]
else:
O_gpu = gpuarray.empty((n,k), real_type, order="F", allocator=alloc)
rand.fill_uniform(O_gpu) #Draw random samples from a ~ Uniform(-1,1) distribution
O_gpu = O_gpu * 2 - 1 #Scale to [-1,1]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Build sample matrix Y : Y = A * O
#Note: Y should approximate the range of A
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Allocate Y
Y_gpu = gpuarray.zeros((m,k), data_type, order="F", allocator=alloc)
#Dot product Y = A * O
cublas_func_gemm(handle, 'n', 'n', m, k, n, alpha,
a_gpu.gpudata, m, O_gpu.gpudata, n,
beta, Y_gpu.gpudata, m )
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Orthogonalize Y using economic QR decomposition: Y=QR
    #If q > 0 perform q subspace iterations
#Note: economic QR just returns Q, and destroys Y_gpu
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if q > 0:
Z_gpu = gpuarray.empty((n,k), data_type, order="F", allocator=alloc)
for i in np.arange(1, q+1 ):
if( (2*i-2)%q == 0 ):
Y_gpu = linalg.qr(Y_gpu, 'economic', lib='cula')
cublas_func_gemm(handle, TRANS_type, 'n', n, k, m, alpha,
a_gpu.gpudata, m, Y_gpu.gpudata, m,
beta, Z_gpu.gpudata, n )
if( (2*i-1)%q == 0 ):
Z_gpu = linalg.qr(Z_gpu, 'economic', lib='cula')
cublas_func_gemm(handle, 'n', 'n', m, k, n, alpha,
a_gpu.gpudata, m, Z_gpu.gpudata, n,
beta, Y_gpu.gpudata, m )
#End for
#End if
Q_gpu = linalg.qr(Y_gpu, 'economic', lib='cula')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Project the data matrix a into a lower dimensional subspace
#B = Q.T * A
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Allocate B
B_gpu = gpuarray.empty((k,n), data_type, order="F", allocator=alloc)
cublas_func_gemm(handle, TRANS_type, 'n', k, n, m, alpha,
Q_gpu.gpudata, m, a_gpu.gpudata, m,
beta, B_gpu.gpudata, k )
if method == 'standard':
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Singular Value Decomposition
#Note: B = U" * S * Vt
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#gesvd(jobu, jobvt, m, n, int(a), lda, int(s), int(u), ldu, int(vt), ldvt)
#Allocate s, U, Vt for economic SVD
#Note: singular values are always real
s_gpu = gpuarray.empty(k, real_type, order="F", allocator=alloc)
U_gpu = gpuarray.empty((k,k), data_type, order="F", allocator=alloc)
Vt_gpu = gpuarray.empty((k,n), data_type, order="F", allocator=alloc)
#Economic SVD
cula_func_gesvd('S', 'S', k, n, int(B_gpu.gpudata), k, int(s_gpu.gpudata),
int(U_gpu.gpudata), k, int(Vt_gpu.gpudata), k)
#Compute right singular vectors as U = Q * U"
cublas_func_gemm(handle, 'n', 'n', m, k, k, alpha,
Q_gpu.gpudata, m, U_gpu.gpudata, k,
beta, Q_gpu.gpudata, m )
U_gpu = Q_gpu #Set pointer
# Free internal CULA memory:
cula.culaFreeBuffers()
#Return
return U_gpu[ : , 0:kt ], s_gpu[ 0:kt ], Vt_gpu[ 0:kt , : ]
elif method == 'fast':
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Orthogonalize B.T using reduced QR decomposition: B.T = Q" * R"
#Note: reduced QR returns Q and R, and destroys B_gpu
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if isreal==True:
B_gpu = transpose(B_gpu) #transpose B
else:
B_gpu = hermitian(B_gpu) #transpose B
Qstar_gpu, Rstar_gpu = linalg.qr(B_gpu, 'reduced', lib='cula')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Singular Value Decomposition of R"
#Note: R" = U" * S" * Vt"
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#gesvd(jobu, jobvt, m, n, int(a), lda, int(s), int(u), ldu, int(vt), ldvt)
#Allocate s, U, Vt for economic SVD
#Note: singular values are always real
s_gpu = gpuarray.empty(k, real_type, order="F", allocator=alloc)
Ustar_gpu = gpuarray.empty((k,k), data_type, order="F", allocator=alloc)
Vtstar_gpu = gpuarray.empty((k,k), data_type, order="F", allocator=alloc)
#Economic SVD
cula_func_gesvd('A', 'A', k, k, int(Rstar_gpu.gpudata), k, int(s_gpu.gpudata),
int(Ustar_gpu.gpudata), k, int(Vtstar_gpu.gpudata), k)
#Compute right singular vectors as U = Q * Vt.T"
cublas_func_gemm(handle, 'n', TRANS_type, m, k, k, alpha,
Q_gpu.gpudata, m, Vtstar_gpu.gpudata, k,
beta, Q_gpu.gpudata, m )
U_gpu = Q_gpu #Set pointer
#Compute left singular vectors as Vt = U".T * Q".T
Vt_gpu = gpuarray.empty((k,n), data_type, order="F", allocator=alloc)
cublas_func_gemm(handle, TRANS_type, TRANS_type, k, n, k, alpha,
Ustar_gpu.gpudata, k, Qstar_gpu.gpudata, n,
beta, Vt_gpu.gpudata, k )
# Free internal CULA memory:
cula.culaFreeBuffers()
#Return
return U_gpu[ : , 0:kt ], s_gpu[ 0:kt ], Vt_gpu[ 0:kt , : ]
#End if
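# Illustrative NumPy transcription of the 'standard' randomized SVD used above
# (Halko et al.); it is not part of the original module and simplifies the power
# iteration step (no intermediate re-orthogonalization), but follows the same
# sample -> orthogonalize -> project -> small SVD -> lift structure.
def _rsvd_numpy_reference(a, k, p=0, q=0):
    O = np.random.uniform(-1, 1, size=(a.shape[1], k + p))   # random test matrix
    Y = a.dot(O)                                              # sample the range of a
    for _ in range(q):                                        # optional power iterations
        Y = a.dot(a.conj().T.dot(Y))
    Q, _ = np.linalg.qr(Y)                                    # orthonormal basis for range(a)
    B = Q.conj().T.dot(a)                                     # project to (k+p) x n
    Ub, s, Vt = np.linalg.svd(B, full_matrices=False)         # small SVD
    return Q.dot(Ub)[:, :k], s[:k], Vt[:k, :]                 # lift U back, truncate to k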
def rdmd(a_gpu, k=None, p=5, q=1, modes='exact', method_rsvd='standard', return_amplitudes=False, return_vandermonde=False, handle=None):
"""
Randomized Dynamic Mode Decomposition.
Dynamic Mode Decomposition (DMD) is a data processing algorithm which
    decomposes a matrix `a` in space and time.
The matrix `a` is decomposed as `a = FBV`, where the columns of `F`
contain the dynamic modes. The modes are ordered corresponding
to the amplitudes stored in the diagonal matrix `B`. `V` is a Vandermonde
matrix describing the temporal evolution.
Parameters
----------
a_gpu : pycuda.gpuarray.GPUArray
Real/complex input matrix `a` with dimensions `(m, n)`.
k : int, optional
If `k < (n-1)` low-rank Dynamic Mode Decomposition is computed.
p : int
        `p` sets the oversampling parameter for rSVD (default `p=5`).
    q : int
        `q` sets the number of power iterations for rSVD (default `q=1`).
modes : `{'standard', 'exact'}`
'standard' : uses the standard definition to compute the dynamic modes,
`F = U * W`.
'exact' : computes the exact dynamic modes, `F = Y * V * (S**-1) * W`.
method_rsvd : `{'standard', 'fast'}`
'standard' : (default) Standard algorithm as described in [1, 2]
'fast' : Version II algorithm as described in [2]
return_amplitudes : bool `{True, False}`
True: return amplitudes in addition to dynamic modes.
return_vandermonde : bool `{True, False}`
True: return Vandermonde matrix in addition to dynamic modes and amplitudes.
handle : int
CUBLAS context. If no context is specified, the default handle from
`skcuda.misc._global_cublas_handle` is used.
Returns
-------
    f_gpu : pycuda.gpuarray.GPUArray
        Matrix containing the dynamic modes of shape `(m, n-1)` or `(m, k)`.
    b_gpu : pycuda.gpuarray.GPUArray
        1-D array containing the amplitudes of length `min(n-1, k)`.
        Only returned if `return_amplitudes` is True.
    v_gpu : pycuda.gpuarray.GPUArray
        Vandermonde matrix of shape `(n-1, n-1)` or `(k, n-1)`.
        Only returned if `return_vandermonde` is True.
    omega_gpu : pycuda.gpuarray.GPUArray
        1-D array containing the natural logarithm of the DMD eigenvalues.
Notes
-----
Double precision is only supported if the standard version of the
CULA Dense toolkit is installed.
This function destroys the contents of the input matrix.
Arrays are assumed to be stored in column-major order, i.e., order='F'.
References
----------
N. B. Erichson and C. Donovan.
"Randomized Low-Rank Dynamic Mode Decomposition for Motion Detection"
Under Review.
N. Halko, P. Martinsson, and J. Tropp.
"Finding structure with randomness: probabilistic
algorithms for constructing approximate matrix
decompositions" (2009).
(available at `arXiv <http://arxiv.org/abs/0909.4061>`_).
J. H. Tu, et al.
"On dynamic mode decomposition: theory and applications."
arXiv preprint arXiv:1312.0041 (2013).
Examples
--------
>>> #Numpy
>>> import numpy as np
>>> #Plot libs
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.mplot3d import Axes3D
>>> from matplotlib import cm
>>> #GPU DMD libs
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> from skcuda import linalg, rlinalg
>>> linalg.init()
>>> rlinalg.init()
>>> # Define time and space discretizations
>>> x=np.linspace( -15, 15, 200)
>>> t=np.linspace(0, 8*np.pi , 80)
>>> dt=t[2]-t[1]
>>> X, T = np.meshgrid(x,t)
    >>> # Create two spatio-temporal patterns
>>> F1 = 0.5* np.cos(X)*(1.+0.* T)
>>> F2 = ( (1./np.cosh(X)) * np.tanh(X)) *(2.*np.exp(1j*2.8*T))
>>> # Add both signals
>>> F = (F1+F2)
>>> #Plot dataset
>>> fig = plt.figure()
>>> ax = fig.add_subplot(231, projection='3d')
>>> ax = fig.gca(projection='3d')
>>> surf = ax.plot_surface(X, T, F, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=True)
>>> ax.set_zlim(-1, 1)
>>> plt.title('F')
>>> ax = fig.add_subplot(232, projection='3d')
>>> ax = fig.gca(projection='3d')
>>> surf = ax.plot_surface(X, T, F1, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
>>> ax.set_zlim(-1, 1)
>>> plt.title('F1')
>>> ax = fig.add_subplot(233, projection='3d')
>>> ax = fig.gca(projection='3d')
>>> surf = ax.plot_surface(X, T, F2, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
>>> ax.set_zlim(-1, 1)
>>> plt.title('F2')
>>> #Dynamic Mode Decomposition
>>> F_gpu = np.array(F.T, np.complex64, order='F')
>>> F_gpu = gpuarray.to_gpu(F_gpu)
>>> Fmodes_gpu, b_gpu, V_gpu, omega_gpu = rlinalg.rdmd(F_gpu, k=2, p=0, q=1, modes='exact', return_amplitudes=True, return_vandermonde=True)
>>> omega = omega_gpu.get()
>>> plt.scatter(omega.real, omega.imag, marker='o', c='r')
>>> #Recover original signal
>>> F1tilde = np.dot(Fmodes_gpu[:,0:1].get() , np.dot(b_gpu[0].get(), V_gpu[0:1,:].get() ) )
>>> F2tilde = np.dot(Fmodes_gpu[:,1:2].get() , np.dot(b_gpu[1].get(), V_gpu[1:2,:].get() ) )
>>> #Plot DMD modes
>>> #Mode 0
>>> ax = fig.add_subplot(235, projection='3d')
>>> ax = fig.gca(projection='3d')
>>> surf = ax.plot_surface(X[0:F1tilde.shape[1],:], T[0:F1tilde.shape[1],:], F1tilde.T, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
>>> ax.set_zlim(-1, 1)
>>> plt.title('F1_tilde')
>>> #Mode 1
>>> ax = fig.add_subplot(236, projection='3d')
>>> ax = fig.gca(projection='3d')
>>> surf = ax.plot_surface(X[0:F2tilde.shape[1],:], T[0:F2tilde.shape[1],:], F2tilde.T, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
>>> ax.set_zlim(-1, 1)
>>> plt.title('F2_tilde')
>>> plt.show()
"""
#*************************************************************************
#*** Author: N. Benjamin Erichson <[email protected]> ***
#*** <2015> ***
#*** License: BSD 3 clause ***
#*************************************************************************
if not _has_cula:
raise NotImplementedError('CULA not installed')
if handle is None:
handle = misc._global_cublas_handle
alloc = misc._global_cublas_allocator
    # The free version of CULA only supports single precision floating point arithmetic
data_type = a_gpu.dtype.type
real_type = np.float32
if data_type == np.complex64:
cula_func_gesvd = cula.culaDeviceCgesvd
cublas_func_gemm = cublas.cublasCgemm
cublas_func_dgmm = cublas.cublasCdgmm
cula_func_gels = cula.culaDeviceCgels
copy_func = cublas.cublasCcopy
transpose_func = cublas.cublasCgeam
alpha = np.complex64(1.0)
beta = np.complex64(0.0)
TRANS_type = 'C'
isreal = False
elif data_type == np.float32:
cula_func_gesvd = cula.culaDeviceSgesvd
cublas_func_gemm = cublas.cublasSgemm
cublas_func_dgmm = cublas.cublasSdgmm
cula_func_gels = cula.culaDeviceSgels
copy_func = cublas.cublasScopy
transpose_func = cublas.cublasSgeam
alpha = np.float32(1.0)
beta = np.float32(0.0)
TRANS_type = 'T'
isreal = True
else:
if cula._libcula_toolkit == 'standard':
if data_type == np.complex128:
cula_func_gesvd = cula.culaDeviceZgesvd
cublas_func_gemm = cublas.cublasZgemm
cublas_func_dgmm = cublas.cublasZdgmm
cula_func_gels = cula.culaDeviceZgels
copy_func = cublas.cublasZcopy
transpose_func = cublas.cublasZgeam
alpha = np.complex128(1.0)
beta = np.complex128(0.0)
TRANS_type = 'C'
isreal = False
elif data_type == np.float64:
cula_func_gesvd = cula.culaDeviceDgesvd
cublas_func_gemm = cublas.cublasDgemm
cublas_func_dgmm = cublas.cublasDdgmm
cula_func_gels = cula.culaDeviceDgels
copy_func = cublas.cublasDcopy
transpose_func = cublas.cublasDgeam
alpha = np.float64(1.0)
beta = np.float64(0.0)
TRANS_type = 'T'
isreal = True
else:
raise ValueError('unsupported type')
real_type = np.float64
else:
raise ValueError('double precision not supported')
#CUDA assumes that arrays are stored in column-major order
m, n = np.array(a_gpu.shape, int)
nx = n-1
#Set k
    if k is None: k = nx
    if k > nx or k < 1: raise ValueError('k is not valid')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #Split data into left and right snapshot sequences
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Note: we need a copy of X_gpu, because SVD destroys X_gpu
#While Y_gpu is just a pointer
X_gpu = gpuarray.empty((m, n), data_type, order="F", allocator=alloc)
copy_func(handle, X_gpu.size, int(a_gpu.gpudata), 1, int(X_gpu.gpudata), 1)
X_gpu = X_gpu[:, :nx]
Y_gpu = a_gpu[:, 1:]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Randomized Singular Value Decomposition
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
U_gpu, s_gpu, Vh_gpu = rsvd(X_gpu, k=k, p=p, q=q,
method=method_rsvd, handle=handle)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Solve the LS problem to find estimate for M using the pseudo-inverse
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#real: M = U.T * Y * Vt.T * S**-1
#complex: M = U.H * Y * Vt.H * S**-1
    #Let G = Y * Vt.H * S**-1, hence M = U.H * G (U.T * G in the real case)
#Allocate G and M
G_gpu = gpuarray.empty((m,k), data_type, order="F", allocator=alloc)
M_gpu = gpuarray.empty((k,k), data_type, order="F", allocator=alloc)
#i) s = s **-1 (inverse)
if data_type == np.complex64 or data_type == np.complex128:
s_gpu = 1/s_gpu
s_gpu = s_gpu + 1j * gpuarray.zeros_like(s_gpu)
else:
s_gpu = 1.0/s_gpu
#ii) real/complex: scale Vs = Vt* x diag(s**-1)
Vs_gpu = gpuarray.empty((nx,k), data_type, order="F", allocator=alloc)
lda = max(1, Vh_gpu.strides[1] // Vh_gpu.dtype.itemsize)
ldb = max(1, Vs_gpu.strides[1] // Vs_gpu.dtype.itemsize)
transpose_func(handle, TRANS_type, TRANS_type, nx, k,
alpha, int(Vh_gpu.gpudata), lda, beta, int(Vh_gpu.gpudata), lda,
int(Vs_gpu.gpudata), ldb)
cublas_func_dgmm(handle, 'r', nx, k, int(Vs_gpu.gpudata), nx,
int(s_gpu.gpudata), 1 , int(Vs_gpu.gpudata), nx)
#iii) real: G = Y * Vs , complex: G = Y x Vs
cublas_func_gemm(handle, 'n', 'n', m, k, nx, alpha,
int(Y_gpu.gpudata), m, int(Vs_gpu.gpudata), nx,
beta, int(G_gpu.gpudata), m )
#iv) real/complex: M = U* x G
cublas_func_gemm(handle, TRANS_type, 'n', k, k, m, alpha,
int(U_gpu.gpudata), m, int(G_gpu.gpudata), m,
beta, int(M_gpu.gpudata), k )
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Eigen Decomposition
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Note: If a_gpu is real the imag part is omitted
Vr_gpu, w_gpu = linalg.eig(M_gpu, 'N', 'V', 'F', lib='cula')
omega = cumath.log(w_gpu)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Compute DMD Modes
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
F_gpu = gpuarray.empty((m,k), data_type, order="F", allocator=alloc)
modes = modes.lower()
if modes == 'exact': #Compute (exact) DMD modes: F = Y * V * S**-1 * W = G * W
cublas_func_gemm(handle, 'n', 'n', m, k, k, alpha,
G_gpu.gpudata, m, Vr_gpu.gpudata, k,
beta, G_gpu.gpudata, m )
F_gpu_temp = G_gpu
elif modes == 'standard': #Compute (standard) DMD modes: F = U * W
cublas_func_gemm(handle, 'n', 'n', m, k, k,
alpha, U_gpu.gpudata, m, Vr_gpu.gpudata, k,
beta, U_gpu.gpudata, m )
F_gpu_temp = U_gpu
else:
raise ValueError('Type of modes is not supported, choose "exact" or "standard".')
#Copy is required, because gels destroys input
copy_func(handle, F_gpu_temp.size, int(F_gpu_temp.gpudata),
1, int(F_gpu.gpudata), 1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #Compute amplitudes b using least-squares: Fb=x1
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if return_amplitudes==True:
#x1_gpu = a_gpu[:,0].copy()
x1_gpu = gpuarray.empty(m, data_type, order="F", allocator=alloc)
copy_func(handle, x1_gpu.size, int(a_gpu[:,0].gpudata), 1, int(x1_gpu.gpudata), 1)
cula_func_gels( 'N', m, k, int(1) , F_gpu_temp.gpudata, m, x1_gpu.gpudata, m)
b_gpu = x1_gpu
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Compute Vandermonde matrix (CPU)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if return_vandermonde==True:
V_gpu = linalg.vander(w_gpu, n=nx)
# Free internal CULA memory:
cula.culaFreeBuffers()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Return
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if return_amplitudes==True and return_vandermonde==True:
return F_gpu, b_gpu[:k], V_gpu, omega
elif return_amplitudes==True and return_vandermonde==False:
return F_gpu, b_gpu[:k], omega
elif return_amplitudes==False and return_vandermonde==True:
return F_gpu, V_gpu, omega
else:
return F_gpu, omega
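# Illustrative helper (not part of the original skcuda API): host-side reconstruction
# of the left snapshot sequence from the rdmd outputs, a[:, :-1] ~= F @ diag(b) @ V,
# where omega = log(eigenvalues) gives the (unscaled) temporal frequency of each mode.
def _dmd_reconstruct_numpy(f, b, v):
    # f: (m, k) modes, b: (k,) amplitudes, v: (k, n-1) Vandermonde matrix (NumPy arrays)
    return f.dot(np.diag(b)).dot(v)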
def cdmd(a_gpu, k=None, c=None, modes='exact', return_amplitudes=False, return_vandermonde=False, handle=None):
"""
Compressed Dynamic Mode Decomposition.
Dynamic Mode Decomposition (DMD) is a data processing algorithm which
    decomposes a matrix `a` in space and time.
The matrix `a` is decomposed as `a = FBV`, where the columns of `F`
contain the dynamic modes. The modes are ordered corresponding
to the amplitudes stored in the diagonal matrix `B`. `V` is a Vandermonde
matrix describing the temporal evolution.
Parameters
----------
a_gpu : pycuda.gpuarray.GPUArray
Real/complex input matrix `a` with dimensions `(m, n)`.
k : int, optional
If `k < (n-1)` low-rank Dynamic Mode Decomposition is computed.
c : int
        `c` sets the number of compressed measurements, i.e. the number of rows
        of the random sensing matrix.
modes : `{'exact'}`
'exact' : computes the exact dynamic modes, `F = Y * V * (S**-1) * W`.
return_amplitudes : bool `{True, False}`
True: return amplitudes in addition to dynamic modes.
return_vandermonde : bool `{True, False}`
True: return Vandermonde matrix in addition to dynamic modes and amplitudes.
handle : int
CUBLAS context. If no context is specified, the default handle from
`skcuda.misc._global_cublas_handle` is used.
Returns
-------
    f_gpu : pycuda.gpuarray.GPUArray
        Matrix containing the dynamic modes of shape `(m, n-1)` or `(m, k)`.
    b_gpu : pycuda.gpuarray.GPUArray
        1-D array containing the amplitudes of length `min(n-1, k)`.
        Only returned if `return_amplitudes` is True.
    v_gpu : pycuda.gpuarray.GPUArray
        Vandermonde matrix of shape `(n-1, n-1)` or `(k, n-1)`.
        Only returned if `return_vandermonde` is True.
    omega_gpu : pycuda.gpuarray.GPUArray
        1-D array containing the natural logarithm of the DMD eigenvalues.
Notes
-----
Double precision is only supported if the standard version of the
CULA Dense toolkit is installed.
This function destroys the contents of the input matrix.
Arrays are assumed to be stored in column-major order, i.e., order='F'.
References
----------
S. L. Brunton, et al.
"Compressed sampling and dynamic mode decomposition."
arXiv preprint arXiv:1312.5186 (2013).
J. H. Tu, et al.
"On dynamic mode decomposition: theory and applications."
arXiv preprint arXiv:1312.0041 (2013).
Examples
--------
>>> #Numpy
>>> import numpy as np
>>> #Plot libs
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.mplot3d import Axes3D
>>> from matplotlib import cm
>>> #GPU DMD libs
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> from skcuda import linalg, rlinalg
>>> linalg.init()
>>> rlinalg.init()
>>> # Define time and space discretizations
>>> x=np.linspace( -15, 15, 200)
>>> t=np.linspace(0, 8*np.pi , 80)
>>> dt=t[2]-t[1]
>>> X, T = np.meshgrid(x,t)
    >>> # Create two spatio-temporal patterns
>>> F1 = 0.5* np.cos(X)*(1.+0.* T)
>>> F2 = ( (1./np.cosh(X)) * np.tanh(X)) *(2.*np.exp(1j*2.8*T))
>>> # Add both signals
>>> F = (F1+F2)
>>> #Plot dataset
>>> fig = plt.figure()
>>> ax = fig.add_subplot(231, projection='3d')
>>> ax = fig.gca(projection='3d')
>>> surf = ax.plot_surface(X, T, F, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=True)
>>> ax.set_zlim(-1, 1)
>>> plt.title('F')
>>> ax = fig.add_subplot(232, projection='3d')
>>> ax = fig.gca(projection='3d')
>>> surf = ax.plot_surface(X, T, F1, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
>>> ax.set_zlim(-1, 1)
>>> plt.title('F1')
>>> ax = fig.add_subplot(233, projection='3d')
>>> ax = fig.gca(projection='3d')
>>> surf = ax.plot_surface(X, T, F2, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
>>> ax.set_zlim(-1, 1)
>>> plt.title('F2')
>>> #Dynamic Mode Decomposition
>>> F_gpu = np.array(F.T, np.complex64, order='F')
>>> F_gpu = gpuarray.to_gpu(F_gpu)
>>> Fmodes_gpu, b_gpu, V_gpu, omega_gpu = rlinalg.cdmd(F_gpu, k=2, c=20, modes='exact', return_amplitudes=True, return_vandermonde=True)
>>> omega = omega_gpu.get()
>>> plt.scatter(omega.real, omega.imag, marker='o', c='r')
>>> #Recover original signal
>>> F1tilde = np.dot(Fmodes_gpu[:,0:1].get() , np.dot(b_gpu[0].get(), V_gpu[0:1,:].get() ) )
>>> F2tilde = np.dot(Fmodes_gpu[:,1:2].get() , np.dot(b_gpu[1].get(), V_gpu[1:2,:].get() ) )
>>> # Plot DMD modes
>>> #Mode 0
>>> ax = fig.add_subplot(235, projection='3d')
>>> ax = fig.gca(projection='3d')
>>> surf = ax.plot_surface(X[0:F1tilde.shape[1],:], T[0:F1tilde.shape[1],:], F1tilde.T, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
>>> ax.set_zlim(-1, 1)
>>> plt.title('F1_tilde')
>>> #Mode 1
>>> ax = fig.add_subplot(236, projection='3d')
>>> ax = fig.gca(projection='3d')
>>> surf = ax.plot_surface(X[0:F2tilde.shape[1],:], T[0:F2tilde.shape[1],:], F2tilde.T, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
>>> ax.set_zlim(-1, 1)
>>> plt.title('F2_tilde')
>>> plt.show()
"""
#*************************************************************************
#*** Author: N. Benjamin Erichson <[email protected]> ***
#*** <2015> ***
#*** License: BSD 3 clause ***
#*************************************************************************
if not _has_cula:
raise NotImplementedError('CULA not installed')
if handle is None:
handle = misc._global_cublas_handle
alloc = misc._global_cublas_allocator
    # The free version of CULA only supports single precision floating point arithmetic
data_type = a_gpu.dtype.type
real_type = np.float32
if data_type == np.complex64:
cula_func_gesvd = cula.culaDeviceCgesvd
cublas_func_gemm = cublas.cublasCgemm
cublas_func_dgmm = cublas.cublasCdgmm
cula_func_gels = cula.culaDeviceCgels
copy_func = cublas.cublasCcopy
transpose_func = cublas.cublasCgeam
alpha = np.complex64(1.0)
beta = np.complex64(0.0)
TRANS_type = 'C'
isreal = False
elif data_type == np.float32:
cula_func_gesvd = cula.culaDeviceSgesvd
cublas_func_gemm = cublas.cublasSgemm
cublas_func_dgmm = cublas.cublasSdgmm
cula_func_gels = cula.culaDeviceSgels
copy_func = cublas.cublasScopy
transpose_func = cublas.cublasSgeam
alpha = np.float32(1.0)
beta = np.float32(0.0)
TRANS_type = 'T'
isreal = True
else:
if cula._libcula_toolkit == 'standard':
if data_type == np.complex128:
cula_func_gesvd = cula.culaDeviceZgesvd
cublas_func_gemm = cublas.cublasZgemm
cublas_func_dgmm = cublas.cublasZdgmm
cula_func_gels = cula.culaDeviceZgels
copy_func = cublas.cublasZcopy
transpose_func = cublas.cublasZgeam
alpha = np.complex128(1.0)
beta = np.complex128(0.0)
TRANS_type = 'C'
isreal = False
elif data_type == np.float64:
cula_func_gesvd = cula.culaDeviceDgesvd
cublas_func_gemm = cublas.cublasDgemm
cublas_func_dgmm = cublas.cublasDdgmm
cula_func_gels = cula.culaDeviceDgels
copy_func = cublas.cublasDcopy
transpose_func = cublas.cublasDgeam
alpha = np.float64(1.0)
beta = np.float64(0.0)
TRANS_type = 'T'
isreal = True
else:
raise ValueError('unsupported type')
real_type = np.float64
else:
raise ValueError('double precision not supported')
#CUDA assumes that arrays are stored in column-major order
m, n = np.array(a_gpu.shape, int)
nx = n-1
#Set k
    if k is None: k = nx
    if k > nx or k < 1: raise ValueError('k is not valid')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Compress
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if c is None:
        Ac_gpu = a_gpu
        c = m
else:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Generate a random sensing matrix S
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if isreal==False:
Simag_gpu = gpuarray.empty((m,c), real_type, order="F", allocator=alloc)
Sreal_gpu = gpuarray.empty((m,c), real_type, order="F", allocator=alloc)
S_gpu = gpuarray.empty((c,m), data_type, order="F", allocator=alloc)
rand.fill_uniform(Simag_gpu)
rand.fill_uniform(Sreal_gpu)
S_gpu = Sreal_gpu + 1j * Simag_gpu
S_gpu = S_gpu.T * 2 -1 #Scale to [-1,1]
else:
S_gpu = gpuarray.empty((c,m), real_type, order="F", allocator=alloc)
rand.fill_uniform(S_gpu) #Draw random samples from a ~ Uniform(-1,1) distribution
S_gpu = S_gpu * 2 - 1 #Scale to [-1,1]
#Allocate Ac
Ac_gpu = gpuarray.empty((c,n), data_type, order="F", allocator=alloc)
#Compress input matrix
cublas_func_gemm(handle, 'n', 'n', c, n, m, alpha,
int(S_gpu.gpudata), c, int(a_gpu.gpudata), m,
beta, int(Ac_gpu.gpudata), c )
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #Split data into left and right snapshot sequences
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Note: we need a copy of X_gpu, because SVD destroys X_gpu
#While Y_gpu is just a pointer
X_gpu = gpuarray.empty((c, n), data_type, order="F", allocator=alloc)
copy_func(handle, X_gpu.size, int(Ac_gpu.gpudata), 1, int(X_gpu.gpudata), 1)
X_gpu = X_gpu[:, :nx]
Y_gpu = Ac_gpu[:, 1:]
Yorig_gpu = a_gpu[:, 1:]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Singular Value Decomposition
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Allocate s, U, Vt for economic SVD
#Note: singular values are always real
min_s = min(nx,c)
s_gpu = gpuarray.zeros(min_s, real_type, order="F", allocator=alloc)
U_gpu = gpuarray.zeros((c,min_s), data_type, order="F", allocator=alloc)
Vh_gpu = gpuarray.zeros((min_s,nx), data_type, order="F", allocator=alloc)
#Economic SVD
cula_func_gesvd('S', 'S', c, nx, int(X_gpu.gpudata), c, int(s_gpu.gpudata),
int(U_gpu.gpudata), c, int(Vh_gpu.gpudata), min_s)
    #Low-rank DMD: truncate SVD if k < nx
if k != nx:
s_gpu = s_gpu[:k]
U_gpu = U_gpu[: , :k]
Vh_gpu = Vh_gpu[:k , : ]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Solve the LS problem to find estimate for M using the pseudo-inverse
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#real: M = U.T * Y * Vt.T * S**-1
#complex: M = U.H * Y * Vt.H * S**-1
    #Let G = Y * Vt.H * S**-1, hence M = U.H * G (U.T * G in the real case)
#Allocate G and M
G_gpu = gpuarray.zeros((c,k), data_type, order="F", allocator=alloc)
M_gpu = gpuarray.zeros((k,k), data_type, order="F", allocator=alloc)
#i) s = s **-1 (inverse)
if data_type == np.complex64 or data_type == np.complex128:
s_gpu = 1/s_gpu
s_gpu = s_gpu + 1j * gpuarray.zeros_like(s_gpu)
else:
s_gpu = 1/s_gpu
#ii) real/complex: scale Vs = Vt* x diag(s**-1)
Vs_gpu = gpuarray.zeros((nx,k), data_type, order="F", allocator=alloc)
lda = max(1, Vh_gpu.strides[1] // Vh_gpu.dtype.itemsize)
ldb = max(1, Vs_gpu.strides[1] // Vs_gpu.dtype.itemsize)
transpose_func(handle, TRANS_type, TRANS_type, nx, k,
1.0, int(Vh_gpu.gpudata), lda, 0.0, int(Vh_gpu.gpudata), lda,
int(Vs_gpu.gpudata), ldb)
#End Transpose
cublas_func_dgmm(handle, 'r', nx, k, int(Vs_gpu.gpudata), nx,
int(s_gpu.gpudata), 1 , int(Vs_gpu.gpudata), nx)
#iii) real: G = Y * Vs , complex: G = Y x Vs
cublas_func_gemm(handle, 'n', 'n', c, k, nx, alpha,
int(Y_gpu.gpudata), c, int(Vs_gpu.gpudata), nx,
beta, int(G_gpu.gpudata), c )
#iv) real/complex: M = U* x G
cublas_func_gemm(handle, TRANS_type, 'n', k, k, c, alpha,
int(U_gpu.gpudata), c, int(G_gpu.gpudata), c,
beta, int(M_gpu.gpudata), k )
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Eigen Decomposition
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Note: If a_gpu is real the imag part is omitted
Vr_gpu, w_gpu = linalg.eig(M_gpu, 'N', 'V', 'F', lib='cula')
omega = cumath.log(w_gpu)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Compute DMD Modes
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
F_gpu = gpuarray.empty((m,k), data_type, order="F", allocator=alloc)
modes = modes.lower()
if modes == 'exact': #Compute (exact) DMD modes: F = Y * V * S**-1 * W = G * W
cublas_func_gemm(handle, 'n' , 'n', nx, k, k, alpha,
int(Vs_gpu.gpudata), nx, int(Vr_gpu.gpudata), k,
beta, int(Vs_gpu.gpudata), nx )
cublas_func_gemm(handle, 'n', 'n', m, k, nx, alpha,
Yorig_gpu.gpudata, m, Vs_gpu.gpudata, nx,
beta, F_gpu.gpudata, m )
else:
        raise ValueError('Type of modes is not supported; only "exact" is available.')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #Compute amplitudes b using least-squares: Fb=x1
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if return_amplitudes==True:
F_gpu_temp = gpuarray.empty((m,k), data_type, order="F", allocator=alloc)
#Copy is required, because gels destroys input
copy_func(handle, F_gpu.size, int(F_gpu.gpudata),
1, int(F_gpu_temp.gpudata), 1)
#x1_gpu = a_gpu[:,0].copy()
x1_gpu = gpuarray.empty(m, data_type, order="F", allocator=alloc)
copy_func(handle, x1_gpu.size, int(a_gpu[:,0].gpudata), 1, int(x1_gpu.gpudata), 1)
cula_func_gels( 'N', m, k, int(1) , F_gpu_temp.gpudata, m, x1_gpu.gpudata, m)
b_gpu = x1_gpu
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Compute Vandermonde matrix (CPU)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if return_vandermonde==True:
V_gpu = linalg.vander(w_gpu, n=nx)
# Free internal CULA memory:
cula.culaFreeBuffers()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Return
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if return_amplitudes==True and return_vandermonde==True:
return F_gpu, b_gpu[:k], V_gpu, omega
elif return_amplitudes==True and return_vandermonde==False:
return F_gpu, b_gpu[:k], omega
elif return_amplitudes==False and return_vandermonde==True:
return F_gpu, V_gpu, omega
else:
return F_gpu, omega
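# Note on cdmd: the SVD and the eigendecomposition are carried out on the compressed
# (c x n) matrix Ac = S * a, where S is the random (c x m) sensing matrix, while the
# exact modes are lifted back through the *uncompressed* right snapshots Yorig, so
# they keep the full spatial dimension m. Choosing c << m is what makes this cheaper
# than plain DMD on the full data.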
if __name__ == "__main__":
import doctest
    doctest.testmod()

# ==== end of skcuda/rlinalg.py ====
import os
import pycuda.gpuarray as gpuarray
import pycuda.elementwise as elementwise
from pycuda.tools import context_dependent_memoize
import numpy as np
from . import misc
from .misc import init
# Get installation location of C headers:
from . import install_headers
@context_dependent_memoize
def _get_sici_kernel(dtype):
if dtype == np.float32:
args = 'float *x, float *si, float *ci'
op = 'sicif(x[i], &si[i], &ci[i])'
elif dtype == np.float64:
args = 'double *x, double *si, double *ci'
op = 'sici(x[i], &si[i], &ci[i])'
else:
raise ValueError('unsupported type')
return elementwise.ElementwiseKernel(args, op,
options=["-I", install_headers],
preamble='#include "cuSpecialFuncs.h"')
def sici(x_gpu):
"""
Sine/Cosine integral.
Computes the sine and cosine integral of every element in the
input matrix.
Parameters
----------
x_gpu : GPUArray
Input matrix of shape `(m, n)`.
Returns
-------
(si_gpu, ci_gpu) : tuple of GPUArrays
Tuple of GPUarrays containing the sine integrals and cosine
integrals of the entries of `x_gpu`.
Examples
--------
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import scipy.special
    >>> from skcuda.special import sici
>>> x = np.array([[1, 2], [3, 4]], np.float32)
>>> x_gpu = gpuarray.to_gpu(x)
>>> (si_gpu, ci_gpu) = sici(x_gpu)
>>> (si, ci) = scipy.special.sici(x)
>>> np.allclose(si, si_gpu.get())
True
>>> np.allclose(ci, ci_gpu.get())
True
"""
si_gpu = gpuarray.empty_like(x_gpu)
ci_gpu = gpuarray.empty_like(x_gpu)
func = _get_sici_kernel(x_gpu.dtype)
func(x_gpu, si_gpu, ci_gpu)
return (si_gpu, ci_gpu)
@context_dependent_memoize
def _get_exp1_kernel(dtype):
if dtype == np.complex64:
args = 'pycuda::complex<float> *z, pycuda::complex<float> *e'
elif dtype == np.complex128:
args = 'pycuda::complex<double> *z, pycuda::complex<double> *e'
else:
raise ValueError('unsupported type')
op = 'e[i] = exp1(z[i])'
return elementwise.ElementwiseKernel(args, op,
options=["-I", install_headers],
preamble='#include "cuSpecialFuncs.h"')
def exp1(z_gpu):
"""
Exponential integral with `n = 1` of complex arguments.
Parameters
----------
z_gpu : GPUArray
Input matrix of shape `(m, n)`.
Returns
-------
e_gpu : GPUArray
GPUarrays containing the exponential integrals of
the entries of `z_gpu`.
Examples
--------
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import scipy.special
>>> import special
>>> z = np.asarray(np.random.rand(4, 4)+1j*np.random.rand(4, 4), np.complex64)
>>> z_gpu = gpuarray.to_gpu(z)
>>> e_gpu = exp1(z_gpu)
>>> e_sp = scipy.special.exp1(z)
>>> np.allclose(e_sp, e_gpu.get())
True
"""
e_gpu = gpuarray.empty_like(z_gpu)
func = _get_exp1_kernel(z_gpu.dtype)
func(z_gpu, e_gpu)
return e_gpu
exp1.cache = {}
@context_dependent_memoize
def _get_expi_kernel(dtype):
if dtype == np.complex64:
args = 'pycuda::complex<float> *z, pycuda::complex<float> *e'
elif dtype == np.complex128:
args = 'pycuda::complex<double> *z, pycuda::complex<double> *e'
else:
raise ValueError('unsupported type')
op = 'e[i] = expi(z[i])'
return elementwise.ElementwiseKernel(args, op,
options=["-I", install_headers],
preamble='#include "cuSpecialFuncs.h"')
def expi(z_gpu):
"""
Exponential integral of complex arguments.
Parameters
----------
z_gpu : GPUArray
Input matrix of shape `(m, n)`.
Returns
-------
e_gpu : GPUArray
GPUarrays containing the exponential integrals of
the entries of `z_gpu`.
Examples
--------
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import scipy.special
>>> import special
>>> z = np.asarray(np.random.rand(4, 4)+1j*np.random.rand(4, 4), np.complex64)
>>> z_gpu = gpuarray.to_gpu(z)
>>> e_gpu = expi(z_gpu)
>>> e_sp = scipy.special.expi(z)
>>> np.allclose(e_sp, e_gpu.get())
True
"""
e_gpu = gpuarray.empty_like(z_gpu)
func = _get_expi_kernel(z_gpu.dtype)
func(z_gpu, e_gpu)
return e_gpu
if __name__ == "__main__":
import doctest
doctest.testmod() | /scikit-cuda-0.5.3.tar.gz/scikit-cuda-0.5.3/skcuda/special.py | 0.794066 | 0.489503 | special.py | pypi |
import typing as ty
import collections.abc as abc
import numpy as np
import scipy.signal as signal
import scipy.signal.windows as windows
import scipy.ndimage as ndimage
if ty.TYPE_CHECKING:
from curve._base import Curve
class SmoothingError(Exception):
"""Any smoothing errors
"""
_SMOOTHING_FILTERS = {}
def register_smooth_filter(method: str):
def decorator(filter_callable):
if method in _SMOOTHING_FILTERS:
raise ValueError('"{}" smoothing method already registered for {}'.format(
method, _SMOOTHING_FILTERS[method]))
_SMOOTHING_FILTERS[method] = filter_callable
return decorator
@register_smooth_filter('savgol')
def savgol_filter(curve: 'Curve', window_length: int, polyorder: int, *,
deriv: int = 0, delta: float = 1.0,
mode: str = 'interp', cval: float = 0.0) -> np.ndarray:
"""Savitzky-Golay smoothing filter [1]_
References
----------
.. [1] `Savitzky-Golay filter
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.savgol_filter.html#scipy.signal.savgol_filter>`_
"""
return signal.savgol_filter(
curve.data,
window_length=window_length,
polyorder=polyorder,
deriv=deriv,
delta=delta,
mode=mode,
cval=cval,
axis=0,
)
@register_smooth_filter('window')
def window_filter(curve: 'Curve',
window_size: int, window_type: ty.Union[str, abc.Callable] = 'hann',
mode: str = 'reflect', cval: float = 0.0) -> np.ndarray:
"""Smoothes a curve using moving average filter with the given window [1]_
References
----------
.. [1] `The windows in scipy
<https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows>`_
"""
if callable(window_type):
try:
window = window_type(window_size)
except Exception as err:
raise ValueError(
'Cannot create the window using {}: {}'.format(window_type, err)) from err
else:
window = windows.get_window(window_type, window_size, fftbins=False)
window /= window.sum()
return ndimage.convolve1d(
curve.data,
weights=window,
mode=mode,
cval=cval,
axis=0,
)
def smooth_methods() -> ty.List[str]:
"""Returns the list of available smoothing methods
Returns
-------
methods : List[str]
The list of available smoothing methods
"""
return list(_SMOOTHING_FILTERS.keys())
def get_smooth_filter(method: str) -> abc.Callable:
"""Creates and returns the smoothing filter for the given method
Parameters
----------
method : str
Smoothing method
Returns
-------
smooth_filter : Callable
Smoothing filter callable
See Also
--------
smooth_methods
Raises
------
NameError : If smooth method is unknown
"""
if method not in _SMOOTHING_FILTERS:
raise NameError('Cannot find the smoothing filter for given method "{}"'.format(method))
return _SMOOTHING_FILTERS[method]
def smooth(curve: 'Curve', method: str, **params) -> 'Curve':
"""Smoothes a n-dimensional curve using the given method and its parameters
Parameters
----------
curve : Curve
A curve object
method : str
Smoothing method
params : mapping
The parameters of smoothing method
Returns
-------
curve : Curve
Smoothed curve with type `numpy.float64`
Raises
------
ValueError : Input data or parameters have invalid values
TypeError : Input data or parameters have invalid type
SmoothingError : Smoothing has failed
See Also
--------
smooth_methods
"""
smooth_filter = get_smooth_filter(method)
try:
smoothed_data = smooth_filter(curve, **params)
except (ValueError, TypeError):
raise
except Exception as err:
raise SmoothingError('Smoothing has failed: {}'.format(err)) from err
return type(curve)(smoothed_data) | /scikit_curve-0.1.0-py3-none-any.whl/curve/_smooth.py | 0.906759 | 0.442456 | _smooth.py | pypi |
import typing as ty
import numpy as np
F_EPS = np.finfo(np.float64).eps
def isequal(obj1: np.ndarray, obj2: np.ndarray, **kwargs) -> np.ndarray:
"""Returns a boolean array where two arrays are element-wise equal
Notes
-----
int/float dtype independent equal check
Parameters
----------
obj1 : np.ndarray
The first object
obj2 : np.ndarray
The second object
kwargs : dict
Additional arguments for equal function
Returns
-------
res : np.ndarray
Result array
"""
if np.issubdtype(obj1.dtype, np.integer) and np.issubdtype(obj2.dtype, np.integer):
cmp = np.equal
else:
cmp = np.isclose
return cmp(obj1, obj2, **kwargs)
def allequal(obj1: np.ndarray, obj2: np.ndarray, axis: ty.Optional[int] = None, **kwargs) -> np.ndarray:
"""Test whether all array elements along a given axis evaluate to True.
Parameters
----------
obj1 : np.ndarray
The first object
obj2 : np.ndarray
The second object
axis : int, None
Axis for test equal. By default None
kwargs : dict
Additional arguments for equal function
Returns
-------
res : np.ndarray
The result array
"""
return np.all(isequal(obj1, obj2, **kwargs), axis=axis)
def dot1d(data1: np.ndarray, data2: np.ndarray) -> np.ndarray:
"""Computes row-wise dot product of two MxN arrays
Parameters
----------
data1 : np.ndarray
The first MxN array
data2 : np.ndarray
The second MxN array
Returns
-------
res : np.ndarray
The array 1xM with row-wise dot product result
"""
return np.einsum('ij,ij->i', data1, data2)
def linrescale(in_data: np.ndarray,
in_range: ty.Optional[ty.Tuple[float, float]] = None,
out_range: ty.Optional[ty.Tuple[float, float]] = None,
out_dtype: ty.Optional[np.dtype] = None) -> np.ndarray:
"""Linearly transforms values from input range to output range
Parameters
----------
in_data : array-like
Input data
in_range : list-like
Input range. Tuple of two items: ``[min, max]``. By default: ``[min(in_data), max(in_data)]``
out_range : list-like
Output range. Tuple of two items: ``[min max]``. By default: ``[0, 1]``
out_dtype : numpy.dtype
Output data type. By default ``numpy.float64``
Returns
-------
out_data : numpy.ndarray
Transformed data
Examples
--------
.. code-block:: python
>>> import numpy as np
>>>
>>> data = np.arange(0, 11)
>>> out = linrescale(data)
>>> print out
array([ 0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. ])
"""
in_data = np.asarray(in_data, dtype=np.float64)
if in_range is None:
in_range = (np.min(in_data), np.max(in_data))
if out_range is None:
out_range = (0, 1)
in_data = (in_data - in_range[0]) / (in_range[1] - in_range[0])
out_data = in_data * (out_range[1] - out_range[0]) + out_range[0]
if out_dtype is not None:
out_data = out_data.astype(out_dtype)
return out_data | /scikit_curve-0.1.0-py3-none-any.whl/curve/_numeric.py | 0.900124 | 0.808067 | _numeric.py | pypi |
import numpy as np
from scipy.special import fresnel
from curve import Curve
def arc(t_start: float = 0.0,
t_stop: float = np.pi * 2,
p_count: int = 49,
r: float = 1.0,
c: float = 0.0) -> Curve:
r"""Produces arc or full circle curve
Produces arc using the following parametric equations:
.. math::
x = cos(\theta) \dot r + c
y = sin(\theta) \dot r + c
By default computes full circle.
Parameters
----------
t_start : float
Start theta
t_stop : float
Stop theta
p_count : int
The number of points
r : float
Circle radius
c : float
Circle center
Returns
-------
curve : Curve
Acr curve
"""
theta = np.linspace(t_start, t_stop, p_count)
x = np.cos(theta) * r + c
y = np.sin(theta) * r + c
return Curve([x, y], tdata=theta)
def lemniscate_of_bernoulli(t_start: float = 0.0,
t_stop: float = np.pi*2,
p_count: int = 101,
c: float = 1.0) -> Curve:
"""Produces Lemniscate of Bernoulli curve
Parameters
----------
t_start
t_stop
p_count
c
Returns
-------
"""
theta = np.linspace(t_start, t_stop, p_count)
c_sq2 = c * np.sqrt(2)
cos_t = np.cos(theta)
sin_t = np.sin(theta)
denominator = sin_t ** 2 + 1
x = (c_sq2 * cos_t) / denominator
y = (c_sq2 * cos_t * sin_t) / denominator
return Curve([x, y], tdata=theta)
def archimedean_spiral(t_start: float = 0.0,
t_stop: float = 5 * np.pi,
p_count: int = 200,
a: float = 1.5,
b: float = -2.4) -> Curve:
"""Produces Archimedean spiral curve
Parameters
----------
t_start
t_stop
p_count
a
b
Returns
-------
"""
theta = np.linspace(t_start, t_stop, p_count)
x = (a + b * theta) * np.cos(theta)
y = (a + b * theta) * np.sin(theta)
return Curve([x, y], tdata=theta)
def euler_spiral(t_start: float = -3 * np.pi / 2,
t_stop: float = 3 * np.pi / 2,
p_count: int = 1000) -> Curve:
"""Produces Euler spiral curve
Parameters
----------
t_start
t_stop
p_count
Returns
-------
"""
t = np.linspace(t_start, t_stop, p_count)
ssa, csa = fresnel(t)
return Curve([csa, ssa], tdata=t)
def lissajous(t_start: float = 0.0,
t_stop: float = 2*np.pi,
p_count: int = 101,
a_ampl: float = 1.0,
b_ampl: float = 1.0,
a: float = 3.0,
b: float = 2.0,
d: float = 0.0,) -> Curve:
"""
Parameters
----------
t_start
t_stop
p_count
a_ampl
b_ampl
a
b
d
Returns
-------
"""
theta = np.linspace(t_start, t_stop, p_count)
x = a_ampl * np.sin(a * theta + d)
y = b_ampl * np.sin(b * theta)
return Curve([x, y], tdata=theta)
def helix(t_start: float = -3 * np.pi,
t_stop: float = 3 * np.pi,
p_count: int = 100,
a: float = 1.0,
b: float = 1.0) -> Curve:
"""Produces 3-d helix curve
Parameters
----------
t_start : float
t_stop : float
p_count : int
a : float
b : float
Returns
-------
"""
theta = np.linspace(t_start, t_stop, p_count)
x = np.sin(theta) * a
y = np.cos(theta) * a
z = theta * b
return Curve([x, y, z], tdata=theta)
def irregular_helix(t_start: float = -4 * np.pi,
t_stop: float = 4 * np.pi,
z_start: float = -2.0,
z_stop: float = 2.0,
p_count: int = 100) -> Curve:
"""Produces 3-d irregular helix curve
Parameters
----------
t_start
t_stop
z_start
z_stop
p_count
Returns
-------
"""
theta = np.linspace(t_start, t_stop, p_count)
z = np.linspace(z_start, z_stop, p_count)
r = z ** 2 + 1
x = r * np.sin(theta)
y = r * np.cos(theta)
return Curve([x, y, z], tdata=theta) | /scikit_curve-0.1.0-py3-none-any.whl/curve/curves.py | 0.946868 | 0.690771 | curves.py | pypi |
# Authors: Guillaume Lemaitre <[email protected]>
# Cedric Lemaitre
# License: BSD 3 clause
import numpy as np
import pandas as pd
from .extraction import activity_power_profile
from .io import bikeread
from .utils import validate_filenames
class Rider(object):
"""User interface for a rider.
User interface to easily add, remove, compute information related to power.
Read more in the :ref:`User Guide <record_power_profile>`.
Parameters
----------
n_jobs : int, (default=1)
The number of workers to use for the different processing.
Attributes
----------
power_profile_ : DataFrame
DataFrame containing all information regarding the power-profile of a
rider for each ride.
"""
def __init__(self, n_jobs=1):
self.n_jobs = n_jobs
self.power_profile_ = None
def add_activities(self, filenames):
"""Compute the power-profile for each activity and add it to the
current power-profile.
Parameters
----------
filenames : str or list of str
A string a list of string to the file to read. You can use
wildcards to automatically check several files.
Returns
-------
None
Examples
--------
>>> from skcycling.datasets import load_fit
>>> from skcycling.base import Rider
>>> rider = Rider()
>>> rider.add_activities(load_fit()[0])
>>> rider.power_profile_.head()
2014-05-07 12:26:22
cadence 00:00:01 78.000000
00:00:02 64.000000
00:00:03 62.666667
00:00:04 62.500000
00:00:05 64.400000
"""
filenames = validate_filenames(filenames)
activities_pp = [activity_power_profile(bikeread(f))
for f in filenames]
activities_pp = pd.concat(activities_pp, axis=1)
if self.power_profile_ is not None:
try:
self.power_profile_ = self.power_profile_.join(activities_pp,
how='outer')
except ValueError as e:
if 'columns overlap but no suffix specified' in e.args[0]:
raise ValueError('One of the activity was already added'
' to the rider power-profile. Remove this'
' activity before to try to add it.')
else:
raise
else:
self.power_profile_ = activities_pp
def delete_activities(self, dates, time_comparison=False):
"""Delete the activities power-profile from some specific dates.
Parameters
----------
dates : list/tuple of datetime-like or str
The dates of the activities to be removed. The format expected is:
* datetime-like or str: a single activity will be deleted.
* a list of datetime-like or str: each activity for which the date
is contained in the list will be deleted.
* a tuple of datetime-like or str ``(start_date, end_date)``: the
activities for which the dates are included in the range will be
deleted.
time_comparison : bool, optional
Whether to make a strict comparison using time or to relax to
constraints with only the date.
Returns
-------
None
Examples
--------
>>> from skcycling.datasets import load_rider
>>> from skcycling import Rider
>>> rider = Rider.from_csv(load_rider())
>>> rider.delete_activities('07 May 2014')
>>> print(rider)
RIDER INFORMATION:
power-profile:
2014-05-11 09:39:38 2014-07-26 16:50:56
cadence 00:00:01 100.000000 60.000000
00:00:02 89.000000 58.000000
00:00:03 68.333333 56.333333
00:00:04 59.500000 59.250000
00:00:05 63.200000 61.000000
"""
def _strict_comparison(dates_pp, date, strict_equal):
if strict_equal:
return dates_pp == date
else:
return np.bitwise_and(
dates_pp >= date,
dates_pp <= pd.Timestamp(date) + pd.DateOffset(1))
if isinstance(dates, tuple):
if len(dates) != 2:
raise ValueError("Wrong tuple format. Expecting a tuple of"
" format (start_date, end_date). Got {!r}"
" instead.".format(dates))
mask_date = np.bitwise_and(
self.power_profile_.columns >= dates[0],
self.power_profile_.columns <= pd.Timestamp(dates[1]) +
pd.DateOffset(1))
elif isinstance(dates, list):
mask_date = np.any(
[_strict_comparison(self.power_profile_.columns, d,
time_comparison)
for d in dates], axis=0)
else:
mask_date = _strict_comparison(self.power_profile_.columns, dates,
time_comparison)
mask_date = np.bitwise_not(mask_date)
self.power_profile_ = self.power_profile_.loc[:, mask_date]
def record_power_profile(self, range_dates=None, columns=None):
"""Compute the record power-profile.
Parameters
----------
range_dates : tuple of datetime-like or str, optional
The start and end date to consider when computing the record
power-profile. By default, all data will be used.
columns : array-like or None, optional
Name of data field to return. By default, all available data will
be returned.
Returns
-------
record_power_profile : DataFrame
Record power-profile taken between the range of dates.
Examples
--------
>>> from skcycling import Rider
>>> from skcycling.datasets import load_rider
>>> rider = Rider.from_csv(load_rider())
>>> record_power_profile = rider.record_power_profile()
>>> record_power_profile.head() # doctest: +NORMALIZE_WHITESPACE
cadence distance elevation heart-rate power
00:00:01 60.000000 27162.600000 NaN NaN 750.000000
00:00:02 58.000000 27163.750000 NaN NaN 741.000000
00:00:03 56.333333 27164.586667 NaN NaN 731.666667
00:00:04 59.250000 27163.402500 NaN NaN 719.500000
00:00:05 61.000000 27162.142000 NaN NaN 712.200000
This is also possible to give a range of dates to compute the record
power-profile. We can also select some specific information.
>>> record_power_profile = rider.record_power_profile(
... range_dates=('07 May 2014', '11 May 2014'),
... columns=['power', 'cadence'])
>>> record_power_profile.head()
cadence power
00:00:01 100.000000 717.00
00:00:02 89.000000 717.00
00:00:03 68.333333 590.00
00:00:04 59.500000 552.25
00:00:05 63.200000 552.60
"""
if range_dates is None:
mask_date = np.ones_like(self.power_profile_.columns,
dtype=bool)
else:
mask_date = np.bitwise_and(
self.power_profile_.columns >= range_dates[0],
self.power_profile_.columns <= pd.Timestamp(range_dates[1]) +
pd.DateOffset(1))
if columns is None:
columns = self.power_profile_.index.levels[0]
pp_idxmax = (self.power_profile_.loc['power']
.loc[:, mask_date]
.idxmax(axis=1)
.dropna())
rpp = {}
for dt in columns:
data = self.power_profile_.loc[dt].loc[:, mask_date]
rpp[dt] = pd.Series(
[data.loc[date_idx]
for date_idx in pp_idxmax.iteritems()],
index=data.index[:pp_idxmax.size])
return pd.DataFrame(rpp)
@classmethod
def from_csv(cls, filename, n_jobs=1):
"""Load rider information from a CSV file.
Parameters
----------
filename : str
The path to the CSV file.
n_jobs : int, (default=1)
The number of workers to use for the different processing.
Returns
-------
rider : skcycling.Rider
The :class:`skcycling.Rider` instance.
Examples
--------
>>> from skcycling.datasets import load_rider
>>> from skcycling import Rider
>>> rider = Rider.from_csv(load_rider())
>>> print(rider) # doctest: +NORMALIZE_WHITESPACE
RIDER INFORMATION:
power-profile:
2014-05-07 12:26:22 2014-05-11 09:39:38 \\
cadence 00:00:01 78.000000 100.000000
00:00:02 64.000000 89.000000
00:00:03 62.666667 68.333333
00:00:04 62.500000 59.500000
00:00:05 64.400000 63.200000
<BLANKLINE>
2014-07-26 16:50:56
cadence 00:00:01 60.000000
00:00:02 58.000000
00:00:03 56.333333
00:00:04 59.250000
00:00:05 61.000000
"""
df = pd.read_csv(filename, index_col=[0, 1])
df.columns = pd.to_datetime(df.columns)
df.index = pd.MultiIndex(levels=[df.index.levels[0],
pd.to_timedelta(df.index.levels[1])],
labels=df.index.labels,
name=[None, None])
rider = cls(n_jobs=n_jobs)
rider.power_profile_ = df
return rider
def to_csv(self, filename):
"""Drop the rider information into a CSV file.
Parameters
----------
filename : str
The path to the CSV file.
Returns
-------
None
Examples
--------
>>> from skcycling.datasets import load_fit
>>> from skcycling import Rider
>>> rider = Rider(n_jobs=-1)
>>> rider.add_activities(load_fit()[:1])
>>> print(rider)
RIDER INFORMATION:
power-profile:
2014-05-07 12:26:22
cadence 00:00:01 78.000000
00:00:02 64.000000
00:00:03 62.666667
00:00:04 62.500000
00:00:05 64.400000
"""
self.power_profile_.to_csv(filename, date_format='%Y-%m-%d %H:%M:%S')
def __repr__(self):
return 'RIDER INFORMATION:\n power-profile:\n {}'.format(
self.power_profile_.head()) | /scikit_cycling-0.1.3-cp35-cp35m-win32.whl/skcycling/base.py | 0.886942 | 0.483587 | base.py | pypi |
# Authors: Guillaume Lemaitre <[email protected]>
# Cedric Lemaitre
# License: BSD 3 clause
from __future__ import division
from collections import Iterable
import pandas as pd
from ..exceptions import MissingDataError
def acceleration(activity, periods=5, append=True):
"""Compute the acceleration (i.e. speed gradient).
Read more in the :ref:`User Guide <gradient>`.
Parameters
----------
activity : DataFrame
The activity containing speed information.
periods : int, default=5
Periods to shift to compute the acceleration.
append : bool, optional
Whether to append the acceleration to the original activity (default)
or to only return the acceleration as a Series.
Returns
-------
data : DataFrame or Series
The original activity with an additional column containing the
acceleration or a single Series containing the acceleration.
Examples
--------
>>> from skcycling.datasets import load_fit
>>> from skcycling.io import bikeread
>>> from skcycling.extraction import acceleration
>>> ride = bikeread(load_fit()[0])
>>> new_ride = acceleration(ride)
"""
if 'speed' not in activity.columns:
raise MissingDataError('To compute the acceleration, speed data are '
'required. Got {} fields.'
.format(activity.columns))
acceleration = activity['speed'].diff(periods=periods) / periods
if append:
activity['acceleration'] = acceleration
return activity
else:
return acceleration
def gradient_elevation(activity, periods=5, append=True):
"""Compute the elevation gradient.
Read more in the :ref:`User Guide <gradient>`.
Parameters
----------
activity : DataFrame
The activity containing elevation and distance information.
periods : int, default=5
Periods to shift to compute the elevation gradient.
append : bool, optional
Whether to append the elevation gradient to the original activity
(default) or to only return the elevation gradient as a Series.
Returns
-------
data : DataFrame or Series
The original activity with an additional column containing the
elevation gradient or a single Series containing the elevation
gradient.
Examples
--------
>>> from skcycling.datasets import load_fit
>>> from skcycling.io import bikeread
>>> from skcycling.extraction import gradient_elevation
>>> ride = bikeread(load_fit()[0])
>>> new_ride = gradient_elevation(ride)
"""
if not {'elevation', 'distance'}.issubset(activity.columns):
raise MissingDataError('To compute the elevation gradient, elevation '
'and distance data are required. Got {} fields.'
.format(activity.columns))
diff_elevation = activity['elevation'].diff(periods=periods)
diff_distance = activity['distance'].diff(periods=periods)
gradient_elevation = diff_elevation / diff_distance
if append:
activity['gradient-elevation'] = gradient_elevation
return activity
else:
return gradient_elevation
def gradient_heart_rate(activity, periods=5, append=True):
"""Compute the heart-rate gradient.
Read more in the :ref:`User Guide <gradient>`.
Parameters
----------
activity : DataFrame
The activity containing heart-rate information.
periods : int, default=5
Periods to shift to compute the heart-rate gradient.
append : bool, optional
Whether to append the heart-rate gradient to the original activity
(default) or to only return the heart-rate gradient as a Series.
Returns
-------
data : DataFrame or Series
The original activity with an additional column containing the
heart-rate gradient or a single Series containing the heart-rate
gradient.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from skcycling.datasets import load_fit
>>> from skcycling.io import bikeread
>>> from skcycling.extraction import gradient_heart_rate
>>> ride = bikeread(load_fit()[0])
>>> ride['heart-rate'] = pd.Series(
... np.random.randint(60, 200, size=ride.shape[0]),
... index=ride.index) # Add fake heart-rate data for the example
>>> new_ride = gradient_heart_rate(ride)
"""
if 'heart-rate' not in activity.columns:
raise MissingDataError('To compute the heart-rate gradient, heart-rate'
' data are required. Got {} fields.'
.format(activity.columns))
gradient_heart_rate = activity['heart-rate'].diff(periods=periods)
if append:
activity['gradient-heart-rate'] = gradient_heart_rate
return activity
else:
return gradient_heart_rate
def gradient_activity(activity, periods=1, append=True, columns=None):
"""Compute the gradient for all given columns.
Read more in the :ref:`User Guide <gradient>`.
Parameters
----------
activity : DataFrame
The activity to use to compute the gradient.
periods : int or array-like, default=1
Periods to shift to compute the gradient. If an array-like is given,
several gradient will be computed.
append : bool, optional
Whether to append the gradients to the original activity.
columns : list, optional
The name of the columns to use to compute the gradient. By default, all
the columns are used.
Returns
-------
gradient : DataFrame
The computed gradient from the activity.
Examples
--------
>>> from skcycling.datasets import load_fit
>>> from skcycling.io import bikeread
>>> from skcycling.extraction import gradient_activity
>>> ride = bikeread(load_fit()[0], drop_nan='columns')
>>> new_ride = acceleration(ride)
"""
if columns is not None:
data = activity[columns]
else:
data = activity
if isinstance(periods, Iterable):
gradient = [data.diff(periods=p) for p in periods]
gradient_name = ['gradient_{}'.format(p) for p in periods]
else:
gradient = [data.diff(periods=periods)]
gradient_name = ['gradient_{}'.format(periods)]
if append:
# prepend the original information
gradient = [activity] + gradient
gradient_name = ['original'] + gradient_name
return pd.concat(gradient, axis=1, keys=gradient_name) | /scikit_cycling-0.1.3-cp35-cp35m-win32.whl/skcycling/extraction/gradient.py | 0.95638 | 0.836821 | gradient.py | pypi |
# Authors: Guillaume Lemaitre <[email protected]>
# Cedric Lemaitre
# License: BSD 3 clause
import os
from collections import defaultdict
import pandas as pd
import numpy as np
import six
from fitparse import FitFile
# 'timestamp' will be consider as the index of the DataFrame later on
FIELDS_DATA = ('timestamp', 'power', 'heart_rate', 'cadence', 'distance',
'altitude', 'speed')
def check_filename_fit(filename):
"""Method to check if the filename corresponds to a fit file.
Parameters
----------
filename : str
The fit file to check.
Returns
-------
filename : str
The checked filename.
"""
# Check that filename is of string type
if isinstance(filename, six.string_types):
# Check that this is a fit file
if filename.endswith('.fit'):
# Check that the file is existing
if os.path.isfile(filename):
return filename
else:
raise ValueError('The file does not exist.')
else:
raise ValueError('The file is not a fit file.')
else:
raise ValueError('filename needs to be a string. Got {}'.format(
type(filename)))
def load_power_from_fit(filename):
"""Method to open the power data from FIT file into a pandas dataframe.
Parameters
----------
filename : str,
Path to the FIT file.
Returns
-------
data : DataFrame
Power records of the ride.
"""
filename = check_filename_fit(filename)
activity = FitFile(filename)
activity.parse()
records = activity.get_messages(name='record')
data = defaultdict(list)
for rec in records:
values = rec.get_values()
for key in FIELDS_DATA:
data[key].append(values.get(key, np.NaN))
data = pd.DataFrame(data)
if data.empty:
raise IOError('The file {} does not contain any data.'.format(
filename))
# rename the columns for consistency
data.rename(columns={'heart_rate': 'heart-rate', 'altitude': 'elevation'},
inplace=True)
data.set_index(FIELDS_DATA[0], inplace=True)
del data.index.name
return data | /scikit_cycling-0.1.3-cp35-cp35m-win32.whl/skcycling/io/fit.py | 0.808899 | 0.517449 | fit.py | pypi |
# Authors: Guillaume Lemaitre <[email protected]>
# Cedric Lemaitre
# License: BSD 3 clause
import numpy as np
from .fit import load_power_from_fit
DROP_OPTIONS = ('columns', 'rows', 'both')
def bikeread(filename, drop_nan=None):
"""Read power data file.
Read more in the :ref:`User Guide <reader>`.
Parameters
----------
filename : str
Path to the file to read.
drop_nan : str {'columns', 'rows', 'both'} or None
Either to remove the columns/rows containing NaN values. By default,
all data will be kept.
Returns
-------
data : DataFrame
Power data and time data.
Examples
--------
>>> from skcycling.datasets import load_fit
>>> from skcycling.io import bikeread
>>> activity = bikeread(load_fit()[0], drop_nan='columns')
>>> activity.head() # doctest : +NORMALIZE_WHITESPACE
elevation cadence distance power speed
2014-05-07 12:26:22 64.8 45.0 3.05 256.0 3.036
2014-05-07 12:26:23 64.8 42.0 6.09 185.0 3.053
2014-05-07 12:26:24 64.8 44.0 9.09 343.0 3.004
2014-05-07 12:26:25 64.8 45.0 11.94 344.0 2.846
2014-05-07 12:26:26 65.8 48.0 15.03 389.0 3.088
"""
if drop_nan is not None and drop_nan not in DROP_OPTIONS:
raise ValueError('"drop_nan" should be one of {}.'
' Got {} instead.'.format(DROP_OPTIONS, drop_nan))
df = load_power_from_fit(filename)
if drop_nan is not None:
if drop_nan == 'columns':
df.dropna(axis=1, inplace=True)
elif drop_nan == 'rows':
df.dropna(axis=0, inplace=True)
else:
df.dropna(axis=1, inplace=True).dropna(axis=0, inplace=True)
# remove possible outliers by clipping the value
df[df['power'] > 2500.] = np.nan
# resample to have a precision of a second with additional linear
# interpolation for missing value
return df.resample('s').interpolate('linear') | /scikit_cycling-0.1.3-cp35-cp35m-win32.whl/skcycling/io/base.py | 0.875348 | 0.572424 | base.py | pypi |
from os import listdir
from os.path import dirname, join
__all__ = ['load_fit',
'load_rider']
def load_fit(returned_type='list_file', set_data='normal'):
"""Return path to some FIT toy data.
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
returned_type : str, optional (default='list_file')
If 'list_file', return a list containing the fit files;
If 'path', return a string where the data are localized.
set_data : str, optional (default='normal')
If 'normal', return 3 files.
If 'corrupted, return corrupted files for testing.
Returns
-------
filenames : str or list of str,
List of string or string depending of input parameters.
Examples
--------
>>> from skcycling.datasets import load_fit
>>> load_fit() # doctest : +ELLIPSIS
[...]
"""
module_path = dirname(__file__)
if set_data == 'normal':
if returned_type == 'list_file':
return sorted([
join(module_path, 'data', name)
for name in listdir(join(module_path, 'data'))
if name.endswith('.fit')
])
elif returned_type == 'path':
return join(module_path, 'data')
elif set_data == 'corrupted':
if returned_type == 'list_file':
return sorted([
join(module_path, 'corrupted_data', name)
for name in listdir(
join(module_path, 'corrupted_data'))
if name.endswith('.fit')
])
elif returned_type == 'path':
return join(module_path, 'corrupted_data')
def load_rider():
"""Return the path to a CSV file containing rider information.
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
None
Returns
-------
filename : str
The path to the CSV file.
Examples
--------
>>> from skcycling.datasets import load_rider
>>> load_rider() # doctest : +ELLIPSIS
'...rider.csv'
"""
module_path = dirname(__file__)
return join(module_path, 'data', 'rider.csv') | /scikit_cycling-0.1.3-cp35-cp35m-win32.whl/skcycling/datasets/__init__.py | 0.783533 | 0.349921 | __init__.py | pypi |
# Authors: Guillaume Lemaitre <[email protected]>
# Cedric Lemaitre
# License: BSD 3 clause
from __future__ import division
import numpy as np
TS_SCALE_GRAPPE = dict([('I1', 2.), ('I2', 2.5), ('I3', 3.),
('I4', 3.5), ('I5', 4.5), ('I6', 7.),
('I7', 11.)])
ESIE_SCALE_GRAPPE = dict([('I1', (.3, .5)), ('I2', (.5, .6)),
('I3', (.6, .75)), ('I4', (.75, .85)),
('I5', (.85, 1.)), ('I6', (1., 1.80)),
('I7', (1.8, 3.))])
def mpa2ftp(mpa):
"""Convert the maximum power aerobic into the functional threshold power.
Parameters
----------
mpa : float
Maximum power aerobic.
Return:
-------
ftp : float
Functional threshold power.
Examples
--------
>>> from skcycling.metrics import mpa2ftp
>>> print(mpa2ftp(400)) # doctest: +ELLIPSIS
304...
"""
return 0.76 * mpa
def ftp2mpa(ftp):
"""Convert the functional threshold power into the maximum threshold power.
Parameters
----------
ftp : float
Functional threshold power.
Return:
-------
mpa : float
Maximum power aerobic.
Examples
--------
>>> from skcycling.metrics import ftp2mpa
>>> print(ftp2mpa(304)) # doctest: +ELLIPSIS
400...
"""
return ftp / 0.76
def normalized_power_score(activity_power, mpa, window_width=30):
"""Normalized power®.
The normalized power is an average power computing a smoothed power input
and rejecting the low power intensities.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
activity_power : Series
A Series containing the power data from an activity.
mpa : float
Maximum power aerobic. Use :func:`metrics.ftp2mpa` if you use the
functional threshold power metric.
window_width : int, optional
The width of the window used to smooth the power data before to compute
the normalized power. The default width is 30 samples.
Returns
-------
score : float
Normalized power score.
References
----------
.. [1] Allen, H., and A. Coggan. "Training and racing with a power
meter." VeloPress, 2012.
Examples
--------
>>> from skcycling.datasets import load_fit
>>> from skcycling.io import bikeread
>>> from skcycling.metrics import normalized_power_score
>>> ride = bikeread(load_fit()[0])
>>> mpa = 400
>>> np = normalized_power_score(ride['power'], mpa)
>>> print('Normalized power {:.2f} W'.format(np))
Normalized power 218.49 W
"""
smooth_activity = (activity_power.rolling(window_width, center=True)
.mean().dropna())
# removing value < I1-ESIE, i.e. 30 % MPA
smooth_activity = smooth_activity[
smooth_activity > ESIE_SCALE_GRAPPE['I1'][0] * mpa]
return (smooth_activity ** 4).mean() ** (1 / 4)
def intensity_factor_score(activity_power, mpa):
"""Intensity factor®.
The intensity factor® is the ratio of the normalized power® over the
functional threshold power. Note that all our computation consider the
maximum power aerobic for consistency. If you only have the functional
threshold power, use :func:`metrics.ftp2mpa`.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
activity_power : Series
A Series containing the power data from an activity.
mpa : float
Maximum power aerobic. Use :func:`metrics.ftp2mpa` if you use the
functional threshold power metric.
Returns
-------
score: float
Intensity factor.
References
----------
.. [1] Allen, H., and A. Coggan. "Training and racing with a power
meter." VeloPress, 2012.
Examples
--------
>>> from skcycling.datasets import load_fit
>>> from skcycling.io import bikeread
>>> from skcycling.metrics import intensity_factor_score
>>> ride = bikeread(load_fit()[0])
>>> mpa = 400
>>> if_score = intensity_factor_score(ride['power'], mpa)
>>> print('Intensity factor {:.2f} W'.format(if_score))
Intensity factor 0.72 W
"""
ftp = mpa2ftp(mpa)
return normalized_power_score(activity_power, mpa) / ftp
def training_stress_score(activity_power, mpa):
"""Training stress score®.
The training stress score® corresponds to the intensity factor® normalized
by the time of the activity. You can use the function
:func:`metrics.ftp2mpa` if you are using the functional threshold metric.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
activity_power : Series
A Series containing the power data from an activity.
mpa : float
Maximum power aerobic. Use :func:`metrics.ftp2mpa` if you use the
functional threshold power metric.
Returns
-------
score: float
Training stress score.
References
----------
.. [1] Allen, H., and A. Coggan. "Training and racing with a power
meter." VeloPress, 2012.
Examples
--------
>>> from skcycling.datasets import load_fit
>>> from skcycling.io import bikeread
>>> from skcycling.metrics import training_stress_score
>>> ride = bikeread(load_fit()[0])
>>> mpa = 400
>>> ts_score = training_stress_score(ride['power'], mpa)
>>> print('Training stress score {:.2f}'.format(ts_score))
Training stress score 32.38
"""
activity_power = activity_power.resample('1S').mean()
if_score = intensity_factor_score(activity_power, mpa)
return (activity_power.size * if_score ** 2) / 3600 * 100
def training_load_score(activity_power, mpa):
"""Training load score.
Grappe et al. proposes to compute the load of an activity by a weighted sum
of the time spend in the different ESIE zones.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
activity_power : Series
A Series containing the power data from an activity.
mpa : float
Maximum power aerobic. Use :func:`metrics.ftp2mpa` if you use the
functional threshold power metric.
Returns
-------
tls_score: float
Training load score.
References
----------
.. [1] Grappe, F. "Cyclisme et optimisation de la performance: science
et méthodologie de l'entraînement." De Boeck Supérieur, 2009.
Examples
--------
>>> from skcycling.datasets import load_fit
>>> from skcycling.io import bikeread
>>> from skcycling.metrics import training_load_score
>>> ride = bikeread(load_fit()[0])
>>> mpa = 400
>>> tl_score = training_load_score(ride['power'], mpa)
>>> print('Training load score {:.2f}'.format(tl_score))
Training load score 74.90
"""
tls_score = 0.
activity_power = activity_power.resample('1S').mean()
for key in TS_SCALE_GRAPPE.keys():
power_samples = activity_power[
np.bitwise_and(activity_power >= ESIE_SCALE_GRAPPE[key][0] * mpa,
activity_power < ESIE_SCALE_GRAPPE[key][1] * mpa)]
tls_score += power_samples.size / 60 * TS_SCALE_GRAPPE[key]
return tls_score | /scikit_cycling-0.1.3-cp35-cp35m-win32.whl/skcycling/metrics/activity.py | 0.943906 | 0.683014 | activity.py | pypi |
# Authors: Guillaume Lemaitre <[email protected]>
# Cedric Lemaitre
# License: BSD 3 clause
from __future__ import division
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
SAMPLING_WKO = pd.TimedeltaIndex(
['00:00:01', '00:00:05', '00:00:30', '00:01:00', '00:03:00',
'00:03:30', '00:04:00', '00:04:30', '00:05:00', '00:05:30',
'00:06:00', '00:06:30', '00:07:00', '00:10:00', '00:20:00',
'00:30:00', '00:45:00', '01:00:00', '02:00:00', '03:00:00',
'04:00:00'])
def std_dev_squared_error(y_true, y_pred):
"""Compute the standard deviation of the squared error.
Parameters
----------
y_true : ndarray, shape (n_samples,)
Ground truth (correct) target values.
y_pred : ndarray, shape (n_samples,)
Estimated target values.
Returns
-------
std_dev : float
Standard deviation of the squared error.
"""
return np.sqrt(np.sum((y_true - y_pred) ** 2 / (y_true.size - 2)))
def aerobic_meta_model(record_power_profile, time_samples=None):
"""Compute the aerobic metabolism model from the record power-profile.
Read more in the :ref:`User Guide <mpa_estimate>`.
Parameters
----------
record_power_profile : Series
The record power profile from which to extract the aerobic model.
time_samples : TimedeltaIndex or None, optional
The time samples of the record power-profile to take into account. If
None, the sampling of the method of Pinot et al. is applied, which is
equivalent to the sampling from WKO+.
Returns
-------
mpa : float
Maximum Aerobic Power.
t_mpa : Timedelta
Time of the Maximum Aerobic Power.
aei : float
Aerobic Endurance Index.
fit_info_mpa_fitting : dict
This is a dictionary with the information collected about the fitting
related to the MAP. The attributes will be the following:
- `slope`: slope of the linear fitting,
- `intercept`: intercept of the linear fitting,
- `std_err`: standard error of the fitting,
- `coeff_det`: coefficient of determination.
fit_info_aei_fitting : dict
This is a dictionary with the information collected about the fitting
related to the AEI. The attributes will be the following:
- `slope`: slope of the linear fitting,
- `intercept`: intercept of the linear fitting,
- `std_err`: standard error of the fitting,
- `coeff_det`: coefficient of determination.
Notes
-----
The method implemented here follow the work presented in [1]_.
References
----------
.. [1] Pinot et al., "Determination of Maximal Aerobic Power
on the Field in Cycling", Jounal of Science and Cycling, vol. 3(1),
pp. 26-31, 2014.
"""
if time_samples is None:
time_samples = SAMPLING_WKO.copy()
# keep only the time samples available in the record power-profile
mask_time_samples = time_samples < record_power_profile.index.max()
time_samples = time_samples[mask_time_samples]
# to avoid losing data, we will first interpolate the time samples
# using all the data available in the record power-profile before
# to select only the samples required.
ts_union = record_power_profile.index.union(time_samples)
record_power_profile = (record_power_profile.reindex(ts_union)
.interpolate('linear')
.reindex(time_samples))
# only samples between 10 minutes and 4 hours are considered for the
# regression
mask_samples_map = np.bitwise_and(time_samples >= '00:10:00',
time_samples <= '04:00:00')
extracted_profile = record_power_profile.loc[mask_samples_map].values
extracted_time = record_power_profile.loc[mask_samples_map].index.values
extracted_time = np.log(extracted_time /
np.timedelta64(1, 's')).reshape(-1, 1)
ols = LinearRegression()
ols.fit(extracted_time, extracted_profile)
std_fit = std_dev_squared_error(extracted_profile,
ols.predict(extracted_time))
fit_info_mpa_fitting = {
'slope': ols.coef_[0],
'intercept': ols.intercept_,
'std_err': std_fit,
'coeff_det': ols.score(extracted_time, extracted_profile)}
# mpa will be find between 3 minutes and 7 minutes
mask_samples_map = np.bitwise_and(time_samples >= '00:03:00',
time_samples <= '00:10:00')
extracted_profile = record_power_profile.loc[mask_samples_map].values
extracted_time = record_power_profile.loc[mask_samples_map].index.values
extracted_time = np.log(extracted_time /
np.timedelta64(1, 's')).reshape(-1, 1)
aerobic_model = ols.predict(extracted_time)
# find the first value in the 2 * std confidence interval
samples_within = np.abs(extracted_profile - aerobic_model) < 2 * std_fit
if np.count_nonzero(samples_within):
index_mpa = np.flatnonzero(samples_within)[0]
time_mpa = record_power_profile.loc[mask_samples_map].index[index_mpa]
mpa = record_power_profile.loc[mask_samples_map].iloc[index_mpa]
else:
raise ValueError('There is no value entering in the confidence'
' level between 3 and 7 minutes.')
# find aerobic endurance index
mask_samples_aei = np.bitwise_and(time_samples >= time_mpa,
time_samples <= '04:00:00')
extracted_profile = record_power_profile.loc[mask_samples_aei].values
extracted_profile = extracted_profile / mpa * 100
extracted_time = record_power_profile.loc[mask_samples_aei].index.values
extracted_time = np.log(extracted_time /
np.timedelta64(1, 's')).reshape(-1, 1)
ols.fit(extracted_time, extracted_profile)
fit_info_aei_fitting = {
'slope': ols.coef_[0],
'intercept': ols.intercept_,
'std_err': std_fit,
'coeff_det': ols.score(extracted_time, extracted_profile)}
return (mpa, time_mpa, ols.coef_[0],
fit_info_mpa_fitting, fit_info_aei_fitting) | /scikit_cycling-0.1.3-cp35-cp35m-win32.whl/skcycling/metrics/power_profile.py | 0.93638 | 0.803791 | power_profile.py | pypi |
# Authors: Guillaume Lemaitre <[email protected]>
# Cedric Lemaitre
# License: BSD 3 clause
from __future__ import division
import numpy as np
from scipy import constants
from ..extraction import gradient_elevation
from ..extraction import acceleration
def strava_power_model(activity, cyclist_weight, bike_weight=6.8,
coef_roll_res=0.0045, pressure=101325.0,
temperature=15.0, coef_drag=1, surface_rider=0.32,
use_acceleration=False):
"""Strava model used to estimate power.
It corresponds the mathematical formulation which add all forces applied to
a cyclist in movement.
Read more in the :ref:`User Guide <strava>`.
Parameters
----------
activity : DataFrame
The activity containing the ride information.
cyclist_weight : float
The cyclist weight in kg.
bike_weight : float, default=6.8
The bike weight in kg.
coef_roll_res : float, default=0.0045
Rolling resistance coefficient.
pressure : float, default=101325.0
Pressure in Pascal.
temperature : float, default=15.0
Temperature in Celsius.
coef_drag : float, default=1
The drag coefficient also known as Cx.
surface_rider : float, default=0.32
Surface area of the rider facing wind also known as S. The unit is m^2.
use_acceleration : bool, default=False
Either to add the power required to accelerate. This estimation can
become unstable if the acceleration varies for reason which are not
linked to power changes (i.e., braking, bends, etc.)
Returns
-------
power : Series
The power estimated.
References
----------
.. [1] How Strava Calculates Power
https://support.strava.com/hc/en-us/articles/216917107-How-Strava-Calculates-Power
Examples
--------
>>> from skcycling.datasets import load_fit
>>> from skcycling.io import bikeread
>>> from skcycling.model import strava_power_model
>>> ride = bikeread(load_fit()[0])
>>> power = strava_power_model(ride, cyclist_weight=72)
>>> print(power['2014-05-07 12:26:28':
... '2014-05-07 12:26:38']) # Show 10 sec of estimated power
2014-05-07 12:26:28 196.567898
2014-05-07 12:26:29 198.638094
2014-05-07 12:26:30 191.444894
2014-05-07 12:26:31 26.365864
2014-05-07 12:26:32 89.826104
2014-05-07 12:26:33 150.842325
2014-05-07 12:26:34 210.083958
2014-05-07 12:26:35 331.573965
2014-05-07 12:26:36 425.013711
2014-05-07 12:26:37 428.806914
2014-05-07 12:26:38 425.410451
Freq: S, dtype: float64
"""
if 'gradient-elevation' not in activity.columns:
activity = gradient_elevation(activity)
if use_acceleration and 'acceleration' not in activity.columns:
activity = acceleration(activity)
temperature_kelvin = constants.convert_temperature(
temperature, 'Celsius', 'Kelvin')
total_weight = cyclist_weight + bike_weight # kg
speed = activity['speed'] # m.s^-1
power_roll_res = coef_roll_res * constants.g * total_weight * speed
# air density at 0 degree Celsius and a standard atmosphere
molar_mass_dry_air = 28.97 / 1000 # kg.mol^-1
standard_atmosphere = constants.physical_constants[
'standard atmosphere'][0] # Pa
zero_celsius_kelvin = constants.convert_temperature(
0, 'Celsius', 'Kelvin') # 273.15 K
air_density_ref = (
(standard_atmosphere * molar_mass_dry_air) /
(constants.gas_constant * zero_celsius_kelvin)) # kg.m^-3
air_density = air_density_ref * (
(pressure * zero_celsius_kelvin) /
(standard_atmosphere * temperature_kelvin)) # kg.m^-3
power_wind = 0.5 * air_density * surface_rider * coef_drag * speed**3
slope = activity['gradient-elevation'] # grade
power_gravity = (total_weight * constants.g *
np.sin(np.arctan(slope)) * speed)
power_total = power_roll_res + power_wind + power_gravity
if use_acceleration:
acc = activity['acceleration'] # m.s^-1
power_acceleration = total_weight * acc * speed
power_total = power_total + power_acceleration
return power_total.clip(0) | /scikit_cycling-0.1.3-cp35-cp35m-win32.whl/skcycling/model/power.py | 0.940939 | 0.582491 | power.py | pypi |
===============================
SciKit Data
===============================
.. image:: https://img.shields.io/pypi/v/scikit-data.svg
:target: https://pypi.python.org/pypi/scikit-data
.. image:: https://img.shields.io/travis/OpenDataScienceLab/skdata.svg
:target: https://travis-ci.org/OpenDataScienceLab/skdata
.. image:: https://readthedocs.org/projects/skdata/badge/?version=latest
:target: https://skdata.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
Conda package current release info
==================================
.. image:: https://anaconda.org/conda-forge/scikit-data/badges/version.svg
:target: https://anaconda.org/conda-forge/scikit-data
:alt: Anaconda-Server Badge
.. image:: https://anaconda.org/conda-forge/scikit-data/badges/downloads.svg
:target: https://anaconda.org/conda-forge/scikit-data
:alt: Anaconda-Server Badge
About SciKit Data
=================
The propose of this library is to allow the data analysis process more easy and automatic.
The data analysis process is composed of following steps:
* The statement of problem
* Collecting your data
* Cleaning the data
* Normalizing the data
* Transforming the data
* Exploratory statistics
* Exploratory visualization
* Predictive modeling
* Validating your model
* Visualizing and interpreting your results
* Deploying your solution
(Cuesta, Hector and Kumar, Sampath; 2016)
This project contemplates the follow features:
* Data Preparation
* Data Exploration
* Prepare data to Predictive modeling
* Visualizing results
* Reproducible data analysis
Data Preparation
----------------
Data preparation is about how to obtain, clean, normalize, and transform the data into an
optimal dataset, trying to avoid any possible data quality issues such as invalid, ambiguous,
out-of-range, or missing values.
(...)
Scrubbing data, also called data cleansing, is the process of correcting or
removing data in a dataset that is incorrect, inaccurate, incomplete,
improperly formatted, or duplicated.
(...)
In order to avoid dirty data, our dataset should possess the following characteristics:
* Correct
* Completeness
* Accuracy
* Consistency
* Uniformity
(...)
**Data transformation**
Data transformation is usually related to databases and data warehouses where values from
a source format are extract, transform, and load in a destination format.
Extract, Transform, and Load (ETL) obtains data from various data sources, performs some
transformation functions depending on our data model, and loads the resulting data into
the destination.
(...)
Some important transformations:
* Text facet and Clustering
* Numeric fact
* Replace
**Data reduction methods**
Data reduction is the transformation of numerical or alphabetical digital information
derived empirically or experimentally into a corrected, ordered, and simplified form.
Reduced data size is very small in volume and comparatively original, hence, the storage
efficiency will increase and at the same time we can minimize the data handling costs and
will minimize the analysis time also.
We can use several types of data reduction methods, which are listed as follows:
* Filtering and sampling
* Binned algorithm
* Dimensionality reduction
(Cuesta, Hector and Kumar, Sampath; 2016)
Data exploration
----------------
Data exploration is essentially looking at the processed data in a graphical or statistical form
and trying to find patterns, connections, and relations in the data. Visualization is used to
provide overviews in which meaningful patterns may be found.
(...)
The goals of exploratory data analysis (EDA) are as follows:
* Detection of data errors
* Checking of assumptions
* Finding hidden patters (like tendency)
* Preliminary selection of appropriate models
* Determining relationships between the variables
(...)
The four types of EDA are univariate nongraphical, multivariate nongraphical, univariate
graphical, and multivariate graphical. The nongraphical methods refer to the calculation of
summary statistics or the outlier detection. In this book, we will focus on the univariate and
(Cuesta, Hector and Kumar, Sampath; 2016)
**Outlier Detection**
Two outlier detection method should be used, initially, for SkData are:
* IQR;
* Chauvenet.
Another methods should be implemented soon [1].
Prepare data to Predictive modeling
-----------------------------------
From the galaxy of information we have to extract usable hidden patterns and trends using
relevant algorithms. To extract the future behavior of these hidden patterns, we can use
predictive modeling. Predictive modeling is a statistical technique to predict future
behavior by analyzing existing information, that is, historical data. We have to use proper
statistical models that best forecast the hidden patterns of the data or
information (Cuesta, Hector and Kumar, Sampath; 2016).
SkData, should allow you to format your data to send it to some predictive library
as scikit-learn.
Visualizing results
-------------------
In an explanatory data analysis process, simple visualization techniques are very useful for
discovering patterns, since the human eye plays an important role. Sometimes, we have to
generate a three-dimensional plot for finding the visual pattern. But, for getting better
visual patterns, we can also use a scatter plot matrix, instead of a three-dimensional plot. In
practice, the hypothesis of the study, dimensionality of the feature space, and data all play
important roles in ensuring a good visualization technique (Cuesta, Hector and Kumar, Sampath; 2016).
Quantitative and Qualitative data analysis
------------------------------------------
Quantitative data are numerical measurements expressed in terms of numbers.
Qualitative data are categorical measurements expressed in terms of natural language
descriptions.
Quantitative analytics involves analysis of numerical data. The type of the analysis will
depend on the level of measurement. There are four kinds of measurements:
* Nominal data has no logical order and is used as classification data.
* Ordinal data has a logical order and differences between values are not constant.
* Interval data is continuous and depends on logical order. The data has standardized differences between values, but do not include zero.
* Ratio data is continuous with logical order as well as regular intervals differences between values and may include zero.
Qualitative analysis can explore the complexity and meaning of social phenomena. Data for
qualitative study may include written texts (for example, documents or e-mail) and/or
audible and visual data (digital images or sounds).
(Cuesta, Hector and Kumar, Sampath; 2016)
Reproducibility for Data Analysis
---------------------------------
A good way to promote reproducibility for data analysis is store the
operation history. This history can be used to prepare another dataset
with the same steps (operations).
Books used as reference to guide this project:
----------------------------------------------
- https://www.packtpub.com/big-data-and-business-intelligence/clean-data
- https://www.packtpub.com/big-data-and-business-intelligence/python-data-analysis
- https://www.packtpub.com/big-data-and-business-intelligence/mastering-machine-learning-scikit-learn
- https://www.packtpub.com/big-data-and-business-intelligence/practical-data-analysis-second-edition
Some other materials used as reference:
---------------------------------------
- https://github.com/rsouza/MMD/blob/master/notebooks/3.1_Kaggle_Titanic.ipynb
- https://github.com/agconti/kaggle-titanic/blob/master/Titanic.ipynb
- https://github.com/donnemartin/data-science-ipython-notebooks/blob/master/kaggle/titanic.ipynb
Installing scikit-data
======================
Using conda
-----------
Installing `scikit-data` from the `conda-forge` channel can be achieved by adding `conda-forge` to your channels with:
.. code-block:: console
$ conda config --add channels conda-forge
Once the `conda-forge` channel has been enabled, `scikit-data` can be installed with:
.. code-block:: console
$ conda install scikit-data
It is possible to list all of the versions of `scikit-data` available on your platform with:
.. code-block:: console
$ conda search scikit-data --channel conda-forge
Using pip
---------
To install scikit-data, run this command in your terminal:
.. code-block:: console
$ pip install skdata
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
More Information
----------------
* License: MIT
* Documentation: https://skdata.readthedocs.io
References
----------
* CUESTA, Hector; KUMAR, Sampath. Practical Data Analysis. Packt Publishing Ltd, 2016.
**Electronic materials**
* [1] http://www.datasciencecentral.com/profiles/blogs/introduction-to-outlier-detection-methods
| /scikit-data-0.1.3.tar.gz/scikit-data-0.1.3/README.rst | 0.946609 | 0.878991 | README.rst | pypi |
from functools import reduce
# local
from .cleaning import *
import json
import numpy as np
import pandas as pd
class StepSkData:
parent = None
def __init__(self, parent: 'SkDataSet'):
"""
:param parent:
"""
self.parent = parent
def compute(
self, start: int = None, end: int = None,
steps_id: list = None
) -> pd.DataFrame:
"""
:param start:
:param end:
:param steps_id:
:return:
"""
dset = self.parent.parent.data[self.parent.iid]
try:
index_col = dset.attrs['index']
except:
index_col = None
keys = tuple(
k for k in dset.dtype.names[:]
if k not in [index_col]
)
params = {}
if index_col is not None:
params['index'] = dset[index_col]
df = pd.DataFrame(dset[keys], **params)
for k in df.keys():
if df[k].dtype == pd.api.types.pandas_dtype('O'):
df[k] = df[k].str.decode("utf-8")
df[k].replace(
dset.attrs['null_string'], np.nan, inplace=True
)
steps = self.parent.attr_load(attr='steps', default=[])
if steps_id is not None:
_steps = [s for i, s in enumerate(steps) if i in steps_id]
else:
_steps = steps[start:end]
for step in _steps:
df = self.expr(df, step)
return df
def export_steps(self, file_path: str, mode: str = 'a'):
"""
:param file_path:
:param mode: [a]ppend|[w]rite
:return:
"""
pass
@staticmethod
def expr(data: pd.DataFrame, step: str):
# aliases
op = step['operation']
k = step['column'] if 'column' in step else None
k_new = k if 'new-column' not in step else step['new-column']
c_expr = step['expression']
if op == 'text-transform':
f_expr = eval('lambda value: %s' % c_expr)
data[k_new] = data[k].apply(f_expr)
elif op == 'categorize':
params = dict(data=data, col_name=k, categories=eval(c_expr))
params.update(
{'new_col_name': k_new} if 'new-column' in step else {}
)
categorize(**params)
elif op == 'fill-na':
fill = c_expr
if c_expr in ['mean', 'max', 'min', 'median']:
fill = data.eval('%s.%s()' % (k, c_expr))
data[k].fillna(fill, inplace=True)
elif op == 'drop-na':
params = eval(c_expr)
dropna(data, **params)
elif op == 'drop-unique':
params = eval(c_expr)
drop_columns_with_unique_values(data, **params)
return data
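    # Example step specifications handled by expr (column names and values are
    # illustrative):
    #   {'operation': 'fill-na', 'column': 'age', 'expression': 'mean'}
    #   {'operation': 'text-transform', 'column': 'name',
    #    'new-column': 'name_upper', 'expression': 'value.upper()'}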
    def import_steps(self, file_path: str, mode: str = 'a'):
        """
        :param file_path: path of the JSON file to read the steps from
        :param mode: [a]ppend|[w]rite
        :return:
        """
        # json.load expects a file object, so open the path first
        with open(file_path) as json_file:
            steps_json = json.load(json_file)
def replace(value: str, replace_dict: dict):
"""
"""
if not isinstance(value, str):
return value
return reduce(
lambda x, y: x.replace(y, replace_dict[y]), replace_dict, value
) | /scikit-data-0.1.3.tar.gz/scikit-data-0.1.3/skdata/steps.py | 0.547464 | 0.368406 | steps.py | pypi |
<p align="left">
<img alt="Scikit Data Access" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/skdaccess_logo360x100.png"/>
</p>
- Import scientific data from various sources through one easy Python API.
- Use iterator patterns for each data source (configurable data generators + functions to get next data chunk).
- Skip parser programming and file format handling.
- Enjoy a common namespace for all data and unleash the power of data fusion.
- Handle data distribution in different modes: (1) local download, (2) caching of accessed data, or (3) online stream access.
- Easily pull data on cloud servers through Python scripts and facilitate large-scale parallel processing.
- Build on an extensible platform: Adding access to a new data source only requires addition of its "DataFetcher.py".
- Open source (MIT License)
<p align="center">
<img alt="Scikit Data Access Overview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/skdaccess_overviewdiag.png" width="810"/>
</p>
Supported data sets:
<table>
<tr>
<td>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200>
<p>Namespace</p>
</td>
<!-- preview -->
<td width=63>
<p><span>Preview<br><sup>(link)</sup></span></p>
</td>
<!-- description -->
<td width=500>
<p><span>Description & Data Source</span></p>
</td>
</tr>
<!--- HEADER ENTRY ---------------------------------->
<tr>
<td colspan=4><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_astro.png> Astronomy
</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200>
<sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_astro.png>
astro.kepler
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Kepler.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.astro.kepler.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> <sup> Light curves for stars imaged by the NASA Kepler Space Telescope <br>Source: https://keplerscience.arc.nasa.gov</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200>
<sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_astro.png>
astro.spectra
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_SDSS_Spectra.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.astro.spectra.png"/></a>
</td>
<!-- description -->
<td width=500>
<sup> Spectra from the Sloan Digital Sky Survey <br>Source: https://www.sdss.org/dr14/spectro/ </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200>
<sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_astro.png>
astro.tess.data
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_TESS_Data_Alerts.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.astro.tess.data.png"/></a>
</td>
<!-- description -->
<td width=500>
<sup> Light curves from TESS Data Alerts <br>Source: https://archive.stsci.edu/prepds/tess-data-alerts/ </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200>
<sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_astro.png>
astro.tess.simulated
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_TESS_Simulated_Data.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.astro.tess.simulated.png"/></a>
</td>
<!-- description -->
<td width=500>
<sup> Simulated light curves from TESS End-to-End 6 <br>Source: https://archive.stsci.edu/prepds/tess-data-alerts/ </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_astro.png> astro.voyager
</sup>
</td>
<!-- preview -->
<td width=63><sup>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Voyager.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.astro.voyager.png"/></a>
</sup>
</td>
<!-- description -->
<td width=500>
<sup>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> Data from the Voyager mission. <br> Source: https://spdf.gsfc.nasa.gov/
</sup>
</td>
</tr>
<!--- HEADER ENTRY ---------------------------------->
<tr>
<td colspan=4><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_engineering.png> Engineering
</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_engineering.png> engineering.la.traffic_counts
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Traffic_Counts.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.engineering.la.traffic_counts.png"/></a>
</td>
<!-- description -->
<td width=500><sup>
Traffic Count data in Los Angeles. <br> Source: https://data.lacity.org/A-Livable-and-Sustainable-City/LADOT-Traffic-Counts-Summary/94wu-3ps3
</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=250><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_engineering.png> engineering.webcam.mit_sailing
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Webcam_MIT_Sailing.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.engineering.webcam.mit_sailing.png"/></a>
</td>
<!-- description -->
<td width=500><sup>
Images from webcams located at the MIT Sailing Pavilion <br> Source: http://sailing.mit.edu/webcam.php
</sup>
</td>
</tr>
<!--- HEADER ENTRY ---------------------------------->
<tr>
<td colspan=4><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_finance.png> Finance
</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_finance.png> finance.timeseries
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Finance_Time_Series.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.finance.timeseries.png"/></a>
</td>
<!-- description -->
<td width=500><sup>
Financial time series data retrieved using Alpha Vantage API. <br> Source: https://www.alphavantage.co/
</sup>
</td>
</tr>
<!--- HEADER ENTRY ---------------------------------->
<tr>
<td colspan=4><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> Geoscience
</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200>
<sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.era_interim
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_ERA_Interim.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.era_interim.png"/></a>
</td>
<!-- description -->
<td width=500><sup>
Era-Interim data at different pressure values from <br/> the European Centre for Medium-Range Weather Forecasts accessed through the University Corporation for Atmospheric Research. <br> Source: https://rda.ucar.edu/datasets/ds627.0/
</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.gldas
</sup>
</td>
<!-- preview -->
<td width=63><a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_GLDAS.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.gldas.png"/></a>
</td>
<!-- description -->
<td width=500><img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> <sup> Land hydrology model produced by NASA. This version of the data is generated to match the GRACE temporal and spatial characteristics and is available as a complementary data product. <br> Source: https://grace.jpl.nasa.gov/data/get-data/land-water-content </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.grace
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_GRACE.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.grace.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> <sup> NASA GRACE Tellus Monthly Mass Grids. 30-day measurements of changes in Earth’s gravity field to quantify equivalent water thickness. <br> Source: https://grace.jpl.nasa.gov/data/get-data/monthly-mass-grids-land </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.grace.mascon
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_GRACE_Mascon.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.grace.mascon.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> <sup> NASA GRACE Tellus Monthly Mass Grids - Global Mascons. 30-day measurements of changes in Earth’s gravity field to quantify equivalent water thickness. Source: https://grace.jpl.nasa.gov/data/get-data/jpl_global_mascons </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.groundwater
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Groundwater.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.groundwater.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_usgs.png" /> <sup> United States groundwater monitoring wells measuring the depth to water level. Source: https://waterservices.usgs.gov </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.magnetometer
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Magnetometer.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.magnetometer.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_usgs.png" /> <sup> Data collected at magnetic observatories operated by the U.S. Geological Survey. Source: https://geomag.usgs.gov</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.mahali.rinex
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Mahali_Rinex.ipynb"> <img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.mahali.rinex.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_mit.png" /> <img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nsf.png" /> <sup> Rinex files from the MIT led NSF project studying the Earth’s ionosphere with GPS. <br> Web: http://mahali.mit.edu </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.mahali.tec
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Mahali_TEC.ipynb"> <img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.mahali.tec.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_mit.png" /> <img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nsf.png" /> <sup> Total Electron Content from the MIT led NSF project studying the Earth’s ionosphere with GPS. <br> Web:http://mahali.mit.edu </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.mahali.temperature
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Mahali_Temperature.ipynb"> <img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.mahali.temperature.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_mit.png" /> <img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nsf.png" /> <sup> Temperature data from the MIT led NSF project studying the Earth’s ionosphere with GPS. <br>Web: http://mahali.mit.edu </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.modis
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_MODIS.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.modis.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> <sup> Spectroradiometer aboard the NASA Terra and Aqua image satellites. Generates approximately daily images of the Earth’s surface.<br> Source:https://modis.gsfc.nasa.gov </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.pbo
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_PBO.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.pbo.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_unavco.png" /> <sup> EarthScope - Plate Boundary Observatory (PBO): Daily GPS displacement time series measurements throughout the United States.<br>Source: http://www.unavco.org/projects/major-projects/pbo/pbo.html </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.sentinel_1
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Sentinel_1.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.sentinel_1.png"/></a>
</td>
<!-- description -->
<td width=500>
<sup><img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_esa.png" /> Sentinel-1 TOPSAR data from the European Space Agency retrieved from the Alaska Satellite Facility.<br>Source:https://www.asf.alaska.edu/ </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.srtm
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_SRTM.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.srtm.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> <img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_usgs.png" /> <sup> Elevation data at a one arc second resolution from the Shuttle Radar Topography Mission (SRTMGL1).<br>Source: https://lpdaac.usgs.gov/dataset_discovery/measures/measures_products_table/srtmgl1_v003 </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.uavsar
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_UAVSAR.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.uavsar.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> <sup> UAVSAR SLC data from JPL.<br>Source: https://uavsar.jpl.nasa.gov/ </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.wyoming_sounding
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Wyoming_Sounding.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.wyoming_sounding.png"/></a>
</td>
<!-- description -->
<td width=500>
<sup> Sounding data from the University of Wyoming.<br>Source: http://weather.uwyo.edu/upperair/sounding.html </sup>
</td>
</tr>
<!--- HEADER ENTRY ---------------------------------->
<tr>
<td colspan=4><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_planetary.png> Planetary Science
</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_planetary.png> planetary.ode
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_ODE.ipynb"> <img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.planetary.ode.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> <sup> Mars planetary data from PDS Geosciences Node's Orbital Data Explorer.<br>Source: http://pds-geosciences.wustl.edu/default.htm</sup>
</td>
</tr>
<!--- HEADER ENTRY ---------------------------------->
<tr>
<td colspan=4><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_solar.png> Solar Science
</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_solar.png> solar.sdo
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_SDO.ipynb"> <img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.solar.sdo.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> <sup> Images from the Solar Dynamics Observatory.<br>Source: https://sdo.gsfc.nasa.gov/</sup>
</td>
</tr>
</table>
### Install
```bash
pip install scikit-dataaccess
```
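A minimal usage sketch of the data fetcher / iterator pattern described at the top of this README, adapted from the bundled `Demo_PBO.ipynb` notebook (the station region, date range, and `mdyratio` value are illustrative):

```python
from skdaccess.framework.param_class import AutoList
from skdaccess.geo.pbo import DataFetcher as PBO_DF

# Latitude/longitude ranges around Akutan volcano (values from the PBO demo)
lat_range = AutoList((54, 54.25))
lon_range = AutoList((-166, -165.6))

# Configure the fetcher, then ask it for a data wrapper
pbo_fetcher = PBO_DF('2006-01-01', '2015-06-01', [lat_range, lon_range], mdyratio=.7)
data_wrapper = pbo_fetcher.output()

# The wrapper holds one pandas DataFrame per GPS station
pbo_data = data_wrapper.get()
print(pbo_data['AV06']['dN'].head())

# Or iterate over (label, DataFrame) pairs instead of indexing by station
for label, data in data_wrapper.getIterator():
    print(label, len(data))
```

The data fetchers listed above follow the same general pattern: wrap the query parameters in `param_class` objects, call `output()` to obtain a data wrapper, and work with the returned pandas data structures. The demo notebooks linked in the table show the dataset-specific details.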
### Documentation
- User Manual: [/docs/skdaccess_manual.pdf](https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/docs/skdaccess_manual.pdf)<br>
- Code documentation (Doxygen): [/docs/skdaccess_doxygen.pdf](https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/docs/skdaccess_doxygen.pdf)
- Code visualization (treemap): [/docs/skdaccess_treemap.png](https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/docs/skdaccess_treemap.png)
- Code class diagrams: [/docs/class_diagrams](https://github.com/MITHaystack/scikit-dataaccess/tree/master/skdaccess/docs/class_diagrams)
### Contributors
Project lead: [Victor Pankratius (MIT)](http://www.victorpankratius.com)<br>
Contributors: Cody M. Rude, Justin D. Li, David M. Blair, Michael G. Gowanlock, Guillaume Rongier, Victor Pankratius
New contributors welcome! Contact <img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/skdaccess_cont.png" /> to contribute and add interface code for your own datasets :smile:
### Acknowledgements
We acknowledge support from NASA AIST14-NNX15AG84G, NASA AIST16-80NSSC17K0125, NSF ACI-1442997, and NSF AGS-1343967.
### Examples
Code examples (Jupyter notebooks) for all datasets listed above are available at: [/skdaccess/examples](https://github.com/MITHaystack/scikit-dataaccess/tree/master/skdaccess/examples)
<p align="center">
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/docs/images/skdaccess-quickexamples-combined.png">
<img alt="Scikit Data Access Overview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/skdaccess-quickexamples-combined.png"/>
</a>
</p>
| /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/README.md | 0.613005 | 0.765987 | README.md | pypi |
# Skdaccess imports
from skdaccess.framework.data_class import DataFetcherCache, TableWrapper
from skdaccess.framework.param_class import *
# Standard library imports
from collections import OrderedDict
import re
# 3rd party imports
import pandas as pd
class DataFetcher(DataFetcherCache):
'''
    Data Fetcher for Voyager spacecraft data
'''
def __init__(self, start_year, end_year, spacecraft='both'):
'''
Initialize Voyager data fetcher
@param start_year: Starting year
@param end_year: Ending year
        @param spacecraft: Which spacecraft to use ('voyager1', 'voyager2', or 'both').
'''
# Generate list of years for retrieving data
self.year_list = list(range(start_year, end_year+1))
# Create a list of spacecraft data to download
if spacecraft not in ('voyager1', 'voyager2', 'both'):
raise RuntimeError('Spacecraft not understood')
if spacecraft == 'both':
self.spacecraft_list = ['voyager1', 'voyager2']
else:
self.spacecraft_list = [spacecraft]
# Field names for parsing data
self.field_names = [
'Year', 'Day', 'Hour', 'Distance', 'Latitude', 'Longitude',
'Field_Magnitude_Average', 'Magnitude_of_Average_Field', 'BR', 'BT',
'BN', 'Flow_Speed', 'Theta', 'Phi', 'Proton_Density',
'Proton_Temperature', 'LECP_1', 'LECP_2', 'LECP_3', 'CRS_1', 'CRS_2',
'CRS_3', 'CRS_4', 'CRS_5', 'CRS_6', 'CRS_7', 'CRS_8', 'CRS_9',
'CRS_10', 'CRS_11', 'CRS_12', 'CRS_13', 'CRS_14', 'CRS_15', 'CRS_16',
'CRS_17', 'CRS_18',
]
# Field widths as the data is fixed width format
self.field_widths = [
4, 4, 3, 7, 7, 7, 8, 8, 8, 8, 8, 7, 7, 7, 9, 9,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10
]
# Base data location url
self.base_url = 'https://spdf.gsfc.nasa.gov/pub/data/voyager/'
super(DataFetcher, self).__init__([])
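    # Example usage (illustrative year range):
    #   voyager_fetcher = DataFetcher(1977, 1980, spacecraft='voyager1')
    #   data_wrapper = voyager_fetcher.output()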
def generateURL(self, spacecraft, in_year):
'''
Generate url for voyager data
        @param spacecraft: Voyager spacecraft ('voyager1' or 'voyager2')
@param in_year: Input year (or 'metadata')
@return Url of data location
'''
num = spacecraft[-1]
url = self.base_url + 'voyager' + num + '/merged/'
if in_year == 'metadata':
url = url + 'vy' + num + 'mgd.txt'
else:
url = url + 'vy' + num + '_' + str(in_year) + '.asc'
return url
def parseVoyagerData(self, spacecraft, in_filename):
'''
Parse Voyager Data
        @param spacecraft: Voyager spacecraft ('voyager1' or 'voyager2')
@param in_filename: Input voyager data filename
@return Pandas Dataframe of Voyager data
'''
def convert_date(year, day, hour):
'''
Convert to datetime
@param year: Input year
@param day: Input day
@param hour: Input hour
@return datetime
'''
return pd.to_datetime("{0:0>4}{1:0>3}{2:0>2}".format(year,day,hour), format='%Y%j%H')
# Voyager 1 has 3 less columns than Voyager 2
if spacecraft == 'voyager1':
field_widths = self.field_widths[:34]
field_names = self.field_names[:34]
else:
field_widths = self.field_widths
field_names = self.field_names
# Parse the data
data = pd.read_fwf(in_filename, widths=field_widths, header=None, names=field_names)
# Create date column
data['Date'] = list(map(convert_date,
data.loc[:,'Year'],
data.loc[:,'Day'],
data.loc[:,'Hour']))
data.set_index('Date', inplace=True)
return data
def parseVoyagerMetadata(self, in_file):
''' Parse voyager metadata
@param in_file: Input filename
@return Dictionary containing metadata
'''
with open(in_file,'r',errors='ignore') as metafile:
lines = metafile.readlines()
lines = [line.rstrip() for line in lines]
start_index = -1
end_index = -1
prev_line = None
for index, line in enumerate(lines):
if re.search('FORMAT DESCRIPTION',line):
start_index = index+4
if prev_line == '' and line == '' and start_index > -1:
end_index = index - 2
break
prev_line = line
description_data = lines[start_index:end_index+1]
field_index = 0
description_dict = OrderedDict()
for line in description_data:
            if re.search(r'\s+[0-9]+', line[:6]):
                info = re.split(r'\s\s+', line)[1:]
key = self.field_names[field_index]
description_dict[key] = OrderedDict()
description_dict[key]['MEANING'] = info[2]
description_dict[key]['UNITS/COMMENTS'] = info[3]
field_index += 1
elif line.strip() != '':
description_dict[key]['MEANING'] = description_dict[key]['MEANING'] + ' ' + line.strip()
return description_dict
def getMetadataFiles(self):
'''
Get path to metadata file
Metadata will download if necessary
@return List containing file path(s) for the metadata
'''
urls = [self.generateURL(spacecraft, 'metadata') for spacecraft in self.spacecraft_list]
return self.cacheData('voyager', urls)
def output(self):
'''
Generate data wrapper
@return data wrapper of voyager data
'''
# Generate url_list
url_list = []
for spacecraft in self.spacecraft_list:
url_list += [self.generateURL(spacecraft, 'metadata')]
url_list += [self.generateURL(spacecraft, year) for year in self.year_list]
full_filenames = self.cacheData('voyager', url_list)
num_files = len(self.year_list) + 1
# Parse downloaded data
data_dict = OrderedDict()
metadata_dict = OrderedDict()
for index, spacecraft in enumerate(self.spacecraft_list):
# Need to select data for this spacecraft
filenames = full_filenames[num_files * index : num_files * (1+index)]
# parse data
metadata_dict[spacecraft] = self.parseVoyagerMetadata(filenames[0])
data_list = [self.parseVoyagerData(spacecraft, filename) for filename in filenames[1:]]
data_dict[spacecraft] = pd.concat(data_list)
return TableWrapper(data_dict, meta_data = metadata_dict, default_columns = ['BR','BT','BN']) | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/astro/voyager/data_fetcher.py | 0.680348 | 0.420243 | data_fetcher.py | pypi |
# mithagi required Base,Utils imports
from skdaccess.framework.data_class import DataFetcherCache, TableWrapper
from skdaccess.utilities.tess_utils import parseTessData
# Standard library imports
from collections import OrderedDict
# Third party imports
from astropy.io import fits
from astropy.table import Table
import pandas as pd
import numpy as np
class DataFetcher(DataFetcherCache):
''' Data Fetcher for TESS data alerts '''
def __init__(self, ap_paramList, toi_information):
'''
Initialize TESS Data Fetcher
@param ap_paramList[tess_ids]: List of TESS IDs to retrieve
@param toi_information: Pandas dataframe containing target information
'''
self.toi_information = toi_information
super(DataFetcher, self).__init__(ap_paramList)
def getTargetInformation():
"""
Retrieve Target list information
@return Target information list
"""
pass
def generateURLFromTID(self, tid_list):
"""
Generate URL from TID
@param tid_list: Input Tess ID list
@return URL List of of objects in tid_list
"""
pass
def output(self):
"""
Retrieve Tess data
@return TableWrapper containing TESS lightcurves
"""
tid_series = pd.Series([int(tid) for tid in self.ap_paramList[0]()])
tid_string_list = [str(tid).zfill(16) for tid in tid_series]
        # isin returns True for IDs that are present in the target list
        tid_found = tid_series.isin(self.toi_information['tic_id'])
        if np.count_nonzero(~tid_found) > 0:
            raise RuntimeError("No data for TID: " + str(tid_series[~tid_found].tolist()))
url_list = self.generateURLFromTID(tid_string_list)
file_list = self.cacheData('tess', url_list)
data_dict = OrderedDict()
metadata_dict = OrderedDict()
for filename, tid in zip(file_list, tid_string_list):
fits_data = fits.open(filename)
data_dict[tid], metadata_dict[tid] = parseTessData(fits_data)
return TableWrapper(data_dict, meta_data = metadata_dict) | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/astro/tess/generic/cache.py | 0.636805 | 0.297285 | cache.py | pypi |
# """@package Kepler
# Provides classes for accessing Kepler data.
# """
# mithagi required Base,Utils imports
from skdaccess.framework.data_class import DataFetcherCache, TableWrapper
from skdaccess.utilities.file_util import openPandasHDFStoreLocking
# Standard library imports
import re
import glob
import os
from collections import OrderedDict
from ftplib import FTP
from io import BytesIO
from tarfile import TarFile
# 3rd party package imports
import pandas as pd
import numpy as np
from astropy.table import Table
from astropy.io import fits
class DataFetcher(DataFetcherCache):
''' Data Fetcher for Kepler light curve data '''
def __init__(self, ap_paramList, quarter_list=None):
'''
Initialize Kepler Data Fetcher
@param ap_paramList[kepler_id_list]: List of kepler id's
@param quarter_list: List of quarters (0-17) (default: all quarters)
'''
self.quarter_list = quarter_list
super(DataFetcher, self).__init__(ap_paramList)
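    # Example usage (illustrative Kepler ID and quarters; AutoList comes from
    # skdaccess.framework.param_class):
    #   kepler_fetcher = DataFetcher([AutoList(['009941662'])], quarter_list=[1, 2])
    #   data_wrapper = kepler_fetcher.output()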
def _getKeplerFilePath(self):
'''
Get the path to the Kepler HDF file
This helper function is for backwards compatibility as data
locations for cached data are now all directories.
@return String containing the path to the Kepler HDF file
'''
data_location = DataFetcher.getDataLocation('kepler')
if os.path.split(data_location)[1] == 'kepler_data.h5':
data_file_name = data_location
else:
data_file_name = os.path.join(data_location, 'kepler_data.h5')
data_file_directory = os.path.split(data_file_name)[0]
if not os.path.isdir(data_file_directory):
os.makedirs(data_file_directory, exist_ok=True)
return data_file_name
def downloadKeplerData(self, kid_list):
'''
Download and parse Kepler data for a list of kepler id's
@param kid_list: List of Kepler ID's to download
@return dictionary of kepler data
'''
return_data = dict()
# connect to ftp server
ftp = FTP('archive.stsci.edu')
ftp.login()
# For each kepler id, download the appropriate data
for kid in kid_list:
ftp.cwd('/pub/kepler/lightcurves/' + kid[0:4] + '/' + kid)
file_list = ftp.nlst()
filename = None
for file in file_list:
match = re.match('kplr' + kid + '_lc_.*',file)
if match:
filename = match.group(0)
break
bio = BytesIO()
ftp.retrbinary('RETR ' + filename, bio.write)
bio.seek(0)
            # Read tar file
            tfile = TarFile(fileobj=bio)
            member_list = tfile.getmembers()
# Extract data from tar file
data_list = []
for member in member_list:
file = tfile.extractfile(member)
fits_data = fits.open(file)
data = Table(fits_data[1].data).to_pandas()
data.set_index('CADENCENO',inplace=True)
data.loc[:,'QUARTER'] = fits_data[0].header['QUARTER']
data_list.append(data)
full_data = pd.concat(data_list)
return_data[kid] = full_data
try:
ftp.quit()
        except Exception:
ftp.close()
return return_data
def cacheData(self, data_specification):
'''
Cache Kepler data locally
@param data_specification: List of kepler IDs
'''
kid_list = data_specification
data_location = self._getKeplerFilePath()
store = openPandasHDFStoreLocking(data_location, 'a')
missing_kid_list = []
for kid in kid_list:
if 'kid_' + kid not in store:
missing_kid_list.append(kid)
if len(missing_kid_list) > 0:
print("Downloading data for " + str(len(missing_kid_list)) + " star(s)")
missing_kid_data = self.downloadKeplerData(missing_kid_list)
for kid,data in missing_kid_data.items():
store.put('kid_' + kid, data)
store.close()
def output(self):
'''
Output kepler data wrapper
@return DataWrapper
'''
kid_list = self.ap_paramList[0]()
kid_list = [ str(kid).zfill(9) for kid in kid_list ]
self.cacheData(kid_list)
data_location = self._getKeplerFilePath()
kid_data = dict()
store = openPandasHDFStoreLocking(data_location, 'r')
for kid in kid_list:
kid_data[kid] = store['kid_' + kid]
# If downloaded using old skdaccess version
# switch index
if kid_data[kid].index.name == 'TIME':
kid_data[kid]['TIME'] = kid_data[kid].index
kid_data[kid].set_index('CADENCENO', inplace=True)
store.close()
kid_data = OrderedDict(sorted(kid_data.items(), key=lambda t: t[0]))
# If a list of quarters is specified, only select data in those quarters
        if self.quarter_list is not None:
for kid in kid_list:
kid_data[kid] = kid_data[kid][kid_data[kid]['QUARTER'].isin(self.quarter_list)]
return TableWrapper(kid_data, default_columns = ['PDCSAP_FLUX'], default_error_columns = ['PDCSAP_FLUX_ERR']) | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/astro/kepler/data_fetcher.py | 0.602529 | 0.340266 | data_fetcher.py | pypi |
The MIT License (MIT)<br>
Copyright (c) 2016,2017 Massachusetts Institute of Technology<br>
Authors: Justin Li, Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
%matplotlib notebook
import matplotlib.pyplot as plt
```
Land hydrology model produced by NASA<br>
https://grace.jpl.nasa.gov/data/get-data/land-water-content/
```
from skdaccess.geo.gldas import DataFetcher as GLDAS_DF
from skdaccess.framework.param_class import *
geo_point = AutoList([(38, -117)]) # location in Nevada
gldas_fetcher = GLDAS_DF([geo_point],start_date='2010-01-01',end_date='2014-01-01')
data_wrapper = gldas_fetcher.output() # Get a data wrapper
label, data = next(data_wrapper.getIterator()) # Get GLDAS data
data.head()
plt.plot(data['Equivalent Water Thickness (cm)']);
plt.xticks(rotation=15);
```
| /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_GLDAS.ipynb | 0.514644 | 0.564459 | Demo_GLDAS.ipynb | pypi |
The MIT License (MIT)<br>
Copyright (c) 2016,2017 Massachusetts Institute of Technology<br>
Authors: Justin Li, Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
# Plate Boundary Observatory GPS Data
# Source: http://www.unavco.org/instrumentation/networks/status/pbo
# Time series data for GPS sensors (North, East, Up), displacement in meters versus time
from skdaccess.geo.pbo import DataFetcher as PBO_DF
from skdaccess.framework.param_class import *
%matplotlib notebook
import matplotlib.pyplot as plt
# Latitude and Longitude range around Akutan Volcano
lat_range = AutoList((54,54.25))
lon_range = AutoList((-166, -165.6))
start_time = '2006-01-01'
end_time = '2015-06-01'
PBO_data_fetcher = PBO_DF(start_time, end_time, [lat_range, lon_range],mdyratio=.7)
PBO_data = PBO_data_fetcher.output().get() # returns an ordered dictionary of data frames
PBO_data['AV06'].head()
plt.figure();
plt.plot(PBO_data['AV06']['dN']);
plt.tight_layout()
```
| /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_PBO.ipynb | 0.818047 | 0.566798 | Demo_PBO.ipynb | pypi |
The MIT License (MIT)<br>
Copyright (c) 2018 Massachusetts Institute of Technology<br>
Authors: Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 150
```
TESS End-to-End 6 Simulated Light Curve Time Series<br>
Source: https://archive.stsci.edu/tess/ete-6.html
```
from skdaccess.astro.tess.simulated.cache import DataFetcher as TESS_DF
from skdaccess.framework.param_class import *
import numpy as np
tess_fetcher = TESS_DF([AutoList([376664523])])
tess_dw = tess_fetcher.output()
label, data = next(tess_dw.getIterator())
```
Normalize flux
```
valid_index = data['PDCSAP_FLUX'] != 0.0
data.loc[valid_index, 'RELATIVE_PDCSAP_FLUX'] = data.loc[valid_index, 'PDCSAP_FLUX'] / np.median(data.loc[valid_index, 'PDCSAP_FLUX'])
```
Plot Relative PDCSAP Flux vs time
```
plt.gcf().set_size_inches(6,2);
plt.scatter(data.loc[valid_index, 'TIME'], data.loc[valid_index, 'RELATIVE_PDCSAP_FLUX'], s=2, edgecolor='none');
plt.xlabel('Time');
plt.ylabel('Relative PDCSAP Flux');
plt.title('Simulated Data TID: ' + str(int(label)));
```
| /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_TESS_Simulated_Data.ipynb | 0.403097 | 0.647534 | Demo_TESS_Simulated_Data.ipynb | pypi |
The MIT License (MIT)<br>
Copyright (c) 2017 Massachusetts Institute of Technology<br>
Author: Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
plt.rcParams['figure.figsize'] = (14,14)
```
Temperature data from Mahali 2015 Alaska Experiment (Temperature sensor inside Mahali box) <br>
MIT led NSF project studying the Earth’s ionosphere with GPS<br>
http://mahali.mit.edu/
```
from skdaccess.framework.param_class import *
from skdaccess.geo.mahali.temperature import DataFetcher as MHDF
station_list = [ 'mh02', 'mh03', 'mh04', 'mh05', 'mh06', 'mh07', 'mh08', 'mh09', 'mh13']
mhdf = MHDF([AutoList(station_list)])
dw = mhdf.output()
label = 'mh06'
data = dw.get()['mh06']
plt.plot(data['Temperature'],'o',markersize=1);
plt.title(label,fontsize=14);
plt.ylabel('Temperature (C)', fontsize=14);
plt.xticks(rotation=15);
plt.gcf().set_size_inches(14,4)
def plotAllStations(start_date, end_date):
plt.gcf().set_size_inches(12,9)
for index, (label, data) in enumerate(dw.getIterator()):
plt.subplot(3,3, index+1)
plt.title(label);
plt.ylabel('Temperature (C)');
plt.xticks(rotation=25);
plt.plot(data['Temperature'],'o',markersize=1);
plt.xlim(start_date, end_date)
plt.tight_layout()
plotAllStations(pd.to_datetime('2015-09-28'),pd.to_datetime('2015-11-13'))
```
| /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_Mahali_Temperature.ipynb | 0.518546 | 0.658143 | Demo_Mahali_Temperature.ipynb | pypi |
The MIT License (MIT)<br>
Copyright (c) 2016, 2017, 2018 Massachusetts Institute of Technology<br>
Authors: Justin Li, Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi']=150
# Gravity Recovery and Climate Experiment (GRACE) Data
# Source: http://grace.jpl.nasa.gov/
# Current surface mass change data, measuring equivalent water thickness in cm, versus time
# This data fetcher uses results from the Mascon solutions
from skdaccess.geo.grace.mascon.cache import DataFetcher as GR_DF
from skdaccess.framework.param_class import *
geo_point = AutoList([(38, -117)]) # location in Nevada
grace_fetcher = GR_DF([geo_point],start_date='2010-01-01',end_date='2014-01-01')
grace_data_wrapper = grace_fetcher.output() # Get a data wrapper
grace_label, grace_data = next(grace_data_wrapper.getIterator())# Get GRACE data
grace_data.head()
```
Get scale factor
```
scale_factor = grace_data_wrapper.info(grace_label)['scale_factor']
```
Plot EWD $\times$ scale factor
```
plt.plot(grace_data['EWD']*scale_factor);
plt.xticks(rotation=35);
```
| /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_GRACE_Mascon.ipynb | 0.707506 | 0.567607 | Demo_GRACE_Mascon.ipynb | pypi |