from abc import abstractmethod
from math import pi
from .base import dot, transpose, safe_log, safe_exp
from .utils import check_array, check_types, check_version
__all__ = ["GaussianNBPure", "MultinomialNBPure", "ComplementNBPure"]
class _BaseNBPure:
"""Base class for naive Bayes classifiers"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
"""
X = check_array(X, handle_sparse="error")
jll = self._joint_log_likelihood(X)
indices = map(lambda a: a.index(max(a)), jll)
return [self.classes_[i] for i in indices]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
"""
X = check_array(X, handle_sparse="error")
jll = self._joint_log_likelihood(X)
log_prob_x = list(map(lambda a: safe_log(sum(map(safe_exp, a))), jll))
return [
list(map(lambda a: a - log_prob_x[index], jll[index]))
for index in range(len(jll))
]
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
"""
return [list(map(safe_exp, a)) for a in self.predict_log_proba(X)]
class GaussianNBPure(_BaseNBPure):
"""
Pure python implementation of `GaussianNB`.
Args:
estimator (sklearn estimator): fitted `GaussianNB` object
"""
def __init__(self, estimator):
check_version(estimator)
self.class_prior_ = estimator.class_prior_.tolist()
self.classes_ = estimator.classes_.tolist()
self.var_ = estimator.var_.tolist()
self.theta_ = estimator.theta_.tolist()
check_types(self)
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
joint_log_likelihood = []
for i in range(len(self.classes_)):
jointi = safe_log(self.class_prior_[i])
n_ij = -0.5 * sum(list(map(lambda x: safe_log(2.0 * pi * x), self.var_[i])))
jll = [
list(
map(
lambda b: ((a[b] - self.theta_[i][b]) ** 2) / self.var_[i][b],
range(len(a)),
)
)
for a in X
]
jll = list(map(lambda a: 0.5 * sum(a), jll))
jll = [(n_ij - a) + jointi for a in jll]
joint_log_likelihood.append(jll)
return transpose(joint_log_likelihood)
class MultinomialNBPure(_BaseNBPure):
"""
Pure python implementation of `MultinomialNB`.
Args:
estimator (sklearn estimator): fitted `MultinomialNB` object
"""
def __init__(self, estimator):
check_version(estimator)
self.class_log_prior_ = estimator.class_log_prior_.tolist()
self.classes_ = estimator.classes_.tolist()
self.feature_log_prob_ = estimator.feature_log_prob_.tolist()
check_types(self)
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
return [self._jll(a) for a in X]
def _jll(self, x):
"""Calculate the joint log likelihood for one sample"""
dot_prod = dot(x, self.feature_log_prob_)
return [
(dot_prod[index] + self.class_log_prior_[index])
for index in range(len(self.classes_))
]
class ComplementNBPure(_BaseNBPure):
"""
Pure python implementation of `ComplementNB`.
Args:
estimator (sklearn estimator): fitted `ComplementNB` object
"""
def __init__(self, estimator):
check_version(estimator)
self.class_log_prior_ = estimator.class_log_prior_.tolist()
self.classes_ = estimator.classes_.tolist()
self.feature_log_prob_ = estimator.feature_log_prob_.tolist()
check_types(self)
def _joint_log_likelihood(self, X):
"""Calculate the class scores for the samples in X"""
jll = [dot(x, self.feature_log_prob_) for x in X]
if len(self.classes_) == 1:
jll = [[x[0] + self.class_log_prior_[0]] for x in jll]
return jll

# --- scikit_endpoint/naive_bayes.py ---
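
# Usage sketch (added for illustration; not part of the package). It shows how a
# fitted scikit-learn GaussianNB is converted into the pure-python GaussianNBPure
# defined above. The import path `scikit_endpoint.naive_bayes` is an assumption
# based on the file layout.
from sklearn.naive_bayes import GaussianNB
from scikit_endpoint.naive_bayes import GaussianNBPure

X_train = [[0.0, 1.2], [1.5, 0.3], [2.1, 2.2], [0.4, 1.9]]
y_train = [0, 1, 1, 0]

clf = GaussianNB().fit(X_train, y_train)      # numpy-based fitted estimator
clf_pure = GaussianNBPure(clf)                # parameters copied into plain lists
print(clf_pure.predict([[0.1, 1.0]]))         # list of predicted class labels
print(clf_pure.predict_proba([[0.1, 1.0]]))   # list of per-class probabilities
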
from operator import add
from ..utils import check_array, ndim, shape, check_types
from ..base import dot, expit, ravel
class LinearClassifierMixinPure:
"""Mixin for linear classifiers"""
def __init__(self, estimator):
self.coef_ = estimator.coef_.tolist()
self.classes_ = estimator.classes_.tolist()
if hasattr(estimator, "intercept_"):
if isinstance(estimator.intercept_, float):
self.intercept_ = [estimator.intercept_] * len(self.classes_)
else:
self.intercept_ = estimator.intercept_.tolist()
if hasattr(estimator, "multi_class"):
self.multi_class = estimator.multi_class
if hasattr(estimator, "solver"):
self.solver = estimator.solver
if hasattr(estimator, "loss"):
self.loss = estimator.loss
check_types(self)
def decision_function(self, X):
"""
Predict confidence scores for samples.
The confidence score for a sample is the signed distance of that
sample to the hyperplane.
"""
X = check_array(X, handle_sparse="allow")
n_features = shape(self.coef_)[1]
if shape(X)[1] != n_features:
raise ValueError(
"X has %d features per sample; expecting %d" % (shape(X)[1], n_features)
)
scores = [
list(map(add, dot(X[i], self.coef_), self.intercept_))
for i in range(len(X))
]
return ravel(scores) if shape(scores)[1] == 1 else scores
def predict(self, X):
"""Predict class labels for samples in X"""
scores = self.decision_function(X)
if len(shape(scores)) == 1:
indices = map(lambda x: int(x > 0), scores)
else:
indices = map(lambda a: a.index(max(a)), scores)
return [self.classes_[i] for i in indices]
def _predict_proba_lr(self, X):
"""
Probability estimation for OvR logistic regression.
Positive class probabilities are computed as
1. / (1. + np.exp(-self.decision_function(X)));
multiclass is handled by normalizing that over all classes.
"""
prob = self.decision_function(X)
if ndim(prob) == 1:
return [[1 - a, a] for a in map(expit, prob)]
else:
prob = [list(map(expit, a)) for a in prob]
return [
list(map(lambda b: (b / sum(a)) if sum(a) != 0 else float("NaN"), a))
for a in prob
]

# --- scikit_endpoint/linear_model/_base.py ---
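
# Usage sketch (added for illustration; not part of the package). The mixin can
# wrap a fitted scikit-learn linear classifier directly, although in the package
# it is normally inherited by concrete wrappers such as a pure logistic
# regression class. The import path is an assumption based on the file layout.
from sklearn.linear_model import LogisticRegression
from scikit_endpoint.linear_model._base import LinearClassifierMixinPure

X_train = [[0.0, 1.0], [1.0, 0.0], [2.0, 2.0], [0.5, 1.5]]
y_train = [0, 1, 1, 0]

clf = LogisticRegression().fit(X_train, y_train)
clf_pure = LinearClassifierMixinPure(clf)           # coef_/intercept_/classes_ stored as lists
print(clf_pure.decision_function([[1.0, 1.0]]))     # signed distance(s) to the hyperplane
print(clf_pure.predict([[1.0, 1.0]]))               # predicted class labels
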
import re
import unicodedata
from functools import partial
from math import isnan
import warnings
from ._hash import _FeatureHasherPure
from ..map import convert_estimator
from ..preprocessing import normalize_pure
from ..utils import (
convert_type,
sparse_list,
shape,
check_array,
check_types,
check_version,
)
from ..base import safe_log
__all__ = [
"CountVectorizerPure",
"TfidfTransformerPure",
"TfidfVectorizerPure",
"HashingVectorizerPure",
]
def _preprocess(doc, accent_function=None, lower=False):
"""
Chain together an optional series of text preprocessing steps to
apply to a document.
"""
if lower:
doc = doc.lower()
if accent_function is not None:
doc = accent_function(doc)
return doc
def _analyze(
doc,
analyzer=None,
tokenizer=None,
ngrams=None,
preprocessor=None,
decoder=None,
stop_words=None,
):
"""
Chain together an optional series of text processing steps to go from
a single document to ngrams, with or without tokenizing or preprocessing.
"""
if decoder is not None:
doc = decoder(doc)
if analyzer is not None:
doc = analyzer(doc)
else:
if preprocessor is not None:
doc = preprocessor(doc)
if tokenizer is not None:
doc = tokenizer(doc)
if ngrams is not None:
if stop_words is not None:
doc = ngrams(doc, stop_words)
else:
doc = ngrams(doc)
return doc
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart"""
try:
# If `s` is ASCII-compatible, then it does not contain any accented
# characters and we can avoid an expensive list comprehension
s.encode("ASCII", errors="strict")
return s
except UnicodeEncodeError:
normalized = unicodedata.normalize("NFKD", s)
return "".join([c for c in normalized if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing"""
nkfd_form = unicodedata.normalize("NFKD", s)
return nkfd_form.encode("ASCII", "ignore").decode("ASCII")
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
raise ValueError(
"English stopwords not supported. Pass explicitly as a custom stopwords list."
)
elif isinstance(stop, str):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class _VectorizerMixinPure:
"""Provides common code for text vectorizers (tokenization logic)"""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols"""
if self.input == "filename":
with open(doc, "rb") as fh:
doc = fh.read()
elif self.input == "file":
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if not isinstance(doc, str) and isnan(doc):
raise ValueError(
"np.nan is an invalid document, expected byte or unicode string."
)
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
if min_n == 1:
# no need to do any slicing for unigrams
# just iterate through the original tokens
tokens = list(original_tokens)
min_n += 1
else:
tokens = []
n_original_tokens = len(original_tokens)
# bind method outside of loop to reduce overhead
tokens_append = tokens.append
space_join = " ".join
for n in range(min_n, min(max_n + 1, n_original_tokens + 1)):
for i in range(n_original_tokens - n + 1):
tokens_append(space_join(original_tokens[i : i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
min_n, max_n = self.ngram_range
if min_n == 1:
# no need to do any slicing for unigrams
# iterate through the string
ngrams = list(text_document)
min_n += 1
else:
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for n in range(min_n, min(max_n + 1, text_len + 1)):
for i in range(text_len - n + 1):
ngrams_append(text_document[i : i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for w in text_document.split():
w = " " + w + " "
w_len = len(w)
for n in range(min_n, max_n + 1):
offset = 0
ngrams_append(w[offset : offset + n])
while offset + n < w_len:
offset += 1
ngrams_append(w[offset : offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# accent stripping
if not self.strip_accents:
strip_accents = None
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == "ascii":
strip_accents = strip_accents_ascii
elif self.strip_accents == "unicode":
strip_accents = strip_accents_unicode
else:
raise ValueError(
'Invalid value for "strip_accents": %s' % self.strip_accents
)
return partial(_preprocess, accent_function=strip_accents, lower=self.lowercase)
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return token_pattern.findall
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def _check_stop_words_consistency(self, stop_words, preprocess, tokenize):
"""Check if stop words are consistent"""
if id(self.stop_words) == getattr(self, "_stop_words_id", None):
# Stop words were previously validated
return None
# NB: stop_words is validated, unlike self.stop_words
try:
inconsistent = set()
for w in stop_words or ():
tokens = list(tokenize(preprocess(w)))
for token in tokens:
if token not in stop_words:
inconsistent.add(token)
self._stop_words_id = id(self.stop_words)
if inconsistent:
warnings.warn(
"Your stop_words may be inconsistent with "
"your preprocessing. Tokenizing the stop "
"words generated tokens %r not in "
"stop_words." % sorted(inconsistent)
)
return not inconsistent
except Exception:
# Failed to check stop words consistency (e.g. because a custom
# preprocessor or tokenizer was used)
self._stop_words_id = id(self.stop_words)
return "error"
def build_analyzer(self):
"""
Return a callable that handles preprocessing, tokenization
and n-grams generation.
"""
if callable(self.analyzer):
if self.input in ["file", "filename"]:
self._validate_custom_analyzer()
return partial(_analyze, analyzer=self.analyzer, decoder=self.decode)
preprocess = self.build_preprocessor()
if self.analyzer == "char":
return partial(
_analyze,
ngrams=self._char_ngrams,
preprocessor=preprocess,
decoder=self.decode,
)
elif self.analyzer == "char_wb":
return partial(
_analyze,
ngrams=self._char_wb_ngrams,
preprocessor=preprocess,
decoder=self.decode,
)
elif self.analyzer == "word":
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
self._check_stop_words_consistency(stop_words, preprocess, tokenize)
return partial(
_analyze,
ngrams=self._word_ngrams,
tokenizer=tokenize,
preprocessor=preprocess,
decoder=self.decode,
stop_words=stop_words,
)
else:
raise ValueError(
"%s is not a valid tokenization scheme/analyzer" % self.analyzer
)
class CountVectorizerPure(_VectorizerMixinPure):
"""
Pure python implementation of `CountVectorizer`.
Args:
estimator (sklearn estimator): fitted `CountVectorizer` object
"""
def __init__(self, estimator):
check_version(estimator)
self.dtype = convert_type(estimator.dtype)
self.binary = estimator.binary
self.vocabulary_ = {k: int(v) for k, v in estimator.vocabulary_.items()}
self.analyzer = estimator.analyzer
self.preprocessor = estimator.preprocessor
self.tokenizer = estimator.tokenizer
self.stop_words = estimator.stop_words
self.token_pattern = estimator.token_pattern
self.ngram_range = estimator.ngram_range
self.strip_accents = estimator.strip_accents
self.decode_error = estimator.decode_error
self.encoding = estimator.encoding
self.lowercase = estimator.lowercase
self.input = estimator.input
check_types(self)
def _count_vocab(self, raw_documents):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False"""
vocabulary = self.vocabulary_
analyze = self.build_analyzer()
data = []
for doc in raw_documents:
feature_counter = {}
for feature in analyze(doc):
try:
feature_idx = vocabulary[feature]
if feature_idx not in feature_counter:
feature_counter[feature_idx] = 1
else:
feature_counter[feature_idx] += 1
except KeyError:
continue
data.append(feature_counter)
X = sparse_list(data, size=len(vocabulary), dtype=self.dtype)
return vocabulary, X
def transform(self, raw_documents):
"""Transform documents to document-term matrix"""
if isinstance(raw_documents, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
_, X = self._count_vocab(raw_documents)
if self.binary:
X = [dict.fromkeys(x, 1) for x in X]
return X
class TfidfVectorizerPure(CountVectorizerPure):
"""
Pure python implementation of `TfidfVectorizer`.
Args:
estimator (sklearn estimator): fitted `TfidfVectorizer` object
"""
def __init__(self, estimator):
check_version(estimator)
self._tfidf = convert_estimator(estimator._tfidf)
super().__init__(estimator)
def transform(self, raw_documents):
"""Transform documents to document-term matrix."""
X = super().transform(raw_documents)
return self._tfidf.transform(X, copy=False)
class TfidfTransformerPure:
"""
Pure python implementation of `TfidfTransformer`.
Args:
estimator (sklearn estimator): fitted `TfidfTransformer` object
"""
def __init__(self, estimator):
check_version(estimator)
self.norm = estimator.norm
self.use_idf = estimator.use_idf
self.smooth_idf = estimator.smooth_idf
self.sublinear_tf = estimator.sublinear_tf
self.idf_ = estimator.idf_.tolist()
self.expected_n_features_ = estimator._idf_diag.shape[0]
check_types(self)
def transform(self, X, copy=True):
X = check_array(X, handle_sparse="allow")
n_samples, n_features = shape(X)
if self.sublinear_tf:
for index in range(len(X)):
X[index] = safe_log(X[index]) + 1
if self.use_idf:
if n_features != self.expected_n_features_:
raise ValueError(
"Input has n_features=%d while the model"
" has been trained with n_features=%d"
% (n_features, self.expected_n_features_)
)
for index in range(len(X)):
for k, v in X[index].items():
X[index][k] = v * self.idf_[k]
if self.norm:
X = normalize_pure(X, norm=self.norm, copy=False)
return X
class HashingVectorizerPure(_VectorizerMixinPure):
"""
Pure python implementation of `HashingVectorizer`.
Args:
estimator (sklearn estimator): fitted `HashingVectorizer` object
"""
def __init__(self, estimator):
check_version(estimator)
self.dtype = convert_type(estimator.dtype)
self.norm = estimator.norm
self.binary = estimator.binary
self.analyzer = estimator.analyzer
self.preprocessor = estimator.preprocessor
self.tokenizer = estimator.tokenizer
self.stop_words = estimator.stop_words
self.token_pattern = estimator.token_pattern
self.ngram_range = estimator.ngram_range
self.strip_accents = estimator.strip_accents
self.decode_error = estimator.decode_error
self.encoding = estimator.encoding
self.lowercase = estimator.lowercase
self.input = estimator.input
self.n_features = estimator.n_features
self.alternate_sign = estimator.alternate_sign
check_types(self)
def transform(self, X):
"""Transform a sequence of documents to a document-term matrix"""
if isinstance(X, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X = [dict.fromkeys(x, 1) for x in X]
if self.norm is not None:
X = normalize_pure(X, norm=self.norm, copy=False)
return X
def _get_hasher(self):
return _FeatureHasherPure(
n_features=self.n_features,
input_type="string",
dtype=self.dtype,
alternate_sign=self.alternate_sign,
)

# --- scikit_endpoint/feature_extraction/text.py ---
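
# Usage sketch (added for illustration; not part of the package). A fitted
# CountVectorizer is converted into its pure-python counterpart; import paths
# are assumptions based on the file layout.
from sklearn.feature_extraction.text import CountVectorizer
from scikit_endpoint.feature_extraction.text import CountVectorizerPure

docs = ["the cat sat", "the dog sat on the mat"]
vec = CountVectorizer().fit(docs)
vec_pure = CountVectorizerPure(vec)
# transform returns a `sparse_list`: one {column_index: count} dict per document;
# tokens missing from the fitted vocabulary are skipped.
print(vec_pure.transform(["the cat and the dog"]))
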
import numbers
from ..utils import check_types, sparse_list
MAX_INT = 2147483647
def _xrange(a, b, c):
return range(a, b, c)
def _xencode(x):
if isinstance(x, (bytes, bytearray)):
return x
else:
return x.encode()
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
def _hash(key, seed=0x0):
"""Implements 32bit murmur3 hash"""
key = bytearray(_xencode(key))
def fmix(h):
h ^= h >> 16
h = (h * 0x85EBCA6B) & 0xFFFFFFFF
h ^= h >> 13
h = (h * 0xC2B2AE35) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len(key)
nblocks = int(length / 4)
h1 = seed
c1 = 0xCC9E2D51
c2 = 0x1B873593
# body
for block_start in _xrange(0, nblocks * 4, 4):
# read the 4-byte block as a little-endian unsigned integer
k1 = (
key[block_start + 3] << 24
| key[block_start + 2] << 16
| key[block_start + 1] << 8
| key[block_start + 0]
)
k1 = (c1 * k1) & 0xFFFFFFFF
k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF # inlined ROTL32
k1 = (c2 * k1) & 0xFFFFFFFF
h1 ^= k1
h1 = (h1 << 13 | h1 >> 19) & 0xFFFFFFFF # inlined ROTL32
h1 = (h1 * 5 + 0xE6546B64) & 0xFFFFFFFF
# tail
tail_index = nblocks * 4
k1 = 0
tail_size = length & 3
if tail_size >= 3:
k1 ^= key[tail_index + 2] << 16
if tail_size >= 2:
k1 ^= key[tail_index + 1] << 8
if tail_size >= 1:
k1 ^= key[tail_index + 0]
if tail_size > 0:
k1 = (k1 * c1) & 0xFFFFFFFF
k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF # inlined ROTL32
k1 = (k1 * c2) & 0xFFFFFFFF
h1 ^= k1
# finalization
unsigned_val = fmix(h1 ^ length)
if unsigned_val & 0x80000000 == 0:
return unsigned_val
else:
return -((unsigned_val ^ 0xFFFFFFFF) + 1)
def _hashing_transform(raw_X, n_features, dtype, alternate_sign=1, seed=0):
"""Guts of FeatureHasher.transform"""
assert n_features > 0
X = []
for x in raw_X:
row = {}
for f, v in x:
if isinstance(v, str):
f = "%s%s%s" % (f, "=", v)
value = 1
else:
value = v
if value == 0:
continue
h = _hash(f, seed)
index = abs(h) % n_features
if alternate_sign:
value *= (h >= 0) * 2 - 1
row[index] = value
X.append(row)
return sparse_list(X, size=n_features, dtype=dtype)
class _FeatureHasherPure:
"""Pure python implementation of `FeatureHasher`"""
def __init__(
self, n_features=(2**20), input_type="dict", dtype=float, alternate_sign=True
):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.alternate_sign = alternate_sign
check_types(self)
@staticmethod
def _validate_params(n_features, input_type):
if not isinstance(n_features, numbers.Integral):
raise TypeError(
"n_features must be integral, got %r (%s)."
% (n_features, type(n_features))
)
elif n_features < 1 or n_features >= MAX_INT + 1:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError(
"input_type must be 'dict', 'pair' or 'string', got %r." % input_type
)
def transform(self, raw_X):
"""Transform a sequence of instances to a `sparse_list`"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
return _hashing_transform(
raw_X, self.n_features, self.dtype, self.alternate_sign, seed=0
)

# --- scikit_endpoint/feature_extraction/_hash.py ---
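
# Usage sketch (added for illustration; not part of the package). It hashes
# string tokens into a fixed-size sparse representation with the pure-python
# feature hasher above. The import path is an assumption based on the file layout.
from scikit_endpoint.feature_extraction._hash import _FeatureHasherPure

hasher = _FeatureHasherPure(n_features=16, input_type="string", dtype=float)
rows = hasher.transform([["cat", "sat", "cat"], ["dog"]])
# Each row is a {column_index: signed_value} dict keyed by the murmur3 hash of
# the token modulo n_features.
print(rows)
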
from ._label import _encode, _encode_check_unknown
from ..base import accumu, apply_2d
from ..utils import (
check_types,
check_array,
shape,
sparse_list,
convert_type,
check_version,
)
class _BaseEncoderPure:
"""
Base class for encoders that includes the code to categorize and
transform the input features.
"""
def __init__(self, estimator):
check_version(estimator)
self.dtype = convert_type(estimator.dtype)
self.categories_ = [a.tolist() for a in estimator.categories_]
if hasattr(estimator, "sparse"):
self.sparse = estimator.sparse
if hasattr(estimator, "drop") and (estimator.drop is not None):
raise ValueError("Encoder does not handle 'drop' functionality")
if hasattr(estimator, "handle_unknown"):
self.handle_unknown = estimator.handle_unknown
check_types(self)
def _check_X(self, X):
"""Perform custom check_array"""
X = check_array(X)
n_samples, n_features = shape(X)
X_columns = []
for i in range(n_features):
Xi = self._get_feature(X, feature_idx=i)
X_columns.append(Xi)
return X_columns, n_samples, n_features
def _get_feature(self, X, feature_idx):
return [x[feature_idx] for x in X]
def _transform(self, X, handle_unknown="error"):
X_list, n_samples, n_features = self._check_X(X)
X_int = [[0] * n_features] * n_samples
X_mask = [[True] * n_features] * n_samples
if n_features != len(self.categories_):
raise ValueError(
"The number of features in X is different to the number of "
"features of the fitted data. The fitted data had {} features "
"and the X has {} features.".format(
len(
self.categories_,
),
n_features,
)
)
for i in range(n_features):
Xi = X_list[i]
diff, valid_mask = _encode_check_unknown(
Xi, self.categories_[i], return_mask=True
)
if not (sum(valid_mask) == len(valid_mask)):
if handle_unknown == "error":
msg = (
"Found unknown categories {0} in column {1}"
" during transform".format(diff, i)
)
raise ValueError(msg)
else:
X_mask = [
[
valid_mask[j] if idx == i else X_mask[j][idx]
for idx in range(n_features)
]
for j in range(n_samples)
]
Xi = [
Xi[idx] if valid_mask[idx] else self.categories_[i][0]
for idx in range(len(Xi))
]
_, encoded = _encode(
Xi, self.categories_[i], encode=True, check_unknown=False
)
X_int = [
[encoded[j] if idx == i else X_int[j][idx] for idx in range(n_features)]
for j in range(n_samples)
]
return X_int, X_mask
class OrdinalEncoderPure(_BaseEncoderPure):
"""
Pure python implementation of `OrdinalEncoder`.
Args:
estimator (sklearn estimator): fitted `OrdinalEncoder` object
"""
def transform(self, X):
"""Transform X to ordinal codes"""
X_int, _ = self._transform(X)
return apply_2d(X_int, self.dtype)
class OneHotEncoderPure(_BaseEncoderPure):
"""
Pure python implementation of `OneHotEncoder`.
Args:
estimator (sklearn estimator): fitted `OneHotEncoder` object
"""
def transform(self, X):
"""Transform X using one-hot encoding"""
X_int, X_mask = self._transform(X, handle_unknown=self.handle_unknown)
n_samples, n_features = shape(X_int)
n_values = [0] + [len(cats) for cats in self.categories_]
feature_indices = list(accumu(n_values))
data = [
dict(
[
(n_values[i] + X_int[j][i], self.dtype(1))
for i in range(n_features)
if X_mask[j][i]
]
)
for j in range(n_samples)
]
out = sparse_list(data, size=feature_indices[-1], dtype=self.dtype)
if not self.sparse:
return out.todense()
else:
return out

# --- scikit_endpoint/preprocessing/_encoders.py ---
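
# Usage sketch (added for illustration; not part of the package). A fitted
# OneHotEncoder is converted into the pure-python wrapper above. The import path
# and the presence of the `sparse` attribute on the fitted encoder (older
# scikit-learn versions) are assumptions.
from sklearn.preprocessing import OneHotEncoder
from scikit_endpoint.preprocessing._encoders import OneHotEncoderPure

X_train = [["red", "S"], ["blue", "M"], ["red", "L"]]
enc = OneHotEncoder(handle_unknown="ignore").fit(X_train)
enc_pure = OneHotEncoderPure(enc)
# With sparse output, transform returns a `sparse_list` of {column: 1.0} dicts.
print(enc_pure.transform([["blue", "L"]]))
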
from math import sqrt
from copy import copy as cp
from ..utils import sparse_list, issparse, check_array, check_types, check_version
from ..base import transpose, apply_2d, apply_axis_2d, matmult_same_dim
def _handle_zeros_in_scale(scale, copy=True):
"""Makes sure that whenever scale is zero, we handle it correctly"""
if isinstance(scale, (int, float)):
if scale == 0.0:
scale = 1.0
return scale
elif isinstance(scale, list):
if copy:
scale = cp(scale)
return [(1.0 if scale[i] == 0.0 else scale[i]) for i in range(len(scale))]
def _row_norms(X):
"""Row-wise (squared) Euclidean norm of X"""
X_X = matmult_same_dim(X, X)
if issparse(X):
norms = [sum(x.values()) for x in X_X]
else:
norms = apply_axis_2d(X_X, sum, axis=1)
return list(map(sqrt, norms))
def normalize_pure(X, norm="l2", axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm"""
# check input compatibility
if (axis == 0) and issparse(X):
raise ValueError("Axis 0 is not supported for sparse data")
if norm not in ("l1", "l2", "max"):
raise ValueError("'%s' is not a supported norm" % norm)
if axis not in [0, 1]:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, handle_sparse="allow")
if axis == 0:
X = transpose(X)
if issparse(X):
if return_norm and norm in ("l1", "l2"):
raise NotImplementedError(
"return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'"
)
if norm == "l1":
norms = [sum(map(abs, x.values())) for x in X]
elif norm == "l2":
norms = _row_norms(X)
elif norm == "max":
norms = [max(list(x.values()) + [0]) for x in X]
norms = _handle_zeros_in_scale(norms, copy=False)
X_sparse = [
{k: (v / float(norms[index])) for k, v in X[index].items()}
for index in range(len(X))
]
X = sparse_list(X_sparse, X.size, X.dtype)
else:
if norm == "l1":
norms = apply_axis_2d(apply_2d(X, abs), sum, axis=1)
elif norm == "l2":
norms = _row_norms(X)
elif norm == "max":
norms = apply_axis_2d(X, max, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X = [
list(map(lambda a: a / float(norms[index]), X[index]))
for index in range(len(X))
]
if axis == 0:
X = transpose(X)
if return_norm:
return X, norms
else:
return X
class NormalizerPure:
"""
Pure python implementation of `Normalizer`.
Args:
estimator (sklearn estimator): fitted `Normalizer` object
"""
def __init__(self, estimator):
check_version(estimator)
self.norm = estimator.norm
self.copy = estimator.copy
check_types(self)
def transform(self, X, copy=None):
"""Scale each non zero row of X to unit norm."""
copy = copy if copy is not None else self.copy
X = check_array(X, handle_sparse="allow")
return normalize_pure(X, norm=self.norm, axis=1, copy=copy)
class StandardScalerPure:
"""
Pure python implementation of `StandardScaler`.
Args:
estimator (sklearn estimator): fitted `StandardScaler` object
"""
def __init__(self, estimator):
check_version(estimator)
self.with_mean = estimator.with_mean
self.with_std = estimator.with_std
if estimator.scale_ is None:
self.scale_ = None
else:
self.scale_ = estimator.scale_.tolist()
if estimator.mean_ is None:
self.mean_ = None
else:
self.mean_ = estimator.mean_.tolist()
check_types(self)
def transform(self, X, copy=None):
"""Perform standardization by centering and scaling"""
X = check_array(X, handle_sparse="allow")
if issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives."
)
if self.scale_ is not None:
X_ = [{k: (v / self.scale_[k]) for k, v in x.items()} for x in X]
X = sparse_list(X_, size=X.size, dtype=X.dtype)
else:
if self.with_mean:
X = [[x[i] - self.mean_[i] for i in range(len(self.mean_))] for x in X]
if self.with_std:
X = [
[x[i] / self.scale_[i] for i in range(len(self.scale_))] for x in X
]
return X
class MinMaxScalerPure:
"""
Pure python implementation of `MinMaxScaler`.
Args:
estimator (sklearn estimator): fitted `MinMaxScaler` object
"""
def __init__(self, estimator):
check_version(estimator)
self.feature_range = estimator.feature_range
if estimator.scale_ is None:
self.scale_ = None
else:
self.scale_ = estimator.scale_.tolist()
if estimator.min_ is None:
self.min_ = None
else:
self.min_ = estimator.min_.tolist()
check_types(self)
def transform(self, X):
"""Scale features of X according to feature_range"""
if issparse(X):
raise TypeError(
"MinMaxScalerPure does not support sparse input. "
"Consider using MaxAbsScalerPure instead."
)
X = check_array(X)
return [
[(x[i] * self.scale_[i]) + self.min_[i] for i in range(len(self.scale_))]
for x in X
]
class MaxAbsScalerPure:
"""
Pure python implementation of `MaxAbsScaler`.
Args:
estimator (sklearn estimator): fitted `MaxAbsScaler` object
"""
def __init__(self, estimator):
check_version(estimator)
self.copy = estimator.copy
if estimator.scale_ is None:
self.scale_ = None
else:
self.scale_ = estimator.scale_.tolist()
if estimator.max_abs_ is None:
self.max_abs_ = None
else:
self.max_abs_ = estimator.max_abs_.tolist()
check_types(self)
def transform(self, X):
"""Scale the data"""
X = check_array(X, handle_sparse="allow")
if issparse(X):
X_ = [{k: (v / self.scale_[k]) for k, v in x.items()} for x in X]
X = sparse_list(X_, size=X.size, dtype=X.dtype)
else:
X = [[x[i] / self.scale_[i] for i in range(len(self.scale_))] for x in X]
return X

# --- scikit_endpoint/preprocessing/_data.py ---
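
# Worked sketch (added for illustration; not part of the package): `normalize_pure`
# on a dense two-row matrix. The import path mirrors the relative import used in
# feature_extraction/text.py above.
from scikit_endpoint.preprocessing import normalize_pure

X = [[3.0, 4.0], [1.0, 0.0]]
print(normalize_pure(X, norm="l2"))   # [[0.6, 0.8], [1.0, 0.0]]  (each row divided by its L2 norm)
print(normalize_pure(X, norm="l1"))   # [[0.428..., 0.571...], [1.0, 0.0]]
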
from ..base import sfmax, expit
from ..tree import DecisionTreeRegressorPure
from ..utils import check_types, check_array
MIN_VERSION = "0.82"
SUPPORTED_OBJ = ["binary:logistic", "multi:softprob"]
SUPPORTED_BOOSTER = ["gbtree"]
class XGBClassifierPure:
"""
Pure python implementation of `XGBClassifier`. Only supports 'gbtree'
booster and 'binary:logistic' or 'multi:softprob' objectives.
Args:
estimator (xgboost estimator): fitted `XGBClassifier` object
"""
def __init__(self, estimator):
if (not isinstance(estimator.objective, str)) or (
estimator.objective not in SUPPORTED_OBJ
):
raise ValueError(
"Objective function not supported; only {} are supported".format(
SUPPORTED_OBJ
)
)
else:
self.objective = estimator.objective
if estimator.booster not in SUPPORTED_BOOSTER:
raise ValueError("Booster: '{}' not supported".format(estimator.booster))
else:
self.booster = estimator.booster
self.classes_ = estimator.classes_.tolist()
self.n_classes_ = estimator.n_classes_
self.n_estimators = estimator.n_estimators
self.estimators_ = self._build_estimators(estimator)
check_types(self)
def _build_estimators(self, estimator):
"""Convert booster to list of pure decision tree regressors"""
if not hasattr(estimator.get_booster(), "trees_to_dataframe"):
raise Exception(
"This xgboost estimator was likely fitted with version < {} "
"which is not supported".format(MIN_VERSION)
)
tree_df = estimator.get_booster().trees_to_dataframe()
estimators_ = []
idx = 0
for est_id in range(self.n_estimators):
if self.n_classes_ == 2:
tree = tree_df[tree_df["Tree"] == idx].to_dict(orient="list")
est_row_ = DecisionTreeRegressorPure(tree)
idx += 1
else:
est_row_ = []
for cls_id in range(self.n_classes_):
tree = tree_df[tree_df["Tree"] == idx].to_dict(orient="list")
est_row_.append(DecisionTreeRegressorPure(tree))
idx += 1
estimators_.append(est_row_)
return estimators_
def _predict(self, X):
"""Raw sums of estimator predictions for each class for multi-class"""
preds = []
for cls_index in range(self.n_classes_):
cls_sum = [0] * len(X)
for est_index in range(self.n_estimators):
est_preds = self.estimators_[est_index][cls_index].predict(X)
cls_sum = list(map(lambda x, y: x + y, cls_sum, est_preds))
preds.append(cls_sum)
return preds
def _predict_binary(self, X):
"""Raw sums of estimator predictions for each class for binary"""
preds = [0] * len(X)
for estimator in self.estimators_:
preds = list(map(lambda x, y: x + y, preds, estimator.predict(X)))
return preds
def predict(self, X):
proba = self.predict_proba(X)
return [self.classes_[a.index(max(a))] for a in proba]
def predict_proba(self, X):
X = check_array(X)
if self.objective == "multi:softprob":
preds = self._predict(X)
out = []
for i in range(len(X)):
out.append(sfmax([preds[j][i] for j in range(self.n_classes_)]))
elif self.objective == "binary:logistic":
preds = self._predict_binary(X)
out = list(map(expit, preds))
out = list(map(lambda x: [1 - x, x], out))
else:
raise ValueError(
"Objective function not supported; only {} are supported".format(
SUPPORTED_OBJ
)
)
return out

# --- scikit_endpoint/xgboost/_classes.py ---
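
# Usage sketch (added for illustration; not part of the package). A fitted
# xgboost classifier is converted into the pure-python wrapper above; only the
# 'gbtree' booster with the 'binary:logistic' or 'multi:softprob' objective is
# supported. The import path is an assumption based on the file layout.
from xgboost import XGBClassifier
from scikit_endpoint.xgboost._classes import XGBClassifierPure

X_train = [[0.0, 1.0], [1.0, 0.0], [2.0, 2.0], [0.5, 1.5]]
y_train = [0, 1, 1, 0]

clf = XGBClassifier(n_estimators=5, booster="gbtree", objective="binary:logistic")
clf.fit(X_train, y_train)
clf_pure = XGBClassifierPure(clf)   # each boosting round becomes a DecisionTreeRegressorPure
print(clf_pure.predict_proba([[1.0, 1.0]]))
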
import warnings
from math import isnan
from ..base import safe_log
from ..utils import check_array, check_types, check_version
class _DecisionTreeBase:
"""Decision tree base class"""
def __init__(self, estimator):
if isinstance(estimator, dict):
# sourced from xgboost booster object tree dictionary
self.threshold_ = list(
map(lambda x: -2 if isnan(x) else x, estimator["Split"])
)
self.value_ = [[a] for a in estimator["Gain"]]
self.children_left_ = list(
map(
lambda x: -1 if not isinstance(x, str) else int(x.split("-")[-1]),
estimator["Yes"],
)
)
self.children_right_ = list(
map(
lambda x: -1 if not isinstance(x, str) else int(x.split("-")[-1]),
estimator["No"],
)
)
self.feature_ = list(
map(
lambda x: -2 if x == "Leaf" else int(x.replace("f", "")[-1]),
estimator["Feature"],
)
)
else:
# sourced from sklearn decision tree
check_version(estimator)
self.children_left_ = estimator.tree_.children_left.tolist()
self.children_right_ = estimator.tree_.children_right.tolist()
self.feature_ = estimator.tree_.feature.tolist()
self.threshold_ = estimator.tree_.threshold.tolist()
self.value_ = [a[0] for a in estimator.tree_.value.tolist()]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if hasattr(estimator, "classes_") and (estimator.classes_ is not None):
self.classes_ = estimator.classes_.tolist()
else:
self.classes_ = [0, 1]
check_types(self)
def _get_leaf_node(self, x):
if isinstance(x, dict):
left_equal = lambda nd: x.get(self.feature_[nd], 0.0)
else:
left_equal = lambda nd: x[self.feature_[nd]]
found_node = False
node_id = 0
while not found_node:
if self.children_left_[node_id] == self.children_right_[node_id]:
found_node = True
else:
if left_equal(node_id) <= self.threshold_[node_id]:
node_id = self.children_left_[node_id]
else:
node_id = self.children_right_[node_id]
return node_id
class DecisionTreeClassifierPure(_DecisionTreeBase):
"""
Pure python implementation of `DecisionTreeClassifier`.
Args:
estimator (sklearn estimator): fitted `DecisionTreeClassifier` object
"""
def _get_pred_from_leaf_node(self, node_id):
return self.value_[node_id].index(max(self.value_[node_id]))
def _get_proba_from_leaf_node(self, node_id):
return [a / sum(self.value_[node_id]) for a in self.value_[node_id]]
def predict(self, X):
X = check_array(X, handle_sparse="allow")
leaves = [self._get_leaf_node(x) for x in X]
preds = [self._get_pred_from_leaf_node(x) for x in leaves]
return [self.classes_[x] for x in preds]
def predict_proba(self, X):
X = check_array(X, handle_sparse="allow")
leaves = [self._get_leaf_node(x) for x in X]
return [self._get_proba_from_leaf_node(x) for x in leaves]
def predict_log_proba(self, X):
return [list(map(safe_log, x)) for x in self.predict_proba(X)]
class DecisionTreeRegressorPure(_DecisionTreeBase):
"""
Pure python implementation of `DecisionTreeRegressor`.
Args:
estimator (sklearn estimator): fitted `DecisionTreeRegressor` object
"""
def _get_pred_from_leaf_node(self, node_id):
return self.value_[node_id][0]
def predict(self, X):
X = check_array(X, handle_sparse="allow")
leaves = [self._get_leaf_node(x) for x in X]
return [self._get_pred_from_leaf_node(x) for x in leaves]
class ExtraTreeClassifierPure(DecisionTreeClassifierPure):
"""
Pure python implementation of `ExtraTreeClassifier`.
Args:
estimator (sklearn estimator): fitted `ExtraTreeClassifier` object
"""
pass
class ExtraTreeRegressorPure(DecisionTreeRegressorPure):
"""
Pure python implementation of `ExtraTreeRegressor`.
Args:
estimator (sklearn estimator): fitted `ExtraTreeRegressor` object
"""
pass

# --- scikit_endpoint/tree/_classes.py ---
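
# Usage sketch (added for illustration; not part of the package). A fitted
# scikit-learn decision tree is converted into the pure-python classifier above.
# The import path is an assumption based on the file layout.
from sklearn.tree import DecisionTreeClassifier
from scikit_endpoint.tree._classes import DecisionTreeClassifierPure

X_train = [[0.0, 1.0], [1.0, 0.0], [2.0, 2.0], [0.5, 1.5]]
y_train = [0, 1, 1, 0]

tree = DecisionTreeClassifier(max_depth=2).fit(X_train, y_train)
tree_pure = DecisionTreeClassifierPure(tree)   # tree arrays copied into plain lists
print(tree_pure.predict([[1.0, 1.0]]))
print(tree_pure.predict_proba([[1.0, 1.0]]))
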
from ..base import transpose, apply_axis_2d, apply_2d, safe_exp, safe_log, ravel, expit
from ..utils import check_types, shape
EPS = 1.1920929e-07
def _clip(a, a_min, a_max):
if a < a_min:
return a_min
elif a > a_max:
return a_max
else:
return a
class _MultinomialDeviancePure:
"""Multinomial deviance loss function for multi-class classification"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError(
"{0:s} requires more than 2 classes.".format(self.__class__.__name__)
)
self.n_classes_ = n_classes
check_types(self)
def _raw_prediction_to_proba(self, raw_predictions):
logsumexp = list(
map(safe_log, apply_axis_2d(apply_2d(raw_predictions, safe_exp), sum))
)
return [
[
safe_exp(raw_predictions[index][i] - logsumexp[index])
for i in range(self.n_classes_)
]
for index in range(len(raw_predictions))
]
def _raw_prediction_to_decision(self, raw_predictions):
proba = self._raw_prediction_to_proba(raw_predictions)
return [a.index(max(a)) for a in proba]
def get_init_raw_predictions(self, X, estimator):
probas = estimator.predict_proba(X)
func = lambda x: safe_log(_clip(x, EPS, 1 - EPS))
return apply_2d(probas, func)
class _BinomialDeviancePure:
"""Binomial deviance loss function for binary classification"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError(
"{0:s} requires 2 classes; got {1:d} class(es)".format(
self.__class__.__name__, n_classes
)
)
check_types(self)
def _raw_prediction_to_proba(self, raw_predictions):
proba = (
ravel(raw_predictions)
if shape(raw_predictions)[1] == 1
else raw_predictions
)
proba_1 = list(map(expit, proba))
proba = [[(1 - x) for x in proba_1], proba_1]
return transpose(proba)
def _raw_prediction_to_decision(self, raw_predictions):
proba = self._raw_prediction_to_proba(raw_predictions)
return [a.index(max(a)) for a in proba]
def get_init_raw_predictions(self, X, estimator):
probas = estimator.predict_proba(X)
func = lambda x: _clip(x, EPS, 1 - EPS)
proba_pos_class = [func(a[1]) for a in probas]
log_func = lambda x: safe_log(x / (1 - x))
return [[log_func(a)] for a in proba_pos_class]
class _ExponentialLossPure:
"""Exponential loss function for binary classification"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError(
"{0:s} requires 2 classes; got {1:d} class(es)".format(
self.__class__.__name__, n_classes
)
)
check_types(self)
def _raw_prediction_to_proba(self, raw_predictions):
proba = (
ravel(raw_predictions)
if shape(raw_predictions)[1] == 1
else raw_predictions
)
func = lambda x: expit(x) * 2.0
proba_1 = list(map(func, proba))
proba = [[(1 - x) for x in proba_1], proba_1]
return transpose(proba)
def _raw_prediction_to_decision(self, raw_predictions):
raw_predictions = (
ravel(raw_predictions)
if shape(raw_predictions)[1] == 1
else raw_predictions
)
return [int(a >= 0) for a in raw_predictions]
def get_init_raw_predictions(self, X, estimator):
probas = estimator.predict_proba(X)
func = lambda x: _clip(x, EPS, 1 - EPS)
proba_pos_class = [func(a[1]) for a in probas]
log_func = lambda x: 0.5 * safe_log(x / (1 - x))
return [[log_func(a)] for a in proba_pos_class]

# --- scikit_endpoint/ensemble/_gb_losses.py ---
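
# Worked sketch (added for illustration; not part of the package): how
# _BinomialDeviancePure maps raw boosting scores to class probabilities, i.e. a
# logistic (expit) transform of the single raw column. The import path is an
# assumption based on the file layout.
from scikit_endpoint.ensemble._gb_losses import _BinomialDeviancePure

loss = _BinomialDeviancePure(n_classes=2)
raw = [[0.0], [2.0], [-1.5]]                    # one raw score per sample
print(loss._raw_prediction_to_proba(raw))       # rows of [P(class 0), P(class 1)]
print(loss._raw_prediction_to_decision(raw))    # [0, 1, 0], the argmax class indices
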
from operator import add
from ._gb_losses import (
_MultinomialDeviancePure,
_BinomialDeviancePure,
_ExponentialLossPure,
)
from ..base import transpose, apply_2d, safe_log, operate_2d
from ..utils import check_version, check_types, check_array, shape
from ..map import convert_estimator
class GradientBoostingClassifierPure:
"""
Pure python implementation of `GradientBoostingClassifier`.
Args:
estimator (sklearn estimator): fitted `GradientBoostingClassifier` object
"""
def __init__(self, estimator):
check_version(estimator, "0.21.0")
self.classes_ = estimator.classes_.tolist()
self.estimators_ = []
for est_arr in estimator.estimators_:
est_arr_ = []
for est in est_arr:
est_ = convert_estimator(est)
est_arr_.append(est_)
self.estimators_.append(est_arr_)
if hasattr(estimator, "init_"):
self.init_ = convert_estimator(estimator.init_)
self.loss = estimator.loss
self.learning_rate = estimator.learning_rate
self.n_features_ = estimator.n_features_in_
if self.loss == "deviance":
self.loss_ = (
_MultinomialDeviancePure(len(self.classes_))
if len(self.classes_) > 2
else _BinomialDeviancePure(len(self.classes_))
)
elif self.loss == "exponential":
self.loss_ = _ExponentialLossPure(len(self.classes_))
else:
raise ValueError("Loss: '{}' not supported.".format(self.loss))
check_types(self)
def _raw_predict_init(self, X):
"""Check input and compute raw predictions of the init estimator"""
X = check_array(X)
if shape(X)[1] != self.n_features_:
raise ValueError(
"X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features_, shape(X)[1]
)
)
if self.init_ == "zero":
raw_predictions = [[0.0] * shape(X)[1]] * shape(X)[0]
else:
raw_predictions = self.loss_.get_init_raw_predictions(X, self.init_)
return raw_predictions
def _raw_predict(self, X):
init_preds = self._raw_predict_init(X)
out = []
for k in range(len(self.estimators_[0])):
column = [0] * (shape(X)[0])
for index in range(len(self.estimators_)):
preds = self.estimators_[index][k].predict(X)
column = [
column[i] + (preds[i] * self.learning_rate)
for i in range(len(preds))
]
out.append(column)
out = transpose(out)
return operate_2d(init_preds, out, add)
def predict_proba(self, X):
raw_predictions = self._raw_predict(X)
return self.loss_._raw_prediction_to_proba(raw_predictions)
def predict_log_proba(self, X):
return apply_2d(self.predict_proba(X), safe_log)
def predict(self, X):
proba = self.predict_proba(X)
return [self.classes_[a.index(max(a))] for a in proba]

# --- scikit_endpoint/ensemble/_gb.py ---
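
# Usage sketch (added for illustration; not part of the package). A fitted
# GradientBoostingClassifier is converted into the pure-python wrapper above.
# This assumes a scikit-learn version whose `loss` attribute is reported as
# "deviance" or "exponential", and the import path is assumed from the layout.
from sklearn.ensemble import GradientBoostingClassifier
from scikit_endpoint.ensemble._gb import GradientBoostingClassifierPure

X_train = [[0.0, 1.0], [1.0, 0.0], [2.0, 2.0], [0.5, 1.5]]
y_train = [0, 1, 1, 0]

gbc = GradientBoostingClassifier(n_estimators=10).fit(X_train, y_train)
gbc_pure = GradientBoostingClassifierPure(gbc)   # per-stage trees converted via convert_estimator
print(gbc_pure.predict_proba([[1.0, 1.0]]))
print(gbc_pure.predict([[1.0, 1.0]]))
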
from math import isnan
from ..utils import shape, check_array, check_types, check_version
from ..base import apply_2d, apply_axis_2d
def _to_impute(val, missing_values):
if isnan(missing_values):
return isnan(val)
else:
return val == missing_values
class MissingIndicatorPure:
"""
Pure python implementation of `MissingIndicator`.
Args:
estimator (sklearn estimator): fitted `MissingIndicator` object
"""
def __init__(self, estimator):
check_version(estimator)
self.features = estimator.features
self.features_ = estimator.features_.tolist()
self._n_features = estimator._n_features
self.missing_values = (
float(estimator.missing_values)
if isinstance(estimator.missing_values, float)
else estimator.missing_values
)
self.error_on_new = estimator.error_on_new
check_types(self)
def transform(self, X):
X = check_array(X)
if shape(X)[1] != self._n_features:
raise ValueError(
"X has a different number of features than during fitting."
)
imputer_mask, features = self._get_missing_features_info(X)
if self.features == "missing-only":
features_diff_fit_trans = set(features) - set(self.features_)
if self.error_on_new and len(features_diff_fit_trans) > 0:
raise ValueError(
"The features {} have missing values "
"in transform but have no missing values "
"in fit.".format(features_diff_fit_trans)
)
if len(self.features_) < self._n_features:
imputer_mask = [
[float(a[i]) for i in range(len(a)) if i in self.features_]
for a in imputer_mask
]
return imputer_mask
def _get_missing_features_info(self, X):
func = lambda x: _to_impute(x, self.missing_values)
imputer_mask = apply_2d(X, func)
if self.features == "missing-only":
n_missing = apply_axis_2d(imputer_mask, sum, axis=0)
if self.features == "all":
features_indices = range(shape(X)[1])
else:
features_indices = [i for i in range(len(n_missing)) if n_missing[i] != 0]
return imputer_mask, features_indices
class SimpleImputerPure:
"""
Pure python implementation of `SimpleImputer`.
Args:
estimator (sklearn estimator): fitted `SimpleImputer` object
"""
def __init__(self, estimator):
check_version(estimator)
self.statistics_ = estimator.statistics_.tolist()
self.strategy = estimator.strategy
if hasattr(estimator, "add_indicator"):
self.add_indicator = estimator.add_indicator
else:
self.add_indicator = False
self.missing_values = (
float(estimator.missing_values)
if isinstance(estimator.missing_values, float)
else estimator.missing_values
)
if hasattr(estimator, "indicator_") and (estimator.indicator_ is not None):
self.indicator_ = MissingIndicatorPure(estimator.indicator_)
self.indicator_.error_on_new = False
check_types(self)
def _concatenate_indicator(self, X_imputed, X_indicator):
"""Concatenate indicator mask with the imputed data"""
if not self.add_indicator:
return X_imputed
if X_indicator is None:
raise ValueError(
"Data from the missing indicator are not provided. Call "
"_fit_indicator and _transform_indicator in the imputer "
"implementation."
)
return [
X_imputed[index] + X_indicator[index] for index in range(len(X_imputed))
]
def _transform_indicator(self, X):
"""
Compute the indicator mask.
Note that X must be the original data as passed to the imputer before
any imputation, since imputation may be done inplace in some cases.
"""
if self.add_indicator:
if not hasattr(self, "indicator_"):
raise ValueError(
"Make sure to call _fit_indicator before _transform_indicator"
)
return self.indicator_.transform(X)
def transform(self, X):
"""Transform inpute X by imputing values"""
X = check_array(X)
X_indicator = self._transform_indicator(X)
if shape(X)[1] != shape(self.statistics_)[0]:
raise ValueError(
"X has %d features per sample, expected %d"
% (shape(X)[1], shape(self.statistics_)[0])
)
# delete the invalid columns if strategy is not constant
if self.strategy == "constant":
valid_statistics = self.statistics_
else:
to_remove = [
index
for index in range(len(self.statistics_))
if isnan(self.statistics_[index])
]
if len(to_remove) > 0:
X = [[a[i] for i in range(len(a)) if i not in to_remove] for a in X]
valid_statistics = [
self.statistics_[i]
for i in range(len(self.statistics_))
if i not in to_remove
]
else:
valid_statistics = self.statistics_
func = (
lambda a, i: a[i]
if not _to_impute(a[i], self.missing_values)
else valid_statistics[i]
)
X_imputed = [[func(a, i) for i in range(len(a))] for a in X]
return self._concatenate_indicator(X_imputed, X_indicator)

# --- scikit_endpoint/impute/_base.py ---
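
# Usage sketch (added for illustration; not part of the package). A fitted
# SimpleImputer is converted into the pure-python wrapper above. The import path
# is an assumption based on the file layout.
from sklearn.impute import SimpleImputer
from scikit_endpoint.impute._base import SimpleImputerPure

X_train = [[1.0, 2.0], [float("nan"), 3.0], [7.0, 6.0]]
imp = SimpleImputer(strategy="mean").fit(X_train)
imp_pure = SimpleImputerPure(imp)                  # statistics_ stored as a plain list
print(imp_pure.transform([[float("nan"), 5.0]]))   # missing value replaced by the column mean (4.0)
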
<p>
<img src="https://github.com/monte-flora/scikit-explain/blob/master/images/mintpy_logo.png?raw=true" align="right" width="400" height="400" />
</p>
![Unit Tests](https://github.com/monte-flora/scikit-explain/actions/workflows/continuous_intergration.yml/badge.svg)
[![codecov](https://codecov.io/gh/monte-flora/s/branch/master/graph/badge.svg?token=GG9NRQOZ0N)](https://codecov.io/gh/monte-flora/scikit-explain)
[![Updates](https://pyup.io/repos/github/monte-flora/scikit-explain/shield.svg)](https://pyup.io/repos/github/monte-flora/scikit-explain/)
[![Python 3](https://pyup.io/repos/github/monte-flora/scikit-explain/python-3-shield.svg)](https://pyup.io/repos/github/monte-flora/scikit-explain/)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
![PyPI](https://img.shields.io/pypi/v/scikit-explain)
[![Documentation Status](https://readthedocs.org/projects/scikit-explain/badge/?version=latest)](https://scikit-explain.readthedocs.io/en/latest/?badge=latest)
scikit-explain is a user-friendly Python module for tabular-style machine learning explainability. Current explainability products include
* Feature importance:
* [Single- and Multi-pass Permutation Importance](https://permutationimportance.readthedocs.io/en/latest/methods.html#permutation-importance) ([Breiman et al. 2001](https://link.springer.com/article/10.1023/A:1010933404324), [Lakshmanan et al. 2015](https://journals.ametsoc.org/view/journals/atot/32/6/jtech-d-13-00205_1.xml?rskey=hlSyXu&result=2))
* [SHAP](https://christophm.github.io/interpretable-ml-book/shap.html)
* First-order PD/ALE Variance ([Greenwell et al. 2018](https://arxiv.org/abs/1805.04755))
* Grouped permutation importance ([Au et al. 2021](https://arxiv.org/abs/2104.11688))
* Feature Effects/Attributions:
* [Partial Dependence](https://christophm.github.io/interpretable-ml-book/pdp.html) (PD),
* [Accumulated local effects](https://christophm.github.io/interpretable-ml-book/ale.html) (ALE),
* Random forest-based feature contributions ([treeinterpreter](http://blog.datadive.net/interpreting-random-forests/))
* [SHAP](https://christophm.github.io/interpretable-ml-book/shap.html)
* [LIME](https://christophm.github.io/interpretable-ml-book/lime.html#lime)
* Main Effect Complexity (MEC; [Molnar et al. 2019](https://arxiv.org/abs/1904.03867))
* Feature Interactions:
* Second-order PD/ALE
* Interaction Strength and Main Effect Complexity (IAS; [Molnar et al. 2019](https://arxiv.org/abs/1904.03867))
* Second-order PD/ALE Variance ([Greenwell et al. 2018](https://arxiv.org/abs/1805.04755))
* Second-order Permutation Importance ([Oh et al. 2019](https://www.mdpi.com/2076-3417/9/23/5191))
* Friedman H-statistic ([Friedman and Popescu 2008](https://projecteuclid.org/journals/annals-of-applied-statistics/volume-2/issue-3/Predictive-learning-via-rule-ensembles/10.1214/07-AOAS148.full))
These explainability methods are discussed at length in Christoph Molnar's [Interpretable Machine Learning](https://christophm.github.io/interpretable-ml-book/). The primary feature of this package is the accompanying built-in plotting methods, which are designed to be easy to use while producing publication-quality figures. The computations leverage parallelization when possible. Documentation for scikit-explain can be found at [Read the Docs](https://scikit-explain.readthedocs.io/en/latest/index.html#).
The package is under active development and will likely contain bugs or errors. Feel free to raise issues!
This package is largely original code, but it also includes snippets or chunks of code from preexisting packages. Our goal is not to take credit from other code authors, but to make a single source for computing several machine learning explanation methods. Here is a list of packages used in scikit-explain:
[**PyALE**](https://github.com/DanaJomar/PyALE),
[**PermutationImportance**](https://github.com/gelijergensen/PermutationImportance),
[**ALEPython**](https://github.com/blent-ai/ALEPython),
[**SHAP**](https://github.com/slundberg/shap/),
[**scikit-learn**](https://github.com/scikit-learn/scikit-learn),
[**LIME**](https://github.com/marcotcr/lime),
[**Faster-LIME**](https://github.com/seansaito/Faster-LIME),
[**treeinterpreter**](https://github.com/andosa/treeinterpreter)
If you employ scikit-explain in your research, please cite this GitHub repository and the relevant packages listed above.
If you are experiencing issues with loading the tutorial jupyter notebooks, you can enter the URL/location of the notebooks into the following address: https://nbviewer.jupyter.org/.
## Install
scikit-explain can be installed through conda-forge or pip.
```
conda install -c conda-forge scikit-explain
pip install scikit-explain
```
## Dependencies
scikit-explain is compatible with Python 3.8 or newer. scikit-explain requires the following packages:
```
numpy
scipy
pandas
scikit-learn
matplotlib
shap>=0.30.0
xarray>=0.16.0
tqdm
statsmodels
seaborn>=0.11.0
```
Scikit-explain has built-in saving and loading functions for pandas dataframes and xarray datasets. Datasets are saved in netCDF4 format. To use this feature, install netCDF4 with one of the following: `pip install netCDF4` or `conda install -c conda-forge netCDF4`
### Initializing scikit-explain
The interface of scikit-explain is ```ExplainToolkit```, which houses all of the explainability methods and their corresponding plotting methods. See the tutorial notebooks for examples.
```python
import skexplain
# Loads three ML models (random forest, gradient-boosted tree, and logistic regression)
# trained on a subset of the road surface temperature data from Handler et al. (2020).
estimators = skexplain.load_models()
X,y = skexplain.load_data()
explainer = skexplain.ExplainToolkit(estimators=estimators,X=X,y=y,)
```
## Permutation Importance
scikit-explain includes both single-pass and multiple-pass permutation importance methods ([Breiman et al. 2001](https://link.springer.com/article/10.1023/A:1010933404324), [Lakshmanan et al. 2015](https://journals.ametsoc.org/view/journals/atot/32/6/jtech-d-13-00205_1.xml?rskey=hlSyXu&result=2), [McGovern et al. 2019](https://journals.ametsoc.org/view/journals/bams/100/11/bams-d-18-0195.1.xml?rskey=TvAHl8&result=20)). The permutation direction can also be given (i.e., backward or forward). Users can also specify feature groups and compute the grouped permutation feature importance ([Au et al. 2021](https://arxiv.org/abs/2104.11688)). Scikit-explain has a function (skexplain.common.importance_utils.to_skexplain_importance) that converts any feature ranking into the format used by the plotting methods. In the [tutorial](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/permutation_importance_tutorial.ipynb), users have the flexibility to make publication-quality figures.
```python
perm_results = explainer.permutation_importance(n_vars=10, evaluation_fn='auc')
explainer.plot_importance(data=perm_results)
```
<p align="center">
<img width="811" src="https://github.com/monte-flora/scikit-explain/blob/master/images/multi_pass_perm_imp.png?raw=true" />
</p>
Sample notebook can be found here: [**Permutation Importance**](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/permutation_importance_tutorial.ipynb)
## Partial dependence and Accumulated Local Effects
To compute the expected functional relationship between a feature and an ML model's prediction, scikit-explain provides partial dependence, accumulated local effects, and SHAP dependence. There is also an option for second-order interaction effects. You can select the features manually, or run the permutation importance first and let a built-in method retrieve the most important features. It is also possible to configure the plot to display readable feature names.
```python
# Assumes the .permutation_importance has already been run.
important_vars = explainer.get_important_vars(perm_results, multipass=True, nvars=7)
ale = explainer.ale(features=important_vars, n_bins=20)
explainer.plot_ale(ale)
```
<p align="center">
<img width="811" src="https://github.com/monte-flora/scikit-explain/blob/master/images/ale_1d.png?raw=true" />
</p>
Additionally, you can use the same code snippet to compute the second-order ALE (see the notebook for more details).
<p align="center">
<img width="811" src="https://github.com/monte-flora/scikit-explain/blob/master/images/ale_2d.png?raw=true" />
</p>
Sample notebook can be found here:
- [**Accumulated Local effects**](https://github.com/monte-flora/skexplain/blob/master/tutorial_notebooks/accumulated_local_effect_tutorial.ipynb)
- [**Partial Dependence**](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/partial_dependence_tutorial.ipynb)
## Feature Attributions (Local Explainability)
To explain individual examples (or sets of examples), scikit-explain has model-agnostic methods like SHAP and LIME and model-specific methods like tree interpreter (for decision tree-based models from scikit-learn). For SHAP, scikit-explain uses the shap.Explainer method, which automatically determines the most appropriate Shapley value algorithm ([see their docs](https://shap.readthedocs.io/en/latest/generated/shap.Explainer.html)). For LIME, scikit-explain uses the code from the Faster-LIME method. scikit-explain can create the summary and dependence plots from the shap python package, but they are adapted for multiple features and an easier user interface. It is also possible to plot attributions for a single example or summarized by model performance.
```python
import shap
single_example = examples.iloc[[0]]
explainer = skexplain.ExplainToolkit(estimators=estimators[0], X=single_example,)
# For LIME, we must provide the training dataset. We also denote any categorical features.
lime_kws = {'training_data' : X.values, 'categorical_names' : ['rural', 'urban']}
# The masker handles the missing features. In this case, we are using correlations
# in the dataset to determine the feature groupings. These groups of features are
# removed or added into sets together.
shap_kws={'masker' : shap.maskers.Partition(X, max_samples=100, clustering="correlation"),
'algorithm' : 'permutation'}
# method can be a single str or list of strs.
attr_results = explainer.local_attributions(method=['shap', 'lime', 'tree_interpreter'], shap_kws=shap_kws, lime_kws=lime_kws)
fig = explainer.plot_contributions(attr_results)
```
<p align="center">
<img width="811" src="https://github.com/monte-flora/scikit-explain/blob/master/images/feature_contribution_single.png?raw=true" />
</p>
```python
explainer = skexplain.ExplainToolkit(estimators=estimators[0],X=X, y=y)
# average_attributions averages the feature attributions and their feature values
# using either a simple mean or a mean conditioned on model performance.
avg_attr_results = explainer.average_attributions(method='shap', shap_kws=shap_kws, performance_based=True,)
fig = explainer.plot_contributions(avg_attr_results)
```
<p align="center">
<img width="811" src="https://github.com/monte-flora/scikit-explain/blob/master/images/feature_contributions_perform.png?raw=true" />
</p>
```python
explainer = skexplain.ExplainToolkit(estimators=estimators[0],X=X, y=y)
attr_results = explainer.local_attributions(method='lime', lime_kws=lime_kws)
explainer.scatter_plot(plot_type = 'summary', dataset=attr_results)
```
<p align="center">
<img width="811" src="https://github.com/monte-flora/scikit-explain/blob/master/images/shap_dependence.png?raw=true" />
</p>
```python
from skexplain.common import plotting_config
features = ['tmp2m_hrs_bl_frez', 'sat_irbt', 'sfcT_hrs_ab_frez', 'tmp2m_hrs_ab_frez', 'd_rad_d']
explainer.scatter_plot(features=features,
plot_type = 'dependence',
                       dataset=attr_results,
display_feature_names=plotting_config.display_feature_names,
display_units = plotting_config.display_units,
to_probability=True)
```
<p align="center">
<img width="811" src="https://github.com/monte-flora/scikit-explain/blob/master/images/shap_summary.png?raw=true" />
</p>
Sample notebooks can be found here:
- [**Feature Contributions**](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/feature_contributions.ipynb)
- [**Additional Feature Attributions Plots**](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/additional_feature_attribution_plots.ipynb)
## Tutorial notebooks
The notebooks provide the package documentation and demonstrate the scikit-explain API used to create the figures above. If a notebook fails to load on GitHub, paste its URL into https://nbviewer.jupyter.org/.
- [**Permutation Importance**](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/permutation_importance_tutorial.ipynb)
- [**Accumulated Local effects**](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/accumulated_local_effect_tutorial.ipynb)
- [**Partial Dependence**](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/partial_dependence_tutorial.ipynb)
- [**Feature Contributions**](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/feature_contributions.ipynb)
- [**Additional Feature Attributions Plots**](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/additional_feature_attribution_plots.ipynb)
| /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/README.md | 0.838845 | 0.944331 | README.md | pypi |
import numpy as np
import sklearn
from multiprocessing.pool import Pool
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier, _tree
from distutils.version import LooseVersion
from tqdm import tqdm
if LooseVersion(sklearn.__version__) < LooseVersion("0.17"):
raise Exception("treeinterpreter requires scikit-learn 0.17 or later")
class TreeInterpreter:
def __init__(self, model, examples, joint_contribution=False, n_jobs=1):
"""
Parameters
----------
model : DecisionTreeRegressor, DecisionTreeClassifier,
ExtraTreeRegressor, ExtraTreeClassifier,
RandomForestRegressor, RandomForestClassifier,
ExtraTreesRegressor, ExtraTreesClassifier
Scikit-learn model on which the prediction should be decomposed.
        examples : array-like, shape = (n_samples, n_features)
            Test samples.
        joint_contribution : boolean
            Specifies if contributions are given individually from each feature,
            or jointly over them
        n_jobs : int
            Number of processes used when decomposing a forest (default 1).
        """
self._model = model
self._examples = examples
self._joint_contribution = joint_contribution
self._n_jobs = n_jobs
def _get_tree_paths(self, tree, node_id, depth=0):
"""
Returns all paths through the tree as list of node_ids
"""
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
if left_child != _tree.TREE_LEAF:
left_paths = self._get_tree_paths(tree, left_child, depth=depth + 1)
right_paths = self._get_tree_paths(tree, right_child, depth=depth + 1)
for path in left_paths:
path.append(node_id)
for path in right_paths:
path.append(node_id)
paths = left_paths + right_paths
else:
paths = [[node_id]]
return paths
def predict_tree(self, tree):
"""
For a given DecisionTreeRegressor, DecisionTreeClassifier,
ExtraTreeRegressor, or ExtraTreeClassifier,
returns a triple of [prediction, bias and feature_contributions], such
that prediction ≈ bias + feature_contributions.
"""
leaves = tree.apply(self._examples)
paths = self._get_tree_paths(tree.tree_, 0)
for path in paths:
path.reverse()
leaf_to_path = {}
# map leaves to paths
for path in paths:
leaf_to_path[path[-1]] = path
# remove the single-dimensional inner arrays
values = tree.tree_.value.squeeze(axis=1)
# reshape if squeezed into a single float
if len(values.shape) == 0:
values = np.array([values])
if isinstance(tree, DecisionTreeRegressor):
biases = np.full(self._examples.shape[0], values[paths[0][0]])
line_shape = self._examples.shape[1]
elif isinstance(tree, DecisionTreeClassifier):
# scikit stores category counts, we turn them into probabilities
normalizer = values.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
values /= normalizer
biases = np.tile(values[paths[0][0]], (self._examples.shape[0], 1))
line_shape = (self._examples.shape[1], tree.n_classes_)
direct_prediction = values[leaves]
# make into python list, accessing values will be faster
values_list = list(values)
feature_index = list(tree.tree_.feature)
contributions = []
if self._joint_contribution:
for row, leaf in enumerate(leaves):
path = leaf_to_path[leaf]
path_features = set()
contributions.append({})
for i in range(len(path) - 1):
path_features.add(feature_index[path[i]])
contrib = values_list[path[i + 1]] - values_list[path[i]]
# path_features.sort()
contributions[row][tuple(sorted(path_features))] = (
contributions[row].get(tuple(sorted(path_features)), 0)
+ contrib
)
return direct_prediction, biases, contributions
else:
unique_leaves = np.unique(leaves)
unique_contributions = {}
for row, leaf in enumerate(unique_leaves):
for path in paths:
if leaf == path[-1]:
break
contribs = np.zeros(line_shape)
for i in range(len(path) - 1):
contrib = values_list[path[i + 1]] - values_list[path[i]]
contribs[feature_index[path[i]]] += contrib
unique_contributions[leaf] = contribs
for row, leaf in enumerate(leaves):
contributions.append(unique_contributions[leaf])
return direct_prediction, biases, np.array(contributions)
def predict_forest(self):
"""
For a given RandomForestRegressor, RandomForestClassifier,
ExtraTreesRegressor, or ExtraTreesClassifier returns a triple of
[prediction, bias and feature_contributions], such that prediction ≈ bias +
feature_contributions.
"""
biases = []
contributions = []
predictions = []
if self._joint_contribution:
for tree in self._model.estimators_:
                pred, bias, contribution = self.predict_tree(tree)
biases.append(bias)
contributions.append(contribution)
predictions.append(pred)
total_contributions = []
for i in range(len(self._examples)):
contr = {}
for j, dct in enumerate(contributions):
for k in set(dct[i]).union(set(contr.keys())):
contr[k] = (contr.get(k, 0) * j + dct[i].get(k, 0)) / (j + 1)
total_contributions.append(contr)
return (
np.mean(predictions, axis=0),
np.mean(biases, axis=0),
total_contributions,
)
else:
if self._n_jobs > 1:
pool = Pool(processes=self._n_jobs)
# iterates return values from the issued tasks
iterator = self._model.estimators_
for pred, bias, contribution in tqdm(pool.map(self.predict_tree, iterator), desc='Tree'):
biases.append(bias)
contributions.append(contribution)
predictions.append(pred)
pool.close()
pool.join()
else:
for tree in tqdm(self._model.estimators_, desc='Tree'):
pred, bias, contribution = self.predict_tree(tree)
biases.append(bias)
contributions.append(contribution)
predictions.append(pred)
return (
np.mean(predictions, axis=0),
np.mean(biases, axis=0),
np.mean(contributions, axis=0),
)
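    # --- Illustrative usage sketch (not part of the original module) -------
    # A minimal demo built only from the methods above: it decomposes a small
    # random-forest regression into prediction ~= bias + feature contributions.
    # The toy data and model settings are assumptions made for demonstration.
    @staticmethod
    def _example_forest_decomposition():
        """Decompose a toy random forest; pred ~= bias + contributions.sum(axis=1)."""
        rng = np.random.RandomState(0)
        X = rng.normal(size=(40, 3))
        y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=40)
        forest = RandomForestRegressor(n_estimators=10, random_state=0).fit(X, y)
        interpreter = TreeInterpreter(forest, X[:5])
        pred, bias, contributions = interpreter.predict()
        return pred, bias, contributions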
def predict(self):
"""Returns a triple (prediction, bias, feature_contributions), such
that prediction ≈ bias + feature_contributions.
Returns
-------
decomposed prediction : triple of
* prediction, shape = (n_samples) for regression and (n_samples, n_classes)
for classification
* bias, shape = (n_samples) for regression and (n_samples, n_classes) for
classification
            * contributions. If joint_contribution is False, then returns an array of
shape = (n_samples, n_features) for regression or
shape = (n_samples, n_features, n_classes) for classification, denoting
contribution from each feature.
If joint_contribution is True, then shape is array of size n_samples,
where each array element is a dict from a tuple of feature indices to
            a value denoting the contribution from that feature tuple.
"""
        # Only a single-output response variable is supported.
if self._model.n_outputs_ > 1:
raise ValueError("Multilabel classification trees not supported")
if isinstance(self._model, DecisionTreeClassifier) or isinstance(
self._model, DecisionTreeRegressor
):
            return self.predict_tree(self._model)
elif isinstance(self._model, RandomForestClassifier) or isinstance(
self._model, RandomForestRegressor
):
return self.predict_forest()
else:
raise ValueError(
"Wrong model type. Base learner needs to be a "
"DecisionTreeClassifier or DecisionTreeRegressor."
) | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/tree_interpreter.py | 0.817793 | 0.402216 | tree_interpreter.py | pypi |
import numpy as np
from .error_handling import InvalidStrategyException
__all__ = [
"verify_scoring_strategy",
"VALID_SCORING_STRATEGIES",
"argmin_of_mean",
"argmax_of_mean",
"indexer_of_converter",
]
def verify_scoring_strategy(scoring_strategy):
"""Asserts that the scoring strategy is valid and interprets various strings
:param scoring_strategy: a function to be used for determining optimal
variables or a string. If a function, should be of the form
``([some value]) -> index``. If a string, must be one of the options in
``VALID_SCORING_STRATEGIES``
:returns: a function to be used for determining optimal variables
"""
if callable(scoring_strategy):
return scoring_strategy
elif scoring_strategy in VALID_SCORING_STRATEGIES:
return VALID_SCORING_STRATEGIES[scoring_strategy]
else:
raise InvalidStrategyException(
scoring_strategy, options=list(VALID_SCORING_STRATEGIES.keys())
)
class indexer_of_converter(object):
"""This object is designed to help construct a scoring strategy by breaking
the process of determining an optimal score into two pieces:
First, each of the scores are converted to a simpler representation. For
instance, an array of scores resulting from a bootstrapped evaluation method
may be converted to just their mean.
Second, each of the simpler representations are compared to determine the
index of the one which is most optimal. This is typically just an ``argmin``
or ``argmax`` call.
"""
def __init__(self, indexer, converter):
"""Constructs a function which first converts all objects in a list to
something simpler and then uses the indexer to determine the index of
the most "optimal" one
        :param indexer: a function which converts a list of probably simple
            values (like numbers) to a single index
:param converter: a function which converts a single more complex object
to a simpler one (like a single number)
"""
self.indexer = indexer
self.converter = converter
def __call__(self, scores):
"""Finds the index of the most "optimal" score in a list"""
return self.indexer([self.converter(score) for score in scores])
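# Illustrative sketch (not part of the original module): the two-piece
# construction above can build custom strategies, e.g. ranking by the median
# of bootstrapped scores instead of the mean.
def _example_custom_strategy():
    """Pick the index of the score list with the smallest median."""
    argmin_of_median = indexer_of_converter(np.argmin, np.median)
    return argmin_of_median([[0.3, 0.4], [0.1, 0.2]])  # -> 1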
argmin_of_mean = indexer_of_converter(np.argmin, np.mean)
argmax_of_mean = indexer_of_converter(np.argmax, np.mean)
VALID_SCORING_STRATEGIES = {
"max": argmax_of_mean,
"maximize": argmax_of_mean,
"argmax": np.argmax,
"min": argmin_of_mean,
"minimize": argmin_of_mean,
"argmin": np.argmin,
"argmin_of_mean": argmin_of_mean,
"argmax_of_mean": argmax_of_mean,
} | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/PermutationImportance/scoring_strategies.py | 0.827793 | 0.480418 | scoring_strategies.py | pypi |
from .abstract_runner import abstract_variable_importance
from .selection_strategies import (
SequentialForwardSelectionStrategy,
SequentialBackwardSelectionStrategy,
)
from .sklearn_api import (
score_untrained_sklearn_model,
score_untrained_sklearn_model_with_probabilities,
)
__all__ = [
"sequential_forward_selection",
"sklearn_sequential_forward_selection",
"sequential_backward_selection",
"sklearn_sequential_backward_selection",
]
def sequential_forward_selection(
training_data,
scoring_data,
scoring_fn,
scoring_strategy,
variable_names=None,
nimportant_vars=None,
njobs=1,
):
"""Performs sequential forward selection over data given a particular
set of functions for scoring and determining optimal variables
:param training_data: a 2-tuple ``(inputs, outputs)`` for training in the
``scoring_fn``
:param scoring_data: a 2-tuple ``(inputs, outputs)`` for scoring in the
``scoring_fn``
:param scoring_fn: a function to be used for scoring. Should be of the form
``(training_data, scoring_data) -> some_value``
:param scoring_strategy: a function to be used for determining optimal
variables. Should be of the form ``([some_value]) -> index``
:param variable_names: an optional list for variable names. If not given,
will use names of columns of data (if pandas dataframe) or column
indices
:param nimportant_vars: number of variables to compute importance for.
Defaults to all variables
:param njobs: an integer for the number of threads to use. If negative, will
use ``num_cpus + njobs``. Defaults to 1
:returns: :class:`PermutationImportance.result.ImportanceResult` object
which contains the results for each run
"""
return abstract_variable_importance(
training_data,
scoring_data,
scoring_fn,
scoring_strategy,
SequentialForwardSelectionStrategy,
variable_names=variable_names,
nimportant_vars=nimportant_vars,
njobs=njobs,
)
def sklearn_sequential_forward_selection(
model,
training_data,
scoring_data,
evaluation_fn,
scoring_strategy,
variable_names=None,
nimportant_vars=None,
njobs=1,
nbootstrap=None,
subsample=1,
**kwargs
):
"""Performs sequential forward selection for a particular model,
``scoring_data``, ``evaluation_fn``, and strategy for determining optimal
variables
:param model: a sklearn model
:param training_data: a 2-tuple ``(inputs, outputs)`` for training in the
``scoring_fn``
:param scoring_data: a 2-tuple ``(inputs, outputs)`` for scoring in the
``scoring_fn``
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
:param scoring_strategy: a function to be used for determining optimal
variables. Should be of the form ``([some_value]) -> index``
:param variable_names: an optional list for variable names. If not given,
will use names of columns of data (if pandas dataframe) or column
indices
:param nimportant_vars: number of variables to compute importance for.
Defaults to all variables
:param njobs: an integer for the number of threads to use. If negative, will
use ``num_cpus + njobs``. Defaults to 1
:param nbootstrap: number of times to perform scoring on each variable.
Results over different bootstrap iterations are averaged. Defaults to 1
:param subsample: number of elements to sample (with replacement) per
bootstrap round. If between 0 and 1, treated as a fraction of the number
of total number of events (e.g. 0.5 means half the number of events).
If not specified, subsampling will not be used and the entire data will
be used (without replacement)
:param kwargs: all other kwargs will be passed on to the ``evaluation_fn``
:returns: :class:`PermutationImportance.result.ImportanceResult` object
which contains the results for each run
"""
# Check if the data is probabilistic
if len(scoring_data[1].shape) > 1 and scoring_data[1].shape[1] > 1:
scoring_fn = score_untrained_sklearn_model_with_probabilities(
model, evaluation_fn, nbootstrap=nbootstrap, subsample=subsample, **kwargs
)
else:
scoring_fn = score_untrained_sklearn_model(
model, evaluation_fn, nbootstrap=nbootstrap, subsample=subsample, **kwargs
)
return sequential_forward_selection(
training_data,
scoring_data,
scoring_fn,
scoring_strategy,
variable_names=variable_names,
nimportant_vars=nimportant_vars,
njobs=njobs,
)
def sequential_backward_selection(
training_data,
scoring_data,
scoring_fn,
scoring_strategy,
variable_names=None,
nimportant_vars=None,
njobs=1,
):
"""Performs sequential backward selection over data given a particular
set of functions for scoring and determining optimal variables
:param training_data: a 2-tuple ``(inputs, outputs)`` for training in the
``scoring_fn``
:param scoring_data: a 2-tuple ``(inputs, outputs)`` for scoring in the
``scoring_fn``
:param scoring_fn: a function to be used for scoring. Should be of the form
``(training_data, scoring_data) -> some_value``
:param scoring_strategy: a function to be used for determining optimal
variables. Should be of the form ``([some_value]) -> index``
:param variable_names: an optional list for variable names. If not given,
will use names of columns of data (if pandas dataframe) or column
indices
:param nimportant_vars: number of variables to compute importance for.
Defaults to all variables
:param njobs: an integer for the number of threads to use. If negative, will
use ``num_cpus + njobs``. Defaults to 1
:returns: :class:`PermutationImportance.result.ImportanceResult` object
which contains the results for each run
"""
return abstract_variable_importance(
training_data,
scoring_data,
scoring_fn,
scoring_strategy,
SequentialBackwardSelectionStrategy,
variable_names=variable_names,
nimportant_vars=nimportant_vars,
njobs=njobs,
)
def sklearn_sequential_backward_selection(
model,
training_data,
scoring_data,
evaluation_fn,
scoring_strategy,
variable_names=None,
nimportant_vars=None,
njobs=1,
nbootstrap=None,
subsample=1,
**kwargs
):
"""Performs sequential backward selection for a particular model,
``scoring_data``, ``evaluation_fn``, and strategy for determining optimal
variables
:param model: a sklearn model
:param training_data: a 2-tuple ``(inputs, outputs)`` for training in the
``scoring_fn``
:param scoring_data: a 2-tuple ``(inputs, outputs)`` for scoring in the
``scoring_fn``
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
:param scoring_strategy: a function to be used for determining optimal
variables. Should be of the form ``([some_value]) -> index``
:param variable_names: an optional list for variable names. If not given,
will use names of columns of data (if pandas dataframe) or column
indices
:param nimportant_vars: number of variables to compute importance for.
Defaults to all variables
:param njobs: an integer for the number of threads to use. If negative, will
use ``num_cpus + njobs``. Defaults to 1
:param nbootstrap: number of times to perform scoring on each variable.
Results over different bootstrap iterations are averaged. Defaults to 1
:param subsample: number of elements to sample (with replacement) per
bootstrap round. If between 0 and 1, treated as a fraction of the number
of total number of events (e.g. 0.5 means half the number of events).
If not specified, subsampling will not be used and the entire data will
be used (without replacement)
:param kwargs: all other kwargs will be passed on to the ``evaluation_fn``
:returns: :class:`PermutationImportance.result.ImportanceResult` object
which contains the results for each run
"""
# Check if the data is probabilistic
if len(scoring_data[1].shape) > 1 and scoring_data[1].shape[1] > 1:
scoring_fn = score_untrained_sklearn_model_with_probabilities(
model, evaluation_fn, nbootstrap=nbootstrap, subsample=subsample, **kwargs
)
else:
scoring_fn = score_untrained_sklearn_model(
model, evaluation_fn, nbootstrap=nbootstrap, subsample=subsample, **kwargs
)
return sequential_backward_selection(
training_data,
scoring_data,
scoring_fn,
scoring_strategy,
variable_names=variable_names,
nimportant_vars=nimportant_vars,
njobs=njobs,
) | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/PermutationImportance/sequential_selection.py | 0.913141 | 0.528229 | sequential_selection.py | pypi |
from multiprocessing import Process, Queue, cpu_count
try:
from Queue import Full as QueueFull
from Queue import Empty as QueueEmpty
except ImportError: # python3
from queue import Full as QueueFull
from queue import Empty as QueueEmpty
__all__ = ["pool_imap_unordered"]
def worker(func, recvq, sendq):
for args in iter(recvq.get, None):
# The args are training_data, scoring_data, var_idx
# Thus, we want to return the var_idx and then
# send those args to the abstract runner.
result = (args[-1], func(*args))
sendq.put(result)
def pool_imap_unordered(func, iterable, procs=cpu_count()):
"""Lazily imaps in an unordered manner over an iterable in parallel as a
generator
:Author: Grant Jenks <https://stackoverflow.com/users/232571/grantj>
:param func: function to perform on each iterable
:param iterable: iterable which has items to map over
:param procs: number of workers in the pool. Defaults to the cpu count
:yields: the results of the mapping
"""
# Create queues for sending/receiving items from iterable.
sendq = Queue(procs)
recvq = Queue()
# Start worker processes.
for rpt in range(procs):
Process(target=worker, args=(func, sendq, recvq)).start()
# Iterate iterable and communicate with worker processes.
send_len = 0
recv_len = 0
itr = iter(iterable)
try:
value = next(itr)
while True:
try:
sendq.put(value, True, 0.1)
send_len += 1
value = next(itr)
except QueueFull:
while True:
try:
result = recvq.get(False)
recv_len += 1
yield result
except QueueEmpty:
break
except StopIteration:
pass
# Collect all remaining results.
while recv_len < send_len:
result = recvq.get()
recv_len += 1
yield result
# Terminate worker processes.
for rpt in range(procs):
sendq.put(None) | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/PermutationImportance/multiprocessing_utils.py | 0.494629 | 0.225843 | multiprocessing_utils.py | pypi |
import numpy as np
import pandas as pd
from .utils import get_data_subset, make_data_from_columns, conditional_permutations
__all__ = [
"SequentialForwardSelectionStrategy",
"SequentialBackwardSelectionStrategy",
"PermutationImportanceSelectionStrategy",
"SelectionStrategy",
]
class SelectionStrategy(object):
"""The base ``SelectionStrategy`` only provides the tools for storing the
data and other important information as well as the convenience method for
iterating over the selection strategies triples lazily."""
name = "Abstract Selection Strategy"
def __init__(self, training_data, scoring_data, num_vars, important_vars):
"""Initializes the object by storing the data and keeping track of other
important information
:param training_data: (training_inputs, training_outputs)
:param scoring_data: (scoring_inputs, scoring_outputs)
:param num_vars: integer for the total number of variables
:param important_vars: a list of the indices of variables which are already
considered important
"""
self.training_data = training_data
self.scoring_data = scoring_data
self.num_vars = num_vars
self.important_vars = important_vars
def generate_datasets(self, important_variables):
"""Generator which returns triples (variable, training_data, scoring_data)"""
raise NotImplementedError(
"Please implement a strategy for generating datasets on class %s"
% self.name
)
def generate_all_datasets(self):
"""By default, loops over all variables not yet considered important"""
for var in range(self.num_vars):
if var not in self.important_vars:
                training_data, scoring_data = self.generate_datasets(
                    self.important_vars + [var]
                )
yield (training_data, scoring_data, var)
def __iter__(self):
return self.generate_all_datasets()
class SequentialForwardSelectionStrategy(SelectionStrategy):
"""Sequential Forward Selection tests all variables which are not yet
considered important by adding that columns to the other columns which are
returned. This means that the shape of the training data will be
``(num_rows, num_important_vars + 1)``."""
name = "Sequential Forward Selection"
def generate_datasets(self, important_variables):
"""Check each of the non-important variables. Dataset is the columns
which are important
:returns: (training_data, scoring_data)
"""
training_inputs, training_outputs = self.training_data
scoring_inputs, scoring_outputs = self.scoring_data
columns = important_variables
# Make a slice of the training inputs
training_inputs_subset = get_data_subset(training_inputs, None, columns)
# Make a slice of the scoring inputs
scoring_inputs_subset = get_data_subset(scoring_inputs, None, columns)
return (training_inputs_subset, training_outputs), (
scoring_inputs_subset,
scoring_outputs,
)
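# Illustrative sketch (not part of the original module): iterating a selection
# strategy lazily yields (training_data, scoring_data, variable_index) triples.
# The toy arrays below are assumptions made purely for demonstration.
def _example_iterate_forward_strategy():
    """Show which candidate variables are visited and the candidate data shape."""
    rng = np.random.RandomState(0)
    X, y = rng.normal(size=(8, 3)), rng.normal(size=8)
    strategy = SequentialForwardSelectionStrategy(
        (X, y), (X, y), num_vars=3, important_vars=[0]
    )
    visited = []
    for training_data, scoring_data, var in strategy:
        # Variable 0 is already "important", so only variables 1 and 2 are
        # candidates, and each candidate training input has two columns: [0, var].
        visited.append((var, training_data[0].shape))
    return visited  # [(1, (8, 2)), (2, (8, 2))]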
class SequentialBackwardSelectionStrategy(SelectionStrategy):
"""Sequential Backward Selection tests all variables which are not yet
considered important by removing that column from the data. This means that
the shape of the training data will be
``(num_rows, num_vars - num_important_vars - 1)``."""
name = "Sequential Backward Selection"
def generate_datasets(self, important_variables):
"""Check each of the non-important variables. Dataset is the columns
which are not important
        :returns: (training_data, scoring_data)
"""
training_inputs, training_outputs = self.training_data
scoring_inputs, scoring_outputs = self.scoring_data
columns = [x for x in range(self.num_vars) if x not in important_variables]
# Make a slice of the training inputs
training_inputs_subset = get_data_subset(training_inputs, None, columns)
# Make a slice of the scoring inputs
scoring_inputs_subset = get_data_subset(scoring_inputs, None, columns)
return (training_inputs_subset, training_outputs), (
scoring_inputs_subset,
scoring_outputs,
)
class PermutationImportanceSelectionStrategy(SelectionStrategy):
"""Permutation Importance tests all variables which are not yet considered
important by shuffling that column in addition to the columns of the
variables which are considered important. The shape of the data will remain
constant, but at each step, one additional column will be permuted."""
name = "Permutation Importance"
def __init__(
self,
training_data,
scoring_data,
num_vars,
important_vars,
random_state,
**kwargs
):
"""Initializes the object by storing the data and keeping track of other
important information
:param training_data: (training_inputs, training_outputs)
:param scoring_data: (scoring_inputs, scoring_outputs)
:param num_vars: integer for the total number of variables
:param important_vars: a list of the indices of variables which are
already considered important
"""
super(PermutationImportanceSelectionStrategy, self).__init__(
training_data, scoring_data, num_vars, important_vars
)
# Also initialize the "shuffled data"
scoring_inputs, __ = self.scoring_data
indices = random_state.permutation(len(scoring_inputs))
# With each iteration of the algorithm, the indices
# are shuffled once and identically for each feature.
# Thus, when multiple features are permuted they are
# jointly permuted (i.e., without destroying the
# dependencies of the features within the group).
# However, how the features are jointly permuted
# changes from iteration to iteration to limit
# bias due to a poor permutation.
self.shuffled_scoring_inputs = get_data_subset(
scoring_inputs, indices
) # This copies
# keep track of the initial index (assuming this is pandas data)
self.original_index = (
scoring_inputs.index if isinstance(scoring_inputs, pd.DataFrame) else None
)
def generate_datasets(self, important_variables):
"""Check each of the non-important variables. Dataset has columns which
are important shuffled
:returns: (training_data, scoring_data)
"""
scoring_inputs, scoring_outputs = self.scoring_data
complete_scoring_inputs = make_data_from_columns(
[
get_data_subset(
self.shuffled_scoring_inputs
if i in important_variables
else scoring_inputs,
None,
[i],
)
for i in range(self.num_vars)
],
index=self.original_index,
)
return self.training_data, (complete_scoring_inputs, scoring_outputs)
class ConditionalPermutationImportanceSelectionStrategy(SelectionStrategy):
"""Conditional Permutation Importance tests all variables which are not yet considered
important by performing conditional permutation on that column in addition to the columns of the
variables which are considered important. The shape of the data will remain
constant, but at each step, one additional column will be permuted."""
name = "Conditional Permutation Importance"
def __init__(
self,
training_data,
scoring_data,
num_vars,
important_vars,
random_state,
**kwargs
):
"""Initializes the object by storing the data and keeping track of other
important information
:param training_data: (training_inputs, training_outputs)
:param scoring_data: (scoring_inputs, scoring_outputs)
:param num_vars: integer for the total number of variables
:param important_vars: a list of the indices of variables which are
already considered important
"""
super(ConditionalPermutationImportanceSelectionStrategy, self).__init__(
training_data, scoring_data, num_vars, important_vars
)
n_bins = kwargs.get("n_bins", 50)
# Also initialize the "shuffled data"
scoring_inputs, __ = self.scoring_data
self.shuffled_scoring_inputs = conditional_permutations(
scoring_inputs, n_bins, random_state
) # This copies
# keep track of the initial index (assuming this is pandas data)
self.original_index = (
scoring_inputs.index if isinstance(scoring_inputs, pd.DataFrame) else None
)
def generate_datasets(self, important_variables):
"""Check each of the non-important variables. Dataset has columns which
are important shuffled
:returns: (training_data, scoring_data)
"""
scoring_inputs, scoring_outputs = self.scoring_data
# If a feature has been deemed important it remains shuffled
complete_scoring_inputs = make_data_from_columns(
[
get_data_subset(
self.shuffled_scoring_inputs
if i in important_variables
else scoring_inputs,
None,
[i],
)
for i in range(self.num_vars)
],
index=self.original_index,
)
return self.training_data, (complete_scoring_inputs, scoring_outputs)
class ForwardPermutationImportanceSelectionStrategy(SelectionStrategy):
"""Forward Permutation Importance permutes all variables and then tests
all variables which are not yet considered."""
name = "Forward Permutation Importance"
def __init__(
self,
training_data,
scoring_data,
num_vars,
important_vars,
random_state,
**kwargs
):
"""Initializes the object by storing the data and keeping track of other
important information
:param training_data: (training_inputs, training_outputs)
:param scoring_data: (scoring_inputs, scoring_outputs)
:param num_vars: integer for the total number of variables
:param important_vars: a list of the indices of variables which are
already considered important
"""
super(ForwardPermutationImportanceSelectionStrategy, self).__init__(
training_data, scoring_data, num_vars, important_vars
)
# With each iteration of the algorithm, the indices
# are shuffled once and identically for each feature.
# Thus, when multiple features are permuted they are
# jointly permuted (i.e., without destroying the
# dependencies of the features within the group).
# However, how the features are jointly permuted
# changes from iteration to iteration to limit
# bias due to a poor permutation.
# Also initialize the "shuffled data"
scoring_inputs, __ = self.scoring_data
indices = random_state.permutation(len(scoring_inputs))
self.shuffled_scoring_inputs = get_data_subset(
scoring_inputs, indices
) # This copies
# keep track of the initial index (assuming this is pandas data)
self.original_index = (
scoring_inputs.index if isinstance(scoring_inputs, pd.DataFrame) else None
)
def generate_datasets(self, important_variables):
"""Check each of the non-important variables. Dataset has columns which
are non-important variables are shuffled
:returns: (training_data, scoring_data)
"""
scoring_inputs, scoring_outputs = self.scoring_data
        # Important variables (and the candidate) keep their original values; all others remain shuffled.
complete_scoring_inputs = make_data_from_columns(
[
get_data_subset(
scoring_inputs
if i in important_variables
else self.shuffled_scoring_inputs,
                    columns=[i],
)
for i in range(self.num_vars)
],
index=self.original_index,
)
return self.training_data, (complete_scoring_inputs, scoring_outputs) | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/PermutationImportance/selection_strategies.py | 0.842053 | 0.553928 | selection_strategies.py | pypi |
import numpy as np
import pandas as pd
import numbers
from .error_handling import InvalidDataException
__all__ = ["add_ranks_to_dict", "get_data_subset", "make_data_from_columns"]
def add_ranks_to_dict(result, variable_names, scoring_strategy):
"""Takes a list of (var, score) and converts to a dictionary of
{var: (rank, score)}
:param result: a dict of {var_index: score}
:param variable_names: a list of variable names
:param scoring_strategy: a function to be used for determining optimal
variables. Should be of the form ([floats]) -> index
"""
if len(result) == 0:
return dict()
result_dict = dict()
rank = 0
while len(result) > 1:
var_idxs = list(result.keys())
idxs = np.argsort(var_idxs)
# Sort by indices to guarantee order
variables = list(np.array(var_idxs)[idxs])
scores = list(np.array(list(result.values()))[idxs])
best_var = variables[scoring_strategy(scores)]
score = result.pop(best_var)
result_dict[variable_names[best_var]] = (rank, score)
rank += 1
var, score = list(result.items())[0]
result_dict[variable_names[var]] = (rank, score)
return result_dict
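# Illustrative sketch (not part of the original module): rank a small result
# dictionary with np.argmin (i.e., lower scores are more important).
def _example_add_ranks():
    scores = {0: 0.2, 1: 0.5, 2: 0.3}
    ranked = add_ranks_to_dict(scores, ["temp", "dewpoint", "wind"], np.argmin)
    # ranked == {"temp": (0, 0.2), "wind": (1, 0.3), "dewpoint": (2, 0.5)}
    return ranked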
def get_data_subset(data, rows=None, columns=None):
"""Returns a subset of the data corresponding to the desired rows and
columns
:param data: either a pandas dataframe or a numpy array
:param rows: a list of row indices
:param columns: a list of column indices
:returns: data_subset (same type as data)
"""
if rows is None:
rows = np.arange(data.shape[0])
if isinstance(data, pd.DataFrame):
if columns is None:
return data.iloc[rows]
else:
return data.iloc[rows, columns]
elif isinstance(data, np.ndarray):
if columns is None:
return data[rows]
else:
return data[np.ix_(rows, columns)]
else:
raise InvalidDataException(
data, "Data must be a pandas dataframe or numpy array"
)
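# Illustrative sketch (not part of the original module): the same call works
# for numpy arrays and pandas dataframes.
def _example_get_data_subset():
    data = np.arange(12).reshape(4, 3)
    # Rows 0 and 2, column 1 only -> array([[1], [7]])
    return get_data_subset(data, rows=[0, 2], columns=[1])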
def make_data_from_columns(columns_list, index=None):
"""Synthesizes a dataset out of a list of columns
:param columns_list: a list of either pandas series or numpy arrays
:returns: a pandas dataframe or a numpy array
"""
if len(columns_list) == 0:
raise InvalidDataException(
columns_list, "Must have at least one column to synthesize dataset"
)
if isinstance(columns_list[0], pd.DataFrame) or isinstance(
columns_list[0], pd.Series
):
df = pd.concat([c.reset_index(drop=True) for c in columns_list], axis=1)
if index is not None:
return df.set_index(index)
else:
return df
elif isinstance(columns_list[0], np.ndarray):
return np.column_stack(columns_list)
else:
raise InvalidDataException(
columns_list,
"Columns_list must come from a pandas dataframe or numpy arrays",
)
def conditional_permutations(data, n_bins, random_state):
"""
Conditionally permute each feature in a dataset.
Code appended to the PermutationImportance package by Montgomery Flora 2021.
Args:
-------------------
data : pd.DataFrame or np.ndarray shape=(n_examples, n_features,)
    n_bins : integer
        number of bins to divide a feature into. Based on a
        percentile method to ensure that each bin receives
        a similar number of examples
random_state : np.random.RandomState instance
Pseudo-random number generator to control the permutations of each
feature.
Pass an int to get reproducible results across function calls.
Returns:
-------------------
permuted_data : a permuted version of data
"""
permuted_data = data.copy()
for i in range(np.shape(data)[1]):
# Get the bin values of feature
if isinstance(data, pd.DataFrame):
feature_values = data.iloc[:, i]
elif isinstance(data, np.ndarray):
feature_values = data[:, i]
else:
raise InvalidDataException(
data, "Data must be a pandas dataframe or numpy array"
)
bin_edges = np.unique(
np.percentile(
feature_values,
np.linspace(0, 100, n_bins + 1),
interpolation="lower",
)
)
bin_indices = np.clip(
np.digitize(feature_values, bin_edges, right=True) - 1, 0, None
)
shuffled_indices = bin_indices.copy()
unique_bin_values = np.unique(bin_indices)
# bin_indices is composed of bin indices for a corresponding value of feature_values
for bin_idx in unique_bin_values:
# idx is the actual index of indices where the bin index == i
idx = np.where(bin_indices == bin_idx)[0]
# Replace the bin indices with a permutation of the actual indices
shuffled_indices[idx] = random_state.permutation(idx)
if isinstance(data, pd.DataFrame):
permuted_data.iloc[:, i] = data.iloc[shuffled_indices, i]
else:
permuted_data[:, i] = data[shuffled_indices, i]
return permuted_data
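# Illustrative sketch (not part of the original module): conditionally permute
# a random dataset.  Shuffling within percentile bins preserves each column's
# marginal distribution while breaking its relationship to the other columns.
def _example_conditional_permutation():
    random_state = np.random.RandomState(42)
    data = random_state.normal(size=(500, 2))
    permuted = conditional_permutations(data, n_bins=10, random_state=random_state)
    # The sorted values are unchanged; only the ordering within bins differs.
    return np.allclose(np.sort(permuted, axis=0), np.sort(data, axis=0))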
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
Parameters. Function comes for sci-kit-learn.
----------
seed : None, int or instance of RandomState
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, numbers.Integral):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError(
"%r cannot be used to seed a numpy.random.RandomState" " instance" % seed
)
def bootstrap_generator(n_bootstrap, seed=42):
"""
Create a repeatable bootstrap generator.
Will create the same set of random state generators given
a number of bootstrap iterations.
"""
base_random_state = np.random.RandomState(seed)
random_num_set = base_random_state.choice(10000, size=n_bootstrap, replace=False)
random_states = [np.random.RandomState(s) for s in random_num_set]
return random_states | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/PermutationImportance/utils.py | 0.79909 | 0.591045 | utils.py | pypi |
class InvalidStrategyException(Exception):
"""Thrown when a scoring strategy is invalid"""
def __init__(self, strategy, msg=None, options=None):
if msg is None:
msg = (
"%s is not a valid strategy for determining the optimal variable. "
% strategy
)
msg += "\nShould be a callable or a valid string option. "
if options is not None:
msg += "Valid options are\n%r" % options
super(InvalidStrategyException, self).__init__(msg)
self.strategy = strategy
        self.options = options
class InvalidInputException(Exception):
"""Thrown when the input to the program does not match expectations"""
def __init__(self, value, msg=None):
if msg is None:
msg = "Input value does not match expectations: %s" % value
super(InvalidInputException, self).__init__(msg)
self.value = value
class InvalidDataException(Exception):
"""Thrown when the training or scoring data is not of the right type"""
def __init__(self, data, msg=None):
if msg is None:
msg = "Data is not of the right format"
super(InvalidDataException, self).__init__(msg)
self.data = data
class UnmatchedLengthPredictionsException(Exception):
"""Thrown when the number of predictions doesn't match truths"""
def __init__(self, truths, predictions, msg=None):
if msg is None:
msg = "Shapes of truths and predictions do not match: %r and %r" % (
truths.shape,
predictions.shape,
)
super(UnmatchedLengthPredictionsException, self).__init__(msg)
self.truths = truths
self.predictions = predictions
class UnmatchingProbabilisticForecastsException(Exception):
"""Thrown when the shape of probabilisic predictions doesn't match the truths"""
def __init__(self, truths, predictions, msg=None):
if msg is None:
msg = "Shapes of truths and predictions do not match: %r and %r" % (
truths.shape,
predictions.shape,
)
super(UnmatchingProbabilisticForecastsException, self).__init__(msg)
self.truths = truths
self.predictions = predictions
class AmbiguousProbabilisticForecastsException(Exception):
"""Thrown when classes were not provided for converting probabilistic
predictions to deterministic ones but are required"""
def __init__(self, truths, predictions, msg=None):
if msg is None:
msg = "Classes not provided for converting probabilistic predictions to deterministic ones"
super(AmbiguousProbabilisticForecastsException, self).__init__(msg)
self.truths = truths
self.predictions = predictions
class FullImportanceResultWarning(Warning):
"""Thrown when we try to add a result to a full
:class:`PermutationImportance.result.ImportanceResult`"""
pass | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/PermutationImportance/error_handling.py | 0.790611 | 0.24243 | error_handling.py | pypi |
import numpy as np
import pandas as pd
from .error_handling import InvalidDataException, InvalidInputException
try:
basestring
except NameError: # Python3
basestring = str
__all__ = ["verify_data", "determine_variable_names"]
def verify_data(data):
"""Verifies that the data tuple is of the right format and coerces it to
numpy arrays for the code under the hood
:param data: one of the following:
(pandas dataframe, string for target column),
(pandas dataframe for inputs, pandas dataframe for outputs),
(numpy array for inputs, numpy array for outputs)
:returns: (numpy array for input, numpy array for output) or
(pandas dataframe for input, pandas dataframe for output)
"""
try:
iter(data)
except TypeError:
raise InvalidDataException(data, "Data must be iterable")
else:
if len(data) != 2:
raise InvalidDataException(data, "Data must contain 2 elements")
else:
# check if the first element is pandas dataframe or numpy array
if isinstance(data[0], pd.DataFrame):
# check if the second element is string or pandas dataframe
if isinstance(data[1], basestring):
return (
data[0].loc[:, data[0].columns != data[1]],
data[0][[data[1]]],
)
elif isinstance(data[1], pd.DataFrame):
return data[0], data[1]
else:
raise InvalidDataException(
data,
"Second element of data must be a string for the target column or a pandas dataframe",
)
elif isinstance(data[0], np.ndarray):
if isinstance(data[1], np.ndarray):
return data[0], data[1]
else:
raise InvalidDataException(
data, "Second element of data must also be a numpy array"
)
else:
raise InvalidDataException(
data,
"First element of data must be a numpy array or pandas dataframe",
)
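# Illustrative sketch (not part of the original module): each accepted input
# form is coerced to an (inputs, outputs) pair.
def _example_verify_data():
    df = pd.DataFrame({"x1": [1, 2], "x2": [3, 4], "target": [0, 1]})
    inputs, outputs = verify_data((df, "target"))
    # inputs has columns ["x1", "x2"]; outputs is the single-column frame [["target"]]
    return inputs.columns.tolist(), outputs.columns.tolist()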
def determine_variable_names(data, variable_names):
"""Uses ``data`` and/or the ``variable_names`` to determine what the
variable names are. If ``variable_names`` is not specified and ``data`` is
not a pandas dataframe, defaults to the column indices
:param data: a 2-tuple where the input data is the first item
:param variable_names: either a list of variable names or None
:returns: a list of variable names
"""
if variable_names is not None:
try:
iter(variable_names)
except TypeError:
raise InvalidInputException(
variable_names, "Variable names must be iterable"
)
else:
if len(variable_names) != data[0].shape[1]:
raise InvalidInputException(
variable_names,
"Variable names should have length %i" % data[0].shape[1],
)
else:
return np.array(variable_names)
else:
if isinstance(data[0], pd.DataFrame):
return data[0].columns.values
else:
return np.arange(data[0].shape[1]) | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/PermutationImportance/data_verification.py | 0.619241 | 0.616936 | data_verification.py | pypi |
import numpy as np
from sklearn.base import clone
from .utils import get_data_subset, bootstrap_generator
from joblib import Parallel, delayed
__all__ = [
"model_scorer",
"score_untrained_sklearn_model",
"score_untrained_sklearn_model_with_probabilities",
"score_trained_sklearn_model",
"score_trained_sklearn_model_with_probabilities",
"train_model",
"get_model",
"predict_model",
"predict_proba_model",
]
def train_model(model, X_train, y_train):
"""Trains a scikit-learn model and returns the trained model"""
if X_train.shape[1] == 0:
# No data to train over, so don't bother
return None
cloned_model = clone(model)
return cloned_model.fit(X_train, y_train)
def get_model(model, X_train, y_train):
"""Just return the trained model"""
return model
def predict_model(model, X_score):
"""Uses a trained scikit-learn model to predict over the scoring data"""
return model.predict(X_score)
def predict_proba_model(model, X_score):
"""Uses a trained scikit-learn model to predict class probabilities for the
scoring data"""
pred = model.predict_proba(X_score)
# Binary classification.
if pred.shape[1] == 2:
return pred[:,1]
else:
return pred
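# Illustrative sketch (not part of the original module): for a binary
# classifier the helper above returns a 1-D array of positive-class
# probabilities, while multiclass models keep the full probability matrix.
# The toy model below is an assumption made purely for demonstration.
def _example_predict_proba_model():
    from sklearn.linear_model import LogisticRegression

    rng = np.random.RandomState(0)
    X = rng.normal(size=(60, 3))
    y = (X[:, 0] > 0).astype(int)
    model = LogisticRegression().fit(X, y)
    return predict_proba_model(model, X).shape  # (60,)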
def forward_permutations(X, inds, var_idx):
return np.array(
[
X[:, i] if i == var_idx else X[inds, i]
for i in range(X.shape[1])
]
).T
class model_scorer(object):
"""General purpose scoring method which takes a particular model, trains the
model over the given training data, uses the trained model to predict on the
given scoring data, and then evaluates those predictions using some
evaluation function. Additionally provides the tools for bootstrapping the
scores and providing a distribution of scores to be used for statistics.
    NOTE: Since this method is used internally, the scoring inputs into
this method for different rounds of multipass permutation importance
are already permuted for the top most features. Thus, in any current
iteration, we need only permute a single column at a time.
"""
def __init__(
self,
model,
training_fn,
prediction_fn,
evaluation_fn,
nimportant_vars=1,
default_score=0.0,
n_permute=1,
subsample=1,
direction='backward',
**kwargs
):
"""Initializes the scoring object by storing the training, predicting,
and evaluation functions
:param model: a scikit-learn model
:param training_fn: a function for training a scikit-learn model. Must
be of the form ``(model, X_train, y_train) ->
trained_model | None``. If the function returns ``None``, then it is
assumed that the model training failed.
Probably :func:`PermutationImportance.sklearn_api.train_model` or
:func:`PermutationImportance.sklearn_api.get_model`
:param predicting_fn: a function for predicting on scoring data using a
scikit-learn model. Must be of the form ``(model, X_score) ->
predictions``. Predictions may be either deterministic or
probabilistic, depending on what the evaluation_fn accepts.
Probably :func:`PermutationImportance.sklearn_api.predict_model` or
:func:`PermutationImportance.sklearn_api.predict_proba_model`
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
:param default_score: value to return if the model cannot be trained
        :param n_permute: number of times to permute and score each variable.
            Results over the different permutations are averaged. Defaults to 1
        :param direction: either 'backward' (permute the candidate variable) or
            'forward' (restore the candidate variable's original values within an
            otherwise permuted dataset). Defaults to 'backward'
:param subsample: number of elements to sample (with replacement) per
bootstrap round. If between 0 and 1, treated as a fraction of the number
of total number of events (e.g. 0.5 means half the number of events).
If not specified, subsampling will not be used and the entire data will
be used (without replacement)
"""
self.model = model
self.training_fn = training_fn
self.prediction_fn = prediction_fn
self.evaluation_fn = evaluation_fn
self.default_score = default_score
self.n_permute = n_permute
self.subsample = subsample
self.direction = direction
self.kwargs = kwargs
self.random_seed = kwargs.get("random_seed", 42)
def _scorer(self, X, y):
predictions = self.prediction_fn(self.model, X)
return self.evaluation_fn(y,predictions)
def get_subsample_size(self, full_size):
return (
int(full_size * self.subsample)
if self.subsample <= 1
else self.subsample
)
    def _train(self):
        # Try to train the model using the stored training data.
        trained_model = self.training_fn(self.model, self.X_train, self.y_train)
        # If we didn't succeed in training (probably because there weren't any
        # training predictors), return the default_score
        if trained_model is None:
            if self.n_permute == 1:
                return [self.default_score]
            else:
                return np.full((self.n_permute,), self.default_score)
        return trained_model
def get_permuted_data(self, idx, var_idx):
""" Get permuted data """
X_score_sub = self.X_score
y_score_sub = self.y_score
X_train_sub = self.X_train
inds = self.shuffled_indices[idx]
if len(self.rows[0]) != self.y_score.shape[0]:
X_score_sub = get_data_subset(self.X_score, self.rows[idx])
y_score_sub = get_data_subset(self.y_score, self.rows[idx])
if self.direction == 'forward':
X_train_sub = get_data_subset(self.X_train, self.rows[idx])
#inds = inds[idx]
if var_idx is None:
return X_score_sub, y_score_sub
        # For the backward direction, X_score is mostly unpermuted except for
        # the top features. For the forward direction, X_score is fully permuted
        # except for the top features.
X_perm = X_score_sub.copy()
if self.direction == 'backward':
X_perm[:,var_idx] = X_score_sub[inds, var_idx]
else:
X_perm[:,var_idx] = X_train_sub[:, var_idx]
return X_perm, y_score_sub
def __call__(self, training_data, scoring_data, var_idx):
"""Uses the training, predicting, and evaluation functions to score the
model given the training and scoring data
:param training_data: (training_input, training_output)
:param scoring_data: (scoring_input, scoring_output)
:returns: either a single value or an array of values
:param var_idx : integer
The column index of the variable being permuted. When computing the original
score, set var_idx==None.
"""
(self.X_train, self.y_train) = training_data
(self.X_score, self.y_score) = scoring_data
permuted_set = [self.get_permuted_data(idx, var_idx) for idx in range(self.n_permute)]
scores = np.array([self._scorer(*arg) for arg in permuted_set])
return np.array(scores)
def score_untrained_sklearn_model(
model, evaluation_fn, nbootstrap=None, subsample=1, **kwargs
):
"""A convenience method which uses the default training and the
deterministic prediction methods for scikit-learn to evaluate a model
:param model: a scikit-learn model
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
:param nbootstrap: number of times to perform scoring on each variable.
Results over different bootstrap iterations are averaged. Defaults to 1
:param subsample: number of elements to sample (with replacement) per
bootstrap round. If between 0 and 1, treated as a fraction of the number
of total number of events (e.g. 0.5 means half the number of events).
If not specified, subsampling will not be used and the entire data will
be used (without replacement)
:param kwargs: all other kwargs passed on to the evaluation_fn
:returns: a callable which accepts ``(training_data, scoring_data)`` and
returns some value (probably a float or an array of floats)
"""
return model_scorer(
model,
training_fn=train_model,
prediction_fn=predict_model,
evaluation_fn=evaluation_fn,
nbootstrap=nbootstrap,
subsample=subsample,
**kwargs
)
def score_untrained_sklearn_model_with_probabilities(
model, evaluation_fn, nbootstrap=None, subsample=1, **kwargs
):
"""A convenience method which uses the default training and the
probabilistic prediction methods for scikit-learn to evaluate a model
:param model: a scikit-learn model
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
:param nbootstrap: number of times to perform scoring on each variable.
Results over different bootstrap iterations are averaged. Defaults to 1
:param subsample: number of elements to sample (with replacement) per
bootstrap round. If between 0 and 1, treated as a fraction of the number
of total number of events (e.g. 0.5 means half the number of events).
If not specified, subsampling will not be used and the entire data will
be used (without replacement)
:param kwargs: all other kwargs passed on to the evaluation_fn
:returns: a callable which accepts ``(training_data, scoring_data)`` and
returns some value (probably a float or an array of floats)
"""
return model_scorer(
model,
training_fn=train_model,
prediction_fn=predict_proba_model,
evaluation_fn=evaluation_fn,
nbootstrap=nbootstrap,
subsample=subsample,
**kwargs
)
def score_trained_sklearn_model(
model, evaluation_fn, n_permute=1, subsample=1, direction='backward', **kwargs
):
"""A convenience method which does not retrain a scikit-learn model and uses
deterministic prediction methods to evaluate the model
:param model: a scikit-learn model
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
:param nbootstrap: number of times to perform scoring on each variable.
Results over different bootstrap iterations are averaged. Defaults to 1
:param subsample: number of elements to sample (with replacement) per
bootstrap round. If between 0 and 1, treated as a fraction of the number
of total number of events (e.g. 0.5 means half the number of events).
If not specified, subsampling will not be used and the entire data will
be used (without replacement)
:param kwargs: all other kwargs passed on to the evaluation_fn
:returns: a callable which accepts ``(training_data, scoring_data)`` and
returns some value (probably a float or an array of floats)
"""
return model_scorer(
model,
training_fn=get_model,
prediction_fn=predict_model,
evaluation_fn=evaluation_fn,
n_permute=n_permute,
subsample=subsample,
direction=direction,
**kwargs
)
def score_trained_sklearn_model_with_probabilities(
model, evaluation_fn, n_permute=1, subsample=1, direction='backward', **kwargs
):
"""A convenience method which does not retrain a scikit-learn model and uses
probabilistic prediction methods to evaluate the model
:param model: a scikit-learn model
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
    :param n_permute: number of times to permute and rescore each variable.
        Results over the different permutation iterations are averaged.
        Defaults to 1
    :param subsample: number of elements to sample (with replacement) per
        permutation iteration. If between 0 and 1, treated as a fraction of the
        total number of events (e.g. 0.5 means half the number of events).
        If not specified, subsampling will not be used and the entire data will
        be used (without replacement)
    :param direction: 'backward' or 'forward' permutation direction
:param kwargs: all other kwargs passed on to the evaluation_fn
:returns: a callable which accepts ``(training_data, scoring_data)`` and
returns some value (probably a float or an array of floats)
"""
return model_scorer(
model,
training_fn=get_model,
prediction_fn=predict_proba_model,
evaluation_fn=evaluation_fn,
n_permute=n_permute,
subsample=subsample,
direction=direction,
**kwargs
) | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/PermutationImportance/sklearn_api.py | 0.903559 | 0.66888 | sklearn_api.py | pypi |
import warnings
try:
from itertools import izip as zip
except ImportError: # python3
pass
from .error_handling import FullImportanceResultWarning
class ImportanceResult(object):
"""Houses the result of any importance method, which consists of a
sequence of contexts and results. An individual result can only be truly
interpreted correctly in light of the corresponding context. This object
allows for indexing into the contexts and results and also provides
convenience methods for retrieving the results with no context and the
most complete context"""
def __init__(self, method, variable_names, original_score):
"""Initializes the results object with the method used and a list of
variable names
:param method: string for the type of variable importance used
:param variable_names: a list of names for variables
:param original_score: the score of the model when no variables are
important
"""
self.method = method
self.variable_names = variable_names
self.original_score = original_score
# The initial context is "empty"
self.contexts = [{}]
self.results = list()
self.complete = False
def add_new_results(self, new_results, next_important_variable=None):
"""Adds a new round of results. Warns if the ImportanceResult is already
complete
:param new_results: a dictionary with keys of variable names and values
of ``(rank, score)``
:param next_important_variable: variable name of the next most important
variable. If not given, will select the variable with the smallest
rank
"""
if not self.complete:
if next_important_variable is None:
next_important_variable = min(
new_results.keys(), key=lambda key: new_results[key][0]
)
self.results.append(new_results)
new_context = self.contexts[-1].copy()
self.contexts.append(new_context)
__, score = new_results[next_important_variable]
self.contexts[-1][next_important_variable] = (len(self.results) - 1, score)
# Check to see if this result could constitute the last possible one
if len(self.results) == len(self.variable_names):
self.results.append(dict())
self.complete = True
else:
warnings.warn(
"Cannot add new result to full ImportanceResult",
FullImportanceResultWarning,
)
def retrieve_singlepass(self):
"""Returns the singlepass results as a dictionary with keys of variable
names and values of ``(rank, score)``."""
return self.results[0]
def retrieve_all_iterations(self):
"""Returns the singlepass results for all multipass iterations"""
return self.results
def retrieve_multipass(self):
"""Returns the multipass results as a dictionary with keys of variable
names and values of ``(rank, score)``."""
return self.contexts[-1]
def __iter__(self):
"""Iterates over pairs of contexts and results"""
return zip(self.contexts, self.results)
def __getitem__(self, index):
"""Retrieves the ith pair of ``(context, result)``"""
if index < 0:
index = len(self.results) + index
return (self.contexts[index], self.results[index])
def __len__(self):
"""Returns the total number of results computed"""
return len(self.results) | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/PermutationImportance/result.py | 0.715821 | 0.439146 | result.py | pypi |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator, FormatStrFormatter, AutoMinorLocator
import matplotlib.ticker as mticker
from matplotlib import rcParams
from matplotlib.colors import ListedColormap
from matplotlib.gridspec import GridSpec
import matplotlib
import seaborn as sns
from ..common.utils import is_outlier
from ..common.contrib_utils import combine_like_features
import shap
class PlotStructure:
"""
    PlotStructure handles figure and subplot generation.
"""
def __init__(self, BASE_FONT_SIZE=12, seaborn_kws=None):
GENERIC_FONT_SIZE_NAMES = [
"teensie",
"tiny",
"small",
"normal",
"big",
"large",
"huge",
]
FONT_SIZES_ARRAY = np.arange(-6, 8, 2) + BASE_FONT_SIZE
self.FONT_SIZES = {
name: size for name, size in zip(GENERIC_FONT_SIZE_NAMES, FONT_SIZES_ARRAY)
}
        if seaborn_kws is None:
            custom_params = {"axes.spines.right": False, "axes.spines.top": False}
            sns.set_theme(style="ticks", rc=custom_params)
        elif isinstance(seaborn_kws, dict):
            sns.set_theme(**seaborn_kws)
# Setting the font style to serif
rcParams["font.family"] = "serif"
plt.rc("font", size=self.FONT_SIZES["normal"]) # controls default text sizes
plt.rc("axes", titlesize=self.FONT_SIZES["tiny"]) # fontsize of the axes title
plt.rc(
"axes", labelsize=self.FONT_SIZES["normal"]
) # fontsize of the x and y labels
plt.rc(
"xtick", labelsize=self.FONT_SIZES["teensie"]
) # fontsize of the x-axis tick marks
plt.rc(
"ytick", labelsize=self.FONT_SIZES["teensie"]
) # fontsize of the y-axis tick marks
plt.rc("legend", fontsize=self.FONT_SIZES["teensie"]) # legend fontsize
plt.rc(
"figure", titlesize=self.FONT_SIZES["big"]
) # fontsize of the figure title
def get_fig_props(self, n_panels, **kwargs):
"""Determine appropriate figure properties"""
width_slope = 0.875
height_slope = 0.45
intercept = 3.0 - width_slope
figsize = (
min((n_panels * width_slope) + intercept, 19),
min((n_panels * height_slope) + intercept, 12),
)
wspace = (-0.03 * n_panels) + 0.85
hspace = (0.0175 * n_panels) + 0.3
n_columns = kwargs.get("n_columns", 3)
wspace = wspace + 0.25 if n_columns > 3 else wspace
kwargs["figsize"] = kwargs.get("figsize", figsize)
kwargs["wspace"] = kwargs.get("wspace", wspace)
kwargs["hspace"] = kwargs.get("hspace", hspace)
return kwargs
def create_subplots(self, n_panels, **kwargs):
"""
Create a series of subplots (MxN) based on the
number of panels and number of columns (optionally).
Args:
-----------------------
n_panels : int
Number of subplots to create
Optional keyword args:
n_columns : int
The number of columns for a plot (default=3 for n_panels >=3)
figsize: 2-tuple of figure size (width, height in inches)
wspace : float
the amount of width reserved for space between subplots,
expressed as a fraction of the average axis width
hspace : float
sharex : boolean
sharey : boolean
"""
# figsize = width, height in inches
figsize = kwargs.get("figsize", (6.4, 4.8))
wspace = kwargs.get("wspace", 0.4)
hspace = kwargs.get("hspace", 0.3)
sharex = kwargs.get("sharex", False)
sharey = kwargs.get("sharey", False)
delete = True
if n_panels <= 3:
n_columns = kwargs.get("n_columns", n_panels)
delete = True if n_panels != n_columns else False
else:
n_columns = kwargs.get("n_columns", 3)
n_rows = int(n_panels / n_columns)
extra_row = 0 if (n_panels % n_columns) == 0 else 1
fig, axes = plt.subplots(
n_rows + extra_row,
n_columns,
sharex=sharex,
sharey=sharey,
figsize=figsize,
dpi=300,
)
fig.patch.set_facecolor("white")
plt.subplots_adjust(wspace=wspace, hspace=hspace)
if delete:
n_axes_to_delete = len(axes.flat) - n_panels
if n_axes_to_delete > 0:
for i in range(n_axes_to_delete):
fig.delaxes(axes.flat[-(i + 1)])
return fig, axes
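    # Hypothetical usage sketch (not part of the original class); the keyword
    # values below are illustrative only.
    #
    #   plotter = PlotStructure()
    #   fig, axes = plotter.create_subplots(n_panels=5, n_columns=3,
    #                                       figsize=(8, 5), sharey=True)
    #   # -> a 2x3 grid; the unused sixth axis is deleted by the code above.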
def _create_joint_subplots(self, n_panels, **kwargs):
"""
Create grid for multipanel drawing a bivariate plots with marginal
univariate plots on the top and right hand side.
"""
figsize = kwargs.get("figsize", (6.4, 4.8))
ratio = kwargs.get("ratio", 5)
n_columns = kwargs.get("n_columns", 3)
fig = plt.figure(figsize=figsize, dpi=300)
fig.patch.set_facecolor("white")
extra_row = 0 if (n_panels % n_columns) == 0 else 1
nrows = ratio * (int(n_panels / n_columns) + extra_row)
ncols = ratio * n_columns
gs = GridSpec(ncols=ncols, nrows=nrows)
main_ax_len = ratio - 1
main_axes = []
top_axes = []
rhs_axes = []
col_offset_idx = list(range(n_columns)) * int(nrows / ratio)
row_offset = 0
for i in range(n_panels):
col_offset = ratio * col_offset_idx[i]
row_idx = 1
if i % n_columns == 0 and i > 0:
row_offset += ratio
main_ax = fig.add_subplot(
gs[
row_idx + row_offset : main_ax_len + row_offset,
col_offset : main_ax_len + col_offset - 1,
],
frameon=False,
)
top_ax = fig.add_subplot(
gs[row_idx + row_offset - 1, col_offset : main_ax_len + col_offset - 1],
frameon=False,
sharex=main_ax,
)
rhs_ax = fig.add_subplot(
gs[
row_idx + row_offset : main_ax_len + row_offset,
main_ax_len + col_offset - 1,
],
frameon=False,
sharey=main_ax,
)
ax_marg = [top_ax, rhs_ax]
for ax in ax_marg:
# Turn off tick visibility for the measure axis on the marginal plots
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
# Turn off the ticks on the density axis for the marginal plots
plt.setp(ax.yaxis.get_majorticklines(), visible=False)
plt.setp(ax.xaxis.get_majorticklines(), visible=False)
plt.setp(ax.yaxis.get_minorticklines(), visible=False)
plt.setp(ax.xaxis.get_minorticklines(), visible=False)
ax.yaxis.grid(False)
ax.xaxis.grid(False)
for axis in [ax.xaxis, ax.yaxis]:
axis.label.set_visible(False)
main_axes.append(main_ax)
top_axes.append(top_ax)
rhs_axes.append(rhs_ax)
n_rows = int(nrows / ratio)
return fig, main_axes, top_axes, rhs_axes, n_rows
def axes_to_iterator(self, n_panels, axes):
"""Turns axes list into iterable"""
if isinstance(axes, list):
return axes
else:
ax_iterator = [axes] if n_panels == 1 else axes.flat
return ax_iterator
def set_major_axis_labels(
self,
fig,
xlabel=None,
ylabel_left=None,
ylabel_right=None,
title=None,
**kwargs,
):
"""
        Generate single X- and Y-axis labels for
        a series of subplot panels.
"""
fontsize = kwargs.get("fontsize", self.FONT_SIZES["normal"])
labelpad = kwargs.get("labelpad", 15)
ylabel_right_color = kwargs.get("ylabel_right_color", "k")
# add a big axis, hide frame
ax = fig.add_subplot(111, frameon=False)
# hide tick and tick label of the big axis
plt.tick_params(
labelcolor="none", top=False, bottom=False, left=False, right=False
)
# set axes labels
ax.set_xlabel(xlabel, fontsize=fontsize, labelpad=labelpad)
ax.set_ylabel(ylabel_left, fontsize=fontsize, labelpad=labelpad)
if ylabel_right is not None:
ax_right = fig.add_subplot(1, 1, 1, sharex=ax, frameon=False)
plt.tick_params(
labelcolor="none", top=False, bottom=False, left=False, right=False
)
ax_right.yaxis.set_label_position("right")
ax_right.set_ylabel(
ylabel_right,
labelpad=2 * labelpad,
fontsize=fontsize,
color=ylabel_right_color,
)
ax_right.grid(False)
ax.set_title(title)
ax.grid(False)
return ax
def set_row_labels(self, labels, axes, pos=-1, pad=1.15, rotation=270, **kwargs):
"""
Give a label to each row in a series of subplots
"""
colors = kwargs.get("colors", ["xkcd:darkish blue"] * len(labels))
fontsize = kwargs.get("fontsize", self.FONT_SIZES["small"])
if np.ndim(axes) == 2:
iterator = axes[:, pos]
else:
iterator = [axes[pos]]
for ax, row, color in zip(iterator, labels, colors):
ax.yaxis.set_label_position("right")
ax.annotate(
row,
xy=(1, 1),
xytext=(pad, 0.5),
xycoords=ax.transAxes,
rotation=rotation,
size=fontsize,
ha="center",
va="center",
color=color,
alpha=0.65,
)
def add_alphabet_label(self, n_panels, axes, pos=(0.9, 0.09), alphabet_fontsize=10, **kwargs):
"""
        Add an alphabet character label to each subpanel.
"""
alphabet_list = [chr(x) for x in range(ord("a"), ord("z") + 1)] + [
f"{chr(x)}{chr(x)}" for x in range(ord("a"), ord("z") + 1)
]
ax_iterator = self.axes_to_iterator(n_panels, axes)
for i, ax in enumerate(ax_iterator):
ax.text(
pos[0],
pos[1],
f"({alphabet_list[i]})",
fontsize=alphabet_fontsize,
alpha=0.8,
ha="center",
va="center",
transform=ax.transAxes,
)
def _to_sci_notation(self, ydata, ax=None, xdata=None, colorbar=False):
"""
        Convert decimals (less than 0.01) to scientific (10^e) notation
"""
# f = mticker.ScalarFormatter(useOffset=False, useMathText=True)
# g = lambda x, pos: "${}$".format(f._formatSciNotation("%1.10e" % x))
if colorbar and np.absolute(np.amax(ydata)) <= 0.01:
# colorbar.ax.yaxis.set_major_formatter(mticker.FuncFormatter(g))
colorbar.ax.ticklabel_format(
style="sci",
)
colorbar.ax.tick_params(axis="y", labelsize=5)
elif ax:
if np.absolute(np.amax(xdata)) <= 0.01:
ax.ticklabel_format(
style="sci",
)
# ax.xaxis.set_major_formatter(mticker.FuncFormatter(g))
ax.tick_params(axis="x", labelsize=5, rotation=45)
if np.absolute(np.amax(ydata)) <= 0.01:
# ax.yaxis.set_major_formatter(mticker.FuncFormatter(g))
ax.ticklabel_format(
style="sci",
)
ax.tick_params(axis="y", labelsize=5, rotation=45)
def calculate_ticks(
self,
nticks,
ax=None,
upperbound=None,
lowerbound=None,
round_to=5,
center=False,
):
"""
        Calculate the y-axis tick marks for the line plots
"""
if ax is not None:
upperbound = round(ax.get_ybound()[1], 5)
lowerbound = round(ax.get_ybound()[0], 5)
max_value = max(abs(upperbound), abs(lowerbound))
if 0 < max_value < 1:
if max_value < 0.1:
round_to = 3
else:
round_to = 5
elif 5 < max_value < 10:
round_to = 2
else:
round_to = 0
def round_to_a_base(a_number, base=5):
return base * round(a_number / base)
if max_value > 5:
max_value = round_to_a_base(max_value)
if center:
values = np.linspace(-max_value, max_value, nticks)
values = np.round(values, round_to)
else:
dy = upperbound - lowerbound
# deprecated 8 March 2022 by Monte.
if round_to > 2:
fit = np.floor(dy / (nticks - 1)) + 1
dy = (nticks - 1) * fit
values = np.linspace(lowerbound, lowerbound + dy, nticks)
values = np.round(values, round_to)
return values
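    # Worked example (illustrative, not part of the original class): for an
    # axis spanning roughly [0, 7.3], max_value = 7.3 falls in (5, 10), so
    # round_to = 2 and calculate_ticks(nticks=5, ax=ax) returns five evenly
    # spaced ticks between the axis bounds, rounded to two decimals. With
    # center=True the ticks are instead laid out symmetrically about zero.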
def set_tick_labels(
self, ax, feature_names, display_feature_names, return_labels=False
):
"""
Setting the tick labels for the tree interpreter plots.
"""
if isinstance(display_feature_names, dict):
labels = [
display_feature_names.get(feature_name, feature_name)
for feature_name in feature_names
]
else:
labels = display_feature_names
if return_labels:
labels = [f"{l}" for l in labels]
return labels
else:
labels = [f"{l}" for l in labels]
ax.set_yticklabels(labels)
def set_axis_label(self, ax, xaxis_label=None, yaxis_label=None, **kwargs):
"""
Setting the x- and y-axis labels with fancy labels (and optionally
physical units)
"""
fontsize = kwargs.get("fontsize", self.FONT_SIZES["tiny"])
if xaxis_label is not None:
xaxis_label_pretty = self.display_feature_names.get(
xaxis_label, xaxis_label
)
units = self.display_units.get(xaxis_label, "")
if units == "":
xaxis_label_with_units = f"{xaxis_label_pretty}"
else:
xaxis_label_with_units = f"{xaxis_label_pretty} ({units})"
ax.set_xlabel(xaxis_label_with_units, fontsize=fontsize)
if yaxis_label is not None:
yaxis_label_pretty = self.display_feature_names.get(
yaxis_label, yaxis_label
)
units = self.display_units.get(yaxis_label, "")
if units == "":
yaxis_label_with_units = f"{yaxis_label_pretty}"
else:
yaxis_label_with_units = f"{yaxis_label_pretty} ({units})"
ax.set_ylabel(yaxis_label_with_units, fontsize=fontsize)
def set_legend(self, n_panels, fig, ax, major_ax=None, **kwargs):
"""
Set a single legend on the bottom of a figure
for a set of subplots.
"""
if major_ax is None:
major_ax = self.set_major_axis_labels(fig)
fontsize = kwargs.get("fontsize", "medium")
ncol = kwargs.get("ncol", 3)
handles = kwargs.get("handles", None)
labels = kwargs.get("labels", None)
if handles is None:
handles, _ = ax.get_legend_handles_labels()
if labels is None:
_, labels = ax.get_legend_handles_labels()
if n_panels > 3:
bbox_to_anchor = (0.5, -0.35)
else:
bbox_to_anchor = (0.5, -0.5)
bbox_to_anchor = kwargs.get("bbox_to_anchor", bbox_to_anchor)
# Shrink current axis's height by 10% on the bottom
box = major_ax.get_position()
major_ax.set_position(
[box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9]
)
# Put a legend below current axis
major_ax.legend(
handles,
labels,
loc="lower center",
bbox_to_anchor=bbox_to_anchor,
fancybox=True,
shadow=True,
ncol=ncol,
fontsize=fontsize,
)
def set_minor_ticks(self, ax):
"""
        Adds minor tick marks to the x- and y-axis of a subplot ax
        to increase readability.
"""
ax.xaxis.set_minor_locator(AutoMinorLocator(n=3))
ax.yaxis.set_minor_locator(AutoMinorLocator(n=3))
def set_n_ticks(self, ax, option="y", nticks=5):
"""
Set the max number of ticks per x- and y-axis for a
subplot ax
"""
if option == "y" or option == "both":
ax.yaxis.set_major_locator(MaxNLocator(nticks))
if option == "x" or option == "both":
ax.xaxis.set_major_locator(MaxNLocator(nticks))
def make_twin_ax(self, ax):
"""
Create a twin axis on an existing axis with a shared x-axis
"""
# align the twinx axis
twin_ax = ax.twinx()
# Turn twin_ax grid off.
twin_ax.grid(False)
# Set ax's patch invisible
ax.patch.set_visible(False)
# Set axtwin's patch visible and colorize it in grey
twin_ax.patch.set_visible(True)
# move ax in front
ax.set_zorder(twin_ax.get_zorder() + 1)
return twin_ax
def despine_plt(self, ax):
"""
        Remove all four spines of a plot.
"""
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["bottom"].set_visible(False)
def annotate_bars(self, ax, num1, num2, y, width, dh=0.01, barh=0.05, delta=0):
"""
Annotate barplot with connections between correlated variables.
Parameters
----------------
num1: index of left bar to put bracket over
num2: index of right bar to put bracket over
y: centers of all bars (like plt.barh() input)
width: widths of all bars (like plt.barh() input)
dh: height offset over bar / bar + yerr in axes coordinates (0 to 1)
barh: bar height in axes coordinates (0 to 1)
delta : shifting of the annotation when multiple annotations would overlap
"""
lx, ly = y[num1], width[num1]
rx, ry = y[num2], width[num2]
ax_y0, ax_y1 = plt.gca().get_xlim()
dh *= (ax_y1 - ax_y0)
barh *= (ax_y1 - ax_y0)
y = max(ly, ry) + dh
barx = [lx, lx, rx, rx]
bary = [y, y+barh, y+barh, y]
mid = ((lx+rx)/2, y+barh)
ax.plot(np.array(bary)+delta, barx, alpha=0.8, clip_on=False)
'''
Deprecated 14 March 2022.
def annotate_bars(self, ax, bottom_idx, top_idx, x=0, **kwargs):
"""
Adds a square bracket that contains two points. Used to
connect predictors in the predictor ranking plot
for highly correlated pairs.
"""
color = kwargs.get("color", "xkcd:slate gray")
ax.annotate(
"",
xy=(x, bottom_idx),
xytext=(x, top_idx),
arrowprops=dict(
arrowstyle="<->,head_length=0.05,head_width=0.05",
ec=color,
connectionstyle="bar,fraction=0.2",
shrinkA=0.5,
shrinkB=0.5,
linewidth=0.5,
),
)
'''
def get_custom_colormap(self, vals, **kwargs):
"""Get a custom colormap"""
cmap = kwargs.get("cmap", matplotlib.cm.PuOr)
bounds = np.linspace(np.nanpercentile(vals, 0), np.nanpercentile(vals, 100), 10)
norm = matplotlib.colors.BoundaryNorm(
bounds,
cmap.N,
)
mappable = matplotlib.cm.ScalarMappable(
norm=norm,
cmap=cmap,
)
return mappable, bounds
def add_ice_colorbar(self, fig, ax, mappable, cb_label, cdata, fontsize, **kwargs):
"""Add a colorbar to the right of a panel to
accompany ICE color-coded plots"""
cb = plt.colorbar(mappable, ax=ax, pad=0.2)
cb.set_label(cb_label, size=fontsize)
cb.ax.tick_params(labelsize=fontsize)
cb.set_alpha(1)
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.7) * 20)
self._to_sci_notation(ax=None, colorbar=cb, ydata=cdata)
def add_colorbar(
self,
fig,
plot_obj,
colorbar_label,
ticks=MaxNLocator(5),
ax=None,
cax=None,
**kwargs,
):
"""Adds a colorbar to the right of a panel"""
# Add a colobar
orientation = kwargs.get("orientation", "vertical")
pad = kwargs.get("pad", 0.1)
shrink = kwargs.get("shrink", 1.1)
extend = kwargs.get("extend", "neither")
if cax:
cbar = plt.colorbar(
plot_obj,
cax=cax,
pad=pad,
ticks=ticks,
shrink=shrink,
orientation=orientation,
extend=extend,
)
else:
cbar = plt.colorbar(
plot_obj,
ax=ax,
pad=pad,
ticks=ticks,
shrink=shrink,
orientation=orientation,
extend=extend,
)
cbar.ax.tick_params(labelsize=self.FONT_SIZES["tiny"])
cbar.set_label(colorbar_label, size=self.FONT_SIZES["small"])
cbar.outline.set_visible(False)
# bbox = cbar.ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
# cbar.ax.set_aspect((bbox.height - 0.7) * 20)
def save_figure(self, fname, fig=None, bbox_inches="tight", dpi=300, aformat="png"):
"""Saves the current figure"""
plt.savefig(fname=fname, bbox_inches=bbox_inches, dpi=dpi, format=aformat) | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/plot/base_plotting.py | 0.727395 | 0.518973 | base_plotting.py | pypi |
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.colors import ListedColormap
from scipy.stats import gaussian_kde
from scipy.ndimage import gaussian_filter
import scipy
import itertools
import numpy as np
import matplotlib as mpl
from scipy.ndimage import gaussian_filter
from .base_plotting import PlotStructure
gray4 = (189 / 255.0, 189 / 255.0, 189 / 255.0)
gray5 = (150 / 255.0, 150 / 255.0, 150 / 255.0)
blue2 = (222 / 255.0, 235 / 255.0, 247 / 255.0)
blue5 = (107 / 255.0, 174 / 255.0, 214 / 255.0)
orange3 = (253 / 255.0, 208 / 255.0, 162 / 255.0)
orange5 = (253 / 255.0, 141 / 255.0, 60 / 255.0)
red5 = (251 / 255.0, 106 / 255.0, 74 / 255.0)
red6 = (239 / 255.0, 59 / 255.0, 44 / 255.0)
purple5 = (158 / 255.0, 154 / 255.0, 200 / 255.0)
purple6 = (128 / 255.0, 125 / 255.0, 186 / 255.0)
purple9 = (63 / 255.0, 0 / 255.0, 125 / 255.0)
custom_cmap = ListedColormap(
[
gray4,
gray5,
blue2,
blue5,
orange3,
orange5,
red5,
red6,
purple5,
purple6,
purple9,
]
)
class PlotScatter(PlotStructure):
"""
PlotScatter handles plotting 2D scatter between a set of features.
It will also optionally overlay KDE contours of the target variable,
which is a first-order method for evaluating possible feature interactions
and whether the learned relationships are consistent with the data.
"""
oranges = ListedColormap(
["xkcd:peach", "xkcd:orange", "xkcd:bright orange", "xkcd:rust brown"]
)
blues = ListedColormap(
["xkcd:periwinkle blue", "xkcd:clear blue", "xkcd:navy blue"]
)
def __init__(self, BASE_FONT_SIZE=12):
super().__init__(BASE_FONT_SIZE=BASE_FONT_SIZE)
def plot_scatter(
self,
estimators,
X,
y,
features,
display_feature_names={},
display_units={},
subsample=1.0,
peak_val=None,
kde=False,
**kwargs,
):
"""
        Plot a 2D scatter between two features, color-coded by the model
        predictions, optionally overlaying KDE contours of the target classes
"""
# TODO: Plot relationships for multiple features!!
estimator_names = list(estimators.keys())
n_panels = len(estimator_names)
only_one_model = len(estimator_names) == 1
predictions = np.zeros((n_panels, len(y)), dtype=np.float16)
j = 0
for estimator_name, estimator in estimators.items():
if hasattr(estimator, "predict_proba"):
predictions[j, :] = estimator.predict_proba(X)[:, 1]
else:
predictions[j, :] = estimator.predict(X)
j += 1
kwargs = self.get_fig_props(n_panels, **kwargs)
# create subplots, one for each feature
fig, axes = self.create_subplots(
n_panels=n_panels,
sharex=False,
sharey=False,
**kwargs,
)
ax_iterator = self.axes_to_iterator(n_panels, axes)
vmax = np.max(predictions)
for i, ax in enumerate(ax_iterator):
cf = self.scatter_(
ax=ax,
X=X,
features=features,
predictions=predictions[i, :],
vmax=vmax,
**kwargs,
)
if subsample < 1.0:
size = min(int(subsample * len(y)), len(y))
idxs = np.random.choice(len(y), size=size, replace=False)
var1 = X[features[0]].values[idxs]
var2 = X[features[1]].values[idxs]
y1 = y.values[idxs]
else:
var1 = X[features[0]].values
var2 = X[features[1]].values
y1 = np.copy(y)
# Shuffle values
index = np.arange(len(var1))
np.random.shuffle(index)
var1 = var1[index]
var2 = var2[index]
y1 = y1[index]
ax.set_xlabel(display_feature_names.get(features[0], features[0]))
ax.set_ylabel(display_feature_names.get(features[1], features[1]))
ax.grid(color="#2A3459", alpha=0.6, linestyle="dashed", linewidth=0.5)
# bluish dark grey, but slightly lighter than background
if kde:
cmap_set = [
self.oranges,
self.blues,
"Reds",
"jet",
]
classes = np.unique(y1)
idx_sets = [np.where(y1 == c) for c in classes]
for idxs, cmap in zip(idx_sets, cmap_set):
# Plot positive cases
cs = self.plot_kde_contours(
ax,
dy=var2[idxs],
dx=var1[idxs],
target=y1[idxs],
cmap=cmap,
)
handles_set = [cs.legend_elements()[-1]]
labels = classes
legend = ax.legend(handles_set, labels, framealpha=0.5)
# Hide the right and top spines
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
if not only_one_model:
ax.set_title(estimator_names[i])
if n_panels == 1:
ax_ = axes
else:
ax_ = axes.ravel().tolist()
if hasattr(estimator, "predict_proba"):
cbar_label = "Probability"
else:
cbar_label = "Response"
cbar_label = kwargs.get("cbar_label", cbar_label)
fig.colorbar(cf, ax=ax_, label=cbar_label, orientation="horizontal")
return fig, axes
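    # Hypothetical usage sketch (not part of the original class): `estimators`
    # is a dict of fitted models and X/y are a pandas DataFrame and Series;
    # the feature names below are illustrative only.
    #
    #   plotter = PlotScatter()
    #   fig, axes = plotter.plot_scatter(
    #       estimators={"Random Forest": rf},
    #       X=X, y=y,
    #       features=("cape", "shear"),
    #       subsample=0.3,
    #       kde=True,
    #   )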
def scatter_(self, ax, features, X, predictions, vmax, **kwargs):
"""
        Plot a 2D scatter of ML predictions;
        only a random subset of at most 20000 points is plotted
"""
size = min(20000, len(X))
idxs = np.random.choice(len(X), size=size, replace=False)
x_val = X[features[0]].values[idxs]
y_val = X[features[1]].values[idxs]
z_val = predictions[idxs]
# Show highest predictions on top!
index = np.argsort(z_val)
# index = np.random.choice(len(z_val), size=len(z_val), replace=False)
x_val = x_val[index]
y_val = y_val[index]
z_val = z_val[index]
cmap = kwargs.get("cmap", custom_cmap)
zmax = vmax + 0.05 if vmax < 1.0 else 1.1
delta = 0.05 if vmax < 0.5 else 0.1
levels = [0, 0.05] + list(np.arange(0.1, zmax, delta))
norm = mpl.colors.BoundaryNorm(levels, cmap.N)
sca = ax.scatter(
x_val,
y_val,
c=z_val,
cmap=cmap,
alpha=0.6,
s=3,
norm=norm,
)
return sca
def kernal_density_estimate(self, dy, dx):
dy_min = np.amin(dy)
dx_min = np.amin(dx)
dy_max = np.amax(dy)
dx_max = np.amax(dx)
x, y = np.mgrid[dx_min:dx_max:100j, dy_min:dy_max:100j]
positions = np.vstack([x.ravel(), y.ravel()])
values = np.vstack([dx, dy])
kernel = gaussian_kde(values)
f = np.reshape(kernel(positions).T, x.shape)
return x, y, f
def plot_kde_contours(
self,
ax,
dy,
dx,
target,
cmap,
):
x, y, f = self.kernal_density_estimate(dy, dx)
temp_linewidths = [0.85, 1.0, 1.25, 1.75]
temp_thresh = [75.0, 90.0, 95.0, 97.5]
temp_levels = [0.0, 0.0, 0.0, 0.0]
for i in range(0, len(temp_thresh)):
temp_levels[i] = np.percentile(f.ravel(), temp_thresh[i])
# masked_f = np.ma.masked_where(f < 1.6e-5, f)
cs = ax.contour(
x,
y,
f,
levels=temp_levels,
cmap=cmap,
linewidths=temp_linewidths,
alpha=1.0,
)
fmt = {}
for l, s in zip(cs.levels, temp_thresh[::-1]):
fmt[l] = f"{int(s)}%"
ax.clabel(
cs, cs.levels, inline=True, fontsize=self.FONT_SIZES["teensie"], fmt=fmt
)
return cs | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/plot/_kde_2d.py | 0.665084 | 0.351589 | _kde_2d.py | pypi |
import numpy as np
import collections
from ..common.importance_utils import find_correlated_pairs_among_top_features
from ..common.utils import is_list, is_correlated
from .base_plotting import PlotStructure
import random
class PlotImportance(PlotStructure):
"""
    PlotImportance handles the plotting of feature rankings. The class
is designed to be generic enough to handle all possible ranking methods
computed within Scikit-Explain.
"""
SINGLE_VAR_METHODS = [
"backward_multipass",
"backward_singlepass",
"forward_multipass",
"forward_singlepass",
"ale_variance",
"coefs",
"shap_sum",
"gini",
"combined",
"sage",
"grouped",
"grouped_only",
"lime",
"tree_interpreter",
"sobol_total",
"sobol_1st",
"sobol_interact"
]
DISPLAY_NAMES_DICT = {
"backward_multipass": "Backward Multi-Pass",
"backward_singlepass": "Backward Single-Pass",
"forward_multipass": "Forward Multi-Pass",
"forward_singlepass": "Forward Single-Pass",
"perm_based": "Perm.-based Interact.",
"ale_variance": "ALE-Based Import.",
"ale_variance_interactions": "ALE-Based Interac.",
"coefs": "Coef.",
"shap_sum": "SHAP",
"hstat": "H-Stat",
"gini": "Gini",
"combined": "Method-Average Ranking",
"sage": "SAGE Importance Scores",
"grouped": "Grouped Importance",
"grouped_only": "Grouped Only Importance",
"sobol_total" : 'Sobol Total',
"sobol_1st" : 'Sobol 1st Order',
"sobol_interact" : 'Sobol Higher Orders',
}
def __init__(self, BASE_FONT_SIZE=12, seaborn_kws=None):
super().__init__(BASE_FONT_SIZE=BASE_FONT_SIZE, seaborn_kws=seaborn_kws)
def is_bootstrapped(self, scores):
"""Check if the permutation importance results are bootstrapped"""
return np.ndim(scores) > 1
def _get_axes(self, n_panels, **kwargs):
"""
Determine how many axes are required.
"""
if n_panels == 1:
kwargs["figsize"] = kwargs.get("figsize", (3, 2.5))
elif n_panels == 2 or n_panels == 3:
kwargs["figsize"] = kwargs.get("figsize", (6, 2.5))
else:
figsize = kwargs.get("figsize", (8, 5))
# create subplots, one for each feature
fig, axes = self.create_subplots(n_panels=n_panels, **kwargs)
return fig, axes
def _check_for_estimators(self, data, estimator_names):
"""Check that each estimator is in data"""
for ds in data:
if not (
collections.Counter(ds.attrs["estimators used"])
== collections.Counter(estimator_names)
):
raise AttributeError(
"""
The estimator names given do not match the estimators used to create
given data
"""
)
def plot_variable_importance(
self,
data,
panels,
display_feature_names={},
feature_colors=None,
num_vars_to_plot=10,
estimator_output="raw",
plot_correlated_features=False,
**kwargs,
):
"""Plots any variable importance method for a particular estimator
Parameters
-----------------
data : xarray.Dataset or list of xarray.Dataset
Permutation importance dataset for one or more metrics
        panels: list of 2-tuples of (rank method, estimator name)
            E.g., panels = [('singlepass', 'Random Forest'),
                            ('multipass', 'Random Forest')]
            will plot the singlepass and multipass results for the
            random forest model.
Possible methods include 'multipass', 'singlepass',
'perm_based', 'ale_variance', or 'ale_variance_interactions'
display_feature_names : dict
A dict mapping feature names to readable, "pretty" feature names
feature_colors : dict
A dict mapping features to various colors. Helpful for color coding groups of features
num_vars_to_plot : int
            Number of top variables to plot (default is None and will use the number of multipass results)
kwargs:
- xlabels
- ylabel
- xticks
- p_values
- colinear_features
- rho_threshold
"""
xlabels = kwargs.get("xlabels", None)
ylabels = kwargs.get("ylabels", None)
xticks = kwargs.get("xticks", None)
title = kwargs.get("title", "")
p_values = kwargs.get("p_values", None)
colinear_features = kwargs.get("colinear_features", None)
rho_threshold = kwargs.get("rho_threshold", 0.8)
plot_reference_score = kwargs.get("plot_reference_score", True)
plot_error = kwargs.get('plot_error', True)
only_one_method = all([m[0] == panels[0][0] for m in panels])
only_one_estimator = all([m[1] == panels[0][1] for m in panels])
if not only_one_method:
kwargs["hspace"] = kwargs.get("hspace", 0.6)
if plot_correlated_features:
X = kwargs.get("X", None)
if X is None or X.empty:
raise ValueError(
"Must provide X to InterpretToolkit to compute the correlations!"
)
corr_matrix = X.corr().abs()
data = [data] if not is_list(data) else data
n_panels = len(panels)
fig, axes = self._get_axes(n_panels, **kwargs)
ax_iterator = self.axes_to_iterator(n_panels, axes)
for i, (panel, ax) in enumerate(zip(panels, ax_iterator)):
# Set the facecolor.
#ax.set_facecolor(kwargs.get("facecolor", (0.95, 0.95, 0.95)))
method, estimator_name = panel
results = data[i]
if xlabels is not None:
ax.set_xlabel(xlabels[i], fontsize=self.FONT_SIZES["small"])
else:
if not only_one_method:
ax.set_xlabel(
self.DISPLAY_NAMES_DICT.get(method, method),
fontsize=self.FONT_SIZES["small"],
)
if not only_one_estimator:
ax.set_title(estimator_name)
sorted_var_names = list(
results[f"{method}_rankings__{estimator_name}"].values
)
if num_vars_to_plot is None:
                num_vars_to_plot = len(sorted_var_names)
sorted_var_names = sorted_var_names[
: min(num_vars_to_plot, len(sorted_var_names))
]
sorted_var_names = sorted_var_names[::-1]
scores = results[f"{method}_scores__{estimator_name}"].values
scores = scores[: min(num_vars_to_plot, len(sorted_var_names))]
# Reverse the order.
scores = scores[::-1]
# Set very small values to zero.
scores = np.where(np.absolute(np.round(scores, 17)) < 1e-15, 0, scores)
# Get the colors for the plot
colors_to_plot = [
self.variable_to_color(var, feature_colors) for var in sorted_var_names
]
# Get the predictor names
variable_names_to_plot = [
f" {var}"
for var in self.convert_vars_to_readable(
sorted_var_names,
display_feature_names,
)
]
if method == "combined":
scores_to_plot = np.nanpercentile(scores, 50, axis=1)
# Compute the confidence intervals (ci)
ci = np.abs(
np.nanpercentile(scores, 50, axis=1)
- np.nanpercentile(scores, [25, 75], axis=1)
)
else:
scores_to_plot = np.nanmean(scores, axis=1)
ci = np.abs(
np.nanpercentile(scores, 50, axis=1)
- np.nanpercentile(scores, [2.5, 97.5], axis=1)
)
if plot_reference_score:
if 'forward' in method:
ax.axvline(results[f'all_permuted_score__{estimator_name}'].mean(),color='k',ls=':')
elif 'backward' in method:
ax.axvline(results[f'original_score__{estimator_name}'].mean(),color='k',ls='--')
# Despine
self.despine_plt(ax)
elinewidth = 0.9 if n_panels <= 3 else 0.5
if plot_error:
ax.barh(
np.arange(len(scores_to_plot)),
scores_to_plot,
linewidth=1.75,
edgecolor="white",
alpha=0.5,
color=colors_to_plot,
xerr=ci,
capsize=3.0,
ecolor="k",
error_kw=dict(
alpha=0.2,
elinewidth=elinewidth,
),
zorder=2,
)
else:
ax.barh(
np.arange(len(scores_to_plot)),
scores_to_plot,
linewidth=1.75,
edgecolor="white",
alpha=0.5,
color=colors_to_plot,
zorder=2,
)
if plot_correlated_features:
self._add_correlated_brackets(
ax, np.arange(len(scores_to_plot)),
scores_to_plot,
corr_matrix, sorted_var_names, rho_threshold
)
if num_vars_to_plot >= 20:
size = kwargs.get("fontsize", self.FONT_SIZES["teensie"] - 3)
elif num_vars_to_plot > 10:
size = kwargs.get("fontsize", self.FONT_SIZES["teensie"] - 2)
else:
size = kwargs.get("fontsize", self.FONT_SIZES["teensie"] - 1)
# Put the variable names _into_ the plot
if method not in self.SINGLE_VAR_METHODS and plot_correlated_features:
pass
# This is code is not flexible at the moment.
#results_dict = is_correlated(
# corr_matrix, sorted_var_names, rho_threshold=rho_threshold
#)
if colinear_features is None:
fontweight = ["light"] * len(variable_names_to_plot)
colors = ["k"] * len(variable_names_to_plot)
else:
# Bold text if the VIF > threshold (indicates a multicolinear predictor)
fontweight = [
"bold" if v in colinear_features else "light" for v in sorted_var_names
]
# Bold text if value is insignificant.
colors = ["xkcd:medium blue" if v in colinear_features else "k" for v in sorted_var_names]
ax.set_yticks(range(len(variable_names_to_plot)))
ax.set_yticklabels(variable_names_to_plot)
labels = ax.get_yticklabels()
# Bold var names
##[label.set_fontweight(opt) for opt, label in zip(fontweight, labels)]
[label.set_color(c) for c, label in zip(colors, labels)]
ax.tick_params(axis="both", which="both", length=0)
if xticks is not None:
ax.set_xticks(xticks)
else:
self.set_n_ticks(ax, option="x")
xlabel = (
self.DISPLAY_NAMES_DICT.get(method, method)
if (only_one_method and xlabels is None)
else ""
)
major_ax = self.set_major_axis_labels(
fig,
xlabel=xlabel,
ylabel_left="",
ylabel_right="",
title=title,
fontsize=self.FONT_SIZES["small"],
**kwargs,
)
if ylabels is not None:
self.set_row_labels(
labels=ylabels, axes=axes, pos=-1, pad=1.15, rotation=270, **kwargs
)
self.add_alphabet_label(
n_panels, axes, pos=kwargs.get("alphabet_pos", (0.9, 0.09)),
alphabet_fontsize = kwargs.get("alphabet_fontsize", 10)
)
        # Necessary to make sure that the tick labels for the feature names
        # do not overlap another ax.
fig.tight_layout()
return fig, axes
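    # Hypothetical usage sketch (not part of the original class): `results` is
    # a ranking xarray.Dataset produced elsewhere in scikit-explain, and the
    # method/estimator names below are illustrative only.
    #
    #   plotter = PlotImportance()
    #   fig, axes = plotter.plot_variable_importance(
    #       data=[results],
    #       panels=[("backward_singlepass", "Random Forest")],
    #       num_vars_to_plot=10,
    #   )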
def _add_correlated_brackets(self, ax, y, width, corr_matrix, top_features, rho_threshold):
"""
Add bracket connecting features above a given correlation threshold.
        Parameters
        ------------------
        ax : matplotlib.ax.Axes object
        y : centers of all bars (like plt.barh() input)
        width : widths of all bars (like plt.barh() input)
        corr_matrix : pandas.DataFrame correlation matrix for the full feature set
        top_features : list of the top-ranked feature names shown in the plot
        rho_threshold : linear correlation coefficient threshold for drawing a bracket
        """
get_colors = lambda n: list(
map(lambda i: "#" + "%06x" % random.randint(0, 0xFFFFFF), range(n))
)
_, pair_indices = find_correlated_pairs_among_top_features(
corr_matrix,
top_features,
rho_threshold=rho_threshold,
)
colors = get_colors(len(pair_indices))
top_indices, bottom_indices = [], []
for p, color in zip(pair_indices, colors):
delta=0
if p[0] > p[1]:
bottom_idx = p[1]
top_idx = p[0]
else:
bottom_idx = p[0]
top_idx = p[1]
# If a feature has already shown up in a correlated pair,
# then we want to shift the brackets slightly for ease of
# interpretation.
if bottom_idx in bottom_indices or bottom_idx in top_indices:
delta += 0.1
if top_idx in top_indices or top_idx in bottom_indices:
delta += 0.1
top_indices.append(top_idx)
bottom_indices.append(bottom_idx)
self.annotate_bars(ax, bottom_idx, top_idx, y=y, width=width, delta=delta)
# You can fill this in by using a dictionary with {var_name: legible_name}
def convert_vars_to_readable(self, variables_list, VARIABLE_NAMES_DICT):
"""Substitutes out variable names for human-readable ones
:param variables_list: a list of variable names
:returns: a copy of the list with human-readable names
"""
human_readable_list = list()
for var in variables_list:
if var in VARIABLE_NAMES_DICT:
human_readable_list.append(VARIABLE_NAMES_DICT[var])
else:
human_readable_list.append(var)
return human_readable_list
# This could easily be expanded with a dictionary
def variable_to_color(self, var, VARIABLES_COLOR_DICT):
"""
Returns the color for each variable.
"""
if var == "No Permutations":
return "xkcd:pastel red"
else:
if VARIABLES_COLOR_DICT is None:
return "xkcd:powder blue"
elif not isinstance(VARIABLES_COLOR_DICT, dict) and isinstance(
VARIABLES_COLOR_DICT, str
):
return VARIABLES_COLOR_DICT
else:
return VARIABLES_COLOR_DICT[var] | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/plot/plot_permutation_importance.py | 0.730578 | 0.371479 | plot_permutation_importance.py | pypi |
import matplotlib.pyplot as plt
import seaborn as sns
def rounding(v):
    """Rounding for pretty plots"""
    if v > 100:
        return int(round(v))
    elif v >= 1:
        return round(v, 1)
    elif v >= 0.1:
        return round(v, 1)
    else:
        return round(v, 3)
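# Illustrative values (not part of the original module), given the intent of
# the branches above: rounding(1234.5) -> 1234, rounding(12.34) -> 12.3,
# rounding(0.456) -> 0.5, rounding(0.01234) -> 0.012.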
def box_and_whisker(
X_train, top_preds, example, display_feature_names={}, display_units={}, **kwargs
):
"""Create interpretability graphic"""
color = kwargs.get("bar_color", "lightblue")
f, axes = plt.subplots(dpi=300, nrows=len(top_preds), figsize=(4, 5))
sns.despine(
fig=f,
ax=axes,
top=True,
right=True,
left=True,
bottom=False,
offset=None,
trim=False,
)
box_plots = []
for ax, v in zip(axes, top_preds):
box_plot = ax.boxplot(
x=X_train[v], vert=False, whis=[0, 100], patch_artist=True, widths=0.35
)
box_plots.append(box_plot)
ax.annotate(
display_feature_names.get(v, v) + " (" + display_units.get(v, v) + ")",
xy=(0.9, 1.15),
xycoords="axes fraction",
)
ax.annotate(
rounding(example[v]),
xy=(0.9, 0.7),
xycoords="axes fraction",
fontsize=6,
color="red",
)
# plot vertical lines
ax.axvline(x=example[v], color="red", zorder=5)
# Remove y tick labels
ax.set_yticks(
[],
)
# fill with colors
for bplot in box_plots:
for patch in bplot["boxes"]:
patch.set_facecolor(color)
for line in bplot["means"]:
line.set_color(color)
plt.subplots_adjust(wspace=5.75)
f.suptitle("Training Set Distribution for Top Predictors")
axes[0].set_title(
"Vertical red bars show current values for this object",
fontsize=8,
pad=25,
color="red",
)
f.tight_layout()
return f, axes | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/plot/_box_and_whisker.py | 0.754192 | 0.526404 | _box_and_whisker.py | pypi |
from functools import partial
from sklearn.metrics._base import _average_binary_score
from sklearn.utils.multiclass import type_of_target
from sklearn.metrics import (
brier_score_loss,
average_precision_score,
precision_recall_curve,
)
import numpy as np
def brier_skill_score(y_values, forecast_probabilities):
"""Computes the brier skill score"""
climo = np.mean((y_values - np.mean(y_values)) ** 2)
return 1.0 - brier_score_loss(y_values, forecast_probabilities) / climo
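# Hypothetical quick check (not part of the original module): a climatological
# forecast scores ~0 and a sharp, well-calibrated forecast scores near 1.
def _example_brier_skill_score():
    y = np.array([0, 0, 1, 0, 1, 0, 0, 1, 0, 0], dtype=float)
    climo_probs = np.full(len(y), np.mean(y))     # climatology -> BSS of 0
    sharp_probs = np.where(y == 1, 0.9, 0.1)      # skillful forecast -> BSS near 1
    return brier_skill_score(y, climo_probs), brier_skill_score(y, sharp_probs)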
def modified_precision(precision, known_skew, new_skew):
"""
Modify the success ratio according to equation (3) from
Lampert and Gancarski (2014).
"""
precision[precision < 1e-5] = 1e-5
term1 = new_skew / (1.0 - new_skew)
term2 = (1 / precision) - 1.0
denom = known_skew + ((1 - known_skew) * term1 * term2)
return known_skew / denom
def calc_sr_min(skew):
pod = np.linspace(0, 1, 100)
sr_min = (skew * pod) / (1 - skew + (skew * pod))
return sr_min
def _binary_uninterpolated_average_precision(
y_true, y_score, known_skew, new_skew, pos_label=1, sample_weight=None
):
precision, recall, _ = precision_recall_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight
)
# Return the step function integral
# The following works because the last entry of precision is
# guaranteed to be 1, as returned by precision_recall_curve
if known_skew is not None:
precision = modified_precision(precision, known_skew, new_skew)
return -np.sum(np.diff(recall) * np.array(precision)[:-1])
def min_aupdc(
y_true, pos_label, average, sample_weight=None, known_skew=None, new_skew=None
):
"""
Compute the minimum possible area under the performance
diagram curve. Essentially, a vote of NO for all predictions.
"""
min_score = np.zeros((len(y_true)))
average_precision = partial(
_binary_uninterpolated_average_precision,
known_skew=known_skew,
new_skew=new_skew,
pos_label=pos_label,
)
ap_min = _average_binary_score(
average_precision, y_true, min_score, average, sample_weight=sample_weight
)
return ap_min
def norm_aupdc(
y_true,
y_score,
known_skew=None,
*,
average="macro",
pos_label=1,
sample_weight=None,
min_method="random",
):
"""
Compute the normalized modified average precision. Normalization removes
the no-skill region either based on skew or random classifier performance.
Modification alters success ratio to be consistent with a known skew.
Parameters:
-------------------
y_true, array of (n_samples,)
Binary, truth labels (0,1)
y_score, array of (n_samples,)
Model predictions (either determinstic or probabilistic)
known_skew, float between 0 and 1
Known or reference skew (# of 1 / n_samples) for
computing the modified success ratio.
min_method, 'skew' or 'random'
If 'skew', then the normalization is based on the minimum AUPDC
formula presented in Boyd et al. (2012).
If 'random', then the normalization is based on the
minimum AUPDC for a random classifier, which is equal
to the known skew.
Boyd, 2012: Unachievable Region in Precision-Recall Space and Its Effect on Empirical Evaluation, ArXiv
"""
new_skew = np.mean(y_true)
if known_skew is None:
known_skew = new_skew
y_type = type_of_target(y_true)
if y_type == "multilabel-indicator" and pos_label != 1:
raise ValueError(
"Parameter pos_label is fixed to 1 for "
"multilabel-indicator y_true. Do not set "
"pos_label or set pos_label to 1."
)
elif y_type == "binary":
# Convert to Python primitive type to avoid NumPy type / Python str
# comparison. See https://github.com/numpy/numpy/issues/6784
present_labels = np.unique(y_true).tolist()
if len(present_labels) == 2 and pos_label not in present_labels:
raise ValueError(
f"pos_label={pos_label} is not a valid label. It should be "
f"one of {present_labels}"
)
average_precision = partial(
_binary_uninterpolated_average_precision,
known_skew=known_skew,
new_skew=new_skew,
pos_label=pos_label,
)
ap = _average_binary_score(
average_precision, y_true, y_score, average, sample_weight=sample_weight
)
if min_method == "random":
ap_min = known_skew
elif min_method == "skew":
ap_min = min_aupdc(
y_true,
pos_label,
average,
sample_weight=sample_weight,
known_skew=known_skew,
new_skew=new_skew,
)
naupdc = (ap - ap_min) / (1.0 - ap_min)
return naupdc | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/common/metrics.py | 0.920567 | 0.582996 | metrics.py | pypi |
import xarray as xr
import numpy as np
from skexplain.common.utils import compute_bootstrap_indices
import pandas as pd
def method_average_ranking(data, features, methods, estimator_names, n_features=12):
"""
Compute the median ranking across the results of different ranking methods.
Also, include the 25-75th percentile ranking uncertainty.
Parameters
------------
data : list of xarray.Dataset
The set of predictor ranking results to average over.
methods : list of string
The ranking methods to use from the data (see plot_importance for examples)
estimator_names : string or list of strings
Name of the estimator(s).
Returns
--------
    data : xarray.Dataset
        Dataset with the features sorted by their median ranking (lower
        values indicate higher rank) along with the per-method rankings
        used to compute the 25-75th percentile uncertainty.
    """
rankings_dict = {f: [] for f in features}
for d, method in zip(data, methods):
for estimator_name in estimator_names:
features = d[f"{method}_rankings__{estimator_name}"].values[:n_features]
rankings = {f: i for i, f in enumerate(features)}
for f in features:
try:
rankings_dict[f].append(rankings[f])
except:
rankings_dict[f].append(np.nan)
max_len = np.max([len(rankings_dict[k]) for k in rankings_dict.keys()])
for k in rankings_dict.keys():
l = rankings_dict[k]
if len(l) < max_len:
delta = max_len - len(l)
rankings_dict[k] = l + [np.nan] * delta
rankings_dict_avg = {
f: np.nanpercentile(rankings_dict[f], 50) for f in rankings_dict.keys()
}
features = np.array(list(rankings_dict_avg.keys()))
rankings = np.array([rankings_dict_avg[f] for f in features])
idxs = np.argsort(rankings)
rankings_sorted = rankings[idxs]
features_ranked = features[idxs]
scores = np.array([rankings_dict[f] for f in features_ranked])
data = {}
data[f"combined_rankings__{estimator_name}"] = (
[f"n_vars_avg"],
features_ranked,
)
data[f"combined_scores__{estimator_name}"] = (
[f"n_vars_avg", "n_bootstrap"],
scores,
)
data = xr.Dataset(data)
return data
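# Hypothetical usage sketch (not part of the original module): `datasets` is a
# list of ranking xarray.Datasets (one per method) and the names below are
# illustrative only.
#
#   avg = method_average_ranking(
#       data=datasets,
#       features=["cape", "shear", "t2m"],
#       methods=["backward_singlepass", "shap_sum", "gini"],
#       estimator_names=["Random Forest"],
#   )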
def non_increasing(L):
    # Check that the scores are non-increasing.
return all(x >= y for x, y in zip(L, L[1:]))
def compute_importance(results, scoring_strategy, direction):
"""
Compute the importance scores from the permutation importance results.
The importance score varies depending on the orientation of the
loss metric and whether it is multipass or singlepass.
Parameters
--------------
results : InterpretToolkit.permutation_importance results
xr.Dataset
scoring_strategy : 'minimize' or 'maximize'
Whether the strategy for assessing importance was
based on minimizing or maximing the performance metric
after permuting features (e.g., the goal is to 'maximize'
loss metrics like MSE, but 'minimize' rank metrics like AUC)
direction : 'forward' or 'backward'
Whether the permutation method was 'forward' or 'backward'.
Returns
--------------
results : xarray.Dataset
scores for each estimator and multi/singlepass are
converted to proper importance scores.
"""
if scoring_strategy == 'argmin_of_mean':
scoring_strategy = 'minimize'
elif scoring_strategy == 'argmax_of_mean':
scoring_strategy = 'maximize'
print(direction, scoring_strategy)
estimators = results.attrs["estimators used"]
for estimator in estimators:
orig_score = results[f"original_score__{estimator}"].values
if direction == 'forward':
all_permuted_score = results[f"all_permuted_score__{estimator}"].values
for mode in ["singlepass", "multipass"]:
permute_scores = results[f"{mode}_scores__{estimator}"].values
if direction == 'forward':
# For a loss metric, forward importance is generically defined
# as the error(X_J') - error(X_j') where J is the set of
# all features while j is some subset of J or a single feature.
if scoring_strategy == 'maximize':
# E.g., AUC, NAUPDC, CSI, BSS
imp = permute_scores - all_permuted_score
elif scoring_strategy == 'minimize':
# For a rank-based metric, importance is defined opposite
# of the loss metric [ error(X_j') - error(X_J') ]
# E.g., MSE, BS, etc.
print('This happened for forward!')
imp = all_permuted_score - permute_scores
elif direction == 'backward':
# For a loss metric, backward importance is generically defined
# as the error(X_j') - error(X_j) where j is some subset or
# a single feature.
if scoring_strategy == 'minimize':
imp = orig_score - permute_scores
elif scoring_strategy == 'maximize':
# For a rank-based metric, it is defined opposite of that above.
# i.e., error(X_j) - error(X_j')
print('this happened for backward!')
imp = permute_scores - orig_score
"""
decreasing = non_increasing(np.mean(permute_scores, axis=1))
if decreasing:
if orientation == "negative":
# Singlepass MSE
imp = permute_scores - orig_score
else:
# Backward Multipass on AUC/AUPDC (permuted_score - (1-orig_score)).
# Most positively-oriented metrics top off at 1.
top = np.max(permute_scores)
imp = permute_scores - (top - orig_score)
else:
if orientation == "negative":
# Forward Multipass MSE
top = np.max(permute_scores)
imp = (top + orig_score) - permute_scores
else:
# Singlepass AUC/NAUPDC
imp = orig_score - permute_scores
"""
# Normalize the importance score so that range is [0,1]
imp = imp / (np.percentile(imp, 99) - np.percentile(imp, 1))
results[f"{mode}_scores__{estimator}"] = (
[f"n_vars_{mode}", "n_bootstrap"],
imp,
)
return results
def to_skexplain_importance(
importances, estimator_name,
feature_names,
method,
normalize=False,
bootstrap_axis=0,
):
"""
Convert feature ranking-based scores from non-permutation-importance methods
computed by scikit-explain or other methods into a skexplain-style
datset to leverage the built-in plotting code. This method handles the
ranking and sorting of the importance values by assuming higher values
equal higher importance.
Caution: This method assumes that higher values equal higher importance!
Parameters
---------------
importances : 1d or 2d array-like
The feature importance scores. The code assumes that 2D arrays are the result
of bootstrapping. Users can declare the bootstrapping axis with `bootstrap_axis`.
By default, the first axis (=0) is the bootstrap axis for skexplain methods
bootstrap_axis : int (default=0)
estimator_name : str
The estimator name. Used for plotting and creating the dataset.
feature_names : array-like of shape (n_features)
The feature names. Used for plotting and creating the dataset.
method : 'sage', 'coefs', 'shap_std', 'shap_sum', 'tree_interpreter', 'lime' or str
The name of the feature ranking method. The named method perform specific
operations. For example, local methods like 'shap_sum', 'tree_interpreter',
'lime' will sum the importance values to determine feature ranking.
normalize : True/False (default=False)
If True, normalize the feature importance values using min-max scaling.
This is useful when comparing importance across different methods.
"""
bootstrap = False
if method == "sage":
importances_std = importances.std
importances = importances.values
elif method == "coefs":
importances = np.absolute(importances)
elif method == "shap_std":
# Compute the std(SHAP)
importances = np.nanstd(importances, axis=0)
elif method == "shap_sum" or method == "tree_interpreter" or method == 'lime':
# Compute sum of abs values
importances = np.nansum(np.absolute(importances), axis=0)
else:
if np.ndim(importances) == 2:
# average over bootstrapping
bootstrap = True
importances_to_save = importances.copy()
importances = np.nanmean(importances, axis=bootstrap_axis)
# Sort from higher score to lower score
ranked_indices = np.argsort(importances)[::-1]
if bootstrap:
scores_ranked = importances_to_save[ranked_indices, :]
else:
scores_ranked = importances[ranked_indices]
if method == "sage":
std_ranked = importances_std[ranked_indices]
features_ranked = np.array(feature_names)[ranked_indices]
data = {}
data[f"{method}_rankings__{estimator_name}"] = (
[f"n_vars_{method}"],
features_ranked,
)
if not bootstrap:
scores_ranked = scores_ranked.reshape(len(scores_ranked), 1)
importances = importances.reshape(len(importances), 1)
if normalize:
# Normalize the importance score so that range is [0,1]
scores_ranked = scores_ranked / (
np.percentile(scores_ranked, 99) - np.percentile(scores_ranked, 1)
)
data[f"{method}_scores__{estimator_name}"] = (
[f"n_vars_{method}", "n_bootstrap"],
scores_ranked,
)
if method == "sage":
data[f"sage_scores_std__{estimator_name}"] = (
[f"n_vars_sage"],
std_ranked,
)
data = xr.Dataset(data)
data.attrs["estimators used"] = estimator_name
data.attrs["estimator output"] = "probability"
return data
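# Hypothetical usage sketch (not part of the original module): convert the
# impurity-based importances of a fitted random forest into a skexplain-style
# ranking dataset. The estimator/feature names are illustrative only.
def _example_to_skexplain_importance():
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier
    X, y = make_classification(n_samples=300, n_features=4, random_state=0)
    feature_names = [f"X{i}" for i in range(X.shape[1])]
    rf = RandomForestClassifier(random_state=0).fit(X, y)
    # 'gini' is one of the method names recognized by the plotting code.
    return to_skexplain_importance(
        rf.feature_importances_,
        estimator_name="Random Forest",
        feature_names=feature_names,
        method="gini",
        normalize=False,
    )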
def combine_top_features(results_dict, n_vars=None):
"""Combines the list of top features from different estimators
into a single list where duplicates are removed.
Args:
-------------
results_dict : dict
n_vars : integer
"""
if n_vars is None:
n_vars = 1000
combined_features = []
for estimator_name in results_dict.keys():
features = results_dict[estimator_name]
combined_features.append(features)
unique_features = list(set.intersection(*map(set, combined_features)))[:n_vars]
return unique_features
def retrieve_important_vars(results, estimator_names, multipass=True):
"""
Return a list of the important features stored in the
ImportanceObject
Args:
-------------------
results : python object
ImportanceObject from PermutationImportance
multipass : boolean
if True, returns the multipass permutation importance results
else returns the singlepass permutation importance results
Returns:
top_features : list
a list of features with order determined by
the permutation importance method
"""
perm_method = "multipass" if multipass else "singlepass"
direction = results.attrs['direction']
important_vars_dict = {}
for estimator_name in estimator_names:
top_features = list(results[f"{direction}_{perm_method}_rankings__{estimator_name}"].values)
important_vars_dict[estimator_name] = top_features
return important_vars_dict
def find_correlated_pairs_among_top_features(
corr_matrix,
top_features,
rho_threshold=0.8,
):
"""
Of the top features, find correlated pairs above some
linear correlation coefficient threshold
Args:
----------------------
corr_matrix : pandas.DataFrame
top_features : list of strings
rho_threshold : float
"""
top_feature_indices = {f: i for i, f in enumerate(top_features)}
sub_corr_matrix = corr_matrix[top_features].loc[top_features]
pairs = []
for feature in top_features:
# try:
most_corr_feature = (
sub_corr_matrix[feature].sort_values(ascending=False).index[1]
)
# except:
# continue
most_corr_value = sub_corr_matrix[feature].sort_values(ascending=False)[1]
if round(most_corr_value, 5) >= rho_threshold:
pairs.append((feature, most_corr_feature))
pairs = list(set([tuple(sorted(t)) for t in pairs]))
pair_indices = [
(top_feature_indices[p[0]], top_feature_indices[p[1]]) for p in pairs
]
return pairs, pair_indices
def all_permuted_score(estimator, X, y, evaluation_fn, n_permute, subsample, random_seed=123, class_index=1):
random_state = np.random.RandomState(random_seed)
inds = random_state.permutation(len(X))
if isinstance(X, pd.DataFrame):
X = X.values
inds_set = compute_bootstrap_indices(X, subsample=1.0, n_bootstrap=n_permute, seed=90)
scores = []
for inds in inds_set:
X_sampled = X[inds, :]
X_permuted = np.array([ X_sampled[inds, i] for i in range(X.shape[1])]).T
if hasattr(estimator, 'predict_proba'):
predictions = estimator.predict_proba(X_permuted)[:]
# For binary classification problems.
if predictions.shape[1] == 2:
predictions = predictions[:,1]
#print (predictions.shape)
elif hasattr(estimator, 'predict'):
predictions = estimator.predict(X_permuted)[:]
else:
raise AttributeError(f'{estimator} does not have .predict or .predict_proba!')
scores.append(evaluation_fn(y, predictions))
return np.array(scores) | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/common/importance_utils.py | 0.885749 | 0.579311 | importance_utils.py | pypi |
import numpy as np
import xarray as xr
import pandas as pd
from collections import ChainMap
from statsmodels.distributions.empirical_distribution import ECDF
from scipy.stats import t
from sklearn.linear_model import Ridge
class MissingFeaturesError(Exception):
""" Raised when features are missing.
        E.g., all features are required for
        IAS or MEC.
"""
def __init__(self, estimator_name, missing_features):
self.message = f"""ALE for {estimator_name} was not computed for all features.
These features were missing: {missing_features}"""
super().__init__(self.message)
def check_all_features_for_ale(ale, estimator_names, features):
""" Is there ALE values for each feature """
data_vars = ale.data_vars
for estimator_name in estimator_names:
_list = [True if f'{f}__{estimator_name}__ale' in data_vars else False for f in features]
if not all(_list):
missing_features = np.array(features)[np.where(~np.array(_list))[0]]
raise MissingFeaturesError(estimator_name, missing_features)
def flatten_nested_list(list_of_lists):
"""Turn a list of list into a single, flatten list"""
all_elements_are_lists = all([is_list(item) for item in list_of_lists])
if not all_elements_are_lists:
new_list_of_lists = []
for item in list_of_lists:
if is_list(item):
new_list_of_lists.append(item)
else:
new_list_of_lists.append([item])
list_of_lists = new_list_of_lists
return [item for elem in list_of_lists for item in elem]
def is_dataset(data):
return isinstance(data, xr.Dataset)
def is_dataframe(data):
return isinstance(data, pd.DataFrame)
def check_is_permuted(X, X_permuted):
permuted_features = []
for f in X.columns:
if not np.array_equal(X.loc[:, f], X_permuted.loc[:, f]):
permuted_features.append(f)
return permuted_features
def is_correlated(corr_matrix, feature_pairs, rho_threshold=0.8):
"""
    Returns a dict where the keys are the feature pairs and the values
    are booleans indicating whether the pair is linearly correlated above
    the given threshold.
"""
results = {}
for pair in feature_pairs:
f1, f2 = pair.split("__")
corr = corr_matrix[f1][f2]
results[pair] = round(corr, 3) >= rho_threshold
return results
def is_fitted(estimator):
"""
Checks if a scikit-learn estimator/transformer has already been fit.
Parameters
----------
estimator: scikit-learn estimator (e.g. RandomForestClassifier)
or transformer (e.g. MinMaxScaler) object
Returns
-------
Boolean that indicates if ``estimator`` has already been fit (True) or not (False).
"""
attrs = [v for v in vars(estimator) if v.endswith("_") and not v.startswith("__")]
return len(attrs) != 0
def determine_feature_dtype(X, features):
"""
Determine if any features are categorical.
"""
feature_names = list(X.columns)
non_cat_features = []
cat_features = []
for f in features:
if f not in feature_names:
raise KeyError(f"'{f}' is not a valid feature.")
if str(X.dtypes[f]) == "category":
cat_features.append(f)
else:
non_cat_features.append(f)
return non_cat_features, cat_features
def cartesian(array, out=None):
"""Generate a cartesian product of input array.
    Code comes directly from sklearn/utils/extmath.py.

    Parameters
    ----------
array : list of array-like
1-D array to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(array)) containing cartesian products
formed of input array.
    Examples
    --------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
array = [np.asarray(x) for x in array]
shape = (len(x) for x in array)
dtype = array[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(array), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(array):
out[:, n] = array[n][ix[:, n]]
return out
def to_dataframe(results, estimator_names, feature_names):
"""
Convert the feature contribution results to a pandas.DataFrame
with nested indexing.
"""
# results[0] = dict of avg. contributions per estimator
# results[1] = dict of avg. feature values per estimator
contrib_names = feature_names.copy()
contrib_names += ["Bias"]
nested_key = results[0][estimator_names[0]].keys()
dframes = []
for key in nested_key:
data = []
for name in estimator_names:
contribs_dict = results[0][name][key]
vals_dict = results[1][name][key]
data.append(
[contribs_dict[f] for f in contrib_names]
+ [vals_dict[f] for f in feature_names]
)
column_names = [f + "_contrib" for f in contrib_names] + [
f + "_val" for f in feature_names
]
df = pd.DataFrame(data, columns=column_names, index=estimator_names)
dframes.append(df)
result = pd.concat(dframes, keys=list(nested_key))
return result
def to_xarray(data):
"""Converts data dict to xarray.Dataset"""
ds = xr.Dataset(data)
return ds
def is_str(a):
"""Check if argument is a string"""
return isinstance(a, str)
def is_list(a):
"""Check if argument is a list"""
return isinstance(a, list)
def to_list(a):
"""Convert argument to a list"""
return [a]
def is_tuple(a):
"""Check if argument is a tuple"""
return isinstance(a, tuple)
def is_valid_feature(features, official_feature_list):
"""Check if a feature is valid"""
for f in features:
if isinstance(f, tuple):
for sub_f in f:
if sub_f not in official_feature_list:
raise Exception(f"Feature {sub_f} is not a valid feature!")
else:
if f not in official_feature_list:
raise Exception(f"Feature {f} is not a valid feature!")
def is_classifier(estimator):
"""Return True if the given estimator is (probably) a classifier.
    Function from base.py in sklearn.

    Parameters
    ----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a classifier and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Return True if the given estimator is (probably) a regressor.
    Function from base.py in sklearn.

    Parameters
    ----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a regressor and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "regressor"
def is_all_dict(alist):
"""Check if every element of a list are dicts"""
return all([isinstance(l, dict) for l in alist])
def compute_bootstrap_indices(X, subsample=1.0, n_bootstrap=1, seed=90):
"""
Routine to generate the indices for bootstrapped X.
Args:
----------------
X : pandas.DataFrame, numpy.array
subsample : float or integer
n_bootstrap : integer
Return:
----------------
bootstrap_indices : list
list of indices of the size of subsample or subsample*len(X)
"""
base_random_state = np.random.RandomState(seed=seed)
random_num_set = base_random_state.choice(10000, size=n_bootstrap, replace=False)
random_states = [np.random.RandomState(s) for s in random_num_set]
n_samples = len(X)
size = int(n_samples * subsample) if subsample <= 1.0 else subsample
bootstrap_indices = [
random_state.choice(range(n_samples), size=size).tolist()
for random_state in random_states
]
return bootstrap_indices
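
# Illustrative sketch (not part of the original module): draws three bootstrap
# index sets over a small array. The array contents are arbitrary.
def _example_compute_bootstrap_indices():
    import numpy as np

    X = np.arange(20).reshape(10, 2)
    indices = compute_bootstrap_indices(X, subsample=1.0, n_bootstrap=3, seed=42)
    # Three lists of row indices, each as long as X because subsample=1.0;
    # indices can repeat within a list since sampling is done with replacement.
    assert len(indices) == 3
    assert all(len(inds) == len(X) for inds in indices)
    return indices
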
def merge_dict(dicts):
"""Merge a list of dicts into a single dict"""
return dict(ChainMap(*dicts))
def merge_nested_dict(dicts):
"""
Merge a list of nested dicts into a single dict
"""
merged_dict = {}
for d in dicts:
for key in d.keys():
for subkey in d[key].keys():
if key not in list(merged_dict.keys()):
merged_dict[key] = {subkey: {}}
merged_dict[key][subkey] = d[key][subkey]
return merged_dict
def is_outlier(points, thresh=3.5):
"""
Returns a boolean array with True if points are outliers and False
otherwise.
Parameters:
-----------
        points : A numobservations-by-numdimensions array of observations
thresh : The modified z-score to use as a threshold. Observations with
a modified z-score (based on the median absolute deviation) greater
than this value will be classified as outliers.
Returns:
--------
mask : A numobservations-length boolean array.
References:
----------
Boris Iglewicz and David Hoaglin (1993), "Volume 16: How to Detect and
Handle Outliers", The ASQC Basic References in Quality Control:
Statistical Techniques, Edward F. Mykytka, Ph.D., Editor.
"""
if len(points.shape) == 1:
points = points[:, None]
median = np.median(points, axis=0)
diff = np.sum((points - median) ** 2, axis=-1)
diff = np.sqrt(diff)
med_abs_deviation = np.median(diff)
modified_z_score = 0.6745 * diff / med_abs_deviation
return modified_z_score > thresh
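
# Illustrative sketch (not part of the original module): flags an obvious
# outlier in a 1-D sample using the modified z-score rule above. The data and
# threshold are arbitrary.
def _example_is_outlier():
    import numpy as np

    points = np.array([1.0, 1.1, 0.9, 1.05, 0.95, 10.0])
    mask = is_outlier(points, thresh=3.5)
    # Only the last value is far from the median in MAD units, so only its
    # entry in `mask` is True.
    return mask
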
def cmds(D, k=2):
"""Classical multidimensional scaling
Theory and code references:
https://en.wikipedia.org/wiki/Multidimensional_scaling#Classical_multidimensional_scaling
http://www.nervouscomputer.com/hfs/cmdscale-in-python/
Arguments:
    D -- A square matrix-like object (array, DataFrame, ...), usually a distance matrix.
    k -- Number of dimensions of the embedding (default 2).
"""
n = D.shape[0]
if D.shape[0] != D.shape[1]:
raise Exception("The matrix D should be squared")
if k > (n - 1):
raise Exception("k should be an integer <= D.shape[0] - 1")
# (1) Set up the squared proximity matrix
D_double = np.square(D)
# (2) Apply double centering: using the centering matrix
# centering matrix
center_mat = np.eye(n) - np.ones((n, n)) / n
# apply the centering
B = -(1 / 2) * center_mat.dot(D_double).dot(center_mat)
# (3) Determine the m largest eigenvalues
# (where m is the number of dimensions desired for the output)
# extract the eigenvalues
eigenvals, eigenvecs = np.linalg.eigh(B)
# sort descending
idx = np.argsort(eigenvals)[::-1]
eigenvals = eigenvals[idx]
eigenvecs = eigenvecs[:, idx]
# (4) Now, X=eigenvecs.dot(eigen_sqrt_diag),
# where eigen_sqrt_diag = diag(sqrt(eigenvals))
eigen_sqrt_diag = np.diag(np.sqrt(eigenvals[0:k]))
ret = eigenvecs[:, 0:k].dot(eigen_sqrt_diag)
return ret
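
# Illustrative sketch (not part of the original module): embeds three points
# with a known pairwise distance matrix into one dimension. The distances
# correspond to points placed at 0, 1 and 3 on a line.
def _example_cmds():
    import numpy as np

    D = np.array([
        [0.0, 1.0, 3.0],
        [1.0, 0.0, 2.0],
        [3.0, 2.0, 0.0],
    ])
    coords = cmds(D, k=1)
    # `coords` has shape (3, 1); the recovered 1-D positions reproduce the
    # pairwise distances up to reflection and translation.
    return coords
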
def order_groups(X, feature):
"""Assign an order to the values of a categorical feature.
The function returns an order to the unique values in X[feature] according to
their similarity based on the other features.
The distance between two categories is the sum over the distances of each feature.
Arguments:
    X -- A pandas DataFrame containing all the features to consider in the ordering
(including the categorical feature to be ordered).
feature -- String, the name of the column holding the categorical feature to be ordered.
"""
features = X.columns
# groups = X[feature].cat.categories.values
groups = X[feature].unique()
D_cumu = pd.DataFrame(0, index=groups, columns=groups)
K = len(groups)
for j in set(features) - set([feature]):
D = pd.DataFrame(index=groups, columns=groups)
# discrete/factor feature j
# e.g. j = 'color'
if (X[j].dtypes.name == "category") | (
(len(X[j].unique()) <= 10) & ("float" not in X[j].dtypes.name)
):
# counts and proportions of each value in j in each group in 'feature'
cross_counts = pd.crosstab(X[feature], X[j])
cross_props = cross_counts.div(np.sum(cross_counts, axis=1), axis=0)
for i in range(K):
group = groups[i]
D_values = abs(cross_props - cross_props.loc[group]).sum(axis=1) / 2
D.loc[group, :] = D_values
D.loc[:, group] = D_values
else:
# continuous feature j
# e.g. j = 'length'
# extract the 1/100 quantiles of the feature j
seq = np.arange(0, 1, 1 / 100)
q_X_j = X[j].quantile(seq).to_list()
            # get the ecdf (empirical cumulative distribution function)
# compute the function from the data points in each group
X_ecdf = X.groupby(feature)[j].agg(ECDF)
# apply each of the functions on the quantiles
# i.e. for each quantile value get the probability that j will take
# a value less than or equal to this value.
q_ecdf = X_ecdf.apply(lambda x: x(q_X_j))
for i in range(K):
group = groups[i]
D_values = q_ecdf.apply(lambda x: max(abs(x - q_ecdf[group])))
D.loc[group, :] = D_values
D.loc[:, group] = D_values
D_cumu = D_cumu + D
    # To avoid numpy.core._exceptions._UFuncInputCastingError, convert to dtype float
D_cumu = D_cumu.astype(float)
# reduce the dimension of the cumulative distance matrix to 1
D1D = cmds(D_cumu, 1).flatten()
# order groups based on the values
    order_idx = D1D.argsort()
    groups_ordered = D_cumu.index[order_idx]
return pd.Series(range(K), index=groups_ordered)
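
# Illustrative sketch (not part of the original module): orders the levels of
# a categorical column by how similar their distributions are on the other
# column. The toy frame and column names are made up.
def _example_order_groups():
    import numpy as np
    import pandas as pd

    rng = np.random.RandomState(0)
    X = pd.DataFrame({
        "color": rng.choice(["red", "green", "blue"], size=120),
        "size": rng.choice([1, 2, 3], size=120),
    })
    order = order_groups(X, "color")
    # A Series mapping each color to its position in the inferred ordering.
    return order
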
def quantile_ied(x_vec, q):
"""
Inverse of empirical distribution function (quantile R type 1).
More details in
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.mquantiles.html
https://stat.ethz.ch/R-manual/R-devel/library/stats/html/quantile.html
https://en.wikipedia.org/wiki/Quantile
Arguments:
x_vec -- A pandas series containing the values to compute the quantile for
q -- An array of probabilities (values between 0 and 1)
"""
x_vec = x_vec.sort_values()
n = len(x_vec) - 1
m = 0
j = (n * q + m).astype(int) # location of the value
g = n * q + m - j
gamma = (g != 0).astype(int)
quant_res = (1 - gamma) * x_vec.shift(1, fill_value=0).iloc[j] + gamma * x_vec.iloc[
j
]
quant_res.index = q
# add min at quantile zero and max at quantile one (if needed)
if 0 in q:
quant_res.loc[0] = x_vec.min()
if 1 in q:
quant_res.loc[1] = x_vec.max()
return quant_res
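
# Illustrative sketch (not part of the original module): type-1 quantiles of a
# small pandas Series. Values and probabilities are arbitrary.
def _example_quantile_ied():
    import numpy as np
    import pandas as pd

    x_vec = pd.Series([3.0, 1.0, 4.0, 1.5, 5.0, 9.0, 2.5, 6.0])
    q = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
    quantiles = quantile_ied(x_vec, q)
    # A Series indexed by the probabilities, with the sample minimum at
    # quantile 0 and the sample maximum at quantile 1.
    return quantiles
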
def CI_estimate(x_vec, C=0.95):
"""Estimate the size of the confidence interval of a data sample.
The confidence interval of the given data sample (x_vec) is
[mean(x_vec) - returned value, mean(x_vec) + returned value].
"""
alpha = 1 - C
n = len(x_vec)
stand_err = x_vec.std() / np.sqrt(n)
critical_val = 1 - (alpha / 2)
z_star = stand_err * t.ppf(critical_val, n - 1)
return z_star
dict_disc_to_bin = {
'quartile': [25, 50, 75],
'quintile': [20, 40, 60, 80],
'decile': [10, 20, 30, 40, 50, 60, 70, 80, 90]
}
def ridge_solve(tup):
data_synthetic_onehot, model_pred, weights = tup
solver = Ridge(alpha=1, fit_intercept=True)
solver.fit(data_synthetic_onehot,
model_pred,
sample_weight=weights.ravel())
# Get explanations
importance = solver.coef_[
data_synthetic_onehot[0].toarray().ravel() == 1].ravel()
bias = solver.intercept_
return importance, bias
def kernel_fn(distances, kernel_width):
return np.sqrt(np.exp(-(distances ** 2) / kernel_width ** 2))
def discretize(X, percentiles=[25, 50, 75], all_bins=None):
if all_bins is None:
all_bins = np.percentile(X, percentiles, axis=0).T
return (np.array([np.digitize(a, bins)
                      for (a, bins) in zip(X.T, all_bins)]).T, all_bins)
import numpy as np
import time
import _pickle as cPickle
from sklearn.metrics.scorer import _BaseScorer
class TimeScorer(_BaseScorer):
def _score(self, method_caller, estimator, X, y_true=None, n_iter=1, unit=True, scoring=None, tradeoff=None, sample_weight=None):
"""
Evaluate prediction latency.
Parameters
----------
method_caller : callable
Returns predictions given an estimator, method name, and other
arguments, potentially caching results.
estimator : object
Trained estimator to use for scoring.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like, default None
Gold standard target values for X. Not necessary
for _TimeScorer.
n_iter : int, default 1
Number of timing runs.
        unit : bool, default True
            If True, time one ``predict`` call per sample; otherwise time a
            single batch ``predict`` call over X. Either way the result is
            converted to a per-sample latency.
scoring: scorer object, default None
Scorer used for trade-off.
tradeoff: float, default None
Multiplier for tradeoff.
Returns
-------
score : float
Custom score combining scoring method (optional)
and estimator prediction latency (ms).
"""
# overwrite kwargs from _kwargs
if "n_iter" in self._kwargs.keys():
n_iter = self._kwargs["n_iter"]
if "unit" in self._kwargs.keys():
unit = self._kwargs["unit"]
if "scoring" in self._kwargs.keys():
scoring = self._kwargs["scoring"]
if "tradeoff" in self._kwargs.keys():
tradeoff = self._kwargs["tradeoff"]
# run timing iterations
count = 0
time_sum = 0
while count < n_iter:
count += 1
if unit:
time_sum += np.sum([
self._elapsed(estimator, [x])
for x in X])
else:
time_sum += self._elapsed(estimator, X)
unit_time = 1000 * float((time_sum / float(n_iter)) / float(len(X)))
if scoring and tradeoff:
scoring_score = scoring(estimator, X, y_true)
return scoring_score - (tradeoff * unit_time)
else:
return 1. / unit_time
def _elapsed(self, estimator, X):
"""
Return elapsed time for predict method of estimator
on X.
"""
start_time = time.time()
y_pred = estimator.predict(X)
end_time = time.time()
return end_time - start_time
class MemoryScorer(_BaseScorer):
def _score(self, method_caller, estimator, X=None, y_true=None, scoring=None, tradeoff=None, sample_weight=None):
"""
Score using estimated memory of pickled estimator object.
Parameters
----------
method_caller : callable
Returns predictions given an estimator, method name, and other
arguments, potentially caching results.
estimator : object
Trained estimator to use for scoring.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
Not necessary for _MemoryScorer.
y_true : array-like, default None
Gold standard target values for X. Not necessary
for _MemoryScorer.
scoring: scorer object, default None
Scorer used for trade-off.
tradeoff: float, default None
Multiplier for tradeoff.
Returns
-------
score : float
Custom score combining scoring method (optional)
and estimator memory (MB).
"""
# overwrite kwargs from _kwargs
if "scoring" in self._kwargs.keys():
scoring = self._kwargs["scoring"]
if "tradeoff" in self._kwargs.keys():
tradeoff = self._kwargs["tradeoff"]
obj_size = (0.000001 * float(len(cPickle.dumps(estimator))))
if scoring and tradeoff:
scoring_score = scoring(estimator, X, y_true)
return scoring_score - (tradeoff * obj_size)
else:
return 1. / obj_size
class CombinedScorer(_BaseScorer):
def _score(self, method_caller, estimator, X=None, y_true=None, scoring=None, sample_weight=None):
"""
Combine multiple scorers using the average of their scores.
Parameters
----------
method_caller : callable
Returns predictions given an estimator, method name, and other
arguments, potentially caching results.
estimator : object
Trained estimator to use for scoring.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
            Not necessary for CombinedScorer.
        y_true : array-like, default None
            Gold standard target values for X. Not necessary
            for CombinedScorer.
scoring: list of scorer objects, default None
List of scorers to average.
Returns
-------
score : float
Custom score combining input scoring methods
            using the mean score.
"""
# overwrite kwargs from _kwargs
if "scoring" in self._kwargs.keys():
scoring = self._kwargs["scoring"]
if (not isinstance(scoring, list)) and (not isinstance(scoring, tuple)):
scoring = [scoring]
return np.mean([x(estimator, X, y_true) for x in scoring])
def cluster_distribution_score(X, labels):
"""
    Scoring function which scores how evenly the samples are distributed
    across the resulting clusters. A more even distribution yields a
    higher score.
Parameters
----------
X : array-like, shape (``n_samples``, ``n_features``)
List of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like, shape (``n_samples``,)
Predicted labels for each sample.
Returns
-------
score : float
The resulting Cluster Distribution score.
"""
n_clusters = float(len(np.unique(labels)))
max_count = float(np.max(np.bincount(labels)))
    return 1.0 / ((max_count / len(labels)) / (1.0 / n_clusters))
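
# Illustrative sketch (not part of the original module): scores how evenly a
# clustering spreads samples across clusters. The feature matrix and label
# vectors are made up.
def _example_cluster_distribution_score():
    import numpy as np

    X = np.random.RandomState(0).normal(size=(6, 2))
    balanced = np.array([0, 0, 1, 1, 2, 2])
    skewed = np.array([0, 0, 0, 0, 0, 1])
    # A perfectly even split scores 1.0; the skewed labelling scores lower.
    return (
        cluster_distribution_score(X, balanced),
        cluster_distribution_score(X, skewed),
    )
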
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from scipy.stats import rankdata
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import normalize
from sklearn.base import (
BaseEstimator, ClassifierMixin,
is_classifier, clone)
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.model_selection._split import check_cv
from sklearn.model_selection._search import BaseSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.exceptions import NotFittedError
from sklearn.metrics import calinski_harabasz_score
from sklearn.pipeline import Pipeline
class ZoomGridSearchCV(GridSearchCV):
"""
Fits multiple ``GridSearchCV`` models, updating
the ``param_grid`` after each iteration. The update
looks at successful parameter values for each
grid key. A new list of values is created which
expands the resolution of the search values centered
around the best performing value of the previous fit.
This allows the standard grid search process to start
with a small number of distant values for each parameter,
and zoom in as the better performing corner of the
hyperparameter search space becomes clear.
    The process only updates parameter keys whose values
are all of type ``int`` or are all of type ``float``. Any
other data type valued parameters or mixed parameters will
simply be copied and reused for each iteration. The only
stopping criteria for iterations is the ``n_iter`` parameter.
Inherits ``GridSearchCV`` so all methods
and attributes are identical except for ``fit``
which is overriden by a method looping through
the ``fit`` method of ``GridSearchCV``. Ultimately,
the class exactly resembles a fitted ``GridSearchCV``
after ``fit`` is run. Running ``fit`` with
    ``n_iter = 0`` is identical to running ``fit``
with ``GridSearchCV``.
"""
def __init__(self, estimator, param_grid,
n_iter=1, **kwargs):
GridSearchCV.__init__(
self, estimator, param_grid, **kwargs)
self._fit = GridSearchCV.fit
self.n_iter=n_iter
def fit(self, X, y=None, groups=None, **fit_params):
"""
Run fit with all sets of parameters. For ``n_iter``
iterations, zoom in on successful parameters, creating
a new parameter grid and refitting.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
"""
n = -1
while n < self.n_iter:
if n > -1:
self._update_grid()
if self.verbose > 0:
print("Grid Updated on Iteration {0}:".format(n))
print(self.param_grid)
else:
if self.verbose > 0:
print("Initial Grid:")
print(self.param_grid)
GridSearchCV.fit(self, X, y=y, groups=groups, **fit_params)
n += 1
def _update_grid(self):
""" Update parameter grid based on previous fit results """
results = pd.DataFrame(self.cv_results_)
# get parameters to update
update_params = {}
for key, value in self.param_grid.items():
if all(isinstance(x, int) for x in value):
updated_value = self._update_elements(
results, key, value, dtype=int)
elif all(isinstance(x, float) for x in value):
updated_value = self._update_elements(
results, key, value, dtype=float)
else:
updated_value = value
if len(updated_value) > 0:
update_params[key] = updated_value
# update parameter grid attribute
self.param_grid = update_params
def _update_elements(self, results, key, value, dtype=int):
""" Update elements of a single param_grid key, value pair """
tmp = (results.loc[~pd.isnull(results["param_{0}".format(key)]),
["param_{0}".format(key), "rank_test_score"]]
.sort_values("rank_test_score"))
best_val = tmp["param_{0}".format(key)].values[0]
value_range = (np.max(value) - np.min(value)) / 5.0
val = (
list(
np.linspace(best_val, best_val + value_range,
int(round(len(value) / 2.0)))) +
list(
np.linspace(best_val - value_range, best_val,
int(round(len(value) / 2.0)))))
val = list(np.unique([dtype(x) for x in val]))
if all(x >= 0 for x in value):
val = [x for x in val if x >= 0]
elif all(x <= 0 for x in value):
val = [x for x in val if x <= 0]
return val
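
# Illustrative usage sketch (not part of the original module): runs a zoomed
# grid search over a single integer hyperparameter of a decision tree. The
# dataset, grid values and number of zoom iterations are arbitrary choices.
def _example_zoom_grid_search():
    from sklearn.datasets import load_iris
    from sklearn.tree import DecisionTreeClassifier

    X, y = load_iris(return_X_y=True)
    search = ZoomGridSearchCV(
        DecisionTreeClassifier(random_state=0),
        param_grid={"max_depth": [2, 8, 32]},
        n_iter=2,
        cv=3,
    )
    search.fit(X, y)
    # After fitting, the object behaves like a fitted GridSearchCV.
    return search.best_params_, search.best_score_
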
class PrunedPipeline(Pipeline):
"""
A standard sklearn feature Pipeline with additional
pruning method. After fitting, the pruning method is
applied to the fitted pipeline. This applies the
feature selection directly to the fitted vocabulary
(and idf values if applicable), removing all elements of
these attributes that will not ultimately survive the
feature selection filter.
    The ``PrunedPipeline`` will make identical predictions
as a similarly trained ``Pipeline``. However, it will require
less memory and will make faster predictions.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
memory : None, str or object with the joblib.Memory interface, optional
Used to cache the fitted transformers of the pipeline. By default,
no caching is performed. If a string is given, it is the path to
the caching directory. Enabling caching triggers a clone of
the transformers before fitting. Therefore, the transformer
instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
vectorizer_name : str, default vec
Name of ``Pipeline`` step which performs feature extraction. Any
transformer with a ``vocabulary_``dictionary can be the step
with this name.
Ideal transformers are of types
sklearn.feature_extraction.text.CountVectorizer or
sklearn.feature_extraction.text.TfidfVectorizer.
selector_name : str, default select
Name of ``Pipeline`` step which performs feature selection. Any
transformer with a ``get_support`` method returning an iterable
of booleans with length ``len(vocabulary_)`` can be the step with this name.
Ideal transformers are of type sklearn.feature_selection.univariate_selection._BaseFilter.
Attributes
----------
named_steps : bunch object, a dictionary with attribute access
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
"""
def __init__(self, steps, memory=None,
vectorizer_name="vec",
selector_name="select",
verbose=False):
self.steps = steps
self.memory = memory
self.verbose = verbose
self.vectorizer_name = vectorizer_name
self.selector_name = selector_name
self._validate_steps()
self._validate_prune()
def fit(self, X, y=None, **fit_params):
"""
Fit the model
Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Perform prune after standard pipeline fit.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
self : PrunedPipeline
This estimator
"""
# standard Pipeline fit method
self._validate_prune()
Xt, fit_params = self._fit(X, y, **fit_params)
if self._final_estimator is not None:
self._final_estimator.fit(Xt, y, **fit_params)
# prune pipeline
if self.selector_name and self.vectorizer_name:
self._prune()
return self
def _validate_prune(self):
""" Validate prune step inputs """
names, estimators = zip(*self.steps)
for name in [self.selector_name, self.vectorizer_name]:
if name:
if not name in names:
raise ValueError(
"Name {0} should exist in steps".format(
name))
self.selector_index = names.index(self.selector_name)
self.vectorizer_index = names.index(self.vectorizer_name)
def _prune(self):
"""
Prune fitted ``Pipeline`` object. The pruner runs the
``get_support`` method from the designated feature
selector, returning the selector mask. Then the ``vocabulary_``
(and optional ``idf_`` if exists) attribute is pruned
to only contain elements who survive the selector mask. The
selector step is then removed from the pipeline.
Transform methods on the pipeline will then reflect these
changes, reducing the size of the vectorizer and effectively
skipping the selector step.
"""
# collect pipeline step data
voc = self.steps[self.vectorizer_index][1].vocabulary_
if hasattr(self.steps[self.vectorizer_index][1], "idf_"):
idf = self.steps[self.vectorizer_index][1].idf_
else:
idf = None
support = self.steps[self.selector_index][1].get_support()
# restructure vocabulary
terms = []
indices = []
for key, value in voc.items():
terms.append(key)
indices.append(value)
sort_mask = np.argsort(indices)
terms = np.array(terms)[sort_mask]
# rebuild vocabulary dictionary
new_vocab = {}
new_idf = []
count = 0
for index in range(len(terms)):
if support[index]:
new_vocab[terms[index]] = count
if idf is not None:
new_idf.append(idf[index])
count += 1
# replace vocabulary
self.steps[self.vectorizer_index][1].vocabulary_ = new_vocab
if idf is not None:
self.steps[self.vectorizer_index][1]._tfidf._idf_diag = csr_matrix(np.diag(new_idf))
removed_step = self.steps.pop(self.selector_index)
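
# Illustrative usage sketch (not part of the original module): a small text
# pipeline whose vocabulary is pruned to the selected terms after fitting.
# The toy corpus is made up, and the example assumes the older scikit-learn
# Pipeline internals that this class relies on (e.g. ``_fit`` returning both
# the transformed data and the fit parameters).
def _example_pruned_pipeline():
    from sklearn.feature_extraction.text import CountVectorizer
    from sklearn.feature_selection import SelectKBest, chi2
    from sklearn.naive_bayes import MultinomialNB

    docs = [
        "red apples and green apples",
        "green pears and red pears",
        "fast cars and slow cars",
        "slow trains and fast trains",
    ]
    labels = [0, 0, 1, 1]
    pipe = PrunedPipeline([
        ("vec", CountVectorizer()),
        ("select", SelectKBest(chi2, k=4)),
        ("clf", MultinomialNB()),
    ])
    pipe.fit(docs, labels)
    # After fitting, the selector step is gone and the vectorizer vocabulary
    # only keeps the four selected terms, so predictions are cheaper.
    return pipe.predict(docs)
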
class MultiGridSearchCV(BaseSearchCV):
"""
An iterator through multiple GridSearchCV
models using various ``estimators`` and associated ``param_grids``.
Providing two equal length iterables as required arguments
    containing estimators and parameter grids, as well as keyword arguments for
GridSearchCV, will then simply iterate through and fit multiple
GridSearchCV models, fitting them sequentially.
    Then the ``best_score_`` values are compared across the
GridSearchCV models, and the best one is identified. The best
estimator is set as an attribute, ``best_estimator_`` and
the best GridSearchCV model is set as an attribute,
``best_grid_search_cv_``.
"""
def __init__(self, estimators, param_grids, gs_estimator=GridSearchCV, **kwargs):
self.estimators=estimators
self.param_grids=param_grids
self.gs_estimator=gs_estimator
self.gs_kwargs=kwargs
BaseSearchCV.__init__(
self, None, **kwargs)
def fit(self, X, y=None):
"""
Iterate through estimators and param_grids, fitting
each, and then chosing the best.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
# Iterate through estimators fitting each
models = []
for index in range(len(self.estimators)):
model = self.gs_estimator(
self.estimators[index],
self.param_grids[index],
**self.gs_kwargs)
model.fit(X, y)
models.append(model)
# Generate cross validation results
cv_df = pd.DataFrame()
for index in range(len(models)):
tmpDf = pd.DataFrame(models[index].cv_results_)
tmpDf["grid_search_index"] = index
cv_df = cv_df.append(tmpDf, sort=True)
cv_df.index = range(len(cv_df))
cv_df = cv_df[[c for c in cv_df.columns if "param_" not in c]]
cv_df["rank_test_score"] = map(int,
(len(cv_df) + 1) -
rankdata(cv_df["mean_test_score"], method="ordinal"))
self.cv_results_ = {}
for col in cv_df.columns:
self.cv_results_[col] = list(cv_df[col].values)
# Find best model and set associated attributes
self.scores_ = [x.best_score_ for x in models]
self.best_index_ = np.argmax(self.scores_)
self.best_score_ = models[self.best_index_].best_score_
self.best_grid_search_cv_ = models[self.best_index_]
self.best_estimator_ = models[self.best_index_].best_estimator_
self.scorer_ = self.best_grid_search_cv_.scorer_
self.multimetric_ = self.best_grid_search_cv_.multimetric_
self.n_splits_ = self.best_grid_search_cv_.n_splits_
return self
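
# Illustrative usage sketch (not part of the original module): compares a
# decision tree and a logistic regression, each with its own small grid, and
# keeps the better of the two fitted grid searches. Data, grids and cv are
# arbitrary, and the scikit-learn/pandas versions targeted by this module are
# assumed.
def _example_multi_grid_search():
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    from sklearn.tree import DecisionTreeClassifier

    X, y = load_iris(return_X_y=True)
    search = MultiGridSearchCV(
        estimators=[
            DecisionTreeClassifier(random_state=0),
            LogisticRegression(max_iter=1000),
        ],
        param_grids=[
            {"max_depth": [2, 4]},
            {"C": [0.1, 1.0]},
        ],
        cv=3,
    )
    search.fit(X, y)
    # The better of the two underlying GridSearchCV objects is exposed here.
    return search.best_estimator_, search.best_score_
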
class IterRandomEstimator(BaseEstimator, ClassifierMixin):
"""
Meta-Estimator intended primarily for unsupervised
estimators whose fitted model can be heavily dependent
on an arbitrary random initialization state. It is
best used for problems where a ``fit_predict`` method
is intended, so the only data used for prediction will be
the same data on which the model was fitted.
The ``fit`` method will fit multiple iterations of the same
base estimator, varying the ``random_state`` argument
for each iteration. The iterations will stop either
when ``max_iter`` is reached, or when the target
score is obtained.
The model does not use cross validation to find the best
estimator. It simply fits and scores on the entire input
    data set. A hyperparameter is not being optimized here,
only random initialization states. The idea is to find
the best fitted model, and keep that exact model, rather
than to find the best hyperparameter set.
"""
def __init__(self, estimator, target_score=None,
max_iter=10, random_state=None,
scoring=calinski_harabasz_score,
fit_params=None, verbose=0):
self.estimator=estimator
self.target_score=target_score
self.max_iter=max_iter
self.random_state=random_state
if not self.random_state:
self.random_state = np.random.randint(100)
self.fit_params=fit_params
self.verbose=verbose
self.scoring=scoring
def fit(self, X, y=None, **fit_params):
"""
Run fit on the estimator attribute multiple times
with various ``random_state`` arguments and choose
the fitted estimator with the best score.
Uses ``calinski_harabasz_score`` if no scoring is provided.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
"""
estimator = self.estimator
estimator.verbose = self.verbose
if self.verbose > 0:
            if self.target_score is not None:
print("Fitting {0} estimators unless a target "
"score of {1} is reached".format(
self.max_iter, self.target_score))
else:
print("Fitting {0} estimators".format(
self.max_iter))
count = 0
scores = []
estimators = []
states = []
random_state = self.random_state
        if random_state is None:
            random_state = np.random.randint(100)
while count < self.max_iter:
estimator = clone(estimator)
if random_state:
random_state = random_state + 1
estimator.random_state = random_state
estimator.fit(X, y, **fit_params)
labels = estimator.labels_
score = self.scoring(X, labels)
scores.append(score)
estimators.append(estimator)
states.append(random_state)
if self.target_score is not None and score > self.target_score:
break
count += 1
self.best_estimator_ = estimators[np.argmax(scores)]
self.best_score_ = np.max(scores)
self.best_index_ = np.argmax(scores)
self.best_params_ = self.best_estimator_.get_params()
self.scores_ = scores
self.random_states_ = states
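
# Illustrative usage sketch (not part of the original module): wraps KMeans
# and keeps the random initialization with the best Calinski-Harabasz score.
# The synthetic blobs and parameter values are arbitrary.
def _example_iter_random_estimator():
    from sklearn.cluster import KMeans
    from sklearn.datasets import make_blobs

    X, _ = make_blobs(n_samples=300, centers=3, random_state=0)
    search = IterRandomEstimator(
        KMeans(n_clusters=3, n_init=1),
        max_iter=5,
        random_state=1,
    )
    search.fit(X)
    # The fitted model with the highest score across the tried seeds is kept.
    return search.best_estimator_, search.best_score_
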
class OptimizedEnsemble(BaseSearchCV):
"""
An optimized ensemble class. Will find the optimal ``n_estimators``
parameter for the given ensemble estimator, according to the
specified input parameters.
The ``fit`` method will iterate through n_estimators options,
starting with n_estimators_init, and using the step_function
reursively from there. Stop at max_iter or when the score
gain between iterations is less than threshold.
The OptimizedEnsemble class can then itself be used
as an Estimator, or the ``best_estimator_`` attribute
can be accessed directly, which is a fitted version of the input
estimator with the optimal parameters.
"""
def __init__(self, estimator, n_estimators_init=5,
threshold=0.01, max_iter=10,
step_function=lambda x: x*2,
**kwargs):
self.n_estimators_init=n_estimators_init
self.threshold=threshold
self.step_function=step_function
self.max_iter=max_iter
BaseSearchCV.__init__(
self, estimator, **kwargs)
def fit(self, X, y, **fit_params):
"""
Find the optimal ``n_estimators`` parameter using a custom
optimization routine.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
"""
estimator = self.estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
n_splits = cv.get_n_splits(X, y, groups=None)
if self.verbose > 0:
print("Fitting {0} folds for each n_estimators candidate, "
"for a maximum of {1} candidates, totalling"
" a maximum of {2} fits".format(n_splits,
self.max_iter, self.max_iter * n_splits))
count = 0
scores = []
n_estimators = []
n_est = self.n_estimators_init
while count < self.max_iter:
estimator = clone(estimator)
estimator.n_estimators = n_est
score = np.mean(cross_val_score(
estimator, X, y, cv=self.cv,
scoring=self.scoring,
fit_params=fit_params,
verbose=self.verbose,
n_jobs=self.n_jobs,
pre_dispatch=self.pre_dispatch))
scores.append(score)
n_estimators.append(n_est)
if (count > 0 and
(scores[count] - scores[count - 1]) < self.threshold):
break
else:
best_estimator = estimator
count += 1
n_est = self.step_function(n_est)
self.scores_ = scores
self.n_estimators_list_ = n_estimators
if self.refit:
self.best_estimator_ = clone(best_estimator)
if y is not None:
self.best_estimator_.fit(X, y, **fit_params)
else:
self.best_estimator_.fit(X, **fit_params)
self.best_index_ = count - 1
self.best_score_ = self.scores_[count - 1]
self.best_n_estimators_ = self.n_estimators_list_[count - 1]
self.best_params_ = self.best_estimator_.get_params()
return self
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def score(self, X, y=None):
"""
Call score on the estimator with the best found parameters.
Only available if the underlying estimator supports ``score``.
This uses the score defined by the ``best_estimator_.score`` method.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
"""
self._check_is_fitted('score')
return self.best_estimator_.score(X, y)
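
# Illustrative usage sketch (not part of the original module): grows a random
# forest until adding more trees stops improving the cross-validated score by
# more than the threshold. Data and settings are arbitrary, and the
# cross_val_score signature of the scikit-learn versions this module targets
# is assumed.
def _example_optimized_ensemble():
    from sklearn.datasets import load_iris
    from sklearn.ensemble import RandomForestClassifier

    X, y = load_iris(return_X_y=True)
    opt = OptimizedEnsemble(
        RandomForestClassifier(random_state=0),
        n_estimators_init=5,
        threshold=0.01,
        max_iter=4,
        cv=3,
    )
    opt.fit(X, y)
    # The refitted forest with the chosen number of trees is stored in
    # opt.best_estimator_.
    return opt.best_n_estimators_, opt.best_score_
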
class OneVsRestAdjClassifier(OneVsRestClassifier):
"""
One-vs-the-rest (OvR) multiclass strategy
Also known as one-vs-all, this strategy consists in fitting one classifier per class.
For each classifier, the class is fitted against all the other classes.
In addition to its computational efficiency (only n_classes classifiers are needed),
one advantage of this approach is its interpretability.
Since each class is represented by one and one classifier only, it is possible to gain
knowledge about the class by inspecting its corresponding classifier.
This is the most commonly used strategy for multiclass classification and is a fair default choice.
The adjusted version is a custom extension which overwrites the inherited predict_proba() method with
a more flexible method allowing custom normalization for the predicted probabilities. Any norm
argument that can be passed directly to sklearn.preprocessing.normalize is allowed. Additionally,
    norm=None will skip the normalization step altogether. To mimic the inherited OneVsRestClassifier
    behavior, set norm='l2'. All other methods are inherited from OneVsRestClassifier.
Parameters
----------
estimator : estimator object
An estimator object implementing fit and one of decision_function or predict_proba.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful for debugging.
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used.
norm: str, optional, default: None
Normalization method to be passed straight into sklearn.preprocessing.normalize as the norm
input. A value of None (default) will skip the normalization step.
Attributes
----------
estimators_ : list of n_classes estimators
Estimators used for predictions.
classes_ : array, shape = [n_classes]
Class labels.
label_binarizer_ : LabelBinarizer object
Object used to transform multiclass labels to binary labels and vice-versa.
multilabel_ : boolean
Whether a OneVsRestClassifier is a multilabel classifier.
"""
def __init__(self, estimator, norm=None, **kwargs):
OneVsRestClassifier.__init__(
self, estimator, **kwargs)
self.norm = norm
def predict_proba(self, X):
"""
Probability estimates.
The returned estimates for all classes are ordered by label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in self.classes_.
"""
        # Positive-class probability from each one-vs-rest estimator, stacked
        # into an (n_samples, n_classes) array.
        probs = [est.predict_proba(X)[:, 1] for est in self.estimators_]
        out = np.array(probs).T
if self.norm:
return normalize(out, norm=self.norm)
else:
            return out
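
# Illustrative usage sketch (not part of the original module): one-vs-rest
# logistic regression whose per-class probabilities are "l1"-normalized so
# that each row sums to one. Data and settings are arbitrary.
def _example_one_vs_rest_adj():
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression

    X, y = load_iris(return_X_y=True)
    clf = OneVsRestAdjClassifier(LogisticRegression(max_iter=1000), norm="l1")
    clf.fit(X, y)
    probs = clf.predict_proba(X[:5])
    # Each row of `probs` sums to 1 because of the "l1" normalization.
    return probs
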
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted
from skfair.common import as_list
def scalar_projection(vec, unto):
return vec.dot(unto) / unto.dot(unto)
def vector_projection(vec, unto):
return scalar_projection(vec, unto) * unto
class InformationFilter(BaseEstimator, TransformerMixin):
"""
    The `InformationFilter` uses a variant of the Gram-Schmidt process
to filter information out of the dataset. This can be useful if you
want to filter information out of a dataset because of fairness.
To explain how it works: given a training matrix :math:`X` that contains
columns :math:`x_1, ..., x_k`. If we assume columns :math:`x_1` and :math:`x_2`
to be the sensitive columns then the information-filter will
remove information by applying these transformations;
    .. math::

        \\begin{split}
        v_1 & = x_1 \\\\
        v_2 & = x_2 - \\frac{x_2 v_1}{v_1 v_1}\\\\
        v_3 & = x_3 - \\frac{x_3 v_1}{v_1 v_1} - \\frac{x_3 v_2}{v_2 v_2}\\\\
        ... \\\\
        v_k & = x_k - \\frac{x_k v_1}{v_1 v_1} - \\frac{x_k v_2}{v_2 v_2}
        \\end{split}
Concatenating our vectors (but removing the sensitive ones) gives us
a new training matrix :math:`X_{fair} = [v_3, ..., v_k]`.
    :param columns: the columns to filter out; this can be a sequence of either int
(in the case of numpy) or string (in the case of pandas).
:param alpha: parameter to control how much to filter, for alpha=1 we filter out
all information while for alpha=0 we don't apply any.
"""
def __init__(self, columns, alpha=1):
self.columns = columns
self.alpha = alpha
def _check_coltype(self, X):
for col in as_list(self.columns):
if isinstance(col, str):
if isinstance(X, np.ndarray):
raise ValueError(
f"column {col} is a string but datatype receive is numpy."
)
if isinstance(X, pd.DataFrame):
if col not in X.columns:
raise ValueError(f"column {col} is not in {X.columns}")
if isinstance(col, int):
if col not in range(np.atleast_2d(np.array(X)).shape[1]):
raise ValueError(
f"column {col} is out of bounds for input shape {X.shape}"
)
def _col_idx(self, X, name):
if isinstance(name, str):
if isinstance(X, np.ndarray):
raise ValueError(
"You cannot have a column of type string on a numpy input matrix."
)
return {name: i for i, name in enumerate(X.columns)}[name]
return name
def _make_v_vectors(self, X, col_ids):
vs = np.zeros((X.shape[0], len(col_ids)))
for i, c in enumerate(col_ids):
vs[:, i] = X[:, col_ids[i]]
for j in range(0, i):
vs[:, i] = vs[:, i] - vector_projection(vs[:, i], vs[:, j])
return vs
def fit(self, X, y=None):
"""Learn the projection required to make the dataset orthogonal to sensitive columns."""
self._check_coltype(X)
self.col_ids_ = [
v if isinstance(v, int) else self._col_idx(X, v)
for v in as_list(self.columns)
]
X = check_array(X, estimator=self)
X_fair = X.copy()
v_vectors = self._make_v_vectors(X, self.col_ids_)
        # Gram-Schmidt process but only on sensitive attributes
for i, col in enumerate(X_fair.T):
for v in v_vectors.T:
X_fair[:, i] = X_fair[:, i] - vector_projection(X_fair[:, i], v)
# we want to learn matrix P: X P = X_fair
# this means we first need to create X_fair in order to learn P
self.projection_, resid, rank, s = np.linalg.lstsq(X, X_fair, rcond=None)
return self
def transform(self, X):
"""Transforms X by applying the information filter."""
check_is_fitted(self, ["projection_", "col_ids_"])
self._check_coltype(X)
X = check_array(X, estimator=self)
# apply the projection and remove the column we won't need
X_fair = X @ self.projection_
X_removed = np.delete(X_fair, self.col_ids_, axis=1)
X_orig = np.delete(X, self.col_ids_, axis=1)
return self.alpha * np.atleast_2d(X_removed) + (1 - self.alpha) * np.atleast_2d(
X_orig
        )
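
# Illustrative usage sketch (not part of the original module): removes the
# linear information carried by two sensitive columns from the remaining
# columns of a toy pandas DataFrame. Column names and data are made up.
def _example_information_filter():
    import numpy as np
    import pandas as pd

    rng = np.random.RandomState(0)
    df = pd.DataFrame(rng.normal(size=(100, 4)), columns=["a", "b", "x1", "x2"])
    # Make "a" depend on the sensitive columns so there is something to filter.
    df["a"] = df["x1"] * 0.8 + df["x2"] * 0.5 + rng.normal(scale=0.1, size=100)
    filt = InformationFilter(columns=["x1", "x2"], alpha=1)
    X_fair = filt.fit_transform(df)
    # The sensitive columns are dropped and the two remaining columns are
    # (approximately) orthogonal to them; the result has shape (100, 2).
    return X_fair
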
.. image:: https://raw.githubusercontent.com/GAA-UAM/scikit-fda/develop/docs/logos/title_logo/title_logo.png
:alt: scikit-fda: Functional Data Analysis in Python
scikit-fda: Functional Data Analysis in Python
===================================================
|python|_ |build-status| |docs| |Codecov|_ |PyPIBadge|_ |license|_ |doi|
Functional Data Analysis, or FDA, is the field of Statistics that analyses
data that depend on a continuous parameter.
This package offers classes, methods and functions to give support to FDA
in Python. It includes a wide range of tools to work with functional data and its
representation, exploratory analysis, and preprocessing, as well as other tasks
such as inference, classification, regression or clustering of functional data.
See documentation for further information on the features included in the
package.
Documentation
=============
The documentation is available at
`fda.readthedocs.io/en/stable/ <https://fda.readthedocs.io/en/stable/>`_, which
includes detailed information of the different modules, classes and methods of
the package, along with several examples showing different functionalities.
The documentation of the latest version, corresponding with the develop
version of the package, can be found at
`fda.readthedocs.io/en/latest/ <https://fda.readthedocs.io/en/latest/>`_.
Installation
============
Currently, *scikit-fda* is available in Python 3.6 and 3.7, regardless of the
platform.
The stable version can be installed via PyPI_:
.. code::
pip install scikit-fda
Installation from source
------------------------
It is possible to install the latest version of the package, available in the
develop branch, by cloning this repository and doing a manual installation.
.. code:: bash
git clone https://github.com/GAA-UAM/scikit-fda.git
pip install ./scikit-fda
Make sure that your default Python version is currently supported, or change
the python and pip commands by specifying a version, such as ``python3.6``:
.. code:: bash
git clone https://github.com/GAA-UAM/scikit-fda.git
python3.6 -m pip install ./scikit-fda
Requirements
------------
*scikit-fda* depends on the following packages:
* `cython <https://github.com/cython/cython>`_ - Python to C compiler
* `fdasrsf <https://github.com/jdtuck/fdasrsf_python>`_ - SRSF framework
* `findiff <https://github.com/maroba/findiff>`_ - Finite differences
* `matplotlib <https://github.com/matplotlib/matplotlib>`_ - Plotting with Python
* `multimethod <https://github.com/coady/multimethod>`_ - Multiple dispatch
* `numpy <https://github.com/numpy/numpy>`_ - The fundamental package for scientific computing with Python
* `pandas <https://github.com/pandas-dev/pandas>`_ - Powerful Python data analysis toolkit
* `rdata <https://github.com/vnmabus/rdata>`_ - Reader of R datasets in .rda format in Python
* `scikit-datasets <https://github.com/daviddiazvico/scikit-datasets>`_ - Scikit-learn compatible datasets
* `scikit-learn <https://github.com/scikit-learn/scikit-learn>`_ - Machine learning in Python
* `scipy <https://github.com/scipy/scipy>`_ - Scientific computation in Python
* `setuptools <https://github.com/pypa/setuptools>`_ - Python Packaging
The dependencies are automatically installed.
Contributions
=============
All contributions are welcome. You can help this project grow in multiple ways,
from creating an issue, reporting an improvement or a bug, to doing a
repository fork and creating a pull request to the development branch.
The people involved at some point in the development of the package can be
found in the `contributors
file <https://github.com/GAA-UAM/scikit-fda/blob/develop/THANKS.txt>`_.
.. Citation
========
If you find this project useful, please cite:
.. todo:: Include citation to scikit-fda paper.
License
=======
The package is licensed under the BSD 3-Clause License. A copy of the
license_ can be found along with the code.
.. _examples: https://fda.readthedocs.io/en/latest/auto_examples/index.html
.. _PyPI: https://pypi.org/project/scikit-fda/
.. |python| image:: https://img.shields.io/pypi/pyversions/scikit-fda.svg
.. _python: https://badge.fury.io/py/scikit-fda
.. |build-status| image:: https://travis-ci.org/GAA-UAM/scikit-fda.svg?branch=develop
:alt: build status
:scale: 100%
:target: https://travis-ci.com/GAA-UAM/scikit-fda
.. |docs| image:: https://readthedocs.org/projects/fda/badge/?version=latest
:alt: Documentation Status
:scale: 100%
:target: http://fda.readthedocs.io/en/latest/?badge=latest
.. |Codecov| image:: https://codecov.io/gh/GAA-UAM/scikit-fda/branch/develop/graph/badge.svg
.. _Codecov: https://codecov.io/github/GAA-UAM/scikit-fda?branch=develop
.. |PyPIBadge| image:: https://badge.fury.io/py/scikit-fda.svg
.. _PyPIBadge: https://badge.fury.io/py/scikit-fda
.. |license| image:: https://img.shields.io/badge/License-BSD%203--Clause-blue.svg
.. _license: https://github.com/GAA-UAM/scikit-fda/blob/master/LICENSE.txt
.. |doi| image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3468127.svg
   :target: https://doi.org/10.5281/zenodo.3468127
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Generic, TypeVar, overload
import sklearn.base
if TYPE_CHECKING:
from ..typing._numpy import NDArrayFloat, NDArrayInt
SelfType = TypeVar("SelfType")
TransformerNoTarget = TypeVar(
"TransformerNoTarget",
bound="TransformerMixin[Any, Any, None]",
)
Input = TypeVar("Input", contravariant=True)
Output = TypeVar("Output", covariant=True)
Target = TypeVar("Target", contravariant=True)
TargetPrediction = TypeVar("TargetPrediction")
class BaseEstimator( # noqa: D101
ABC,
sklearn.base.BaseEstimator, # type: ignore[misc]
):
pass # noqa: WPS604
class TransformerMixin( # noqa: D101
ABC,
Generic[Input, Output, Target],
sklearn.base.TransformerMixin, # type: ignore[misc]
):
@overload
def fit(
self: TransformerNoTarget,
X: Input,
) -> TransformerNoTarget:
pass
@overload
def fit(
self: SelfType,
X: Input,
y: Target,
) -> SelfType:
pass
def fit( # noqa: D102
self: SelfType,
X: Input,
y: Target | None = None,
) -> SelfType:
return self
@overload
def fit_transform(
self: TransformerNoTarget,
X: Input,
) -> Output:
pass
@overload
def fit_transform(
self,
X: Input,
y: Target,
) -> Output:
pass
def fit_transform( # noqa: D102
self,
X: Input,
y: Target | None = None,
**fit_params: Any,
) -> Output:
if y is None:
return self.fit( # type: ignore[no-any-return]
X,
**fit_params,
).transform(X)
return self.fit( # type: ignore[no-any-return]
X,
y,
**fit_params,
).transform(X)
class InductiveTransformerMixin( # noqa: D101
TransformerMixin[Input, Output, Target],
):
@abstractmethod
def transform( # noqa: D102
self: SelfType,
X: Input,
) -> Output:
pass
class OutlierMixin( # noqa: D101
ABC,
Generic[Input],
sklearn.base.OutlierMixin, # type: ignore[misc]
):
def fit_predict( # noqa: D102
self,
X: Input,
y: object = None,
) -> NDArrayInt:
return self.fit(X, y).predict(X) # type: ignore[no-any-return]
class ClassifierMixin( # noqa: D101
ABC,
Generic[Input, TargetPrediction],
sklearn.base.ClassifierMixin, # type: ignore[misc]
):
def fit( # noqa: D102
self: SelfType,
X: Input,
y: TargetPrediction,
) -> SelfType:
return self
@abstractmethod
def predict( # noqa: D102
self: SelfType,
X: Input,
) -> TargetPrediction:
pass
def score( # noqa: D102
self,
X: Input,
        y: TargetPrediction,
sample_weight: NDArrayFloat | None = None,
) -> float:
return super().score( # type: ignore[no-any-return]
X,
y,
sample_weight=sample_weight,
)
class ClusterMixin( # noqa: D101
ABC,
Generic[Input],
sklearn.base.ClusterMixin, # type: ignore[misc]
):
def fit_predict( # noqa: D102
self,
X: Input,
y: object = None,
) -> NDArrayInt:
return super().fit_predict(X, y) # type: ignore[no-any-return]
class RegressorMixin( # noqa: D101
ABC,
Generic[Input, TargetPrediction],
sklearn.base.RegressorMixin, # type: ignore[misc]
):
def fit( # noqa: D102
self: SelfType,
X: Input,
y: TargetPrediction,
) -> SelfType:
return self
@abstractmethod
def predict( # noqa: D102
self: SelfType,
X: Input,
) -> TargetPrediction:
pass
def score( # noqa: D102
self,
X: Input,
y: TargetPrediction,
sample_weight: NDArrayFloat | None = None,
) -> float:
from ..misc.scoring import r2_score
y_pred = self.predict(X)
return r2_score(
y,
y_pred,
sample_weight=sample_weight,
) | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/skfda/_utils/_sklearn_adapter.py | 0.875814 | 0.271692 | _sklearn_adapter.py | pypi |
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
import numpy as np
from scipy.interpolate import PchipInterpolator
from ..typing._base import DomainRangeLike
from ..typing._numpy import ArrayLike, NDArrayFloat
if TYPE_CHECKING:
from ..representation import FDataGrid
def invert_warping(
warping: FDataGrid,
*,
output_points: Optional[ArrayLike] = None,
) -> FDataGrid:
r"""
Compute the inverse of a diffeomorphism.
Let :math:`\gamma : [a,b] \rightarrow [a,b]` be a function strictly
increasing, calculates the corresponding inverse
:math:`\gamma^{-1} : [a,b] \rightarrow [a,b]` such that
:math:`\gamma^{-1} \circ \gamma = \gamma \circ \gamma^{-1} = \gamma_{id}`.
Uses a PCHIP interpolator to compute approximately the inverse.
Args:
warping: Functions to be inverted.
output_points: Set of points where the
functions are interpolated to obtain the inverse, by default uses
the sample points of the fdatagrid.
Returns:
Inverse of the original functions.
Raises:
ValueError: If the functions are not strictly increasing or are
multidimensional.
Examples:
>>> import numpy as np
>>> from skfda import FDataGrid
We will construct the warping :math:`\gamma : [0,1] \rightarrow [0,1]`
wich maps t to t^3.
>>> t = np.linspace(0, 1)
>>> gamma = FDataGrid(t**3, t)
>>> gamma
FDataGrid(...)
We will compute the inverse.
>>> inverse = invert_warping(gamma)
>>> inverse
FDataGrid(...)
The result of the composition should be approximately the identity
    function.
>>> identity = gamma.compose(inverse)
>>> identity([0, 0.25, 0.5, 0.75, 1]).round(3)
array([[[ 0. ],
[ 0.25],
[ 0.5 ],
[ 0.75],
[ 1. ]]])
"""
from ..misc.validation import check_fdata_dimensions
check_fdata_dimensions(
warping,
dim_domain=1,
dim_codomain=1,
)
output_points = (
warping.grid_points[0]
if output_points is None
else np.asarray(output_points)
)
y = warping(output_points)[..., 0]
data_matrix = np.empty((warping.n_samples, len(output_points)))
for i in range(warping.n_samples):
data_matrix[i] = PchipInterpolator(y[i], output_points)(output_points)
return warping.copy(data_matrix=data_matrix, grid_points=output_points)
def normalize_scale(
t: NDArrayFloat,
a: float = 0,
b: float = 1,
) -> NDArrayFloat:
"""
    Performs an affine transformation to normalize an interval.
Args:
t: Array of dim 1 or 2 with at least 2 values.
a: Starting point of the new interval. Defaults 0.
b: Stopping point of the new interval. Defaults 1.
Returns:
Array with the transformed interval.
"""
t = t.T # Broadcast to normalize multiple arrays
t1 = np.array(t, copy=True)
t1 -= t[0] # Translation to [0, t[-1] - t[0]]
t1 *= (b - a) / (t[-1] - t[0]) # Scale to [0, b-a]
t1 += a # Translation to [a, b]
t1[0] = a # Fix possible round errors
t1[-1] = b
return t1.T
def normalize_warping(
warping: FDataGrid,
domain_range: Optional[DomainRangeLike] = None,
) -> FDataGrid:
r"""
    Rescale a set of warpings to normalize their :term:`domain`.

    Given a set of warpings :math:`\gamma_i:[a,b]\rightarrow [a,b]`, an
    affine transformation is used to change the domain of the transformation to
other domain, :math:`\tilde \gamma_i:[\tilde a,\tilde b] \rightarrow
[\tilde a, \tilde b]`.
Args:
warping: Set of warpings to rescale.
domain_range: New domain range of the warping. By
default it is used the same domain range.
Returns:
Normalized warpings.
"""
from ..misc.validation import validate_domain_range
domain_range_tuple = (
warping.domain_range[0]
if domain_range is None
else validate_domain_range(domain_range)[0]
)
data_matrix = normalize_scale(
warping.data_matrix[..., 0],
*domain_range_tuple,
)
grid_points = normalize_scale(warping.grid_points[0], *domain_range_tuple)
return warping.copy(
data_matrix=data_matrix,
grid_points=grid_points,
domain_range=domain_range,
    )
from __future__ import annotations
import functools
import numbers
from functools import singledispatch
from typing import (
TYPE_CHECKING,
Any,
Callable,
Iterable,
List,
Optional,
Sequence,
Sized,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
import numpy as np
import scipy.integrate
from pandas.api.indexers import check_array_indexer
from sklearn.preprocessing import LabelEncoder
from sklearn.utils.multiclass import check_classification_targets
from typing_extensions import Literal, ParamSpec, Protocol
from ..typing._base import GridPoints, GridPointsLike
from ..typing._numpy import NDArrayAny, NDArrayFloat, NDArrayInt, NDArrayStr
from ._sklearn_adapter import BaseEstimator
ArrayDTypeT = TypeVar("ArrayDTypeT", bound="np.generic")
if TYPE_CHECKING:
from ..representation import FData, FDataGrid
from ..representation.basis import Basis
from ..representation.extrapolation import ExtrapolationLike
T = TypeVar("T", bound=FData)
Input = TypeVar("Input", bound=Union[FData, NDArrayFloat])
Output = TypeVar("Output", bound=Union[FData, NDArrayFloat])
Target = TypeVar("Target", bound=NDArrayInt)
_MapAcceptableSelf = TypeVar(
"_MapAcceptableSelf",
bound="_MapAcceptable",
)
class _MapAcceptable(Protocol, Sized):
def __getitem__(
self: _MapAcceptableSelf,
__key: Union[slice, NDArrayInt], # noqa: WPS112
) -> _MapAcceptableSelf:
pass
@property
def nbytes(self) -> int:
pass
_MapAcceptableT = TypeVar(
"_MapAcceptableT",
bound=_MapAcceptable,
contravariant=True,
)
MapFunctionT = TypeVar("MapFunctionT", covariant=True)
P = ParamSpec("P")
class _MapFunction(Protocol[_MapAcceptableT, P, MapFunctionT]):
"""Protocol for functions that can be mapped over several arrays."""
def __call__(
self,
*args: _MapAcceptableT,
**kwargs: P.kwargs,
) -> MapFunctionT:
pass
class _PairwiseFunction(Protocol[_MapAcceptableT, P, MapFunctionT]):
"""Protocol for pairwise array functions."""
def __call__(
self,
__arg1: _MapAcceptableT, # noqa: WPS112
__arg2: _MapAcceptableT, # noqa: WPS112
**kwargs: P.kwargs, # type: ignore[name-defined]
) -> MapFunctionT:
pass
def _to_grid(
X: FData,
y: FData,
eval_points: Optional[NDArrayFloat] = None,
) -> Tuple[FDataGrid, FDataGrid]:
"""Transform a pair of FDatas in grids to perform calculations."""
from .. import FDataGrid
x_is_grid = isinstance(X, FDataGrid)
y_is_grid = isinstance(y, FDataGrid)
if eval_points is not None:
X = X.to_grid(eval_points)
y = y.to_grid(eval_points)
elif x_is_grid and not y_is_grid:
y = y.to_grid(X.grid_points[0])
elif not x_is_grid and y_is_grid:
X = X.to_grid(y.grid_points[0])
elif not x_is_grid and not y_is_grid:
X = X.to_grid()
y = y.to_grid()
return X, y
def _to_grid_points(grid_points_like: GridPointsLike) -> GridPoints:
"""Convert to grid points.
    If the original list is one-dimensional (e.g. [1, 2, 3]), it is
    converted to a tuple with one array (in this case
    (array([1, 2, 3]),)).
    If the original list is two-dimensional (e.g. [[1, 2, 3], [4, 5]]), it
    is converted to a tuple of one-dimensional arrays (in this case
    (array([1, 2, 3]), array([4, 5]))).
    In any other case the behaviour is unspecified.
"""
unidimensional = False
if not isinstance(grid_points_like, Iterable):
grid_points_like = [grid_points_like]
if not isinstance(grid_points_like[0], Iterable):
unidimensional = True
if unidimensional:
return (_int_to_real(np.asarray(grid_points_like)),)
return tuple(_int_to_real(np.asarray(i)) for i in grid_points_like)
@overload
def _cartesian_product(
axes: Sequence[np.typing.NDArray[ArrayDTypeT]],
*,
flatten: bool = True,
return_shape: Literal[False] = False,
) -> np.typing.NDArray[ArrayDTypeT]:
pass
@overload
def _cartesian_product(
axes: Sequence[np.typing.NDArray[ArrayDTypeT]],
*,
flatten: bool = True,
return_shape: Literal[True],
) -> Tuple[np.typing.NDArray[ArrayDTypeT], Tuple[int, ...]]:
pass
def _cartesian_product( # noqa: WPS234
axes: Sequence[np.typing.NDArray[ArrayDTypeT]],
*,
flatten: bool = True,
return_shape: bool = False,
) -> (
np.typing.NDArray[ArrayDTypeT]
| Tuple[np.typing.NDArray[ArrayDTypeT], Tuple[int, ...]]
):
"""
Compute the cartesian product of the axes.
    Computes the cartesian product of the axes and returns a numpy array
    with all the possible combinations, for an arbitrary number of
    dimensions.
Args:
axes: List with axes.
        flatten: Whether to return the flattened array or keep one
            dimension per axis.
return_shape: If ``True`` return the shape of the array before
flattening.
Returns:
Numpy 2-D array with all the possible combinations.
        The entry (i,j) represents the j-th coordinate of the i-th point.
If ``return_shape`` is ``True`` returns also the shape of the array
before flattening.
Examples:
>>> from skfda._utils import _cartesian_product
>>> axes = [[0,1],[2,3]]
>>> _cartesian_product(axes)
array([[0, 2],
[0, 3],
[1, 2],
[1, 3]])
>>> axes = [[0,1],[2,3],[4]]
>>> _cartesian_product(axes)
array([[0, 2, 4],
[0, 3, 4],
[1, 2, 4],
[1, 3, 4]])
>>> axes = [[0,1]]
>>> _cartesian_product(axes)
array([[0],
[1]])
"""
cartesian = np.stack(np.meshgrid(*axes, indexing='ij'), -1)
shape = cartesian.shape
if flatten:
cartesian = cartesian.reshape(-1, len(axes))
if return_shape:
return cartesian, shape
return cartesian # type: ignore[no-any-return]
def _same_domain(fd: Union[Basis, FData], fd2: Union[Basis, FData]) -> bool:
"""Check if the domain range of two objects is the same."""
return np.array_equal(fd.domain_range, fd2.domain_range)
def _one_grid_to_points(
axes: GridPointsLike,
*,
dim_domain: int,
) -> Tuple[NDArrayFloat, Tuple[int, ...]]:
"""
Convert a list of ndarrays, one per domain dimension, in the points.
Returns also the shape containing the information of how each point
is formed.
"""
axes = _to_grid_points(axes)
if len(axes) != dim_domain:
raise ValueError(
f"Length of axes should be {dim_domain}",
)
cartesian, shape = _cartesian_product(axes, return_shape=True)
# Drop domain size dimension, as it is not needed to reshape the output
shape = shape[:-1]
return cartesian, shape
class EvaluateMethod(Protocol):
"""Evaluation method."""
def __call__(
self,
__eval_points: NDArrayFloat, # noqa: WPS112
extrapolation: Optional[ExtrapolationLike],
aligned: bool,
) -> NDArrayFloat:
"""Evaluate a function."""
pass
@overload
def _evaluate_grid(
axes: GridPointsLike,
*,
evaluate_method: EvaluateMethod,
n_samples: int,
dim_domain: int,
dim_codomain: int,
extrapolation: Optional[ExtrapolationLike] = None,
aligned: Literal[True] = True,
) -> NDArrayFloat:
pass
@overload
def _evaluate_grid(
axes: Iterable[GridPointsLike],
*,
evaluate_method: EvaluateMethod,
n_samples: int,
dim_domain: int,
dim_codomain: int,
extrapolation: Optional[ExtrapolationLike] = None,
aligned: Literal[False],
) -> NDArrayFloat:
pass
@overload
def _evaluate_grid(
axes: Union[GridPointsLike, Iterable[GridPointsLike]],
*,
evaluate_method: EvaluateMethod,
n_samples: int,
dim_domain: int,
dim_codomain: int,
extrapolation: Optional[ExtrapolationLike] = None,
aligned: bool,
) -> NDArrayFloat:
pass
def _evaluate_grid( # noqa: WPS234
axes: Union[GridPointsLike, Iterable[GridPointsLike]],
*,
evaluate_method: EvaluateMethod,
n_samples: int,
dim_domain: int,
dim_codomain: int,
extrapolation: Optional[ExtrapolationLike] = None,
aligned: bool = True,
) -> NDArrayFloat:
"""
Evaluate the functional object in the cartesian grid.
This method is called internally by :meth:`evaluate` when the argument
`grid` is True.
Evaluates the functional object in the grid generated by the cartesian
    product of the axes. The length of the list of axes should be equal
    to the domain dimension of the object.
    If the list of axes has lengths :math:`n_1, n_2, ..., n_m`, where
    :math:`m` is equal to the dimension of the domain, the result of the
    evaluation in the grid will be an array with :math:`m+2` dimensions and
    shape :math:`n_{samples} \times n_1 \times n_2 \times ... \times n_m
    \times dim_{codomain}`.
If `aligned` is false each sample is evaluated in a
different grid, and the list of axes should contain a list of axes for
each sample.
    If the domain dimension is 1, the result of the evaluation will be the
    same as :meth:`evaluate` without the grid option, but with worse
    performance.
Args:
        axes: List of axes to generate the grid where the object will be
            evaluated.
evaluate_method: Function used to evaluate the functional object.
n_samples: Number of samples.
dim_domain: Domain dimension.
dim_codomain: Codomain dimension.
        extrapolation: Controls the extrapolation mode for elements
            outside the domain range. By default the mode defined during
            the instantiation of the object is used.
        aligned: If False evaluates each sample in a different grid.
Returns:
        Numpy array with the result of the evaluation, with one axis for
        the samples, one per domain dimension and one for the codomain.
Raises:
        ValueError: If the number of axes is different from the domain
            dimension.
"""
    # Compute the evaluation points and the resulting shapes
if aligned:
axes = cast(GridPointsLike, axes)
eval_points, shape = _one_grid_to_points(axes, dim_domain=dim_domain)
else:
axes_per_sample = cast(Iterable[GridPointsLike], axes)
axes_per_sample = list(axes_per_sample)
eval_points_tuple, shape_tuple = zip(
*[
_one_grid_to_points(a, dim_domain=dim_domain)
for a in axes_per_sample
],
)
if len(eval_points_tuple) != n_samples:
raise ValueError(
"Should be provided a list of axis per sample",
)
eval_points = np.asarray(eval_points_tuple)
# Evaluate the points
evaluated = evaluate_method(
eval_points,
extrapolation=extrapolation,
aligned=aligned,
)
# Reshape the result
if aligned:
res = evaluated.reshape(
[n_samples] + list(shape) + [dim_codomain],
)
else:
res = np.asarray([
r.reshape(list(s) + [dim_codomain])
for r, s in zip(evaluated, shape_tuple)
])
return res
def nquad_vec(
func: Callable[[NDArrayFloat], NDArrayFloat],
ranges: Sequence[Tuple[float, float]],
) -> NDArrayFloat:
"""Perform multiple integration of vector valued functions."""
initial_depth = len(ranges) - 1
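    # Each recursion level fixes the value of one more argument of ``func``
    # and integrates over the next range with ``quad_vec``; at depth 0 the
    # integrand is ``func`` itself.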
def integrate(*args: Any, depth: int) -> NDArrayFloat: # noqa: WPS430
if depth == 0:
f = functools.partial(func, *args)
else:
f = functools.partial(integrate, *args, depth=depth - 1)
return scipy.integrate.quad_vec( # type: ignore[no-any-return]
f,
*ranges[initial_depth - depth],
)[0]
return integrate(depth=initial_depth)
def _map_in_batches(
function: _MapFunction[_MapAcceptableT, P, np.typing.NDArray[ArrayDTypeT]],
arguments: Tuple[_MapAcceptableT, ...],
indexes: Tuple[NDArrayInt, ...],
memory_per_batch: Optional[int] = None,
*args: P.args, # Should be empty
**kwargs: P.kwargs,
) -> np.typing.NDArray[ArrayDTypeT]:
"""
Map a function over samples of FData or ndarray tuples efficiently.
    This function prevents a large set of indexes from using all the
    available memory and hanging the machine.
"""
if memory_per_batch is None:
# 256MB is not too big
memory_per_batch = 256 * 1024 * 1024 # noqa: WPS432
memory_per_element = sum(a.nbytes // len(a) for a in arguments)
n_elements_per_batch_allowed = memory_per_batch // memory_per_element
if n_elements_per_batch_allowed < 1:
raise ValueError("Too few memory allowed for the operation")
n_indexes = len(indexes[0])
assert all(n_indexes == len(i) for i in indexes)
batches: List[np.typing.NDArray[ArrayDTypeT]] = []
for pos in range(0, n_indexes, n_elements_per_batch_allowed):
batch_args = tuple(
a[i[pos:pos + n_elements_per_batch_allowed]]
for a, i in zip(arguments, indexes)
)
batches.append(function(*batch_args, **kwargs))
return np.concatenate(batches, axis=0)
def _pairwise_symmetric(
function: _PairwiseFunction[
_MapAcceptableT,
P,
np.typing.NDArray[ArrayDTypeT],
],
arg1: _MapAcceptableT,
arg2: Optional[_MapAcceptableT] = None,
memory_per_batch: Optional[int] = None,
*args: P.args, # Should be empty
**kwargs: P.kwargs,
) -> np.typing.NDArray[ArrayDTypeT]:
"""Compute pairwise a commutative function."""
def map_function(
*args: _MapAcceptableT,
**kwargs: P.kwargs,
) -> np.typing.NDArray[ArrayDTypeT]:
"""Just to keep Mypy happy."""
return function(args[0], args[1], **kwargs)
dim1 = len(arg1)
if arg2 is None or arg2 is arg1:
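        # Symmetric case: compute only the upper triangle of the matrix and
        # mirror it, halving the number of function evaluations.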
triu_indices = np.triu_indices(dim1)
triang_vec = _map_in_batches(
map_function,
(arg1, arg1),
triu_indices,
memory_per_batch,
**kwargs, # type: ignore[arg-type]
)
matrix = np.empty((dim1, dim1), dtype=triang_vec.dtype)
# Set upper matrix
matrix[triu_indices] = triang_vec
# Set lower matrix
matrix[(triu_indices[1], triu_indices[0])] = triang_vec
return matrix
dim2 = len(arg2)
indices = np.indices((dim1, dim2))
vec = _map_in_batches(
map_function,
(arg1, arg2),
(indices[0].ravel(), indices[1].ravel()),
memory_per_batch=memory_per_batch,
**kwargs, # type: ignore[arg-type]
)
return np.reshape(vec, (dim1, dim2))
def _int_to_real(array: Union[NDArrayInt, NDArrayFloat]) -> NDArrayFloat:
"""Convert integer arrays to floating point."""
return array + 0.0
def _check_array_key(array: NDArrayAny, key: Any) -> Any:
"""Check a getitem key."""
key = check_array_indexer(array, key)
if isinstance(key, tuple):
non_ellipsis = [i for i in key if i is not Ellipsis]
if len(non_ellipsis) > 1:
raise KeyError(key)
key = non_ellipsis[0]
if isinstance(key, numbers.Integral): # To accept also numpy ints
key = int(key)
if key < 0:
key = len(array) + key
if not 0 <= key < len(array):
raise IndexError("index out of bounds")
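        # Return a length-1 slice so that indexing with an integer keeps the
        # container type instead of returning a single element.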
return slice(key, key + 1)
return key
def _check_estimator(estimator: Type[BaseEstimator]) -> None:
from sklearn.utils.estimator_checks import (
check_get_params_invariance,
check_set_params,
)
name = estimator.__name__
instance = estimator()
check_get_params_invariance(name, instance)
check_set_params(name, instance)
def _classifier_get_classes(
y: NDArrayStr | NDArrayInt,
) -> Tuple[NDArrayStr | NDArrayInt, NDArrayInt]:
check_classification_targets(y)
le = LabelEncoder()
y_ind = le.fit_transform(y)
classes = le.classes_
if classes.size < 2:
raise ValueError(
            f'The number of classes has to be greater than '
            f'one; got {classes.size} class',
)
return classes, y_ind | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/skfda/_utils/_utils.py | 0.922067 | 0.426919 | _utils.py | pypi |
from __future__ import annotations
import abc
import math
from typing import TypeVar
import numpy as np
import scipy.stats
import sklearn
from scipy.special import comb
from typing_extensions import Literal
from ..._utils._sklearn_adapter import BaseEstimator, InductiveTransformerMixin
from ...typing._numpy import NDArrayFloat, NDArrayInt
T = TypeVar("T", contravariant=True)
SelfType = TypeVar("SelfType")
_Side = Literal["left", "right"]
Input = TypeVar("Input", contravariant=True)
class _DepthOrOutlyingness(
BaseEstimator,
InductiveTransformerMixin[Input, NDArrayFloat, object],
):
"""Abstract class representing a depth or outlyingness function."""
def fit(self: SelfType, X: Input, y: object = None) -> SelfType:
"""
Learn the distribution from the observations.
Args:
X: Functional dataset from which the distribution of the data is
inferred.
y: Unused. Kept only for convention.
Returns:
Fitted estimator.
"""
return self
@abc.abstractmethod
def transform(self, X: Input) -> NDArrayFloat:
"""
Compute the depth or outlyingness inside the learned distribution.
Args:
X: Points whose depth is going to be evaluated.
Returns:
Depth of each observation.
"""
pass
def fit_transform(self, X: Input, y: object = None) -> NDArrayFloat:
"""
Compute the depth or outlyingness of each observation.
This computation is done with respect to the whole dataset.
Args:
X: Dataset.
y: Unused. Kept only for convention.
Returns:
Depth of each observation.
"""
return self.fit(X).transform(X)
def __call__(
self,
X: Input,
*,
distribution: Input | None = None,
) -> NDArrayFloat:
"""
Allow the depth or outlyingness to be used as a function.
Args:
X: Points whose depth is going to be evaluated.
distribution: Functional dataset from which the distribution of
the data is inferred. If ``None`` it is the same as ``X``.
Returns:
Depth of each observation.
"""
copy: _DepthOrOutlyingness[Input] = sklearn.base.clone(self)
if distribution is None:
return copy.fit_transform(X)
return copy.fit(distribution).transform(X)
@property
def max(self) -> float:
"""
        Maximum (or supremum, if there is no maximum) of the values that
        can be predicted.
"""
return 1
@property
def min(self) -> float:
"""
        Minimum (or infimum, if there is no minimum) of the values that
        can be predicted.
"""
return 0
class Depth(_DepthOrOutlyingness[T]):
"""Abstract class representing a depth function."""
class Outlyingness(_DepthOrOutlyingness[T]):
"""Abstract class representing an outlyingness function."""
def _searchsorted_one_dim(
array: NDArrayFloat,
values: NDArrayFloat,
*,
side: _Side = 'left',
) -> NDArrayInt:
return np.searchsorted(array, values, side=side)
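# Vectorized version of the one-dimensional searchsorted: broadcasting over
# the leading axes allows applying it to stacks of sorted arrays (one per
# coordinate).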
_searchsorted_vectorized = np.vectorize(
_searchsorted_one_dim,
signature='(n),(m),()->(m)',
excluded='side',
)
def _searchsorted_ordered(
array: NDArrayFloat,
values: NDArrayFloat,
*,
side: _Side = 'left',
) -> NDArrayInt:
return _searchsorted_vectorized( # type: ignore[no-any-return]
array,
values,
side=side,
)
def _cumulative_distribution(column: NDArrayFloat) -> NDArrayFloat:
"""
Calculate the cumulative distribution function at each point.
Args:
column: Array containing the values over which the
distribution function is calculated.
Returns:
Array containing the evaluation at each point of the
distribution function.
Examples:
>>> _cumulative_distribution(np.array([1, 4, 5, 1, 2, 2, 4, 1, 1, 3]))
array([ 0.4, 0.9, 1. , 0.4, 0.6, 0.6, 0.9, 0.4, 0.4, 0.7])
"""
return _searchsorted_ordered(
np.sort(column),
column,
side='right',
) / len(column)
class _UnivariateFraimanMuniz(Depth[NDArrayFloat]):
r"""
    Univariate depth used to compute the Fraiman and Muniz depth.
    Each column is considered as the samples of a random variable.
The univariate depth of each of the samples of each column is calculated
as follows:
.. math::
D(x) = 1 - \left\lvert \frac{1}{2}- F(x)\right\rvert
Where :math:`F` stands for the marginal univariate distribution function of
each column.
"""
def fit(self: SelfType, X: NDArrayFloat, y: object = None) -> SelfType:
self._sorted_values = np.sort(X, axis=0)
return self
def transform(self, X: NDArrayFloat) -> NDArrayFloat:
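        # Empirical distribution function F(x) at each point, computed
        # coordinate-wise from the sorted training values; the depth is then
        # 1 - |1/2 - F(x)|.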
cum_dist = _searchsorted_ordered(
np.moveaxis(self._sorted_values, 0, -1),
np.moveaxis(X, 0, -1),
side='right',
).astype(X.dtype) / len(self._sorted_values)
assert cum_dist.shape[-2] == 1
ret = 0.5 - np.moveaxis(cum_dist, -1, 0)[..., 0]
ret = - np.abs(ret)
ret += 1
return ret
@property
def min(self) -> float:
return 1 / 2
class SimplicialDepth(Depth[NDArrayFloat]):
r"""
Simplicial depth.
The simplicial depth of a point :math:`x` in :math:`\mathbb{R}^p` given a
distribution :math:`F` is the probability that a random simplex with its
:math:`p + 1` points sampled from :math:`F` contains :math:`x`.
References:
Liu, R. Y. (1990). On a Notion of Data Depth Based on Random
Simplices. The Annals of Statistics, 18(1), 405–414.
"""
def fit( # noqa: D102
self,
X: NDArrayFloat,
y: object = None,
) -> SimplicialDepth:
self._dim = X.shape[-1]
if self._dim == 1:
self.sorted_values = np.sort(X, axis=0)
else:
raise NotImplementedError(
"SimplicialDepth is currently only "
"implemented for one-dimensional data.",
)
return self
def transform(self, X: NDArrayFloat) -> NDArrayFloat: # noqa: D102
assert self._dim == X.shape[-1]
if self._dim == 1:
positions_left = _searchsorted_ordered(
np.moveaxis(self.sorted_values, 0, -1),
np.moveaxis(X, 0, -1),
)
positions_left = np.moveaxis(positions_left, -1, 0)[..., 0]
positions_right = _searchsorted_ordered(
np.moveaxis(self.sorted_values, 0, -1),
np.moveaxis(X, 0, -1),
side='right',
)
positions_right = np.moveaxis(positions_right, -1, 0)[..., 0]
num_strictly_below = positions_left
num_strictly_above = len(self.sorted_values) - positions_right
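            # A pair of training points forms a segment (1-d simplex)
            # containing x unless both points lie strictly below or strictly
            # above it.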
total_pairs = comb(len(self.sorted_values), 2)
return ( # type: ignore[no-any-return]
total_pairs - comb(num_strictly_below, 2)
- comb(num_strictly_above, 2)
) / total_pairs
class OutlyingnessBasedDepth(Depth[T]):
r"""
Computes depth based on an outlyingness measure.
An outlyingness function :math:`O(x)` can be converted to a depth
function as
.. math::
D(x) = \frac{1}{1 + O(x)}
if :math:`O(x)` is unbounded or as
.. math::
D(x) = 1 - \frac{O(x)}{\sup O(x)}
    if :math:`O(x)` is bounded. If the infimum value of the
    outlyingness function is not zero, it is subtracted beforehand.
Args:
outlyingness (Outlyingness): Outlyingness object.
References:
Serfling, R. (2006). Depth functions in nonparametric
multivariate inference. DIMACS Series in Discrete Mathematics and
Theoretical Computer Science, 72, 1.
"""
def __init__(self, outlyingness: Outlyingness[T]):
self.outlyingness = outlyingness
def fit( # noqa: D102
self,
X: T,
y: object = None,
) -> OutlyingnessBasedDepth[T]:
self.outlyingness.fit(X)
return self
def transform(self, X: T) -> NDArrayFloat: # noqa: D102
outlyingness_values = self.outlyingness.transform(X)
min_val = self.outlyingness.min
max_val = self.outlyingness.max
if math.isinf(max_val):
return 1 / (1 + outlyingness_values - min_val)
return 1 - (outlyingness_values - min_val) / (max_val - min_val)
class StahelDonohoOutlyingness(Outlyingness[NDArrayFloat]):
r"""
Computes Stahel-Donoho outlyingness.
Stahel-Donoho outlyingness is defined as
.. math::
        \sup_{\|u\|=1} \frac{|u^T x - \text{Med}(u^T X)|}{\text{MAD}(u^T X)}
where :math:`\text{X}` is a sample with distribution :math:`F`,
:math:`\text{Med}` is the median and :math:`\text{MAD}` is the
median absolute deviation.
References:
Zuo, Y., Cui, H., & He, X. (2004). On the Stahel-Donoho
estimator and depth-weighted means of multivariate data. Annals of
Statistics, 32(1), 167–188. https://doi.org/10.1214/aos/1079120132
"""
def fit( # noqa: D102
self,
X: NDArrayFloat,
y: object = None,
) -> StahelDonohoOutlyingness:
dim = X.shape[-1]
if dim == 1:
self._location = np.median(X, axis=0)
self._scale = scipy.stats.median_abs_deviation(X, axis=0)
else:
raise NotImplementedError("Only implemented for one dimension")
return self
def transform(self, X: NDArrayFloat) -> NDArrayFloat: # noqa: D102
dim = X.shape[-1]
if dim == 1:
# Special case, can be computed exactly
diff: NDArrayFloat = np.abs(X - self._location) / self._scale
return diff[..., 0]
raise NotImplementedError("Only implemented for one dimension")
@property
def max(self) -> float:
return math.inf
class ProjectionDepth(OutlyingnessBasedDepth[NDArrayFloat]):
"""
Computes Projection depth.
It is defined as the depth induced by the
:class:`Stahel-Donoho outlyingness <StahelDonohoOutlyingness>`.
See also:
:class:`StahelDonohoOutlyingness`: Stahel-Donoho outlyingness.
References:
Zuo, Y., Cui, H., & He, X. (2004). On the Stahel-Donoho
estimator and depth-weighted means of multivariate data. Annals of
Statistics, 32(1), 167–188. https://doi.org/10.1214/aos/1079120132
"""
def __init__(self) -> None:
super().__init__(outlyingness=StahelDonohoOutlyingness()) | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/skfda/exploratory/depth/multivariate.py | 0.946312 | 0.6372 | multivariate.py | pypi |
from __future__ import annotations
import itertools
from typing import TypeVar
import numpy as np
import scipy.integrate
from ..._utils._sklearn_adapter import BaseEstimator
from ...misc.metrics import l2_distance
from ...misc.metrics._utils import _fit_metric
from ...representation import FData, FDataGrid
from ...typing._metric import Metric
from ...typing._numpy import NDArrayFloat
from .multivariate import Depth, SimplicialDepth, _UnivariateFraimanMuniz
T = TypeVar("T", bound=FData)
class IntegratedDepth(Depth[FDataGrid]):
r"""
Functional depth as the integral of a multivariate depth.
Args:
multivariate_depth (Depth): Multivariate depth to integrate.
By default it is the one used by Fraiman and Muniz, that is,
.. math::
D(x) = 1 - \left\lvert \frac{1}{2}- F(x)\right\rvert
Examples:
>>> import skfda
>>>
>>> data_matrix = [[1, 1, 2, 3, 2.5, 2],
... [0.5, 0.5, 1, 2, 1.5, 1],
... [-1, -1, -0.5, 1, 1, 0.5],
... [-0.5, -0.5, -0.5, -1, -1, -1]]
>>> grid_points = [0, 2, 4, 6, 8, 10]
>>> fd = skfda.FDataGrid(data_matrix, grid_points)
>>> depth = skfda.exploratory.depth.IntegratedDepth()
>>> depth(fd)
array([ 0.5 , 0.75 , 0.925, 0.875])
References:
Fraiman, R., & Muniz, G. (2001). Trimmed means for functional
data. Test, 10(2), 419–440. https://doi.org/10.1007/BF02595706
"""
def __init__(
self,
*,
multivariate_depth: Depth[NDArrayFloat] | None = None,
) -> None:
self.multivariate_depth = multivariate_depth
def fit( # noqa: D102
self,
X: FDataGrid,
y: object = None,
) -> IntegratedDepth:
self.multivariate_depth_: Depth[NDArrayFloat]
if self.multivariate_depth is None:
self.multivariate_depth_ = _UnivariateFraimanMuniz()
else:
self.multivariate_depth_ = self.multivariate_depth
self._domain_range = X.domain_range
self._grid_points = X.grid_points
self.multivariate_depth_.fit(X.data_matrix)
return self
def transform(self, X: FDataGrid) -> NDArrayFloat: # noqa: D102
pointwise_depth = self.multivariate_depth_.transform(X.data_matrix)
interval_len = (
self._domain_range[0][1]
- self._domain_range[0][0]
)
integrand = pointwise_depth
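        # Integrate the pointwise depth over each domain dimension and
        # divide by the length of the interval to average it.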
for d, s in zip(X.domain_range, X.grid_points):
integrand = scipy.integrate.simps(
integrand,
x=s,
axis=1,
)
interval_len = d[1] - d[0]
integrand /= interval_len
return integrand
@property
def max(self) -> float:
if self.multivariate_depth is None:
return 1
return self.multivariate_depth.max
@property
def min(self) -> float:
if self.multivariate_depth is None:
return 1 / 2
return self.multivariate_depth.min
class ModifiedBandDepth(IntegratedDepth):
"""
Implementation of Modified Band Depth for functional data.
The band depth of each sample is obtained by computing the fraction of time
its graph is contained in the bands determined by two sample curves.
In the case the fdatagrid :term:`domain` dimension is 2, instead of curves,
surfaces determine the bands. In larger dimensions, the hyperplanes
determine the bands.
Examples:
>>> import skfda
>>>
>>> data_matrix = [[1, 1, 2, 3, 2.5, 2],
... [0.5, 0.5, 1, 2, 1.5, 1],
... [-1, -1, -0.5, 1, 1, 0.5],
... [-0.5, -0.5, -0.5, -1, -1, -1]]
>>> grid_points = [0, 2, 4, 6, 8, 10]
>>> fd = skfda.FDataGrid(data_matrix, grid_points)
>>> depth = skfda.exploratory.depth.ModifiedBandDepth()
>>> values = depth(fd)
>>> values.round(2)
array([ 0.5 , 0.83, 0.73, 0.67])
References:
López-Pintado, S., & Romo, J. (2009). On the Concept of
Depth for Functional Data. Journal of the American Statistical
Association, 104(486), 718–734.
https://doi.org/10.1198/jasa.2009.0108
"""
def __init__(self) -> None:
super().__init__(multivariate_depth=SimplicialDepth())
class BandDepth(Depth[FDataGrid]):
"""
Implementation of Band Depth for functional data.
The band depth of each sample is obtained by computing the fraction of the
bands determined by two sample curves containing the whole graph of the
first one. In the case the fdatagrid :term:`domain` dimension is 2, instead
of curves, surfaces determine the bands. In larger dimensions, the
hyperplanes determine the bands.
Examples:
>>> import skfda
>>>
>>> data_matrix = [[1, 1, 2, 3, 2.5, 2],
... [0.5, 0.5, 1, 2, 1.5, 1],
... [-1, -1, -0.5, 1, 1, 0.5],
... [-0.5, -0.5, -0.5, -1, -1, -1]]
>>> grid_points = [0, 2, 4, 6, 8, 10]
>>> fd = skfda.FDataGrid(data_matrix, grid_points)
>>> depth = skfda.exploratory.depth.BandDepth()
>>> depth(fd)
array([ 0.5 , 0.83333333, 0.5 , 0.5 ])
References:
López-Pintado, S., & Romo, J. (2009). On the Concept of
Depth for Functional Data. Journal of the American Statistical
Association, 104(486), 718–734.
https://doi.org/10.1198/jasa.2009.0108
"""
def fit(self, X: FDataGrid, y: object = None) -> BandDepth: # noqa: D102
if X.dim_codomain != 1:
raise NotImplementedError(
"Band depth not implemented for vector valued functions",
)
self._distribution = X
return self
def transform(self, X: FDataGrid) -> NDArrayFloat: # noqa: D102
num_in = np.zeros(shape=len(X), dtype=X.data_matrix.dtype)
n_total = 0
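        # For each pair of curves in the fitted distribution, check which
        # samples lie entirely inside the band delimited by the pair.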
for f1, f2 in itertools.combinations(self._distribution, 2):
between_range_1 = (
(f1.data_matrix <= X.data_matrix)
& (X.data_matrix <= f2.data_matrix)
)
between_range_2 = (
(f2.data_matrix <= X.data_matrix)
& (X.data_matrix <= f1.data_matrix)
)
between_range = between_range_1 | between_range_2
num_in += np.all(
between_range,
axis=tuple(range(1, X.data_matrix.ndim)),
)
n_total += 1
return num_in / n_total
class DistanceBasedDepth(Depth[FDataGrid], BaseEstimator):
r"""
Functional depth based on a metric.
Parameters:
metric:
The metric to use as M in the following depth calculation
.. math::
D(x) = [1 + M(x, \mu)]^{-1}.
as explained in :footcite:`serfling+zuo_2000_depth_function`.
Examples:
>>> import skfda
>>> from skfda.exploratory.depth import DistanceBasedDepth
>>> from skfda.misc.metrics import MahalanobisDistance
>>> data_matrix = [[1, 1, 2, 3, 2.5, 2],
... [0.5, 0.5, 1, 2, 1.5, 1],
... [-1, -1, -0.5, 1, 1, 0.5],
... [-0.5, -0.5, -0.5, -1, -1, -1]]
>>> grid_points = [0, 2, 4, 6, 8, 10]
>>> fd = skfda.FDataGrid(data_matrix, grid_points)
>>> depth = DistanceBasedDepth(MahalanobisDistance(2))
>>> depth(fd)
array([ 0.41897777, 0.8058132 , 0.31097392, 0.31723619])
References:
.. footbibliography::
"""
def __init__(
self,
metric: Metric[T] = l2_distance,
) -> None:
self.metric = metric
def fit( # noqa: D102
self,
X: T,
y: object = None,
) -> DistanceBasedDepth:
"""Fit the model using X as training data.
Args:
X: FDataGrid with the training data or array matrix with shape
(n_samples, n_samples) if metric='precomputed'.
y: Ignored.
Returns:
self
"""
_fit_metric(self.metric, X)
self.mean_ = X.mean()
return self
def transform(self, X: T) -> NDArrayFloat: # noqa: D102
"""Compute the depth of given observations.
Args:
X: FDataGrid with the observations to use in the calculation.
Returns:
Array with the depths.
"""
return 1 / (1 + self.metric(X, self.mean_)) | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/skfda/exploratory/depth/_depth.py | 0.933081 | 0.494629 | _depth.py | pypi |
from __future__ import annotations
from builtins import isinstance
from typing import TypeVar, Union
import numpy as np
from scipy import integrate
from scipy.stats import rankdata
from ...misc.metrics._lp_distances import l2_distance
from ...representation import FData, FDataGrid
from ...typing._metric import Metric
from ...typing._numpy import NDArrayFloat
from ..depth import Depth, ModifiedBandDepth
F = TypeVar('F', bound=FData)
T = TypeVar('T', bound=Union[NDArrayFloat, FData])
def mean(
X: F,
weights: NDArrayFloat | None = None,
) -> F:
"""
Compute the mean of all the samples in a FData object.
Args:
X: Object containing all the samples whose mean is wanted.
        weights: Sample weight. By default, uniform weights are used.
Returns:
Mean of all the samples in the original object, as a
:term:`functional data object` with just one sample.
"""
if weights is None:
return X.mean()
weight = (1 / np.sum(weights)) * weights
return (X * weight).sum()
def var(X: FData) -> FDataGrid:
"""
Compute the variance of a set of samples in a FData object.
Args:
X: Object containing all the set of samples whose variance is desired.
Returns:
Variance of all the samples in the original object, as a
:term:`functional data object` with just one sample.
"""
return X.var() # type: ignore[no-any-return]
def gmean(X: FDataGrid) -> FDataGrid:
"""
Compute the geometric mean of all the samples in a FDataGrid object.
Args:
X: Object containing all the samples whose geometric mean is wanted.
Returns:
Geometric mean of all the samples in the original object, as a
:term:`functional data object` with just one sample.
"""
return X.gmean()
def cov(X: FData) -> FDataGrid:
"""
Compute the covariance.
Calculates the covariance matrix representing the covariance of the
functional samples at the observation points.
Args:
X: Object containing different samples of a functional variable.
Returns:
Covariance of all the samples in the original object, as a
:term:`functional data object` with just one sample.
"""
return X.cov() # type: ignore[no-any-return]
def modified_epigraph_index(X: FDataGrid) -> NDArrayFloat:
"""
Calculate the Modified Epigraph Index of a FDataGrid.
    The MEI represents the mean time a curve stays below another curve.
    In this case the MEI of each curve is computed in relation to all the
    other curves of the dataset.
"""
interval_len = (
X.domain_range[0][1]
- X.domain_range[0][0]
)
# Array containing at each point the number of curves
# are above it.
num_functions_above: NDArrayFloat = rankdata(
-X.data_matrix,
method='max',
axis=0,
) - 1
integrand = num_functions_above
for d, s in zip(X.domain_range, X.grid_points):
integrand = integrate.simps(
integrand,
x=s,
axis=1,
)
interval_len = d[1] - d[0]
integrand /= interval_len
integrand /= X.n_samples
return integrand.flatten()
def depth_based_median(
X: T,
depth_method: Depth[T] | None = None,
) -> T:
"""
Compute the median based on a depth measure.
The depth based median is the deepest curve given a certain
depth measure.
Args:
X: Object containing different samples of a
functional variable.
depth_method: Depth method used to order the data. Defaults to
:func:`modified band
depth <skfda.exploratory.depth.ModifiedBandDepth>`.
Returns:
Object containing the computed depth_based median.
See also:
:func:`geometric_median`
"""
depth_method_used: Depth[T]
if depth_method is None:
assert isinstance(X, FDataGrid)
depth_method_used = ModifiedBandDepth()
else:
depth_method_used = depth_method
depth = depth_method_used(X)
indices_descending_depth = (-depth).argsort(axis=0)
# The median is the deepest curve
return X[indices_descending_depth[0]]
def _weighted_average(X: T, weights: NDArrayFloat) -> T:
if isinstance(X, FData):
return (X * weights).sum()
return (X.T * weights).T.sum(axis=0) # type: ignore[no-any-return]
def geometric_median(
X: T,
*,
tol: float = 1.e-8,
metric: Metric[T] = l2_distance,
) -> T:
r"""
Compute the geometric median.
The sample geometric median is the point that minimizes the :math:`L_1`
norm of the vector of distances to all observations:
.. math::
\underset{y \in L(\mathcal{T})}{\arg \min}
\sum_{i=1}^N \left \| x_i-y \right \|
The geometric median in the functional case is also described in
:footcite:`gervini_2008_estimation`.
Instead of the proposed algorithm, however, the current implementation
uses the corrected Weiszfeld algorithm to compute the median.
Args:
X: Object containing different samples of a
functional variable.
tol: tolerance used to check convergence.
metric: metric used to compute the vector of distances. By
default is the :math:`L_2` distance.
Returns:
Object containing the computed geometric median.
Example:
>>> from skfda import FDataGrid
>>> data_matrix = [[0.5, 1, 2, .5], [1.5, 1, 4, .5]]
>>> X = FDataGrid(data_matrix)
>>> median = geometric_median(X)
>>> median.data_matrix[0, ..., 0]
array([ 1. , 1. , 3. , 0.5])
See also:
:func:`depth_based_median`
References:
.. footbibliography::
"""
weights = np.full(len(X), 1 / len(X))
median = _weighted_average(X, weights)
distances = metric(X, median)
while True:
zero_distances = (distances == 0)
n_zeros = np.sum(zero_distances)
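        # Corrected Weiszfeld step: weights proportional to 1/distance. If
        # the current median coincides with some observations (zero
        # distance), average only those observations to avoid dividing by
        # zero.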
weights_new = (
(1 / distances) / np.sum(1 / distances) if n_zeros == 0
else (1 / n_zeros) * zero_distances
)
median_new = _weighted_average(X, weights_new)
if l2_distance(median_new, median) < tol:
return median_new
distances = metric(X, median_new)
weights, median = (weights_new, median_new)
def trim_mean(
X: F,
proportiontocut: float,
*,
depth_method: Depth[F] | None = None,
) -> FDataGrid:
"""Compute the trimmed means based on a depth measure.
The trimmed means consists in computing the mean function without a
percentage of least deep curves. That is, we first remove the least deep
curves and then we compute the mean as usual.
Note that in scipy the leftmost and rightmost proportiontocut data are
removed. In this case, as we order the data by the depth, we only remove
those that have the least depth values.
Args:
X: Object containing different samples of a
functional variable.
proportiontocut: Indicates the percentage of functions to
remove. It is not easy to determine as it varies from dataset to
dataset.
depth_method: Method used to order the data. Defaults to
:func:`modified band depth
<skfda.exploratory.depth.ModifiedBandDepth>`.
Returns:
Object containing the computed trimmed mean.
"""
if depth_method is None:
depth_method = ModifiedBandDepth()
n_samples_to_keep = (len(X) - int(len(X) * proportiontocut))
# compute the depth of each curve and store the indexes in descending order
depth = depth_method(X)
indices_descending_depth = (-depth).argsort(axis=0)
trimmed_curves = X[indices_descending_depth[:n_samples_to_keep]]
return trimmed_curves.mean() | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/skfda/exploratory/stats/_stats.py | 0.980186 | 0.679205 | _stats.py | pypi |
from __future__ import annotations
import copy
import itertools
from functools import partial
from typing import Generator, List, Sequence, Tuple, Type, cast
import numpy as np
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from matplotlib.backend_bases import Event
from matplotlib.figure import Figure
from matplotlib.widgets import Slider, Widget
from ._baseplot import BasePlot
from ._utils import _get_axes_shape, _get_figure_and_axes, _set_figure_layout
def _set_val_noevents(widget: Widget, val: float) -> None:
e = widget.eventson
widget.eventson = False
widget.set_val(val)
widget.eventson = e
class MultipleDisplay:
"""
MultipleDisplay class used to combine and interact with plots.
This module is used to combine different BasePlot objects that
represent the same curves or surfaces, and represent them
together in the same figure. Besides this, it includes
the functionality necessary to interact with the graphics
    by clicking the points, hovering over them... Picking a point allows
    us to see the selected function standing out among the others in all
the axes. It is also possible to add widgets to interact with the
plots.
Args:
displays: Baseplot objects that will be plotted in the fig.
criteria: Sequence of criteria used to order the points in the
slider widget. The size should be equal to sliders, as each
criterion is for one slider.
sliders: Sequence of widgets that will be plotted.
label_sliders: Label of each of the sliders.
        chart: Figure over which the graphs are plotted or axis where
            the graphs are plotted. If None and ax is also None, the
            figure is initialized.
        fig: Figure over which the graphs are plotted in case ax is not
            specified. If None and ax is also None, the figure is
            initialized.
axes: Axis where the graphs are plotted. If None, see param fig.
Attributes:
length_data: Number of instances or curves of the different displays.
        clicked: Boolean indicating whether a point has been clicked.
selected_sample: Index of the function selected with the interactive
module or widgets.
"""
def __init__(
self,
displays: BasePlot | Sequence[BasePlot],
criteria: Sequence[float] | Sequence[Sequence[float]] = (),
sliders: Type[Widget] | Sequence[Type[Widget]] = (),
label_sliders: str | Sequence[str] | None = None,
chart: Figure | Axes | None = None,
fig: Figure | None = None,
axes: Sequence[Axes] | None = None,
):
if isinstance(displays, BasePlot):
displays = (displays,)
self.displays = [copy.copy(d) for d in displays]
self._n_graphs = sum(d.n_subplots for d in self.displays)
self.length_data = next(
d.n_samples
for d in self.displays
if d.n_samples is not None
)
self.sliders: List[Widget] = []
self.selected_sample: int | None = None
if len(criteria) != 0 and not isinstance(criteria[0], Sequence):
criteria = cast(Sequence[float], criteria)
criteria = (criteria,)
criteria = cast(Sequence[Sequence[float]], criteria)
self.criteria = criteria
if not isinstance(sliders, Sequence):
sliders = (sliders,)
if isinstance(label_sliders, str):
label_sliders = (label_sliders,)
if len(criteria) != len(sliders):
raise ValueError(
f"Size of criteria, and sliders should be equal "
f"(have {len(criteria)} and {len(sliders)}).",
)
self._init_axes(
chart,
fig=fig,
axes=axes,
extra=len(criteria),
)
self._create_sliders(
criteria=criteria,
sliders=sliders,
label_sliders=label_sliders,
)
def _init_axes(
self,
chart: Figure | Axes | None = None,
*,
fig: Figure | None = None,
axes: Sequence[Axes] | None = None,
extra: int = 0,
) -> None:
"""
Initialize the axes and figure.
Args:
            chart: Figure over which the graphs are plotted or axis where
                the graphs are plotted. If None and ax is also None, the
                figure is initialized.
            fig: Figure over which the graphs are plotted in case ax is
                not specified. If None and ax is also None, the figure is
                initialized.
axes: Axis where the graphs are plotted. If None, see param fig.
            extra: Number of extra axes needed to plot the sliders.
"""
widget_aspect = 1 / 8
fig, axes = _get_figure_and_axes(chart, fig, axes)
if len(axes) not in {0, self._n_graphs + extra}:
raise ValueError("Invalid number of axes.")
n_rows, n_cols = _get_axes_shape(self._n_graphs + extra)
dim = list(
itertools.chain.from_iterable(
[d.dim] * d.n_subplots
for d in self.displays
),
) + [2] * extra
number_axes = n_rows * n_cols
fig, axes = _set_figure_layout(
fig=fig,
axes=axes,
n_axes=self._n_graphs + extra,
dim=dim,
)
for i in range(self._n_graphs, number_axes):
if i >= self._n_graphs + extra:
axes[i].set_visible(False)
else:
axes[i].set_box_aspect(widget_aspect)
self.fig = fig
self.axes = axes
def _create_sliders(
self,
*,
criteria: Sequence[Sequence[float]],
sliders: Sequence[Type[Widget]],
label_sliders: Sequence[str] | None = None,
) -> None:
"""
Create the sliders with the criteria selected.
Args:
criteria: Different criterion for each of the sliders.
sliders: Widget types.
label_sliders: Sequence of the names of each slider.
"""
for c in criteria:
if len(c) != self.length_data:
raise ValueError(
"Slider criteria should be of the same size as data",
)
for k, criterion in enumerate(criteria):
label = label_sliders[k] if label_sliders else None
self.add_slider(
axes=self.axes[self._n_graphs + k],
criterion=criterion,
widget_class=sliders[k],
label=label,
)
def plot(self) -> Figure:
"""
Plot Multiple Display method.
Plot the different BasePlot objects and widgets selected.
Activates the interactivity functionality of clicking and
hovering points. When clicking a point, the rest will be
made partially transparent in all the corresponding graphs.
Returns:
fig: figure object in which the displays and
widgets will be plotted.
"""
if self._n_graphs > 1:
for d in self.displays[1:]:
if (
d.n_samples is not None
and d.n_samples != self.length_data
):
raise ValueError(
"Length of some data sets are not equal ",
)
for ax in self.axes[:self._n_graphs]:
ax.clear()
int_index = 0
for disp in self.displays:
axes_needed = disp.n_subplots
end_index = axes_needed + int_index
disp._set_figure_and_axes(axes=self.axes[int_index:end_index])
disp.plot()
int_index = end_index
self.fig.canvas.mpl_connect('pick_event', self.pick)
self.fig.suptitle("Multiple display")
self.fig.tight_layout()
return self.fig
def pick(self, event: Event) -> None:
"""
Activate interactive functionality when picking a point.
Callback method that is activated when a point is picked.
If no point was clicked previously, all the points but the
one selected will be more transparent in all the graphs.
If a point was clicked already, this new point will be the
one highlighted among the rest. If the same point is clicked,
the initial state of the graphics is restored.
Args:
event: event object containing the artist of the point
picked.
"""
selected_sample = self._sample_from_artist(event.artist)
if selected_sample is not None:
if self.selected_sample == selected_sample:
self._deselect_samples()
else:
self._select_sample(selected_sample)
def _sample_from_artist(self, artist: Artist) -> int | None:
"""Return the sample corresponding to an artist."""
for d in self.displays:
if d.artists is None:
continue
for i, a in enumerate(d.axes_):
if a == artist.axes:
if len(d.axes_) == 1:
return np.where( # type: ignore[no-any-return]
d.artists == artist,
)[0][0]
else:
return np.where( # type: ignore[no-any-return]
d.artists[:, i] == artist,
)[0][0]
return None
def _visit_artists(self) -> Generator[Tuple[int, Artist], None, None]:
for i in range(self.length_data):
for d in self.displays:
if d.artists is None:
continue
yield from ((i, artist) for artist in np.ravel(d.artists[i]))
def _select_sample(self, selected_sample: int) -> None:
"""Reduce the transparency of all the points but the selected one."""
for i, artist in self._visit_artists():
artist.set_alpha(1.0 if i == selected_sample else 0.1)
for criterion, slider in zip(self.criteria, self.sliders):
val_widget = criterion[selected_sample]
_set_val_noevents(slider, val_widget)
self.selected_sample = selected_sample
self.fig.canvas.draw_idle()
def _deselect_samples(self) -> None:
"""Restore the original transparency of all the points."""
for _, artist in self._visit_artists():
artist.set_alpha(1)
self.selected_sample = None
self.fig.canvas.draw_idle()
def add_slider(
self,
axes: Axes,
criterion: Sequence[float],
widget_class: Type[Widget] = Slider,
label: str | None = None,
) -> None:
"""
Add the slider to the MultipleDisplay object.
Args:
axes: Axes for the widget.
criterion: Criterion used for the slider.
widget_class: Widget type.
label: Name of the slider.
"""
full_desc = "" if label is None else label
ordered_criterion_values, ordered_criterion_indexes = zip(
*sorted(zip(criterion, range(self.length_data))),
)
widget = widget_class(
ax=axes,
label=full_desc,
valmin=ordered_criterion_values[0],
valmax=ordered_criterion_values[-1],
valinit=ordered_criterion_values[0],
valstep=ordered_criterion_values,
valfmt="%.3g",
)
self.sliders.append(widget)
axes.annotate(
f"{ordered_criterion_values[0]:.3g}",
xy=(0, -0.5),
xycoords='axes fraction',
annotation_clip=False,
)
axes.annotate(
f"{ordered_criterion_values[-1]:.3g}",
xy=(0.95, -0.5),
xycoords='axes fraction',
annotation_clip=False,
)
on_changed_function = partial(
self._value_updated,
ordered_criterion_values=ordered_criterion_values,
ordered_criterion_indexes=ordered_criterion_indexes,
)
widget.on_changed(on_changed_function)
def _value_updated(
self,
value: float,
ordered_criterion_values: Sequence[float],
ordered_criterion_indexes: Sequence[int],
) -> None:
"""
Update the graphs when a widget is clicked.
Args:
value: Current value of the widget.
ordered_criterion_values: Ordered values of the criterion.
ordered_criterion_indexes: Sample numbers ordered using the
criterion.
"""
value_index = int(np.searchsorted(ordered_criterion_values, value))
self.selected_sample = ordered_criterion_indexes[value_index]
self._select_sample(self.selected_sample) | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/skfda/exploratory/visualization/_multiple_display.py | 0.948858 | 0.437042 | _multiple_display.py | pypi |
from __future__ import annotations
from typing import Any, Sequence
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from matplotlib.colors import Colormap
from matplotlib.figure import Figure
from matplotlib.patches import Ellipse
from ...representation import FDataGrid
from ...typing._numpy import NDArrayFloat, NDArrayInt
from ..depth import Depth
from ..outliers import MSPlotOutlierDetector
from ._baseplot import BasePlot
class MagnitudeShapePlot(BasePlot):
r"""
Implementation of the magnitude-shape plot.
This plot, which is based on the calculation of the :func:`directional
outlyingness <fda.magnitude_shape_plot.directional_outlyingness>`
of each of the samples, serves as a visualization tool for the centrality
of curves. Furthermore, an outlier detection procedure is included.
The norm of the mean of the directional outlyingness (:math:`\lVert
\mathbf{MO}\rVert`) is plotted in the x-axis, and the variation of the
directional outlyingness (:math:`VO`) in the y-axis.
The outliers are detected using an instance of
:class:`MSPlotOutlierDetector`.
For more information see :footcite:ts:`dai+genton_2018_visualization`.
Args:
fdata: Object containing the data.
multivariate_depth:
Method used to order the data. Defaults to :class:`projection
depth <fda.depth_measures.multivariate.ProjectionDepth>`.
        pointwise_weights: an array containing the weights of each point
            of discretisation where values have been recorded.
cutoff_factor: Factor that multiplies the cutoff value, in order to
consider more or less curves as outliers.
assume_centered: If True, the support of the
robust location and the covariance estimates is computed, and a
covariance estimate is recomputed from it, without centering
the data. Useful to work with data whose mean is significantly
equal to zero but is not exactly zero. If False, default value,
the robust location and covariance are directly computed with
the FastMCD algorithm without additional treatment.
support_fraction: The
proportion of points to be included in the support of the
raw MCD estimate.
Default is None, which implies that the minimum value of
support_fraction will be used within the algorithm:
            [n_samples + n_features + 1] / 2
random_state: If int,
random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number
generator; If None, the random number generator is the
RandomState instance used by np.random. By default, it is 0.
ellipsoid: Whether to draw the non outlying ellipsoid.
Attributes:
points(numpy.ndarray): 2-dimensional matrix where each row
contains the points plotted in the graph.
outliers (1-D array, (fdata.n_samples,)): Contains 1 or 0 to denote
            if a sample is an outlier or not, respectively.
colormap(matplotlib.pyplot.LinearSegmentedColormap, optional): Colormap
from which the colors of the plot are extracted. Defaults to
'seismic'.
color (float, optional): Tone of the colormap in which the nonoutlier
points are plotted. Defaults to 0.2.
outliercol (float, optional): Tone of the colormap in which the
outliers are plotted. Defaults to 0.8.
xlabel (string, optional): Label of the x-axis. Defaults to 'MO',
mean of the directional outlyingness.
ylabel (string, optional): Label of the y-axis. Defaults to 'VO',
variation of the directional outlyingness.
        title (string, optional): Title of the plot. Defaults to the
            dataset name of the data, or to '' if it is not set.
Representation in a Jupyter notebook:
.. jupyter-execute::
from skfda.datasets import make_gaussian_process
from skfda.misc.covariances import Exponential
from skfda.exploratory.visualization import MagnitudeShapePlot
fd = make_gaussian_process(
n_samples=20, cov=Exponential(), random_state=1)
MagnitudeShapePlot(fd)
Example:
>>> import skfda
>>> data_matrix = [[1, 1, 2, 3, 2.5, 2],
... [0.5, 0.5, 1, 2, 1.5, 1],
... [-1, -1, -0.5, 1, 1, 0.5],
... [-0.5, -0.5, -0.5, -1, -1, -1]]
>>> grid_points = [ 0., 2., 4., 6., 8., 10.]
>>> fd = skfda.FDataGrid(data_matrix, grid_points)
>>> MagnitudeShapePlot(fd)
MagnitudeShapePlot(
fdata=FDataGrid(
array([[[ 1. ],
[ 1. ],
[ 2. ],
[ 3. ],
[ 2.5],
[ 2. ]],
[[ 0.5],
[ 0.5],
[ 1. ],
[ 2. ],
[ 1.5],
[ 1. ]],
[[-1. ],
[-1. ],
[-0.5],
[ 1. ],
[ 1. ],
[ 0.5]],
[[-0.5],
[-0.5],
[-0.5],
[-1. ],
[-1. ],
[-1. ]]]),
grid_points=(array([ 0., 2., 4., 6., 8., 10.]),),
domain_range=((0.0, 10.0),),
...),
multivariate_depth=None,
pointwise_weights=None,
cutoff_factor=1,
points=array([[ 1.66666667, 0.12777778],
[ 0. , 0. ],
[-0.8 , 0.17666667],
[-1.74444444, 0.94395062]]),
outliers=array([False, False, False, False]),
colormap=seismic,
color=0.2,
outliercol=0.8,
xlabel='MO',
ylabel='VO',
title='')
References:
.. footbibliography::
"""
def __init__(
self,
fdata: FDataGrid,
chart: Figure | Axes | None = None,
*,
fig: Figure | None = None,
axes: Sequence[Axes] | None = None,
ellipsoid: bool = True,
**kwargs: Any,
) -> None:
BasePlot.__init__(
self,
chart,
fig=fig,
axes=axes,
)
if fdata.dim_codomain > 1:
raise NotImplementedError(
"Only support 1 dimension on the codomain.")
self.outlier_detector = MSPlotOutlierDetector(**kwargs)
y = self.outlier_detector.fit_predict(fdata)
outliers = (y == -1)
self.ellipsoid = ellipsoid
self._fdata = fdata
self._outliers = outliers
self._colormap = plt.cm.get_cmap('seismic')
self._color = 0.2
self._outliercol = 0.8
self.xlabel = 'MO'
self.ylabel = 'VO'
self.title = (
"" if self.fdata.dataset_name is None else self.fdata.dataset_name
)
@property
def fdata(self) -> FDataGrid:
return self._fdata
@property
def multivariate_depth(self) -> Depth[NDArrayFloat] | None:
return self.outlier_detector.multivariate_depth
@property
def pointwise_weights(self) -> NDArrayFloat | None:
return self.outlier_detector.pointwise_weights
@property
def cutoff_factor(self) -> float:
return self.outlier_detector.cutoff_factor
@property
def points(self) -> NDArrayFloat:
return self.outlier_detector.points_
@property
def outliers(self) -> NDArrayInt:
return self._outliers # type: ignore[no-any-return]
@property
def colormap(self) -> Colormap:
return self._colormap
@colormap.setter
def colormap(self, value: Colormap) -> None:
if not isinstance(value, matplotlib.colors.Colormap):
raise ValueError(
"colormap must be of type "
"matplotlib.colors.Colormap",
)
self._colormap = value
@property
def color(self) -> float:
return self._color
@color.setter
def color(self, value: float) -> None:
if value < 0 or value > 1:
raise ValueError(
"color must be a number between 0 and 1.")
self._color = value
@property
def outliercol(self) -> float:
return self._outliercol
@outliercol.setter
def outliercol(self, value: float) -> None:
if value < 0 or value > 1:
raise ValueError(
"outcol must be a number between 0 and 1.")
self._outliercol = value
@property
def n_samples(self) -> int:
return self.fdata.n_samples
def _plot(
self,
fig: Figure,
axes: Axes,
) -> None:
self.artists = np.zeros(
(self.n_samples, 1),
dtype=Artist,
)
colors = np.zeros((self.fdata.n_samples, 4))
colors[np.where(self.outliers == 1)] = self.colormap(self.outliercol)
colors[np.where(self.outliers == 0)] = self.colormap(self.color)
colors_rgba = [tuple(i) for i in colors]
if self.ellipsoid:
center = self.outlier_detector.cov_.location_
prec = self.outlier_detector.cov_.get_precision()
K = (
self.outlier_detector.cutoff_value_
/ self.outlier_detector.scaling_
)
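            # The boundary of the non-outlying region
            # {x : (x - center)^T prec (x - center) <= K} is an ellipse whose
            # axes follow the eigenvectors of the precision matrix, with
            # semi-axis lengths sqrt(K / eigenvalue).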
eigvals, eigvecs = np.linalg.eigh(prec)
a, b = np.sqrt(K / eigvals)
if eigvecs[0, 1] * eigvecs[1, 0] > 0:
eigvecs[:, 0] *= -1
angle = np.rad2deg(np.arctan2(eigvecs[1, 0], eigvecs[0, 0]))
ellipse = Ellipse(
xy=center,
width=2 * a,
height=2 * b,
angle=angle,
facecolor='C0',
alpha=0.1,
)
axes[0].add_patch(ellipse)
for i, _ in enumerate(self.points[:, 0].ravel()):
self.artists[i, 0] = axes[0].scatter(
self.points[:, 0].ravel()[i],
self.points[:, 1].ravel()[i],
color=colors_rgba[i],
picker=True,
pickradius=2,
)
axes[0].set_xlabel(self.xlabel)
axes[0].set_ylabel(self.ylabel)
axes[0].set_title(self.title)
def __repr__(self) -> str:
"""Return repr(self)."""
return (
f"MagnitudeShapePlot("
f"\nfdata={repr(self.fdata)},"
f"\nmultivariate_depth={self.multivariate_depth},"
f"\npointwise_weights={repr(self.pointwise_weights)},"
f"\ncutoff_factor={repr(self.cutoff_factor)},"
f"\npoints={repr(self.points)},"
f"\noutliers={repr(self.outliers)},"
f"\ncolormap={self.colormap.name},"
f"\ncolor={repr(self.color)},"
f"\noutliercol={repr(self.outliercol)},"
f"\nxlabel={repr(self.xlabel)},"
f"\nylabel={repr(self.ylabel)},"
f"\ntitle={repr(self.title)})"
).replace('\n', '\n ') | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/skfda/exploratory/visualization/_magnitude_shape_plot.py | 0.959317 | 0.741545 | _magnitude_shape_plot.py | pypi |
from __future__ import annotations
from typing import Sequence, Tuple
import matplotlib
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from matplotlib.collections import PatchCollection
from matplotlib.figure import Figure
from matplotlib.patches import Rectangle
from matplotlib.ticker import MaxNLocator
from sklearn.exceptions import NotFittedError
from sklearn.utils.validation import check_is_fitted
from typing_extensions import Protocol
from ...misc.validation import check_fdata_same_dimensions
from ...representation import FData, FDataGrid
from ...typing._numpy import NDArrayFloat, NDArrayInt
from ._baseplot import BasePlot
from ._utils import ColorLike, _darken, _set_labels
class ClusteringEstimator(Protocol):
@property
def n_clusters(self) -> int:
pass
@property
def cluster_centers_(self) -> FDataGrid:
pass
@property
def labels_(self) -> NDArrayInt:
pass
def fit(self, X: FDataGrid) -> ClusteringEstimator:
pass
def predict(self, X: FDataGrid) -> NDArrayInt:
pass
class FuzzyClusteringEstimator(ClusteringEstimator, Protocol):
def predict_proba(self, X: FDataGrid) -> NDArrayFloat:
pass
def _plot_clustering_checks(
estimator: ClusteringEstimator,
fdata: FData,
sample_colors: Sequence[ColorLike] | None,
sample_labels: Sequence[str] | None,
cluster_colors: Sequence[ColorLike] | None,
cluster_labels: Sequence[str] | None,
center_colors: Sequence[ColorLike] | None,
center_labels: Sequence[str] | None,
) -> None:
"""Check the arguments."""
if (
sample_colors is not None
and len(sample_colors) != fdata.n_samples
):
raise ValueError(
"sample_colors must contain a color for each sample.",
)
if (
sample_labels is not None
and len(sample_labels) != fdata.n_samples
):
raise ValueError(
"sample_labels must contain a label for each sample.",
)
if (
cluster_colors is not None
and len(cluster_colors) != estimator.n_clusters
):
raise ValueError(
"cluster_colors must contain a color for each cluster.",
)
if (
cluster_labels is not None
and len(cluster_labels) != estimator.n_clusters
):
raise ValueError(
"cluster_labels must contain a label for each cluster.",
)
if (
center_colors is not None
and len(center_colors) != estimator.n_clusters
):
raise ValueError(
"center_colors must contain a color for each center.",
)
if (
center_labels is not None
and len(center_labels) != estimator.n_clusters
):
raise ValueError(
"centers_labels must contain a label for each center.",
)
def _get_labels(
x_label: str | None,
y_label: str | None,
title: str | None,
xlabel_str: str,
) -> Tuple[str, str, str]:
"""
Get the axes labels.
Set the arguments *xlabel*, *ylabel*, *title* passed to the plot
functions :func:`plot_cluster_lines
<skfda.exploratory.visualization.clustering_plots.plot_cluster_lines>` and
:func:`plot_cluster_bars
<skfda.exploratory.visualization.clustering_plots.plot_cluster_bars>`,
in case they are not set yet.
Args:
x_label: Label for the x-axes.
y_label: Label for the y-axes.
title: Title for the figure where the clustering results are
plotted.
xlabel_str: In case xlabel is None, string to use for the labels
in the x-axes.
Returns:
xlabel: Labels for the x-axes.
ylabel: Labels for the y-axes.
title: Title for the figure where the clustering results are
plotted.
"""
if x_label is None:
x_label = xlabel_str
if y_label is None:
y_label = "Degree of membership"
if title is None:
title = "Degrees of membership of the samples to each cluster"
return x_label, y_label, title
class ClusterPlot(BasePlot):
"""
ClusterPlot class.
Args:
estimator: estimator used to calculate the
clusters.
fdata: contains the samples which are grouped
into different clusters.
fig: figure over which the graphs are plotted in
case ax is not specified. If None and ax is also None, the figure
is initialized.
axes: axes where the graphs are plotted.
If None, see param fig.
n_rows: designates the number of rows of the figure to plot the
different dimensions of the image. Only specified if fig and
ax are None.
n_cols: designates the number of columns of the figure to plot
the different dimensions of the image. Only specified if fig
and ax are None.
sample_labels: contains in order the labels of each
sample of the fdatagrid.
cluster_colors: contains in order the colors of each
cluster the samples of the fdatagrid are classified into.
cluster_labels: contains in order the names of each
cluster the samples of the fdatagrid are classified into.
center_colors: contains in order the colors of each
centroid of the clusters the samples of the fdatagrid are
classified into.
center_labels: contains in order the labels of each
centroid of the clusters the samples of the fdatagrid are
classified into.
center_width: width of the centroid curves.
colormap: colormap from which the colors of the plot are
taken. Defaults to the `rainbow` colormap.
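Example:
    A minimal usage sketch, assuming the dataset fetcher and the
    ``KMeans`` estimator are available from the installed scikit-fda
    package (they are not defined in this module)::

        import skfda
        from skfda.ml.clustering import KMeans

        X, _ = skfda.datasets.fetch_growth(return_X_y=True)
        estimator = KMeans(n_clusters=2)
        estimator.fit(X)
        ClusterPlot(estimator, X).plot()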
"""
def __init__(
self,
estimator: ClusteringEstimator,
fdata: FDataGrid,
chart: Figure | Axes | None = None,
fig: Figure | None = None,
axes: Axes | Sequence[Axes] | None = None,
n_rows: int | None = None,
n_cols: int | None = None,
sample_labels: Sequence[str] | None = None,
cluster_colors: Sequence[ColorLike] | None = None,
cluster_labels: Sequence[str] | None = None,
center_colors: Sequence[ColorLike] | None = None,
center_labels: Sequence[str] | None = None,
center_width: int = 3,
colormap: matplotlib.colors.Colormap | None = None,
) -> None:
if colormap is None:
colormap = plt.cm.get_cmap('rainbow')
super().__init__(
chart,
fig=fig,
axes=axes,
n_rows=n_rows,
n_cols=n_cols,
)
self.fdata = fdata
self.estimator = estimator
self.sample_labels = sample_labels
self.cluster_colors = cluster_colors
self.cluster_labels = cluster_labels
self.center_colors = center_colors
self.center_labels = center_labels
self.center_width = center_width
self.colormap = colormap
@property
def n_subplots(self) -> int:
return self.fdata.dim_codomain
@property
def n_samples(self) -> int:
return self.fdata.n_samples
def _plot_clusters(
self,
fig: Figure,
axes: Sequence[Axes],
) -> None:
"""Implement the plot of the FDataGrid samples by clusters."""
_plot_clustering_checks(
estimator=self.estimator,
fdata=self.fdata,
sample_colors=None,
sample_labels=self.sample_labels,
cluster_colors=self.cluster_colors,
cluster_labels=self.cluster_labels,
center_colors=self.center_colors,
center_labels=self.center_labels,
)
if self.sample_labels is None:
self.sample_labels = [
f'$SAMPLE: {i}$' for i in range(self.fdata.n_samples)
]
if self.cluster_colors is None:
self.cluster_colors = self.colormap(
np.arange(self.estimator.n_clusters)
/ (self.estimator.n_clusters - 1),
)
if self.cluster_labels is None:
self.cluster_labels = [
f'$CLUSTER: {i}$' for i in range(self.estimator.n_clusters)
]
if self.center_colors is None:
self.center_colors = [_darken(c, 0.5) for c in self.cluster_colors]
if self.center_labels is None:
self.center_labels = [
f'$CENTER: {i}$' for i in range(self.estimator.n_clusters)
]
colors_by_cluster = np.asarray(self.cluster_colors)[self.labels]
patches = [
mpatches.Patch(
color=self.cluster_colors[i],
label=self.cluster_labels[i],
)
for i in range(self.estimator.n_clusters)
]
artists = [
axes[j].plot(
self.fdata.grid_points[0],
self.fdata.data_matrix[i, :, j],
c=colors_by_cluster[i],
label=self.sample_labels[i],
)
for j in range(self.fdata.dim_codomain)
for i in range(self.fdata.n_samples)
]
self.artists = np.array(artists).reshape(
(self.n_subplots, self.n_samples),
).T
for j in range(self.fdata.dim_codomain):
for i in range(self.estimator.n_clusters):
axes[j].plot(
self.fdata.grid_points[0],
self.estimator.cluster_centers_.data_matrix[i, :, j],
c=self.center_colors[i],
label=self.center_labels[i],
linewidth=self.center_width,
)
axes[j].legend(handles=patches)
_set_labels(self.fdata, fig, axes)
def _plot(
self,
fig: Figure,
axes: Sequence[Axes],
) -> None:
try:
check_is_fitted(self.estimator)
check_fdata_same_dimensions(
self.estimator.cluster_centers_,
self.fdata,
)
except NotFittedError:
self.estimator.fit(self.fdata)
self.labels = self.estimator.labels_
self._plot_clusters(fig=fig, axes=axes)
class ClusterMembershipLinesPlot(BasePlot):
"""
Class ClusterMembershipLinesPlot.
Args:
estimator: estimator used to calculate the
clusters.
fdata: contains the samples which are grouped
into different clusters.
fig: figure over which the graph is
plotted in case ax is not specified. If None and ax is also None,
the figure is initialized.
axes: axes where the graph is plotted.
If None, see param fig.
sample_colors: contains in order the colors
of each sample of the fdatagrid.
sample_labels: contains in order the labels
of each sample of the fdatagrid.
cluster_labels: contains in order the names of
each cluster the samples of the fdatagrid are classified into.
colormap: colormap from which the colors of the
plot are taken.
x_label: Label for the x-axis. Defaults to "Cluster".
y_label: Label for the y-axis. Defaults to
"Degree of membership".
title: Title for the figure where the clustering
results are plotted.
Defaults to "Degrees of membership of the samples to each cluster".
"""
def __init__(
self,
estimator: FuzzyClusteringEstimator,
fdata: FDataGrid,
*,
chart: Figure | Axes | None = None,
fig: Figure | None = None,
axes: Axes | Sequence[Axes] | None = None,
sample_colors: Sequence[ColorLike] | None = None,
sample_labels: Sequence[str] | None = None,
cluster_labels: Sequence[str] | None = None,
colormap: matplotlib.colors.Colormap | None = None,
x_label: str | None = None,
y_label: str | None = None,
title: str | None = None,
) -> None:
if colormap is None:
colormap = plt.cm.get_cmap('rainbow')
super().__init__(
chart,
fig=fig,
axes=axes,
)
self.fdata = fdata
self.estimator = estimator
self.sample_labels = sample_labels
self.sample_colors = sample_colors
self.cluster_labels = cluster_labels
self.x_label = x_label
self.y_label = y_label
self.title = title
self.colormap = colormap
@property
def n_samples(self) -> int:
return self.fdata.n_samples
def _plot(
self,
fig: Figure,
axes: Sequence[Axes],
) -> None:
try:
check_is_fitted(self.estimator)
check_fdata_same_dimensions(
self.estimator.cluster_centers_,
self.fdata,
)
except NotFittedError:
self.estimator.fit(self.fdata)
membership = self.estimator.predict_proba(self.fdata)
_plot_clustering_checks(
estimator=self.estimator,
fdata=self.fdata,
sample_colors=self.sample_colors,
sample_labels=self.sample_labels,
cluster_colors=None,
cluster_labels=self.cluster_labels,
center_colors=None,
center_labels=None,
)
x_label, y_label, title = _get_labels(
self.x_label,
self.y_label,
self.title,
"Cluster",
)
if self.sample_colors is None:
self.cluster_colors = self.colormap(
np.arange(self.estimator.n_clusters)
/ (self.estimator.n_clusters - 1),
)
labels_by_cluster = self.estimator.labels_
self.sample_colors = self.cluster_colors[labels_by_cluster]
if self.sample_labels is None:
self.sample_labels = [
f'$SAMPLE: {i}$'
for i in range(self.fdata.n_samples)
]
if self.cluster_labels is None:
self.cluster_labels = [
f'${i}$'
for i in range(self.estimator.n_clusters)
]
axes[0].get_xaxis().set_major_locator(MaxNLocator(integer=True))
self.artists = np.array([
axes[0].plot(
np.arange(self.estimator.n_clusters),
membership[i],
label=self.sample_labels[i],
color=self.sample_colors[i],
)
for i in range(self.fdata.n_samples)
])
axes[0].set_xticks(np.arange(self.estimator.n_clusters))
axes[0].set_xticklabels(self.cluster_labels)
axes[0].set_xlabel(x_label)
axes[0].set_ylabel(y_label)
fig.suptitle(title)
class ClusterMembershipPlot(BasePlot):
"""
Class ClusterMembershipPlot.
Args:
estimator: estimator used to calculate the
clusters.
fdata: contains the samples which are grouped
into different clusters.
fig: figure over which the graph is
plotted in case ax is not specified. If None and ax is also None,
the figure is initialized.
axes: axes where the graph is plotted.
If None, see param fig.
sample_colors: contains in order the colors
of each sample of the fdatagrid.
sample_labels: contains in order the labels
of each sample of the fdatagrid.
cluster_labels: contains in order the names of
each cluster the samples of the fdatagrid are classified into.
colormap: colormap from which the colors of the
plot are taken.
x_label: Label for the x-axis. Defaults to "Cluster".
y_label: Label for the y-axis. Defaults to
"Degree of membership".
title: Title for the figure where the clustering
results are plotted.
Defaults to "Degrees of membership of the samples to each cluster".
"""
def __init__(
self,
estimator: FuzzyClusteringEstimator,
fdata: FData,
chart: Figure | Axes | None = None,
*,
fig: Figure | None = None,
axes: Axes | Sequence[Axes] | None = None,
sort: int = -1,
sample_labels: Sequence[str] | None = None,
cluster_colors: Sequence[ColorLike] | None = None,
cluster_labels: Sequence[str] | None = None,
colormap: matplotlib.colors.Colormap | None = None,
x_label: str | None = None,
y_label: str | None = None,
title: str | None = None,
) -> None:
if colormap is None:
colormap = plt.cm.get_cmap('rainbow')
super().__init__(
chart,
fig=fig,
axes=axes,
)
self.fdata = fdata
self.estimator = estimator
self.sample_labels = sample_labels
self.cluster_colors = (
None
if cluster_colors is None
else list(cluster_colors)
)
self.cluster_labels = cluster_labels
self.x_label = x_label
self.y_label = y_label
self.title = title
self.colormap = colormap
self.sort = sort
@property
def n_samples(self) -> int:
return self.fdata.n_samples
def _plot(
self,
fig: Figure,
axes: Sequence[Axes],
) -> None:
self.artists = np.full(
(self.n_samples, self.n_subplots),
None,
dtype=Artist,
)
try:
check_is_fitted(self.estimator)
check_fdata_same_dimensions(
self.estimator.cluster_centers_,
self.fdata,
)
except NotFittedError:
self.estimator.fit(self.fdata)
membership = self.estimator.predict_proba(self.fdata)
if self.sort < -1 or self.sort >= self.estimator.n_clusters:
raise ValueError(
"The sorting number must belong to "
"the interval [-1, n_clusters)",
)
_plot_clustering_checks(
estimator=self.estimator,
fdata=self.fdata,
sample_colors=None,
sample_labels=self.sample_labels,
cluster_colors=self.cluster_colors,
cluster_labels=self.cluster_labels,
center_colors=None,
center_labels=None,
)
x_label, y_label, title = _get_labels(
self.x_label,
self.y_label,
self.title,
"Sample",
)
if self.sample_labels is None:
self.sample_labels = list(
np.arange(
self.fdata.n_samples,
).astype(np.str_),
)
if self.cluster_colors is None:
self.cluster_colors = list(
self.colormap(
np.arange(self.estimator.n_clusters)
/ (self.estimator.n_clusters - 1),
),
)
if self.cluster_labels is None:
self.cluster_labels = [
f'$CLUSTER: {i}$'
for i in range(self.estimator.n_clusters)
]
patches = [
mpatches.Patch(
color=self.cluster_colors[i],
label=self.cluster_labels[i],
)
for i in range(self.estimator.n_clusters)
]
if self.sort == -1:
labels_dim = membership
else:
sample_indices = np.argsort(-membership[:, self.sort])
self.sample_labels = list(
np.array(self.sample_labels)[sample_indices],
)
labels_dim = np.copy(membership[sample_indices])
temp_labels = np.copy(labels_dim[:, 0])
labels_dim[:, 0] = labels_dim[:, self.sort]
labels_dim[:, self.sort] = temp_labels
# Swap
self.cluster_colors[0], self.cluster_colors[self.sort] = (
self.cluster_colors[self.sort],
self.cluster_colors[0],
)
conc = np.zeros((self.fdata.n_samples, 1))
labels_dim = np.concatenate((conc, labels_dim), axis=-1)
bars = [
axes[0].bar(
np.arange(self.fdata.n_samples),
labels_dim[:, i + 1],
bottom=np.sum(labels_dim[:, :(i + 1)], axis=1),
color=self.cluster_colors[i],
)
for i in range(self.estimator.n_clusters)
]
for b in bars:
b.remove()
b.figure = None
for i in range(self.n_samples):
collection = PatchCollection(
[
Rectangle(
bar.patches[i].get_xy(),
bar.patches[i].get_width(),
bar.patches[i].get_height(),
color=bar.patches[i].get_facecolor(),
) for bar in bars
],
match_original=True,
)
axes[0].add_collection(collection)
self.artists[i, 0] = collection
fig.canvas.draw()
axes[0].set_xticks(np.arange(self.fdata.n_samples))
axes[0].set_xticklabels(self.sample_labels)
axes[0].set_xlabel(x_label)
axes[0].set_ylabel(y_label)
axes[0].legend(handles=patches)
fig.suptitle(title)
# End of skfda/exploratory/visualization/clustering.py
from __future__ import annotations
import numpy as np
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from ...representation import FDataGrid
from ..outliers import OutliergramOutlierDetector
from ._baseplot import BasePlot
class Outliergram(BasePlot):
"""
Outliergram method of visualization.
Plots the :class:`Modified Band Depth
(MBD)<skfda.exploratory.depth.ModifiedBandDepth>` on the Y axis and the
:func:`Modified Epigraph Index
(MEI)<skfda.exploratory.stats.modified_epigraph_index>` on the X axis.
These points tend to form a parabola-shaped curve.
The shape outliers are the points that appear far from this curve.
Args:
fdata: functional data set that we want to examine.
chart: figure over which the graphs are plotted or axes
where the graphs are plotted. If None and ax is also
None, the figure is initialized.
fig: figure over which the graphs are plotted in case ax is not
specified. If None and ax is also None, the figure is
initialized.
axes: axes where the graphs are plotted. If None, see param fig.
factor: cutoff factor passed to the underlying
:class:`OutliergramOutlierDetector` used to flag shape outliers.
Attributes:
mbd: result of the calculation of the Modified Band Depth on our
dataset. Represents the mean time a curve stays between each other
pair of curves, being a good measure of centrality.
mei: result of the calculation of the Modified Epigraph Index on our
dataset. Represents the mean time a curve stays below each other curve.
References:
López-Pintado, S. and Romo, J. (2011). A half-region depth for
functional data. Computational Statistics & Data Analysis, 55,
1679-1695.
Arribas-Gil, A. and Romo, J. Shape outlier detection and visualization
for functional data: the outliergram.
https://academic.oup.com/biostatistics/article/15/4/603/266279
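Example:
    A minimal usage sketch; the dataset fetcher is an assumption about
    the installed scikit-fda package, not part of this module::

        import skfda

        X, _ = skfda.datasets.fetch_growth(return_X_y=True)
        Outliergram(X).plot()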
"""
def __init__(
self,
fdata: FDataGrid,
chart: Figure | Axes | None = None,
*,
fig: Figure | None = None,
axes: Axes | None = None,
factor: float = 1.5,
) -> None:
BasePlot.__init__(
self,
chart,
fig=fig,
axes=axes,
)
self.fdata = fdata
self.factor = factor
self.outlier_detector = OutliergramOutlierDetector(factor=factor)
self.outlier_detector.fit(fdata)
indices = np.argsort(self.outlier_detector.mei_)
self._parabola_ordered = self.outlier_detector.parabola_[indices]
self._mei_ordered = self.outlier_detector.mei_[indices]
@property
def n_samples(self) -> int:
return self.fdata.n_samples
def _plot(
self,
fig: Figure,
axes: Axes,
) -> None:
self.artists = np.zeros(
(self.n_samples, 1),
dtype=Artist,
)
for i, (mei, mbd) in enumerate(
zip(self.outlier_detector.mei_, self.outlier_detector.mbd_),
):
self.artists[i, 0] = axes[0].scatter(
mei,
mbd,
picker=True,
pickradius=2,
)
axes[0].plot(
self._mei_ordered,
self._parabola_ordered,
)
shifted_parabola = (
self._parabola_ordered
- self.outlier_detector.max_inlier_distance_
)
axes[0].plot(
self._mei_ordered,
shifted_parabola,
linestyle='dashed',
)
# Set labels of graph
if self.fdata.dataset_name is not None:
axes[0].set_title(self.fdata.dataset_name)
axes[0].set_xlabel("MEI")
axes[0].set_ylabel("MBD")
axes[0].set_xlim([0, 1])
axes[0].set_ylim([
0, # Minimum MBD
1, # Maximum MBD
])
# End of skfda/exploratory/visualization/_outliergram.py
from __future__ import annotations
from typing import Any, Dict, Sequence, Sized, Tuple, TypeVar
import matplotlib.cm
import matplotlib.patches
import numpy as np
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from matplotlib.colors import Colormap
from matplotlib.figure import Figure
from typing_extensions import Protocol
from ..._utils import _to_grid_points, constants
from ...misc.validation import validate_domain_range
from ...representation._functional_data import FData
from ...typing._base import DomainRangeLike, GridPointsLike
from ._baseplot import BasePlot
from ._utils import ColorLike, _set_labels
K = TypeVar('K', contravariant=True)
V = TypeVar('V', covariant=True)
class Indexable(Protocol[K, V]):
"""Class Indexable used to type _get_color_info."""
def __getitem__(self, __key: K) -> V:
pass
def __len__(self) -> int:
pass
def _get_color_info(
fdata: Sized,
group: Sequence[K] | None = None,
group_names: Indexable[K, str] | None = None,
group_colors: Indexable[K, ColorLike] | None = None,
legend: bool = False,
kwargs: Dict[str, Any] | None = None,
) -> Tuple[
Sequence[ColorLike] | None,
Sequence[matplotlib.patches.Patch] | None,
]:
if kwargs is None:
kwargs = {}
patches = None
if group is not None:
# In this case, each curve has a label, and all curves with the same
# label should have the same color
group_unique, group_indexes = np.unique(
np.asarray(group),
return_inverse=True,
)
n_labels = len(group_unique)
if group_colors is not None:
group_colors_array = np.array(
[group_colors[g] for g in group_unique],
)
else:
prop_cycle = matplotlib.rcParams['axes.prop_cycle']
cycle_colors = prop_cycle.by_key()['color']
group_colors_array = np.take(
cycle_colors, np.arange(n_labels), mode='wrap',
)
sample_colors = list(group_colors_array[group_indexes])
group_names_array = None
if group_names is not None:
group_names_array = np.array(
[group_names[g] for g in group_unique],
)
elif legend is True:
group_names_array = group_unique
if group_names_array is not None:
patches = [
matplotlib.patches.Patch(color=c, label=l)
for c, l in zip(group_colors_array, group_names_array)
]
else:
# In this case, each curve has a different color unless specified
# otherwise
if 'color' in kwargs:
sample_colors = len(fdata) * [kwargs.get("color")]
kwargs.pop('color')
elif 'c' in kwargs:
sample_colors = len(fdata) * [kwargs.get("c")]
kwargs.pop('c')
else:
sample_colors = None
return sample_colors, patches
class GraphPlot(BasePlot):
"""
Class used to plot the FDataGrid object graph as hypersurfaces.
When plotting functional data, we can either manually choose a color or
a group of colors for the representations. Alternatively, a list of
variables (depths, scalar regression targets...) can be used as an
argument to display the functions with a gradient of colors.
Args:
fdata: functional data set that we want to plot.
gradient_criteria: list of real values used to determine the color
in which each of the instances will be plotted.
max_grad: maximum value that the gradient_list can take, it will be
used to normalize the ``gradient_criteria`` in order to get values
that can be used in the function colormap.__call__(). If not
declared it will be initialized to the maximum value of
gradient_list.
min_grad: minimum value that the gradient_list can take, it will be
used to normalize the ``gradient_criteria`` in order to get values
that can be used in the function colormap.__call__(). If not
declared it will be initialized to the minimum value of
gradient_list.
chart: figure over which the graphs are plotted or axes where the
graphs are plotted. If None and ax is also None, the figure is
initialized.
fig: figure over which the graphs are
plotted in case ax is not specified. If None and ax is also
None, the figure is initialized.
axes: axes where the graphs
are plotted. If None, see param fig.
n_rows: designates the number of rows of the figure
to plot the different dimensions of the image. Only specified
if fig and ax are None.
n_cols: designates the number of columns of the
figure to plot the different dimensions of the image. Only
specified if fig and ax are None.
n_points: Number of points to evaluate in
the plot. In the case of surfaces, a tuple of length 2 can be passed
with the number of points to plot in each axis; otherwise the
same number of points will be used in the two axes. By default,
501 points are used in unidimensional plots and 30 points per
axis in surfaces, which makes a grid of 900 points.
domain_range: Range where the
function will be plotted. In objects with unidimensional domain
the domain range should be a tuple with the bounds of the
interval; in the case of surfaces a list with 2 tuples with
the ranges for each dimension. Default uses the domain range
of the functional object.
group: contains integers from [0 to number of
labels) indicating to which group each sample belongs to. Then,
the samples with the same label are plotted in the same color.
If None, the default value, each sample is plotted in the color
assigned by matplotlib.pyplot.rcParams['axes.prop_cycle'].
group_colors: colors in which groups are
represented; there must be one for each group. If None, each
group is shown with a distinct color from the default color cycle.
group_names: name of each of the groups which appear
in a legend, there must be one for each one. Defaults to None
and the legend is not shown. Implies `legend=True`.
colormap: name of the colormap to be used. By default, the
reversed autumn colormap is used.
legend: if `True`, show a legend with the groups. If
`group_names` is passed, it will be used for finding the names
to display in the legend. Otherwise, the values passed to
`group` will be used.
kwargs: if dim_domain is 1, keyword arguments to be passed to
the matplotlib.pyplot.plot function; if dim_domain is 2,
keyword arguments to be passed to the
matplotlib.pyplot.plot_surface function.
Attributes:
gradient_list: normalization of the values from gradient color_list
that will be used to determine the intensity of the color
each function will have.
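Example:
    A minimal usage sketch; the dataset fetcher is an assumption about
    the installed scikit-fda package. The class labels are used to
    color the curves by group::

        import skfda

        X, y = skfda.datasets.fetch_growth(return_X_y=True)
        GraphPlot(X, group=y, legend=True).plot()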
"""
def __init__(
self,
fdata: FData,
chart: Figure | Axes | None = None,
*,
fig: Figure | None = None,
axes: Axes | None = None,
n_rows: int | None = None,
n_cols: int | None = None,
n_points: int | Tuple[int, int] | None = None,
domain_range: DomainRangeLike | None = None,
group: Sequence[K] | None = None,
group_colors: Indexable[K, ColorLike] | None = None,
group_names: Indexable[K, str] | None = None,
gradient_criteria: Sequence[float] | None = None,
max_grad: float | None = None,
min_grad: float | None = None,
colormap: Colormap | str | None = None,
legend: bool = False,
**kwargs: Any,
) -> None:
super().__init__(
chart,
fig=fig,
axes=axes,
n_rows=n_rows,
n_cols=n_cols,
)
self.fdata = fdata
self.gradient_criteria = gradient_criteria
if self.gradient_criteria is not None:
if len(self.gradient_criteria) != fdata.n_samples:
raise ValueError(
"The length of the gradient color",
"list should be the same as the number",
"of samples in fdata",
)
if min_grad is None:
self.min_grad = min(self.gradient_criteria)
else:
self.min_grad = min_grad
if max_grad is None:
self.max_grad = max(self.gradient_criteria)
else:
self.max_grad = max_grad
self.gradient_list: Sequence[float] | None = (
[
(grad_color - self.min_grad)
/ (self.max_grad - self.min_grad)
for grad_color in self.gradient_criteria
]
)
else:
self.gradient_list = None
self.n_points = n_points
self.group = group
self.group_colors = group_colors
self.group_names = group_names
self.legend = legend
self.colormap = colormap
self.kwargs = kwargs
if domain_range is None:
self.domain_range = self.fdata.domain_range
else:
self.domain_range = validate_domain_range(domain_range)
if self.gradient_list is None:
sample_colors, patches = _get_color_info(
self.fdata,
self.group,
self.group_names,
self.group_colors,
self.legend,
kwargs,
)
else:
patches = None
if self.colormap is None:
colormap = matplotlib.cm.get_cmap("autumn")
colormap = colormap.reversed()
else:
colormap = matplotlib.cm.get_cmap(self.colormap)
sample_colors = colormap(self.gradient_list)
self.sample_colors = sample_colors
self.patches = patches
@property
def dim(self) -> int:
return self.fdata.dim_domain + 1
@property
def n_subplots(self) -> int:
return self.fdata.dim_codomain
@property
def n_samples(self) -> int:
return self.fdata.n_samples
def _plot(
self,
fig: Figure,
axes: Sequence[Axes],
) -> None:
self.artists = np.zeros(
(self.n_samples, self.fdata.dim_codomain),
dtype=Artist,
)
color_dict: Dict[str, ColorLike | None] = {}
if self.fdata.dim_domain == 1:
if self.n_points is None:
self.n_points = constants.N_POINTS_UNIDIMENSIONAL_PLOT_MESH
assert isinstance(self.n_points, int)
# Evaluates the object in a linspace
eval_points = np.linspace(*self.domain_range[0], self.n_points)
mat = self.fdata(eval_points)
for i in range(self.fdata.dim_codomain):
for j in range(self.fdata.n_samples):
set_color_dict(self.sample_colors, j, color_dict)
self.artists[j, i] = axes[i].plot(
eval_points,
mat[j, ..., i].T,
**self.kwargs,
**color_dict,
)[0]
else:
# Selects the number of points
if self.n_points is None:
n_points_tuple = 2 * (constants.N_POINTS_SURFACE_PLOT_AX,)
elif isinstance(self.n_points, int):
n_points_tuple = (self.n_points, self.n_points)
elif len(self.n_points) != 2:
raise ValueError(
"n_points should be a number or a tuple of "
"length 2, and has "
"length {0}.".format(len(self.n_points)),
)
else:
# A valid tuple of two point counts was passed
n_points_tuple = tuple(self.n_points)
# Axes where will be evaluated
x = np.linspace(*self.domain_range[0], n_points_tuple[0])
y = np.linspace(*self.domain_range[1], n_points_tuple[1])
# Evaluation of the functional object
Z = self.fdata((x, y), grid=True)
X, Y = np.meshgrid(x, y, indexing='ij')
for k in range(self.fdata.dim_codomain):
for h in range(self.fdata.n_samples):
set_color_dict(self.sample_colors, h, color_dict)
self.artists[h, k] = axes[k].plot_surface(
X,
Y,
Z[h, ..., k],
**self.kwargs,
**color_dict,
)
_set_labels(self.fdata, fig, axes, self.patches)
class ScatterPlot(BasePlot):
"""
Class used to scatter the FDataGrid object.
Args:
fdata: functional data set that we want to plot.
grid_points: points to plot.
chart: figure over which the graphs are plotted or axes where the
graphs are plotted. If None and ax is also None, the figure is
initialized.
fig: figure over which the graphs are
plotted in case ax is not specified. If None and ax is also
None, the figure is initialized.
axes: axes where the graphs
are plotted. If None, see param fig.
n_rows: designates the number of rows of the figure
to plot the different dimensions of the image. Only specified
if fig and ax are None.
n_cols: designates the number of columns of the
figure to plot the different dimensions of the image. Only
specified if fig and ax are None.
domain_range: Range where the
function will be plotted. In objects with unidimensional domain
the domain range should be a tuple with the bounds of the
interval; in the case of surfaces a list with 2 tuples with
the ranges for each dimension. Default uses the domain range
of the functional object.
group: contains integers from [0 to number of
labels) indicating to which group each sample belongs to. Then,
the samples with the same label are plotted in the same color.
If None, the default value, each sample is plotted in the color
assigned by matplotlib.pyplot.rcParams['axes.prop_cycle'].
group_colors: colors in which groups are
represented; there must be one for each group. If None, each
group is shown with a distinct color from the default color cycle.
group_names: name of each of the groups which appear
in a legend, there must be one for each one. Defaults to None
and the legend is not shown. Implies `legend=True`.
legend: if `True`, show a legend with the groups. If
`group_names` is passed, it will be used for finding the names
to display in the legend. Otherwise, the values passed to
`group` will be used.
kwargs: if dim_domain is 1, keyword arguments to be passed to
the matplotlib.pyplot.plot function; if dim_domain is 2,
keyword arguments to be passed to the
matplotlib.pyplot.plot_surface function.
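Example:
    A minimal usage sketch, reusing the same assumed dataset fetcher.
    Omitting ``grid_points`` scatters the values already stored in the
    grid::

        import skfda

        X, _ = skfda.datasets.fetch_growth(return_X_y=True)
        ScatterPlot(X).plot()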
"""
def __init__(
self,
fdata: FData,
chart: Figure | Axes | None = None,
*,
fig: Figure | None = None,
axes: Axes | None = None,
n_rows: int | None = None,
n_cols: int | None = None,
grid_points: GridPointsLike | None = None,
domain_range: Tuple[int, int] | DomainRangeLike | None = None,
group: Sequence[K] | None = None,
group_colors: Indexable[K, ColorLike] | None = None,
group_names: Indexable[K, str] | None = None,
legend: bool = False,
**kwargs: Any,
) -> None:
super().__init__(
chart,
fig=fig,
axes=axes,
n_rows=n_rows,
n_cols=n_cols,
)
self.fdata = fdata
if grid_points is None:
# This can only be done for FDataGrid
self.grid_points = self.fdata.grid_points
self.evaluated_points = self.fdata.data_matrix
else:
self.grid_points = _to_grid_points(grid_points)
self.evaluated_points = self.fdata(
self.grid_points, grid=True,
)
self.domain_range = domain_range
self.group = group
self.group_colors = group_colors
self.group_names = group_names
self.legend = legend
if self.domain_range is None:
self.domain_range = self.fdata.domain_range
else:
self.domain_range = validate_domain_range(self.domain_range)
sample_colors, patches = _get_color_info(
self.fdata,
self.group,
self.group_names,
self.group_colors,
self.legend,
kwargs,
)
self.sample_colors = sample_colors
self.patches = patches
@property
def dim(self) -> int:
return self.fdata.dim_domain + 1
@property
def n_subplots(self) -> int:
return self.fdata.dim_codomain
@property
def n_samples(self) -> int:
return self.fdata.n_samples
def _plot(
self,
fig: Figure,
axes: Sequence[Axes],
) -> None:
"""
Scatter the FDataGrid object onto the given figure and axes.
"""
self.artists = np.zeros(
(self.n_samples, self.fdata.dim_codomain),
dtype=Artist,
)
color_dict: Dict[str, ColorLike | None] = {}
if self.fdata.dim_domain == 1:
for i in range(self.fdata.dim_codomain):
for j in range(self.fdata.n_samples):
set_color_dict(self.sample_colors, j, color_dict)
self.artists[j, i] = axes[i].scatter(
self.grid_points[0],
self.evaluated_points[j, ..., i].T,
**color_dict,
picker=True,
pickradius=2,
)
else:
X = self.fdata.grid_points[0]
Y = self.fdata.grid_points[1]
X, Y = np.meshgrid(X, Y)
for k in range(self.fdata.dim_codomain):
for h in range(self.fdata.n_samples):
set_color_dict(self.sample_colors, h, color_dict)
self.artists[h, k] = axes[k].scatter(
X,
Y,
self.evaluated_points[h, ..., k].T,
**color_dict,
picker=True,
pickradius=2,
)
_set_labels(self.fdata, fig, axes, self.patches)
def set_color_dict(
sample_colors: Any,
ind: int,
color_dict: Dict[str, ColorLike | None],
) -> None:
"""
Auxiliary method used to update color_dict.
Sets the new color of the color_dict
thanks to sample colors and index.
"""
if sample_colors is not None:
color_dict["color"] = sample_colors[ind] | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/skfda/exploratory/visualization/representation.py | 0.961198 | 0.46563 | representation.py | pypi |
from __future__ import annotations
import warnings
from typing import Sequence
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from skfda.exploratory.visualization.representation import GraphPlot
from skfda.representation import FData
from ._baseplot import BasePlot
class FPCAPlot(BasePlot):
"""
FPCAPlot visualization.
Args:
mean: The functional data object containing the mean function.
If len(mean) > 1, the mean is computed.
components: The principal components
factor: Multiple of the principal component curve to be added or
subtracted.
fig: Figure over which the graph is plotted. If not specified it will
be initialized
axes: Axes over where the graph is plotted.
If ``None``, see param fig.
n_rows: Designates the number of rows of the figure.
n_cols: Designates the number of columns of the figure.
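Example:
    A minimal usage sketch. ``FPCA``, its import path and the dataset
    fetcher are assumptions about the installed scikit-fda package (the
    exact module for ``FPCA`` may differ between versions)::

        import skfda
        from skfda.preprocessing.dim_reduction import FPCA

        X, _ = skfda.datasets.fetch_growth(return_X_y=True)
        fpca = FPCA(n_components=2)
        fpca.fit(X)
        FPCAPlot(X.mean(), fpca.components_, factor=30).plot()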
"""
def __init__(
self,
mean: FData,
components: FData,
*,
factor: float = 1,
multiple: float | None = None,
chart: Figure | Axes | None = None,
fig: Figure | None = None,
axes: Axes | None = None,
n_rows: int | None = None,
n_cols: int | None = None,
):
super().__init__(
chart,
fig=fig,
axes=axes,
n_rows=n_rows,
n_cols=n_cols,
)
self.mean = mean
self.components = components
if multiple is None:
self.factor = factor
else:
warnings.warn(
"The 'multiple' parameter is deprecated, "
"use 'factor' instead.",
DeprecationWarning,
)
self.factor = multiple
@property
def multiple(self) -> float:
warnings.warn(
"The 'multiple' attribute is deprecated, use 'factor' instead.",
DeprecationWarning,
)
return self.factor
@property
def n_subplots(self) -> int:
return len(self.components)
def _plot(
self,
fig: Figure,
axes: Sequence[Axes],
) -> None:
if len(self.mean) > 1:
self.mean = self.mean.mean()
for i, ax in enumerate(axes):
perturbations = self._get_component_perturbations(i)
GraphPlot(fdata=perturbations, axes=ax).plot()
ax.set_title(f"Principal component {i + 1}")
def _get_component_perturbations(self, index: int = 0) -> FData:
"""
Compute the perturbations over the mean of a principal component.
Args:
index: Index of the component for which we want to compute the
perturbations
Returns:
The mean function followed by the positive perturbation and
the negative perturbation.
"""
if not isinstance(self.mean, FData):
raise AttributeError("X must be a FData object")
perturbations = self.mean.copy()
perturbations = perturbations.concatenate(
perturbations[0] + self.factor * self.components[index],
)
return perturbations.concatenate(
perturbations[0] - self.factor * self.components[index],
)
# End of skfda/exploratory/visualization/fpca.py
from __future__ import annotations
from typing import Dict, Sequence, TypeVar
import numpy as np
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from ...representation import FData
from ._baseplot import BasePlot
from ._utils import ColorLike
from .representation import Indexable, _get_color_info
K = TypeVar('K', contravariant=True)
class ParametricPlot(BasePlot):
"""
Parametric Plot visualization.
This class contains the functionality in charge of plotting
two different functions as coordinates, this can be done giving
one FData, with domain 1 and codomain 2, or giving two FData, both
of them with domain 1 and codomain 1.
Args:
fdata1: functional data set used for the graph. If it has
dim_codomain = 1, fdata2 will be needed.
fdata2: optional functional data set, needed if fdata1
has dim_codomain = 1.
chart: figure over which the graphs are plotted or axes
where the graphs are plotted. If None and ax is also
None, the figure is initialized.
fig: figure over which the graphs are plotted in case ax is not
specified. If None and ax is also None, the figure is
initialized.
axes: axes where the graphs are plotted. If None, see param fig.
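Example:
    A minimal usage sketch; the weather dataset fetcher is an assumption
    about the installed scikit-fda package. Its two codomain coordinates
    (temperature and precipitation) are drawn one against the other::

        import skfda

        X, _ = skfda.datasets.fetch_weather(return_X_y=True)
        ParametricPlot(X).plot()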
"""
def __init__(
self,
fdata1: FData,
fdata2: FData | None = None,
chart: Figure | Axes | None = None,
*,
fig: Figure | None = None,
axes: Axes | None = None,
group: Sequence[K] | None = None,
group_colors: Indexable[K, ColorLike] | None = None,
group_names: Indexable[K, str] | None = None,
legend: bool = False,
) -> None:
BasePlot.__init__(
self,
chart,
fig=fig,
axes=axes,
)
self.fdata1 = fdata1
self.fdata2 = fdata2
if self.fdata2 is not None:
self.fd_final = self.fdata1.concatenate(
self.fdata2, as_coordinates=True,
)
else:
self.fd_final = self.fdata1
self.group = group
self.group_names = group_names
self.group_colors = group_colors
self.legend = legend
@property
def n_samples(self) -> int:
return self.fd_final.n_samples
def _plot(
self,
fig: Figure,
axes: Axes,
) -> None:
self.artists = np.zeros((self.n_samples, 1), dtype=Artist)
sample_colors, patches = _get_color_info(
self.fd_final,
self.group,
self.group_names,
self.group_colors,
self.legend,
)
color_dict: Dict[str, ColorLike | None] = {}
if (
self.fd_final.dim_domain == 1
and self.fd_final.dim_codomain == 2
):
ax = axes[0]
for i in range(self.fd_final.n_samples):
if sample_colors is not None:
color_dict["color"] = sample_colors[i]
self.artists[i, 0] = ax.plot(
self.fd_final.data_matrix[i][:, 0].tolist(),
self.fd_final.data_matrix[i][:, 1].tolist(),
**color_dict,
)[0]
else:
raise ValueError(
"Error in data arguments,",
"codomain or domain is not correct.",
)
if self.fd_final.dataset_name is not None:
fig.suptitle(self.fd_final.dataset_name)
if self.fd_final.coordinate_names[0] is None:
ax.set_xlabel("Function 1")
else:
ax.set_xlabel(self.fd_final.coordinate_names[0])
if self.fd_final.coordinate_names[1] is None:
ax.set_ylabel("Function 2")
else:
ax.set_ylabel(self.fd_final.coordinate_names[1])
# End of skfda/exploratory/visualization/_parametric_plot.py
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Sequence, Tuple
import matplotlib.pyplot as plt
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from matplotlib.backend_bases import LocationEvent, MouseEvent
from matplotlib.collections import PathCollection
from matplotlib.colors import ListedColormap
from matplotlib.figure import Figure
from matplotlib.text import Annotation
from ...representation import FData
from ...typing._numpy import NDArrayInt, NDArrayObject
from ._utils import _figure_to_svg, _get_figure_and_axes, _set_figure_layout
class BasePlot(ABC):
"""
BasePlot class.
Attributes:
artists: List of Artist objects corresponding
to every instance of our plot. They will be used to modify
the visualization with interactivity and widgets.
fig: Figure over which the graphs are plotted.
axes: Sequence of axes where the graphs are plotted.
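Example:
    A sketch of the subclass contract (the subclass and data below are
    hypothetical, for illustration only): subclasses forward the
    chart/fig/axes arguments to ``super().__init__``, implement
    ``_plot``, and users trigger drawing through ``plot``::

        class MyPlot(BasePlot):

            def __init__(self, fdata, chart=None, *, fig=None, axes=None):
                super().__init__(chart, fig=fig, axes=axes)
                self.fdata = fdata

            def _plot(self, fig, axes):
                axes[0].plot(
                    self.fdata.grid_points[0],
                    self.fdata.data_matrix[0, ..., 0],
                )

        MyPlot(my_fdata).plot()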
"""
@abstractmethod
def __init__(
self,
chart: Figure | Axes | None = None,
*,
fig: Figure | None = None,
axes: Axes | Sequence[Axes] | None = None,
n_rows: int | None = None,
n_cols: int | None = None,
c: NDArrayInt | None = None,
cmap_bold: ListedColormap | None = None,
x_label: str | None = None,
y_label: str | None = None,
) -> None:
self.artists: NDArrayObject | None = None
self.chart = chart
self.fig = fig
self.axes = axes
self.n_rows = n_rows
self.n_cols = n_cols
self._tag = self._create_annotation()
self.c = c
self.cmap_bold = cmap_bold
self.x_label = x_label
self.y_label = y_label
def _plot(
self,
fig: Figure,
axes: Sequence[Axes],
) -> None:
pass
def plot(
self,
) -> Figure:
"""
Plot the object and its data.
Returns:
Figure: figure object in which the displays and
widgets will be plotted.
"""
fig: Figure | None = getattr(self, "fig_", None)
axes: Sequence[Axes] | None = getattr(self, "axes_", None)
if fig is None:
fig, axes = self._set_figure_and_axes(
self.chart,
fig=self.fig,
axes=self.axes,
)
assert axes is not None
if self.x_label is not None:
axes[0].set_xlabel(self.x_label)
if self.y_label is not None:
axes[0].set_ylabel(self.y_label)
self._plot(fig, axes)
self._hover_event_id = fig.canvas.mpl_connect(
'motion_notify_event',
self.hover,
)
return fig
@property
def dim(self) -> int:
"""Get the number of dimensions for this plot."""
return 2
@property
def n_subplots(self) -> int:
"""Get the number of subplots that this plot uses."""
return 1
@property
def n_samples(self) -> int | None:
"""Get the number of instances that will be used for interactivity."""
return None
def _set_figure_and_axes(
self,
chart: Figure | Axes | None = None,
*,
fig: Figure | None = None,
axes: Axes | Sequence[Axes] | None = None,
) -> Tuple[Figure, Sequence[Axes]]:
fig, axes = _get_figure_and_axes(chart, fig, axes)
fig, axes = _set_figure_layout(
fig=fig,
axes=axes,
dim=self.dim,
n_axes=self.n_subplots,
n_rows=self.n_rows,
n_cols=self.n_cols,
)
self.fig_ = fig
self.axes_ = axes
return fig, axes
def _repr_svg_(self) -> str:
"""Automatically represents the object as an svg when calling it."""
self.fig = self.plot()
plt.close(self.fig)
return _figure_to_svg(self.fig)
def _create_annotation(self) -> Annotation:
tag = Annotation(
"",
xy=(0, 0),
xytext=(20, 20),
textcoords="offset points",
bbox={
"boxstyle": "round",
"fc": "w",
},
arrowprops={
"arrowstyle": "->",
},
annotation_clip=False,
clip_on=False,
)
tag.get_bbox_patch().set_facecolor(color='khaki')
intensity = 0.8
tag.get_bbox_patch().set_alpha(intensity)
return tag
def _update_annotation(
self,
tag: Annotation,
*,
axes: Axes,
sample_number: int,
fdata: FData | None,
position: Tuple[float, float],
) -> None:
"""
Auxiliary method used to update the hovering annotations.
Method used to update the annotations that appear while
hovering a scattered point. The annotations indicate
the index and coordinates of the point hovered.
Args:
tag: Annotation to update.
axes: Axes were the annotation belongs.
sample_number: Number of the current sample.
"""
xdata_graph, ydata_graph = position
tag.xy = (xdata_graph, ydata_graph)
sample_name = (
fdata.sample_names[sample_number]
if fdata is not None
else None
)
sample_descr = f" ({sample_name})" if sample_name is not None else ""
text = (
f"{sample_number}{sample_descr}: "
f"({xdata_graph:.3g}, {ydata_graph:.3g})"
)
tag.set_text(text)
x_axis = axes.get_xlim()
y_axis = axes.get_ylim()
label_xpos = -60
label_ypos = 20
if (xdata_graph - x_axis[0]) > (x_axis[1] - xdata_graph):
label_xpos = -80
if (ydata_graph - y_axis[0]) > (y_axis[1] - ydata_graph):
label_ypos = -20
if tag.figure:
tag.remove()
tag.figure = None
axes.add_artist(tag)
tag.set_transform(axes.transData)
tag.set_position((label_xpos, label_ypos))
def _sample_artist_from_event(
self,
event: LocationEvent,
) -> Tuple[int, FData | None, Artist] | None:
"""Get the number, fdata and artist under a location event."""
if self.artists is None:
return None
try:
i = self.axes_.index(event.inaxes)
except ValueError:
return None
for j, artist in enumerate(self.artists[:, i]):
if not isinstance(artist, PathCollection):
return None
if artist.contains(event)[0]:
return j, getattr(self, "fdata", None), artist
return None
def hover(self, event: MouseEvent) -> None:
"""
Activate the annotation when hovering a point.
Callback method that activates the annotation when hovering
a specific point in a graph. The annotation is a description
of the point containing its coordinates.
Args:
event: event object containing the artist of the point
hovered.
"""
found_artist = self._sample_artist_from_event(event)
if event.inaxes is not None and found_artist is not None:
sample_number, fdata, artist = found_artist
self._update_annotation(
self._tag,
axes=event.inaxes,
sample_number=sample_number,
fdata=fdata,
position=artist.get_offsets()[0],
)
self._tag.set_visible(True)
self.fig_.canvas.draw_idle()
elif self._tag.get_visible():
self._tag.set_visible(False)
self.fig_.canvas.draw_idle()
# End of skfda/exploratory/visualization/_baseplot.py
from __future__ import annotations
import io
import math
import re
from itertools import repeat
from typing import Sequence, Tuple, TypeVar, Union
import matplotlib.backends.backend_svg
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from typing_extensions import Protocol, TypeAlias
from ...representation._functional_data import FData
non_close_text = '[^>]*?'
svg_width_regex = re.compile(
f'(<svg {non_close_text}width="){non_close_text}("{non_close_text}>)',
)
svg_width_replacement = r'\g<1>100%\g<2>'
svg_height_regex = re.compile(
f'(<svg {non_close_text})height="{non_close_text}"({non_close_text}>)',
)
svg_height_replacement = r'\g<1>\g<2>'
ColorLike: TypeAlias = Union[
Tuple[float, float, float],
Tuple[float, float, float, float],
str,
Sequence[float],
]
K = TypeVar('K', contravariant=True)
V = TypeVar('V', covariant=True)
class Indexable(Protocol[K, V]):
"""Class Indexable used to type _get_color_info."""
def __getitem__(self, __key: K) -> V:
pass
def __len__(self) -> int:
pass
def _create_figure() -> Figure:
"""Create figure using the default backend."""
return plt.figure()
def _figure_to_svg(figure: Figure) -> str:
"""Return the SVG representation of a figure."""
old_canvas = figure.canvas
matplotlib.backends.backend_svg.FigureCanvas(figure)
output = io.BytesIO()
figure.savefig(output, format='svg')
figure.set_canvas(old_canvas)
data = output.getvalue()
decoded_data = data.decode('utf-8')
new_data = svg_width_regex.sub(
svg_width_replacement,
decoded_data,
count=1,
)
return svg_height_regex.sub(
svg_height_replacement,
new_data,
count=1,
)
def _get_figure_and_axes(
chart: Figure | Axes | Sequence[Axes] | None = None,
fig: Figure | None = None,
axes: Axes | Sequence[Axes] | None = None,
) -> Tuple[Figure, Sequence[Axes]]:
"""Obtain the figure and axes from the arguments."""
num_defined = sum(e is not None for e in (chart, fig, axes))
if num_defined > 1:
raise ValueError(
"Only one of chart, fig and axes parameters"
"can be passed as an argument.",
)
# Parse chart argument
if chart is not None:
if isinstance(chart, matplotlib.figure.Figure):
fig = chart
else:
axes = chart
if fig is None and axes is None:
new_fig = _create_figure()
new_axes = []
elif fig is not None:
new_fig = fig
new_axes = fig.axes
else:
assert axes is not None
if isinstance(axes, Axes):
axes = [axes]
new_fig = axes[0].figure
new_axes = axes
return new_fig, new_axes
def _get_axes_shape(
n_axes: int,
n_rows: int | None = None,
n_cols: int | None = None,
) -> Tuple[int, int]:
"""Get the number of rows and columns of the subplots."""
if (
(n_rows is not None and n_cols is not None)
and ((n_rows * n_cols) < n_axes)
):
raise ValueError(
f"The number of rows ({n_rows}) multiplied by "
f"the number of columns ({n_cols}) "
f"is less than the number of required "
f"axes ({n_axes})",
)
if n_rows is None and n_cols is None:
new_n_cols = int(math.ceil(math.sqrt(n_axes)))
new_n_rows = int(math.ceil(n_axes / new_n_cols))
elif n_rows is None and n_cols is not None:
new_n_cols = n_cols
new_n_rows = int(math.ceil(n_axes / n_cols))
elif n_cols is None and n_rows is not None:
new_n_cols = int(math.ceil(n_axes / n_rows))
new_n_rows = n_rows
else:
# Both n_rows and n_cols were given (and already validated above)
new_n_rows = n_rows
new_n_cols = n_cols
return new_n_rows, new_n_cols
def _projection_from_dim(dim: int) -> str:
if dim == 2:
return 'rectilinear'
elif dim == 3:
return '3d'
raise NotImplementedError(
"Only bidimensional or tridimensional plots are supported.",
)
def _set_figure_layout(
fig: Figure,
axes: Sequence[Axes],
dim: int | Sequence[int] = 2,
n_axes: int = 1,
n_rows: int | None = None,
n_cols: int | None = None,
) -> Tuple[Figure, Sequence[Axes]]:
"""
Set the figure axes for plotting.
Args:
fig: Figure over with the graphs are plotted in case ax is not
specified.
axes: Axis over where the graphs are plotted.
dim: Dimension of the plot. Either 2 for a 2D plot or 3 for a 3D plot.
n_axes: Number of subplots.
n_rows: Designates the number of rows of the figure to plot the
different dimensions of the image. Can only be passed if no axes
are specified.
n_cols: Designates the number of columns of the figure to plot the
different dimensions of the image. Can only be passed if no axes
are specified.
Returns:
(tuple): tuple containing:
* fig (figure): figure object in which the graphs are plotted.
* axes (list): axes in which the graphs are plotted.
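Example:
    A sketch of how this helper is combined with ``_get_figure_and_axes``
    inside this module (see ``BasePlot._set_figure_and_axes``)::

        fig, axes = _get_figure_and_axes(None, None, None)
        fig, axes = _set_figure_layout(fig, axes, dim=2, n_axes=3, n_cols=3)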
"""
if len(axes) not in {0, n_axes}:
raise ValueError(
f"The number of axes ({len(axes)}) must be 0 (to create them)"
f" or equal to the number of axes needed "
f"({n_axes} in this case).",
)
if len(axes) != 0 and (n_rows is not None or n_cols is not None):
raise ValueError(
"The number of columns and/or number of rows of "
"the figure, in which each dimension of the "
"image is plotted, can only be customized in case "
"that no axes are provided.",
)
if len(axes) == 0:
# Create the axes
n_rows, n_cols = _get_axes_shape(n_axes, n_rows, n_cols)
for i in range(n_rows):
for j in range(n_cols):
subplot_index = i * n_cols + j
if subplot_index < n_axes:
plot_dim = (
dim if isinstance(dim, int) else dim[subplot_index]
)
fig.add_subplot(
n_rows,
n_cols,
subplot_index + 1,
projection=_projection_from_dim(plot_dim),
)
axes = fig.axes
else:
# Check that the projections are right
projections = (
repeat(_projection_from_dim(dim))
if isinstance(dim, int)
else (_projection_from_dim(d) for d in dim)
)
for a, proj in zip(axes, projections):
if a.name != proj:
raise ValueError(
f"The projection of the axes is {a.name} "
f"but should be {proj}",
)
return fig, axes
def _set_labels(
fdata: FData,
fig: Figure,
axes: Sequence[Axes],
patches: Sequence[matplotlib.patches.Patch] | None = None,
) -> None:
"""Set labels if any.
Args:
fdata: functional data object.
fig: figure object containing the axes that implement
set_xlabel and set_ylabel, and set_zlabel in case
of a 3d projection.
axes: axes objects that implement set_xlabel and set_ylabel,
and set_zlabel in case of a 3d projection; used if
fig is None.
patches: objects used to generate each entry in the legend.
"""
# Dataset name
if fdata.dataset_name is not None:
fig.suptitle(fdata.dataset_name)
# Legend
if patches is not None:
if fig is not None:
fig.legend(handles=patches)
else:
axes[0].legend(handles=patches)
assert len(axes) >= fdata.dim_codomain
# Axis labels
if axes[0].name == '3d':
for i, a in enumerate(axes):
if fdata.argument_names[0] is not None:
a.set_xlabel(fdata.argument_names[0])
if fdata.argument_names[1] is not None:
a.set_ylabel(fdata.argument_names[1])
if fdata.coordinate_names[i] is not None:
a.set_zlabel(fdata.coordinate_names[i])
else:
for i in range(fdata.dim_codomain):
if fdata.argument_names[0] is not None:
axes[i].set_xlabel(fdata.argument_names[0])
if fdata.coordinate_names[i] is not None:
axes[i].set_ylabel(fdata.coordinate_names[i])
def _change_luminosity(color: ColorLike, amount: float = 0.5) -> ColorLike:
"""
Change the given color luminosity by the given amount.
Input can be matplotlib color string, hex string, or RGB tuple.
Note:
Based on https://stackoverflow.com/a/49601444/2455333
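Example:
    A small sketch of the convenience wrappers defined below::

        darker = _darken("skyblue", 0.5)
        lighter = _lighten("#ff0000", 0.25)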
"""
import colorsys
import matplotlib.colors as mc
try:
c = mc.cnames[color]
except (KeyError, TypeError):
# Not a named color (e.g. a hex string or an RGB tuple)
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
intensity = (amount - 0.5) * 2
up = intensity > 0
intensity = abs(intensity)
lightness = c[1]
if up:
new_lightness = lightness + intensity * (1 - lightness)
else:
new_lightness = lightness - intensity * lightness
return colorsys.hls_to_rgb(c[0], new_lightness, c[2])
def _darken(color: ColorLike, amount: float = 0) -> ColorLike:
return _change_luminosity(color, 0.5 - amount / 2)
def _lighten(color: ColorLike, amount: float = 0) -> ColorLike:
return _change_luminosity(color, 0.5 + amount / 2)
# End of skfda/exploratory/visualization/_utils.py