from typing import Optional, Sequence, Union # needed for typehints_formatter hack
from scico.typing import ( # needed for typehints_formatter hack
ArrayIndex,
AxisIndex,
DType,
)
# An explanation for this nasty hack, the primary purpose of which is to avoid
# the very long definition of the scico.typing.DType appearing explicitly in the
# docs. This is handled correctly by sphinx.ext.autodoc in some circumstances,
# but only when sphinx_autodoc_typehints is not included in the extension list,
# and the appearance of the type hints (e.g. whether links to definitions are
# included) seems to depend on whether "from __future__ import annotations" was
# used in the module being documented, which is not ideal from a consistency
# perspective. (It's also worth noting that sphinx.ext.autodoc provides some
# configurability for type aliases via the autodoc_type_aliases sphinx
# configuration option.) The alternative is to include sphinx_autodoc_typehints,
# which gives a consistent appearance to the type hints, but the
# autodoc_type_aliases configuration option is ignored, and type aliases are
# always expanded. This hack avoids expansion for the type aliases with the
# longest definitions by defining a custom function for formatting the
# type hints, using an option provided by sphinx_autodoc_typehints. For
# more information, see
# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_type_aliases
# https://github.com/tox-dev/sphinx-autodoc-typehints/issues/284
# https://github.com/tox-dev/sphinx-autodoc-typehints/blob/main/README.md
def typehints_formatter_function(annotation, config):
markup = {
DType: ":obj:`~scico.typing.DType`",
# Compound types involving DType must be added here to avoid their DType
# component being expanded in the docs.
Optional[DType]: ":obj:`~typing.Optional`\ [\ :obj:`~scico.typing.DType`\ ]",
Union[DType, Sequence[DType]]: (
":obj:`~typing.Union`\ [\ :obj:`~scico.typing.DType`\ , "
":obj:`~typing.Sequence`\ [\ :obj:`~scico.typing.DType`\ ]]"
),
AxisIndex: ":obj:`~scico.typing.AxisIndex`",
ArrayIndex: ":obj:`~scico.typing.ArrayIndex`",
}
if annotation in markup:
return markup[annotation]
else:
return None
typehints_formatter = typehints_formatter_function

/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/conf/85-dtype_typehints.py
import re
from inspect import getmembers, isfunction
# Rewrite module names for certain functions imported into scico.numpy so that they are
# included in the docs for that module. While a bit messy to do so here rather than in a
# function run via app.connect, it is necessary (for some yet to be identified reason)
# to do it here to ensure that the relevant API docs include a table of functions.
import scico.numpy
for module in (scico.numpy, scico.numpy.fft, scico.numpy.linalg, scico.numpy.testing):
for _, f in getmembers(module, isfunction):
# Rewrite module name so that function is included in docs
f.__module__ = module.__name__
f.__doc__ = re.sub(
r"^:func:`([\w_]+)` wrapped to operate",
r":obj:`jax.numpy.\1` wrapped to operate",
str(f.__doc__),
flags=re.M,
)
modname = ".".join(module.__name__.split(".")[1:])
f.__doc__ = re.sub(
r"^LAX-backend implementation of :func:`([\w_]+)`.",
r"LAX-backend implementation of :obj:`%s.\1`." % modname,
str(f.__doc__),
flags=re.M,
)
# Improve formatting of jax.numpy warning
f.__doc__ = re.sub(
r"^\*\*\* This function is not yet implemented by jax.numpy, and will "
"raise NotImplementedError \*\*\*",
"**WARNING**: This function is not yet implemented by jax.numpy, "
" and will raise :exc:`NotImplementedError`.",
f.__doc__,
flags=re.M,
)
# Remove cross-references to section NEP35
f.__doc__ = re.sub(":ref:`NEP 35 <NEP35>`", "NEP 35", f.__doc__, re.M)
# Remove cross-reference to numpydoc style references section
f.__doc__ = re.sub(r" \[(\d+)\]_", "", f.__doc__, flags=re.M)
# Remove entire numpydoc references section
f.__doc__ = re.sub(r"References\n----------\n.*\n", "", f.__doc__, flags=re.DOTALL)
# Remove spurious two-space indentation of entire docstring
scico.numpy.vectorize.__doc__ = re.sub("^ ", "", scico.numpy.vectorize.__doc__, flags=re.M)
# Fix various docstring formatting errors
scico.numpy.testing.break_cycles.__doc__ = re.sub(
"calling gc.collect$",
"calling gc.collect.\n\n",
scico.numpy.testing.break_cycles.__doc__,
flags=re.M,
)
scico.numpy.testing.break_cycles.__doc__ = re.sub(
" __del__\) inside", "__del__\) inside", scico.numpy.testing.break_cycles.__doc__, flags=re.M
)
scico.numpy.testing.assert_raises_regex.__doc__ = re.sub(
"\*args,\n.*\*\*kwargs",
"*args, **kwargs",
scico.numpy.testing.assert_raises_regex.__doc__,
flags=re.M,
)
scico.numpy.BlockArray.global_shards.__doc__ = re.sub(
"`Shard`s", "`Shard`\ s", scico.numpy.BlockArray.global_shards.__doc__, flags=re.M
)

/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/conf/80-scico_numpy.py
Operators
=========
An operator is a map from :math:`\mathbb{R}^n` or :math:`\mathbb{C}^n`
to :math:`\mathbb{R}^m` or :math:`\mathbb{C}^m`. In SCICO, operators
are primarily used to represent imaging systems and provide
regularization. SCICO operators are represented by instances of the
:class:`.Operator` class.
SCICO :class:`.Operator` objects extend the notion of "shape" and
"size" from the usual NumPy ``ndarray`` class. Each
:class:`.Operator` object has an ``input_shape`` and ``output_shape``;
these shapes can be either tuples or a tuple of tuples (in the case of
a :class:`.BlockArray`). The ``matrix_shape`` attribute describes the
shape of the :class:`.LinearOperator` if it were to act on vectorized,
or flattened, inputs.
For example, consider a two-dimensional array :math:`\mb{x} \in
\mathbb{R}^{n \times m}`. We compute the discrete differences of
:math:`\mb{x}` in the horizontal and vertical directions, generating
two new arrays: :math:`\mb{x}_h \in \mathbb{R}^{n \times (m-1)}` and
:math:`\mb{x}_v \in \mathbb{R}^{(n-1) \times m}`. We represent this
linear operator by :math:`\mb{A} : \mathbb{R}^{n \times m} \to
\mathbb{R}^{n \times (m-1)} \otimes \mathbb{R}^{(n-1) \times m}`. In
SCICO, this linear operator will return a :class:`.BlockArray` with
the horizontal and vertical differences stored as blocks. Letting
:math:`\mb{y} = \mb{A} \mb{x}`, we have ``y.shape = ((n, m-1), (n-1, m))`` and
::
    A.input_shape = (n, m)
    A.output_shape = ((n, m-1), (n-1, m))
    A.shape = (((n, m-1), (n-1, m)), (n, m))  # (output_shape, input_shape)
    A.input_size = n*m
    A.output_size = n*(m-1) + (n-1)*m
    A.matrix_shape = (n*(m-1) + (n-1)*m, n*m)  # (output_size, input_size)
Operator Calculus
-----------------
SCICO supports a variety of operator calculus rules, allowing new
operators to be defined in terms of old ones. The following table
summarizes the available operations.
+----------------+-----------------+
| Operation | Result |
+----------------+-----------------+
| ``(A+B)(x)`` | ``A(x) + B(x)`` |
+----------------+-----------------+
| ``(A-B)(x)`` | ``A(x) - B(x)`` |
+----------------+-----------------+
| ``(c * A)(x)`` | ``c * A(x)`` |
+----------------+-----------------+
| ``(A/c)(x)`` | ``A(x)/c`` |
+----------------+-----------------+
| ``(-A)(x)`` | ``-A(x)`` |
+----------------+-----------------+
| ``A(B)(x)`` | ``A(B(x))`` |
+----------------+-----------------+
| ``A(B)`` | ``Operator`` |
+----------------+-----------------+
Defining A New Operator
-----------------------
To define a new operator, pass a callable to the :class:`.Operator`
constructor:
::
    A = Operator(input_shape=(32,), eval_fn=lambda x: 2 * x)
Or use subclassing:
::
>>> from scico.operator import Operator
>>> class MyOp(Operator):
...
... def _eval(self, x):
... return 2 * x
>>> A = MyOp(input_shape=(32,))
At a minimum, the ``_eval`` function must be overridden. If either
``output_shape`` or ``output_dtype`` are unspecified, they are
determined by evaluating the operator on an input of appropriate shape
and dtype.
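A small sketch of this inference (the use of ``snp.concatenate`` as the
mapping is purely illustrative):

::

    import scico.numpy as snp
    from scico.operator import Operator

    A = Operator(input_shape=(32,), eval_fn=lambda x: snp.concatenate((x, x)))
    # output_shape and output_dtype were inferred by evaluating the operator once
    assert A.output_shape == (64,)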
Linear Operators
================
Linear operators are those for which
.. math::
H(a \mb{x} + b \mb{y}) = a H(\mb{x}) + b H(\mb{y}) \;.
SCICO represents linear operators as instances of the class
:class:`.LinearOperator`. While finite-dimensional linear operators
can always be associated with a matrix, it is often useful to
represent them in a matrix-free manner. Most of SCICO's linear
operators are implemented matrix-free.
Using A LinearOperator
----------------------
We implement two ways to evaluate a :class:`.LinearOperator`. The
first is using standard callable syntax: ``A(x)``. The second mimics
the NumPy matrix multiplication syntax: ``A @ x``. Both methods
perform shape and type checks to validate the input before ultimately
either calling ``A._eval`` or generating a new :class:`.LinearOperator`.
For linear operators that map real-valued inputs to real-valued
outputs, there are two ways to apply the adjoint: ``A.adj(y)`` and
``A.T @ y``.
For complex-valued linear operators, there are three ways to apply the
adjoint ``A.adj(y)``, ``A.H @ y``, and ``A.conj().T @ y``. Note that
in this case, ``A.T`` returns the non-conjugated transpose of the
:class:`.LinearOperator`.
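As a rough sketch (the use of :class:`.MatrixOperator` and the dtypes below
are illustrative assumptions), the alternative adjoint syntaxes can be
compared as follows

::

    import scico.numpy as snp
    from scico.linop import MatrixOperator

    A = MatrixOperator(snp.full((3, 4), 1 + 1j))  # complex-valued operator
    y = snp.ones(3, dtype=snp.complex64)

    u = A.adj(y)        # adjoint via method
    v = A.H @ y         # adjoint via conjugate transpose
    w = A.conj().T @ y  # adjoint via explicit conjugation and transposition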
While the cost of evaluating the linear operator is virtually
identical for ``A(x)`` and ``A @ x``, the ``A.H`` and ``A.conj().T``
methods are somewhat slower, especially the latter. This is because
two intermediate linear operators must be created before the function
is evaluated. Evaluating ``A.conj().T @ y`` is equivalent to:
::
def f(y):
B = A.conj() # New LinearOperator #1
C = B.T # New LinearOperator #2
return C @ y
**Note**: the speed differences between these methods vanish if
applied inside of a jit-ed function. For instance:
::
f = jax.jit(lambda x: A.conj().T @ x)
+------------------+-----------------+
| Public Method | Private Method |
+------------------+-----------------+
| ``__call__`` | ``._eval`` |
+------------------+-----------------+
| ``adj`` | ``._adj`` |
+------------------+-----------------+
| ``gram`` | ``._gram`` |
+------------------+-----------------+
The public methods perform shape and type checking to validate the
input before either calling the corresponding private method or
returning a composite LinearOperator.
Linear Operator Calculus
------------------------
SCICO supports several linear operator calculus rules.
Given
``A`` and ``B`` of class :class:`.LinearOperator` and of appropriate shape,
``x`` an array of appropriate shape,
``c`` a scalar, and
``O`` an :class:`.Operator`,
we have
+----------------+----------------------------+
| Operation | Result |
+----------------+----------------------------+
| ``(A+B)(x)`` | ``A(x) + B(x)`` |
+----------------+----------------------------+
| ``(A-B)(x)`` | ``A(x) - B(x)`` |
+----------------+----------------------------+
| ``(c * A)(x)`` | ``c * A(x)`` |
+----------------+----------------------------+
| ``(A/c)(x)`` | ``A(x)/c`` |
+----------------+----------------------------+
| ``(-A)(x)`` | ``-A(x)`` |
+----------------+----------------------------+
| ``(A@B)(x)`` | ``A@B@x`` |
+----------------+----------------------------+
| ``A @ B`` | ``ComposedLinearOperator`` |
+----------------+----------------------------+
| ``A @ O`` | ``Operator`` |
+----------------+----------------------------+
| ``O(A)`` | ``Operator`` |
+----------------+----------------------------+
Defining A New Linear Operator
------------------------------
To define a new linear operator, pass a callable to the
:class:`.LinearOperator` constructor
::
>>> from scico.linop import LinearOperator
>>> A = LinearOperator(input_shape=(32,),
... eval_fn = lambda x: 2 * x)
Or, use subclassing:
::
>>> class MyLinearOperator(LinearOperator):
... def _eval(self, x):
... return 2 * x
>>> A = MyLinearOperator(input_shape=(32,))
At a minimum, the ``_eval`` method must be overridden. If the
``_adj`` method is not overridden, the adjoint is determined using
:func:`scico.linear_adjoint`. If either ``output_shape`` or
``output_dtype`` are unspecified, they are determined by evaluating
the operator on an input of appropriate shape and dtype.
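A minimal sketch of a subclass that supplies its own adjoint (the scaling
operator below is purely illustrative):

::

    class Scaling(LinearOperator):
        def _eval(self, x):
            return 2 * x

        def _adj(self, y):
            # The adjoint of scaling by a real scalar is scaling by the same scalar.
            return 2 * y

    A = Scaling(input_shape=(32,))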
🔪 Sharp Edges 🔪
------------------
Strict Types in Adjoint
^^^^^^^^^^^^^^^^^^^^^^^
SCICO silently promotes real types to complex types in forward
application, but enforces strict type checking in the adjoint. This
is due to the strict type-safe nature of jax adjoints.
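A rough illustration of this asymmetry (the exact exception raised is not
specified here and depends on the operator involved):

::

    import scico.numpy as snp
    from scico import linop

    A = linop.MatrixOperator(snp.ones((3, 4)))  # real-valued operator
    A(snp.ones(4, dtype=snp.complex64))         # forward: complex input handled via silent promotion
    A.adj(snp.ones(3, dtype=snp.complex64))     # adjoint: strict type checking; expected to fail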
LinearOperators From External Code
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
External code may be wrapped as a subclass of :class:`.Operator` or
:class:`.LinearOperator` and used in SCICO optimization routines;
however, this process can be complicated and error-prone. As a
starting point, look at the source for
:class:`.radon_svmbir.TomographicProjector` or
:class:`.radon_astra.TomographicProjector` and the JAX documentation
for the `vector-jacobian product
<https://jax.readthedocs.io/en/latest/notebooks/autodiff_cookbook.html#vector-jacobian-products-vjps-aka-reverse-mode-autodiff>`_
and `custom VJP rules
<https://jax.readthedocs.io/en/latest/notebooks/Custom_derivative_rules_for_Python_code.html>`_.
/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/include/operator.rst
.. _blockarray_class:
BlockArray
==========
.. testsetup::
>>> import scico
>>> import scico.numpy as snp
>>> from scico.numpy import BlockArray
>>> import numpy as np
>>> import jax.numpy
The class :class:`.BlockArray` provides a way to combine arrays of
different shapes into a single object for use with other SCICO classes.
A :class:`.BlockArray` consists of a list of :class:`jax.Array` objects,
which we refer to as blocks. A :class:`.BlockArray` differs from a list in
that, whenever possible, :class:`.BlockArray` properties and methods
(including unary and binary operators like +, -, \*, ...) automatically
map along the blocks, returning another :class:`.BlockArray` or tuple as
appropriate. For example,
::
>>> x = snp.blockarray((
... [[1, 3, 7],
... [2, 2, 1]],
... [2, 4, 8]
... ))
>>> x.shape # returns tuple
((2, 3), (3,))
>>> x * 2 # returns BlockArray # doctest: +ELLIPSIS
BlockArray([...Array([[ 2, 6, 14],
[ 4, 4, 2]], dtype=...), ...Array([ 4, 8, 16], dtype=...)])
>>> y = snp.blockarray((
... [[.2],
... [.3]],
... [.4]
... ))
>>> x + y # returns BlockArray # doctest: +ELLIPSIS
BlockArray([...Array([[1.2, 3.2, 7.2],
[2.3, 2.3, 1.3]], dtype=...), ...Array([2.4, 4.4, 8.4], dtype=...)])
.. _numpy_functions_blockarray:
NumPy and SciPy Functions
-------------------------
:mod:`scico.numpy`, :mod:`scico.numpy.testing`, and
:mod:`scico.scipy.special` provide wrappers around :mod:`jax.numpy`,
:mod:`numpy.testing` and :mod:`jax.scipy.special` where many of the
functions have been extended to work with instances of :class:`.BlockArray`.
In particular:
* When a tuple of tuples is passed as the `shape`
  argument to an array creation routine, a :class:`.BlockArray` is created
  (see the sketch following this list).
* When a :class:`.BlockArray` is passed to a reduction function, the blocks are
ravelled (i.e., reshaped to be 1D) and concatenated before the reduction
is applied. This behavior may be prevented by passing the `axis`
argument, in which case the function is mapped over the blocks.
* When one or more :class:`.BlockArray` instances are passed to a mathematical
function that is not a reduction, the function is mapped over
(corresponding) blocks.
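A small sketch of the first item above:

::

    x = snp.zeros(((2, 3), (4,)))  # tuple of tuples as shape -> BlockArray
    x.shape                        # ((2, 3), (4,))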
For a list of array creation routines, see
::
>>> scico.numpy.creation_routines # doctest: +ELLIPSIS
('empty', ...)
For a list of reduction functions, see
::
>>> scico.numpy.reduction_functions # doctest: +ELLIPSIS
('sum', ...)
For lists of the remaining wrapped functions, see
::
>>> scico.numpy.mathematical_functions # doctest: +ELLIPSIS
('sin', ...)
>>> scico.numpy.testing_functions # doctest: +ELLIPSIS
('testing.assert_allclose', ...)
>>> import scico.scipy
>>> scico.scipy.special.functions # doctest: +ELLIPSIS
('betainc', ...)
Note that:
* Both :func:`scico.numpy.ravel` and :meth:`.BlockArray.ravel` return a
:class:`.BlockArray` with ravelled blocks rather than the concatenation
of these blocks as a single array.
* The functional and method versions of the "same" function differ in their
behavior, with the method version only applying the reduction within each
block, and the function version applying the reduction across all blocks.
  For example, :func:`scico.numpy.sum` applied to a :class:`.BlockArray` with
  two blocks returns a scalar value, while :meth:`.BlockArray.sum` returns a
  :class:`.BlockArray` with two scalar blocks, as illustrated below.
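A brief sketch of this difference (the values in the comments follow from the
behavior described above):

::

    x = snp.blockarray((snp.ones((2, 3)), snp.ones((4,))))
    snp.sum(x)  # scalar 10.0: reduction applied across all blocks
    x.sum()     # BlockArray with two scalar blocks, 6.0 and 4.0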
Motivating Example
------------------
The discrete differences of a two-dimensional array, :math:`\mb{x} \in
\mbb{R}^{n \times m}`, in the horizontal and vertical directions can
be represented by the arrays :math:`\mb{x}_h \in \mbb{R}^{n \times
(m-1)}` and :math:`\mb{x}_v \in \mbb{R}^{(n-1) \times m}`
respectively. While it is usually useful to consider the output of a
difference operator as a single entity, we cannot combine these two
arrays into a single array since they have different shapes. We could
vectorize each array and concatenate the resulting vectors, leading to
:math:`\mb{\bar{x}} \in \mbb{R}^{n(m-1) + m(n-1)}`, which can be
stored as a one-dimensional array, but this makes it hard to access
the individual components :math:`\mb{x}_h` and :math:`\mb{x}_v`.
Instead, we can construct a :class:`.BlockArray`, :math:`\mb{x}_B =
[\mb{x}_h, \mb{x}_v]`:
::
>>> n = 32
>>> m = 16
>>> x_h, key = scico.random.randn((n, m-1))
>>> x_v, _ = scico.random.randn((n-1, m), key=key)
# Form the blockarray
>>> x_B = snp.blockarray([x_h, x_v])
# The blockarray shape is a tuple of tuples
>>> x_B.shape
((32, 15), (31, 16))
# Each block component can be easily accessed
>>> x_B[0].shape
(32, 15)
>>> x_B[1].shape
(31, 16)
Constructing a BlockArray
-------------------------
The recommended way to construct a :class:`.BlockArray` is by using the
:func:`~scico.numpy.blockarray` function.
::
>>> import scico.numpy as snp
>>> x0, key = scico.random.randn((32, 32))
>>> x1, _ = scico.random.randn((16,), key=key)
>>> X = snp.blockarray((x0, x1))
>>> X.shape
((32, 32), (16,))
>>> X.size
(1024, 16)
>>> len(X)
2
While :func:`~scico.numpy.blockarray` will accept arguments of type
:class:`~numpy.ndarray` or :class:`~jax.Array`, arguments of type :class:`~numpy.ndarray` will be converted to :class:`~jax.Array` type.
Operating on a BlockArray
-------------------------
.. _blockarray_indexing:
Indexing
^^^^^^^^
:class:`.BlockArray` indexing works just like indexing a list.
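For example (a small sketch):

::

    x = snp.blockarray((snp.zeros((2, 3)), snp.ones((4,))))
    x[0]        # first block, a (2, 3) array
    x[-1]       # last block, a (4,) array
    x[0][1, 2]  # elements within a block are indexed on the block itself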
Multiplication Between BlockArray and LinearOperator
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The :class:`.Operator` and :class:`.LinearOperator` classes are designed
to work on instances of :class:`.BlockArray` in addition to instances of
:obj:`~jax.Array`. For example
::
>>> x, key = scico.random.randn((3, 4))
>>> A_1 = scico.linop.Identity(x.shape)
>>> A_1.shape # array -> array
((3, 4), (3, 4))
>>> A_2 = scico.linop.FiniteDifference(x.shape)
>>> A_2.shape # array -> BlockArray
(((2, 4), (3, 3)), (3, 4))
>>> diag = snp.blockarray([np.array(1.0), np.array(2.0)])
>>> A_3 = scico.linop.Diagonal(diag, input_shape=(A_2.output_shape))
>>> A_3.shape # BlockArray -> BlockArray
(((2, 4), (3, 3)), ((2, 4), (3, 3)))
/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/include/blockarray.rst
Learned Models
==============
In SCICO, neural network models are used to represent imaging problems and provide different modes of data-driven regularization.
The models are implemented in `Flax <https://flax.readthedocs.io/>`_, and constitute a representative sample of frequently used networks.
FlaxMap
-------
SCICO interfaces with the implemented models via :class:`.FlaxMap`. This provides standardized access to all trained models via the model definition and the learned parameters. Further specialized functionality, such as learned denoisers, is built on top of :class:`.FlaxMap`. The specific models that have been implemented are described below.
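A schematic sketch of this usage pattern (the :class:`.DnCNNNet` constructor
arguments, and the ``variables`` and ``y_noisy`` placeholders, are illustrative
assumptions rather than a complete working example):

::

    from scico import flax as sflax

    model = sflax.DnCNNNet(depth=17, channels=1, num_filters=64)
    fmap = sflax.FlaxMap(model, variables)  # variables holds the trained parameters
    x_denoised = fmap(y_noisy)              # apply the trained model like a function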
DnCNN
-----
The denoising convolutional neural network (DnCNN) model :cite:`zhang-2017-dncnn`, implemented as :class:`.DnCNNNet`, is used to denoise images that have been corrupted with additive Gaussian noise.
ODP
---
The unrolled optimization with deep priors (ODP) :cite:`diamond-2018-odp`, implemented as :class:`.ODPNet`, is used to solve inverse problems in imaging by adapting classical iterative methods into an end-to-end framework that incorporates deep networks as well as knowledge of the image formation model.
The framework aims to solve the optimization problem
.. math::
\argmin_{\mb{x}} \; f(A \mb{x}, \mb{y}) + r(\mb{x}) \;,
where :math:`A` represents a linear forward model and :math:`r` a regularization function encoding prior information, by unrolling the iterative solution method into a network where each iteration corresponds to a different stage in the ODP network. Different iterative solutions produce different unrolled optimization algorithms which, in turn, produce different ODP networks. The ones implemented in SCICO are described below.
Proximal Map
^^^^^^^^^^^^
This algorithm corresponds to solving
.. math::
:label: eq:odp_prox
\argmin_{\mb{x}} \; \alpha_k \, f(A \mb{x}, \mb{y}) + \frac{1}{2} \| \mb{x} - \mb{x}^k - \mb{x}^{k+1/2} \|_2^2 \;,
with :math:`k` corresponding to the index of the iteration, which translates to an index of the stage of the network, :math:`f(A \mb{x}, \mb{y})` a fidelity term, usually an :math:`\ell_2` norm, and :math:`\mb{x}^{k+1/2}` a regularization representing :math:`\mathrm{prox}_r (\mb{x}^k)` and usually implemented as a convolutional neural network (CNN). This proximal map representation is used when minimization problem :eq:`eq:odp_prox` can be solved in a computationally efficient manner.
:class:`.ODPProxDnBlock` uses this formulation to solve a denoising problem, which, according to :cite:`diamond-2018-odp`, can be solved by
.. math::
\mb{x}^{k+1} = (\alpha_k \, \mb{y} + \mb{x}^k + \mb{x}^{k+1/2}) \, / \, (\alpha_k + 1) \;,
where :math:`A` corresponds to the identity operator and is therefore omitted, :math:`\mb{y}` is the noisy signal, :math:`\alpha_k > 0` is a learned stage-wise parameter weighting the contribution of the fidelity term and :math:`\mb{x}^k + \mb{x}^{k+1/2}` is the regularization, usually represented by a residual CNN.
:class:`.ODPProxDblrBlock` uses this formulation to solve a deblurring problem, which, according to :cite:`diamond-2018-odp`, can be solved by
.. math::
\mb{x}^{k+1} = \mathcal{F}^{-1} \mathrm{diag} (\alpha_k | \mathcal{F}(K)|^2 + 1 )^{-1} \mathcal{F} \, (\alpha_k K^T * \mb{y} + \mb{x}^k + \mb{x}^{k+1/2}) \;,
where :math:`A` is the blurring operator, :math:`K` is the blurring kernel, :math:`\mb{y}` is the blurred signal, :math:`\mathcal{F}` is the DFT, :math:`\alpha_k > 0` is a learned stage-wise parameter weighting the contribution of the fidelity term and :math:`\mb{x}^k + \mb{x}^{k+1/2}` is the regularization represented by a residual CNN.
Gradient Descent
^^^^^^^^^^^^^^^^
When the solution of the optimization problem in :eq:`eq:odp_prox` can not be simply represented by an analytical step, a formulation based on a gradient descent iteration is preferred. This yields
.. math::
\mb{x}^{k+1} = \mb{x}^k + \mb{x}^{k+1/2} - \alpha_k \, A^T \nabla_x \, f(A \mb{x}^k, \mb{y}) \;,
where :math:`\mb{x}^{k+1/2}` represents :math:`\nabla r(\mb{x}^k)`.
:class:`.ODPGrDescBlock` uses this formulation to solve a generic problem with :math:`\ell_2` fidelity as
.. math::
    \mb{x}^{k+1} = \mb{x}^k + \mb{x}^{k+1/2} - \alpha_k \, A^T (A \mb{x}^k - \mb{y}) \;,

with :math:`\mb{y}` the measured signal and :math:`\mb{x}^k + \mb{x}^{k+1/2}` a residual CNN.
MoDL
----
The model-based deep learning (MoDL) framework :cite:`aggarwal-2019-modl`, implemented as :class:`.MoDLNet`, also solves inverse problems in imaging by adapting classical iterative methods into an end-to-end deep learning framework, but, in contrast to ODP, it solves the optimization problem
.. math::
\argmin_{\mb{x}} \; \| A \mb{x} - \mb{y}\|_2^2 + \lambda \, \| \mb{x} - \mathrm{D}_w(\mb{x})\|_2^2 \;,
by directly computing the update
.. math::
\mb{x}^{k+1} = (A^T A + \lambda \, I)^{-1} (A^T \mb{y} + \lambda \, \mb{z}^k) \;,
via conjugate gradient. The regularization :math:`\mb{z}^k = \mathrm{D}_w(\mb{x}^{k})` incorporates prior information, usually in the form of a denoiser model. In this case, the denoiser :math:`\mathrm{D}_w` is shared between all the stages of the network requiring relatively less memory than other unrolling methods. This also allows for deploying a different number of iterations in testing than the ones used in training.
/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/include/learning.rst
.. _optimizer:
Optimization Algorithms
=======================
ADMM
----
The Alternating Direction Method of Multipliers (ADMM)
:cite:`glowinski-1975-approximation` :cite:`gabay-1976-dual` is an
algorithm for minimizing problems of the form
.. math::
:label: eq:admm_prob
\argmin_{\mb{x}, \mb{z}} \; f(\mb{x}) + g(\mb{z}) \; \text{such that}
\; \acute{A} \mb{x} + \acute{B} \mb{z} = \mb{c} \;,
where :math:`f` and :math:`g` are convex (but not necessarily smooth)
functionals, :math:`\acute{A}` and :math:`\acute{B}` are linear operators,
and :math:`\mb{c}` is a constant vector. (For a thorough introduction and
overview, see :cite:`boyd-2010-distributed`.)
The SCICO ADMM solver, :class:`.ADMM`, solves problems of the form
.. math::
\argmin_{\mb{x}} \; f(\mb{x}) + \sum_{i=1}^N g_i(C_i \mb{x}) \;,
where :math:`f` and the :math:`g_i` are instances of :class:`.Functional`,
and the :math:`C_i` are :class:`.LinearOperator`, by defining
.. math::
g(\mb{z}) = \sum_{i=1}^N g_i(\mb{z}_i) \qquad \mb{z}_i = C_i \mb{x}
in :eq:`eq:admm_prob`, corresponding to defining
.. math::
\acute{A} = \left( \begin{array}{c} C_0 \\ C_1 \\ C_2 \\
\vdots \end{array} \right) \quad
\acute{B} = \left( \begin{array}{cccc}
-I & 0 & 0 & \ldots \\
0 & -I & 0 & \ldots \\
0 & 0 & -I & \ldots \\
\vdots & \vdots & \vdots & \ddots
\end{array} \right) \quad
\mb{z} = \left( \begin{array}{c} \mb{z}_0 \\ \mb{z}_1 \\ \mb{z}_2 \\
\vdots \end{array} \right) \quad
\mb{c} = \left( \begin{array}{c} 0 \\ 0 \\ 0 \\
\vdots \end{array} \right) \;.
In :class:`.ADMM`, :math:`f` is a :class:`.Functional`, typically a
:class:`.Loss`, corresponding to the forward model of an imaging
problem, and the :math:`g_i` are :class:`.Functional`, typically
corresponding to a regularization term or constraint. Each of the
:math:`g_i` must have a proximal operator defined. It is also possible
to set ``f = None``, which corresponds to defining :math:`f = 0`,
i.e. the zero function.
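A schematic sketch of how such a problem is assembled (the quantities ``y``,
``A``, and ``lmbda`` are placeholders rather than part of a complete working
example):

::

    from scico import functional, linop, loss
    from scico.optimize.admm import ADMM, LinearSubproblemSolver

    f = loss.SquaredL2Loss(y=y, A=A)         # forward model term
    g_list = [lmbda * functional.L21Norm()]  # regularization term(s)
    C_list = [linop.FiniteDifference(input_shape=A.input_shape, append=0)]

    solver = ADMM(
        f=f,
        g_list=g_list,
        C_list=C_list,
        rho_list=[1e0],
        x0=A.adj(y),
        maxiter=50,
        subproblem_solver=LinearSubproblemSolver(),
    )
    x = solver.solve()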
Subproblem Solvers
^^^^^^^^^^^^^^^^^^
The most computationally expensive component of the ADMM iterations is typically
the :math:`\mb{x}`-update,
.. math::
:label: eq:admm_x_step
\argmin_{\mb{x}} \; f(\mb{x}) + \sum_i \frac{\rho_i}{2}
\norm{\mb{z}^{(k)}_i - \mb{u}^{(k)}_i - C_i \mb{x}}_2^2 \;.
The available solvers for this problem are:
* :class:`.admm.GenericSubproblemSolver`
  This is the default subproblem solver as it is applicable in all cases. It
  is only suitable for relatively small-scale problems as it makes use of
:func:`.solver.minimize`, which wraps :func:`scipy.optimize.minimize`.
* :class:`.admm.LinearSubproblemSolver`
This subproblem solver can be used when :math:`f` takes the form
:math:`\norm{\mb{A} \mb{x} - \mb{y}}^2_W`. It makes use of the conjugate
gradient method, and is significantly more efficient than
:class:`.admm.GenericSubproblemSolver` when it can be used.
* :class:`.admm.MatrixSubproblemSolver`
This subproblem solver can be used when :math:`f` takes the form
:math:`\norm{\mb{A} \mb{x} - \mb{y}}^2_W`, and :math:`A` and all of the
:math:`C_i` are diagonal (:class:`.Diagonal`) or matrix operators
  (:class:`.MatrixOperator`). It exploits a pre-computed matrix factorization
for a significantly more efficient solution than conjugate gradient.
* :class:`.admm.CircularConvolveSolver`
This subproblem solver can be used when :math:`f` takes the form
:math:`\norm{\mb{A} \mb{x} - \mb{y}}^2_W` and :math:`\mb{A}` and all
  the :math:`C_i` are circulant (i.e., diagonalized by the DFT).
* :class:`.admm.FBlockCircularConvolveSolver` and :class:`.admm.G0BlockCircularConvolveSolver`
These subproblem solvers can be used when the primary linear operator
  is block-circulant (i.e., an operator with blocks that are diagonalized
by the DFT).
For more details of these solvers and how to specify them, see the API
reference page for :mod:`scico.optimize.admm`.
Proximal ADMM
-------------
Proximal ADMM :cite:`deng-2015-global` is an algorithm for solving
problems of the form
.. math::
\argmin_{\mb{x}} \; f(\mb{x}) + g(\mb{z}) \;
\text{such that}\; A \mb{x} + B \mb{z} = \mb{c} \;,
where :math:`f` and :math:`g` are convex (but not necessarily
smooth) functionals and :math:`A` and :math:`B` are linear
operators. Although convergence per iteration is typically somewhat
worse than that of ADMM, the iterations can be much cheaper than that
of ADMM, giving Proximal ADMM competitive time convergence
performance.
The SCICO Proximal ADMM solver, :class:`.ProximalADMM`, requires
:math:`f` and :math:`g` to be instances of :class:`.Functional`, and
to have a proximal operator defined (:meth:`.Functional.prox`), and
:math:`A` and :math:`B` are required to be an instance of
:class:`.LinearOperator`.
Non-Linear Proximal ADMM
------------------------
Non-Linear Proximal ADMM :cite:`benning-2016-preconditioned` is an
algorithm for solving problems of the form
.. math::
\argmin_{\mb{x}} \; f(\mb{x}) + g(\mb{z}) \;
\text{such that}\; H(\mb{x}, \mb{z}) = 0 \;,
where :math:`f` and :math:`g` are convex (but not necessarily
smooth) functionals and :math:`H` is a function of two vector variables.
The SCICO Non-Linear Proximal ADMM solver, :class:`.NonLinearPADMM`, requires
:math:`f` and :math:`g` to be instances of :class:`.Functional`, and
to have a proximal operator defined (:meth:`.Functional.prox`), and
:math:`H` is required to be an instance of :class:`.Function`.
Linearized ADMM
---------------
Linearized ADMM :cite:`yang-2012-linearized`
:cite:`parikh-2014-proximal` (Sec. 4.4.2) is an algorithm for solving
problems of the form
.. math::
\argmin_{\mb{x}} \; f(\mb{x}) + g(C \mb{x}) \;,
where :math:`f` and :math:`g` are convex (but not necessarily
smooth) functionals. Although convergence per iteration is typically
significantly worse than that of ADMM, the :math:`\mb{x}`-update can
be much cheaper than that of ADMM, giving Linearized ADMM competitive
time convergence performance.
The SCICO Linearized ADMM solver, :class:`.LinearizedADMM`,
requires :math:`f` and :math:`g` to be instances of :class:`.Functional`,
and to have a proximal operator defined (:meth:`.Functional.prox`), and
:math:`C` is required to be an instance of :class:`.LinearOperator`.
PDHG
----
The Primal–Dual Hybrid Gradient (PDHG) algorithm
:cite:`esser-2010-general` :cite:`chambolle-2010-firstorder`
:cite:`pock-2011-diagonal` solves problems of the form
.. math::
\argmin_{\mb{x}} \; f(\mb{x}) + g(C \mb{x}) \;,
where :math:`f` and :math:`g` are convex (but not necessarily smooth)
functionals. The algorithm has similar advantages over ADMM to those of Linearized ADMM, but typically exhibits better convergence properties.
The SCICO PDHG solver, :class:`.PDHG`,
requires :math:`f` and :math:`g` to be instances of :class:`.Functional`,
and to have a proximal operator defined (:meth:`.Functional.prox`), and
:math:`C` is required to be an instance of :class:`.Operator` or :class:`.LinearOperator`.
PGM
---
The Proximal Gradient Method (PGM) :cite:`daubechies-2004-iterative`
:cite:`beck-2010-gradient` and Accelerated Proximal Gradient Method
(AcceleratedPGM) :cite:`beck-2009-fast` are algorithms for minimizing
problems of the form
.. math::
\argmin_{\mb{x}} f(\mb{x}) + g(\mb{x}) \;,
where :math:`g` is convex and :math:`f` is smooth and convex. The
corresponding SCICO solvers are :class:`.PGM` and :class:`.AcceleratedPGM`
respectively. In most cases :class:`.AcceleratedPGM` is expected to provide
faster convergence. In both of these classes, :math:`f` and :math:`g` are
both of type :class:`.Functional`, where :math:`f` must be differentiable,
and :math:`g` must have a proximal operator defined.
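A hedged sketch of setting up an accelerated PGM solver (the initial
reciprocal step size parameter ``L0`` and its value, as well as the
placeholders ``y``, ``A``, and ``lmbda``, are assumptions):

::

    from scico import functional, loss
    from scico.optimize.pgm import AcceleratedPGM

    f = loss.SquaredL2Loss(y=y, A=A)  # smooth, differentiable term
    g = lmbda * functional.L1Norm()   # non-smooth term with a proximal operator
    solver = AcceleratedPGM(f=f, g=g, L0=1e2, x0=A.adj(y), maxiter=100)
    x = solver.solve()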
While ADMM provides significantly more flexibility than PGM, and often
converges faster, the latter is preferred when solving the ADMM
:math:`\mb{x}`-step is very computationally expensive, such as in the case of
:math:`f(\mb{x}) = \norm{\mb{A} \mb{x} - \mb{y}}^2_W` where :math:`A` is
large and does not have any special structure that would allow an efficient
solution of :eq:`eq:admm_x_step`.
Step Size Options
^^^^^^^^^^^^^^^^^
The step size (usually referred to in terms of its reciprocal,
:math:`L`) for the gradient descent in :class:`.PGM` can be adapted via
Barzilai-Borwein methods (also called spectral methods) and iterative
line search methods.
The available step size policy classes are:
* :class:`.BBStepSize`
This implements the step size adaptation based on the Barzilai-Borwein
method :cite:`barzilai-1988-stepsize`. The step size :math:`\alpha` is
estimated as
.. math::
\mb{\Delta x} = \mb{x}_k - \mb{x}_{k-1} \; \\
\mb{\Delta g} = \nabla f(\mb{x}_k) - \nabla f (\mb{x}_{k-1}) \; \\
\alpha = \frac{\mb{\Delta x}^T \mb{\Delta g}}{\mb{\Delta g}^T
\mb{\Delta g}} \;.
Since the PGM solver uses the reciprocal of the step size, the value
:math:`L = 1 / \alpha` is returned.
* :class:`.AdaptiveBBStepSize`
This implements the adaptive Barzilai-Borwein method as introduced in
:cite:`zhou-2006-adaptive`. The adaptive step size rule computes
.. math::
\mb{\Delta x} = \mb{x}_k - \mb{x}_{k-1} \; \\
\mb{\Delta g} = \nabla f(\mb{x}_k) - \nabla f (\mb{x}_{k-1}) \; \\
\alpha^{\mathrm{BB1}} = \frac{\mb{\Delta x}^T \mb{\Delta x}}
{\mb{\Delta x}^T \mb{\Delta g}} \; \\
\alpha^{\mathrm{BB2}} = \frac{\mb{\Delta x}^T \mb{\Delta g}}
{\mb{\Delta g}^T \mb{\Delta g}} \;.
The determination of the new step size is made via the rule
.. math::
\alpha = \left\{ \begin{array}{ll} \alpha^{\mathrm{BB2}} &
\mathrm{~if~} \alpha^{\mathrm{BB2}} / \alpha^{\mathrm{BB1}}
< \kappa \; \\
\alpha^{\mathrm{BB1}} & \mathrm{~otherwise} \end{array}
\right . \;,
with :math:`\kappa \in (0, 1)`.
Since the PGM solver uses the reciprocal of the step size, the value
:math:`L = 1 / \alpha` is returned.
* :class:`.LineSearchStepSize`
This implements the line search strategy described in :cite:`beck-2009-fast`.
This strategy estimates :math:`L` such that
:math:`f(\mb{x}) \leq \hat{f}_{L}(\mb{x})` is satisfied with
:math:`\hat{f}_{L}` a quadratic approximation to :math:`f` defined as
.. math::
\hat{f}_{L}(\mb{x}, \mb{y}) = f(\mb{y}) + \nabla f(\mb{y})^H
(\mb{x} - \mb{y}) + \frac{L}{2} \left\| \mb{x} - \mb{y}
\right\|_2^2 \;,
with :math:`\mb{x}` the potential new update and :math:`\mb{y}` the
current solution or current extrapolation (if using :class:`.AcceleratedPGM`).
* :class:`.RobustLineSearchStepSize`
This implements the robust line search strategy described in
:cite:`florea-2017-robust`. This strategy estimates :math:`L` such that
:math:`f(\mb{x}) \leq \hat{f}_{L}(\mb{x})` is satisfied with
:math:`\hat{f}_{L}` a quadratic approximation to :math:`f` defined as
.. math::
\hat{f}_{L}(\mb{x}, \mb{y}) = f(\mb{y}) + \nabla f(\mb{y})^H
(\mb{x} - \mb{y}) + \frac{L}{2} \left\| \mb{x} - \mb{y} \right\|_2^2 \;,
with :math:`\mb{x}` the potential new update and :math:`\mb{y}` the
auxiliary extrapolation state. Note that this should only be used
with :class:`.AcceleratedPGM`.
For more details of these step size managers and how to specify them, see
the API reference page for :mod:`scico.optimize.pgm`.
/scico-0.0.4.tar.gz/scico-0.0.4/docs/source/include/optimizer.rst
# Construct an index README file and a docs example index file from
# source index file "scripts/index.rst".
# Run as
# python makeindex.py
import re
from pathlib import Path
import nbformat as nbf
import py2jn
import pypandoc
src = "scripts/index.rst"
# Make dict mapping script names to docstring header titles
titles = {}
scripts = list(Path("scripts").glob("*py"))
for s in scripts:
prevline = None
with open(s, "r") as sfile:
for line in sfile:
if line[0:3] == "===":
titles[s.name] = prevline.rstrip()
break
else:
prevline = line
# Build README in scripts directory
dst = "scripts/README.rst"
with open(dst, "w") as dstfile:
with open(src, "r") as srcfile:
for line in srcfile:
# Detect lines containing script filenames
m = re.match(r"(\s+)- ([^\s]+.py)", line)
if m:
prespace = m.group(1)
name = m.group(2)
title = titles[name]
print(
"%s`%s <%s>`_\n%s %s" % (prespace, name, name, prespace, title), file=dstfile
)
else:
print(line, end="", file=dstfile)
# Build notebooks index file in notebooks directory
dst = "notebooks/index.ipynb"
rst_text = ""
with open(src, "r") as srcfile:
for line in srcfile:
# Detect lines containing script filenames
m = re.match(r"(\s+)- ([^\s]+).py", line)
if m:
prespace = m.group(1)
name = m.group(2)
title = titles[name + ".py"]
rst_text += "%s- `%s <%s.ipynb>`_\n" % (prespace, title, name)
else:
rst_text += line
# Convert text from rst to markdown
md_format = "markdown_github+tex_math_dollars+fenced_code_attributes"
md_text = pypandoc.convert_text(rst_text, md_format, format="rst", extra_args=["--atx-headers"])
md_text = '"""' + md_text + '"""'
# Convert from python to notebook format and write notebook
nb = py2jn.py_string_to_notebook(md_text)
py2jn.tools.write_notebook(nb, dst, nbver=4)
nb = nbf.read(dst, nbf.NO_CONVERT)
nb.metadata = {"nbsphinx": {"orphan": True}}
nbf.write(nb, dst)
# Build examples index for docs
dst = "../docs/source/examples.rst"
prfx = "examples/"
with open(dst, "w") as dstfile:
print(".. _example_notebooks:\n", file=dstfile)
with open(src, "r") as srcfile:
for line in srcfile:
# Add toctree and include statements after main heading
if line[0:3] == "===":
print(line, end="", file=dstfile)
print("\n.. toctree::\n :maxdepth: 1", file=dstfile)
print("\n.. include:: include/examplenotes.rst", file=dstfile)
continue
# Detect lines containing script filenames
m = re.match(r"(\s+)- ([^\s]+).py", line)
if m:
print(" " + prfx + m.group(2), file=dstfile)
else:
print(line, end="", file=dstfile)
# Add toctree statement after section headings
if line[0:3] == line[0] * 3 and line[0] in ["=", "-", "^"]:
print("\n.. toctree::\n :maxdepth: 1", file=dstfile) | /scico-0.0.4.tar.gz/scico-0.0.4/examples/makeindex.py | 0.462716 | 0.352536 | makeindex.py | pypi |
import jax
import scico
import scico.numpy as snp
import scico.random
from scico import denoiser, functional, linop, loss, metric, plot
from scico.data import kodim23
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.solver import cg
from scico.util import device_info
"""
Define downsampling function.
"""
def downsample_image(img, rate):
    # Downsample by averaging over non-overlapping rate x rate blocks.
    img = snp.mean(snp.reshape(img, (-1, rate, img.shape[1], img.shape[2])), axis=1)
    img = snp.mean(snp.reshape(img, (img.shape[0], -1, rate, img.shape[2])), axis=2)
    return img
"""
Read a ground truth image.
"""
img = kodim23(asfloat=True)[160:416, 60:316]
img = jax.device_put(img)
"""
Create a test image by downsampling and adding Gaussian white noise.
"""
rate = 4 # downsampling rate
σ = 2e-2 # noise standard deviation
Afn = lambda x: downsample_image(x, rate=rate)
s = Afn(img)
input_shape = img.shape
output_shape = s.shape
noise, key = scico.random.randn(s.shape, seed=0)
sn = s + σ * noise
"""
Set up the PPP problem pseudo-functional. The DnCNN denoiser
:cite:`zhang-2017-dncnn` is used as a regularizer.
"""
A = linop.LinearOperator(input_shape=input_shape, output_shape=output_shape, eval_fn=Afn)
f = loss.SquaredL2Loss(y=sn, A=A)
C = linop.Identity(input_shape=input_shape)
g = functional.DnCNN("17M")
"""
Compute a baseline solution via denoising of the pseudo-inverse of the
forward operator. This baseline solution is also used to initialize the
PPP solver.
"""
xpinv, info = cg(A.T @ A, A.T @ sn, snp.zeros(input_shape))
dncnn = denoiser.DnCNN("17M")
xden = dncnn(xpinv)
"""
Set up an ADMM solver and solve.
"""
ρ = 3.4e-2 # ADMM penalty parameter
maxiter = 12 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=xden,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 10}),
itstat_options={"display": True},
)
print(f"Solving on {device_info()}\n")
xppp = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Plot convergence statistics.
"""
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
)
"""
Show reference and test images.
"""
fig = plot.figure(figsize=(8, 6))
ax0 = plot.plt.subplot2grid((1, rate + 1), (0, 0), colspan=rate)
plot.imview(img, title="Reference", fig=fig, ax=ax0)
ax1 = plot.plt.subplot2grid((1, rate + 1), (0, rate))
plot.imview(sn, title="Downsampled", fig=fig, ax=ax1)
fig.show()
"""
Show recovered full-resolution images.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, sharex=True, sharey=True, figsize=(21, 7))
plot.imview(xpinv, title="Pseudo-inverse: %.2f (dB)" % metric.psnr(img, xpinv), fig=fig, ax=ax[0])
plot.imview(
xden, title="Denoised pseudo-inverse: %.2f (dB)" % metric.psnr(img, xden), fig=fig, ax=ax[1]
)
plot.imview(xppp, title="PPP solution: %.2f (dB)" % metric.psnr(img, xppp), fig=fig, ax=ax[2])
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/superres_ppp_dncnn_admm.py | 0.79909 | 0.526525 | superres_ppp_dncnn_admm.py | pypi |
import os
from time import time
import jax
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scico import flax as sflax
from scico import metric, plot
from scico.flax.examples import load_ct_data
"""
Prepare parallel processing. Set an arbitrary processor count (only
applies if GPU is not available).
"""
os.environ["XLA_FLAGS"] = "--xla_force_host_platform_device_count=8"
platform = jax.lib.xla_bridge.get_backend().platform
print("Platform: ", platform)
"""
Read data from cache or generate if not available.
"""
N = 256 # phantom size
train_nimg = 536 # number of training images
test_nimg = 64 # number of testing images
nimg = train_nimg + test_nimg
n_projection = 45 # CT views
trdt, ttdt = load_ct_data(train_nimg, test_nimg, N, n_projection, verbose=True)
"""
Build training and testing structures. Inputs are the filtered
back-projected sinograms and outputs are the original generated foams.
Keep training and testing partitions.
"""
train_ds = {"image": trdt["fbp"], "label": trdt["img"]}
test_ds = {"image": ttdt["fbp"], "label": ttdt["img"]}
"""
Define configuration dictionary for model and training loop.
Parameters have been selected for demonstration purposes and relatively
short training. The model depth controls the levels of pooling in the
U-Net model. The block depth controls the number of layers at each level
of depth. The number of filters controls the number of filters at the
input and output levels and doubles (halves) at each pooling (unpooling)
operation. Better performance may be obtained by increasing depth, block
depth, number of filters or training epochs, but may require longer
training times.
"""
# model configuration
model_conf = {
"depth": 2,
"num_filters": 64,
"block_depth": 2,
}
# training configuration
train_conf: sflax.ConfigDict = {
"seed": 0,
"opt_type": "SGD",
"momentum": 0.9,
"batch_size": 16,
"num_epochs": 200,
"base_learning_rate": 1e-2,
"warmup_epochs": 0,
"log_every_steps": 1000,
"log": True,
}
"""
Construct UNet model.
"""
channels = train_ds["image"].shape[-1]
model = sflax.UNet(
depth=model_conf["depth"],
channels=channels,
num_filters=model_conf["num_filters"],
block_depth=model_conf["block_depth"],
)
"""
Run training loop.
"""
workdir = os.path.join(os.path.expanduser("~"), ".cache", "scico", "examples", "unet_ct_out")
train_conf["workdir"] = workdir
print(f"{'JAX process: '}{jax.process_index()}{' / '}{jax.process_count()}")
print(f"{'JAX local devices: '}{jax.local_devices()}")
# Construct training object
trainer = sflax.BasicFlaxTrainer(
train_conf,
model,
train_ds,
test_ds,
)
start_time = time()
modvar, stats_object = trainer.train()
time_train = time() - start_time
"""
Evaluate on testing data.
"""
start_time = time()
fmap = sflax.FlaxMap(model, modvar)
output = fmap(test_ds["image"])
time_eval = time() - start_time
output = jax.numpy.clip(output, a_min=0, a_max=1.0)
"""
Compare trained model in terms of reconstruction time and data fidelity.
"""
snr_eval = metric.snr(test_ds["label"], output)
psnr_eval = metric.psnr(test_ds["label"], output)
print(
f"{'UNet training':15s}{'epochs:':2s}{train_conf['num_epochs']:>5d}"
f"{'':21s}{'time[s]:':10s}{time_train:>7.2f}"
)
print(
f"{'UNet testing':15s}{'SNR:':5s}{snr_eval:>5.2f}{' dB'}{'':3s}"
f"{'PSNR:':6s}{psnr_eval:>5.2f}{' dB'}{'':3s}{'time[s]:':10s}{time_eval:>7.2f}"
)
"""
Plot comparison.
"""
key = jax.random.PRNGKey(123)
indx = jax.random.randint(key, shape=(1,), minval=0, maxval=test_nimg)[0]
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(test_ds["label"][indx, ..., 0], title="Ground truth", cbar=None, fig=fig, ax=ax[0])
plot.imview(
test_ds["image"][indx, ..., 0],
title="FBP Reconstruction: \nSNR: %.2f (dB), MAE: %.3f"
% (
metric.snr(test_ds["label"][indx, ..., 0], test_ds["image"][indx, ..., 0]),
metric.mae(test_ds["label"][indx, ..., 0], test_ds["image"][indx, ..., 0]),
),
cbar=None,
fig=fig,
ax=ax[1],
)
plot.imview(
output[indx, ..., 0],
title="UNet Reconstruction\nSNR: %.2f (dB), MAE: %.3f"
% (
metric.snr(test_ds["label"][indx, ..., 0], output[indx, ..., 0]),
metric.mae(test_ds["label"][indx, ..., 0], output[indx, ..., 0]),
),
fig=fig,
ax=ax[2],
)
divider = make_axes_locatable(ax[2])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[2].get_images()[0], cax=cax, label="arbitrary units")
fig.show()
"""
Plot convergence statistics. Statistics only generated if a training
cycle was done (i.e. not reading final epoch results from checkpoint).
"""
if stats_object is not None:
hist = stats_object.history(transpose=True)
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
jax.numpy.vstack((hist.Train_Loss, hist.Eval_Loss)).T,
x=hist.Epoch,
ptyp="semilogy",
title="Loss function",
xlbl="Epoch",
ylbl="Loss value",
lgnd=("Train", "Test"),
fig=fig,
ax=ax[0],
)
plot.plot(
jax.numpy.vstack((hist.Train_SNR, hist.Eval_SNR)).T,
x=hist.Epoch,
title="Metric",
xlbl="Epoch",
ylbl="SNR (dB)",
lgnd=("Train", "Test"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/ct_astra_unet_train_foam2.py | 0.723505 | 0.523116 | ct_astra_unet_train_foam2.py | pypi |
r"""
Image Deconvolution with TV Regularization (ADMM Solver)
========================================================
This example demonstrates the solution of an image deconvolution problem
with isotropic total variation (TV) regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - C \mathbf{x}
\|_2^2 + \lambda \| D \mathbf{x} \|_{2,1} \;,$$
where $C$ is a convolution operator, $\mathbf{y}$ is the blurred image,
$D$ is a 2D finite difference operator, and $\mathbf{x}$ is the
deconvolved image.
In this example the problem is solved via standard ADMM, while proximal
ADMM is used in a [companion example](deconv_tv_padmm.rst).
"""
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, metric, plot
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
phantom = SiemensStar(32)
N = 256 # image size
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
"""
Set up the forward operator and create a test signal consisting of a
blurred signal with additive Gaussian noise.
"""
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n)) / (n * n)
C = linop.Convolve(h=psf, input_shape=x_gt.shape)
Cx = C(x_gt) # blurred image
noise, key = scico.random.randn(Cx.shape, seed=0)
y = Cx + σ * noise
r"""
Set up the problem to be solved. We want to minimize the functional
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - C \mathbf{x}
\|_2^2 + \lambda \| D \mathbf{x} \|_{2,1} \;,$$
where $C$ is the convolution operator and $D$ is a finite difference
operator. This problem can be expressed as
$$\mathrm{argmin}_{\mathbf{x}, \mathbf{z}} \; (1/2) \| \mathbf{y} -
C \mathbf{x} \|_2^2 + \lambda \| \mathbf{z} \|_{2,1} \;\;
\text{such that} \;\; \mathbf{z} = D \mathbf{x} \;,$$
which is easily written in the form of a standard ADMM problem.
This is a simpler splitting than that used in the
[companion example](deconv_tv_padmm.rst), but it requires the use of
conjugate gradient sub-iterations to solve the ADMM step associated with
the data fidelity term.
"""
f = loss.SquaredL2Loss(y=y, A=C)
# Penalty parameters must be accounted for in the gi functions, not as
# additional inputs.
λ = 2.1e-2 # L21 norm regularization parameter
g = λ * functional.L21Norm()
# The append=0 option makes the results of horizontal and vertical
# finite differences the same shape, which is required for the L21Norm,
# which is used so that g(Cx) corresponds to isotropic TV.
D = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
"""
Set up an ADMM solver object.
"""
ρ = 1.0e-1 # ADMM penalty parameter
maxiter = 50 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=[g],
C_list=[D],
rho_list=[ρ],
x0=C.adj(y),
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(),
itstat_options={"display": True, "period": 10},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0])
nc = n // 2
yc = y[nc:-nc, nc:-nc]
plot.imview(y, title="Blurred, noisy image: %.2f (dB)" % metric.psnr(x_gt, yc), fig=fig, ax=ax[1])
plot.imview(
solver.x, title="Deconvolved image: %.2f (dB)" % metric.psnr(x_gt, solver.x), fig=fig, ax=ax[2]
)
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_tv_admm.py | 0.939789 | 0.955089 | deconv_tv_admm.py | pypi |
import numpy as np
import jax
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot, random
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
np.random.seed(1234)
N = 512 # image size
x_gt = discrete_phantom(Foam(size_range=[0.075, 0.0025], gap=1e-3, porosity=1), size=N)
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
"""
Set up forward operator and test signal consisting of blurred signal with
additive Gaussian noise.
"""
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n)) / (n * n)
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
noise, key = random.randn(Ax.shape)
y = Ax + σ * noise
"""
Set up the problem to be solved. We want to minimize the functional
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + R(\mathbf{x}) \;$$
where $R(\cdot)$ is a pseudo-functional having the DnCNN denoiser as its
proximal operator. The problem is solved via ADMM, using the standard
variable splitting for problems of this form, which requires the use of
conjugate gradient sub-iterations in the ADMM step that involves the data
fidelity term.
"""
f = loss.SquaredL2Loss(y=y, A=A)
g = functional.DnCNN("17M")
C = linop.Identity(x_gt.shape)
"""
Set up ADMM solver.
"""
ρ = 0.2 # ADMM penalty parameter
maxiter = 10 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=A.T @ y,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 30}),
itstat_options={"display": True},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
x = snp.clip(x, 0, 1)
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0])
nc = n // 2
yc = snp.clip(y[nc:-nc, nc:-nc], 0, 1)
plot.imview(y, title="Blurred, noisy image: %.2f (dB)" % metric.psnr(x_gt, yc), fig=fig, ax=ax[1])
plot.imview(x, title="Deconvolved image: %.2f (dB)" % metric.psnr(x_gt, x), fig=fig, ax=ax[2])
fig.show()
"""
Plot convergence statistics.
"""
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
)
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_ppp_dncnn_admm.py | 0.834204 | 0.660487 | deconv_ppp_dncnn_admm.py | pypi |
r"""
ℓ1 Total Variation Denoising
============================
This example demonstrates impulse noise removal via ℓ1 total variation
:cite:`alliney-1992-digital` :cite:`esser-2010-primal` (Sec. 2.4.4)
(i.e. total variation regularization with an ℓ1 data fidelity term),
minimizing the functional
$$\mathrm{argmin}_{\mathbf{x}} \; \| \mathbf{y} - \mathbf{x}
\|_1 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $\mathbf{y}$ is the noisy image, $C$ is a 2D finite difference
operator, and $\mathbf{x}$ is the denoised image.
"""
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.examples import spnoise
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
from scipy.ndimage import median_filter
"""
Create a ground truth image and impose salt & pepper noise to create a
noisy test image.
"""
N = 256 # image size
phantom = SiemensStar(16)
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
x_gt = 0.5 * x_gt / x_gt.max()
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
y = spnoise(x_gt, 0.5)
"""
Denoise with median filtering.
"""
x_med = median_filter(y, size=(5, 5))
"""
Denoise with ℓ1 total variation.
"""
λ = 1.5e0
g_loss = loss.Loss(y=y, f=functional.L1Norm())
g_tv = λ * functional.L21Norm()
# The append=0 option makes the results of horizontal and vertical finite
# differences the same shape, which is required for the L21Norm.
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
solver = ADMM(
f=None,
g_list=[g_loss, g_tv],
C_list=[linop.Identity(input_shape=y.shape), C],
rho_list=[5e0, 5e0],
x0=y,
maxiter=100,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 20}),
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
x_tv = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Plot results.
"""
plt_args = dict(norm=plot.matplotlib.colors.Normalize(vmin=0, vmax=1.0))
fig, ax = plot.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(13, 12))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0, 0], **plt_args)
plot.imview(y, title="Noisy image", fig=fig, ax=ax[0, 1], **plt_args)
plot.imview(
x_med,
title=f"Median filtering: {metric.psnr(x_gt, x_med):.2f} (dB)",
fig=fig,
ax=ax[1, 0],
**plt_args,
)
plot.imview(
x_tv,
title=f"ℓ1-TV denoising: {metric.psnr(x_gt, x_tv):.2f} (dB)",
fig=fig,
ax=ax[1, 1],
**plt_args,
)
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/denoise_l1tv_admm.py | 0.915067 | 0.931618 | denoise_l1tv_admm.py | pypi |
r"""
Non-Negative Basis Pursuit DeNoising (ADMM)
===========================================
This example demonstrates the solution of a non-negative sparse coding
problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - D \mathbf{x} \|_2^2
+ \lambda \| \mathbf{x} \|_1 + I(\mathbf{x} \geq 0) \;,$$
where $D$ is the dictionary, $\mathbf{y}$ is the signal to be represented,
$\mathbf{x}$ is the sparse representation, and $I(\mathbf{x} \geq 0)$ is the
indicator function of the non-negativity constraint.
"""
import numpy as np
import jax
from scico import functional, linop, loss, plot
from scico.optimize.admm import ADMM, MatrixSubproblemSolver
from scico.util import device_info
"""
Create a random dictionary, a reference random sparse representation, and a
test signal consisting of the synthesis of the reference sparse
representation.
"""
m = 32 # signal size
n = 128 # dictionary size
s = 10 # sparsity level
np.random.seed(1)
D = np.random.randn(m, n)
D = D / np.linalg.norm(D, axis=0, keepdims=True) # normalize dictionary
xt = np.zeros(n) # true signal
idx = np.random.randint(low=0, high=n, size=s) # support of xt
xt[idx] = np.random.rand(s)
y = D @ xt + 5e-2 * np.random.randn(m) # synthetic signal
xt = jax.device_put(xt) # convert to jax array, push to GPU
y = jax.device_put(y) # convert to jax array, push to GPU
"""
Set up the forward operator and ADMM solver object.
"""
lmbda = 1e-1  # ℓ1 norm regularization parameter
A = linop.MatrixOperator(D)
f = loss.SquaredL2Loss(y=y, A=A)
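# The ℓ1 penalty and the non-negativity constraint are handled as separate
# ADMM blocks, each paired with an identity operator.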
g_list = [lmbda * functional.L1Norm(), functional.NonNegativeIndicator()]
C_list = [linop.Identity((n)), linop.Identity((n))]
rho_list = [1.0, 1.0]
maxiter = 100 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=g_list,
C_list=C_list,
rho_list=rho_list,
x0=A.adj(y),
maxiter=maxiter,
subproblem_solver=MatrixSubproblemSolver(),
itstat_options={"display": True, "period": 10},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
"""
Plot the recovered coefficients and signal.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
np.vstack((xt, solver.x)).T,
title="Coefficients",
lgnd=("Ground Truth", "Recovered"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.vstack((D @ xt, y, D @ solver.x)).T,
title="Signal",
lgnd=("Ground Truth", "Noisy", "Recovered"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/sparsecode_admm.py | 0.898908 | 0.890913 | sparsecode_admm.py | pypi |
r"""
Basis Pursuit DeNoising (APGM)
==============================
This example demonstrates the solution of the sparse coding problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - D \mathbf{x}
\|_2^2 + \lambda \| \mathbf{x} \|_1\;,$$
where $D$ is the dictionary, $\mathbf{y}$ is the signal to be represented,
and $\mathbf{x}$ is the sparse representation.
"""
import numpy as np
import jax
from scico import functional, linop, loss, plot
from scico.optimize.pgm import AcceleratedPGM
from scico.util import device_info
"""
Construct a random dictionary, a reference random sparse
representation, and a test signal consisting of the synthesis of the
reference sparse representation.
"""
m = 512 # Signal size
n = 4 * m # Dictionary size
s = 32 # Sparsity level (number of non-zeros)
σ = 0.5 # Noise level
np.random.seed(12345)
D = np.random.randn(m, n)
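# The gradient of the data fidelity term is Lipschitz continuous with constant
# equal to the squared spectral norm of D; this value is used as the reciprocal
# of the PGM step size.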
L0 = np.linalg.norm(D, 2) ** 2
x_gt = np.zeros(n) # true signal
idx = np.random.permutation(list(range(0, n - 1)))
x_gt[idx[0:s]] = np.random.randn(s)
y = D @ x_gt + σ * np.random.randn(m) # synthetic signal
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
y = jax.device_put(y) # convert to jax array, push to GPU
"""
Set up the forward operator and AcceleratedPGM solver object.
"""
maxiter = 100
λ = 2.98e1
A = linop.MatrixOperator(D)
f = loss.SquaredL2Loss(y=y, A=A)
g = λ * functional.L1Norm()
solver = AcceleratedPGM(
f=f, g=g, L0=L0, x0=A.adj(y), maxiter=maxiter, itstat_options={"display": True, "period": 10}
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Plot the recovered coefficients and convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
np.vstack((x_gt, x)).T,
title="Coefficients",
lgnd=("Ground Truth", "Recovered"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.vstack((hist.Objective, hist.Residual)).T,
ptyp="semilogy",
title="Convergence",
xlbl="Iteration",
lgnd=("Objective", "Residual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/sparsecode_pgm.py | 0.890097 | 0.927034 | sparsecode_pgm.py | pypi |
r"""
TV-Regularized Abel Inversion
=============================
This example demonstrates a TV-regularized Abel inversion by solving the
problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + \lambda \| C \mathbf{x} \|_1 \;,$$
where $A$ is the Abel projector (with an implementation based on a
projector from PyAbel :cite:`pyabel-2022`), $\mathbf{y}$ is the measured
data, $C$ is a 2D finite difference operator, and $\mathbf{x}$ is the
desired image.
"""
import numpy as np
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.examples import create_circular_phantom
from scico.linop.abel import AbelProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 256 # image size
x_gt = create_circular_phantom((N, N), [0.4 * N, 0.2 * N, 0.1 * N], [1, 0, 0.5])
"""
Set up the forward operator and create a test measurement.
"""
A = AbelProjector(x_gt.shape)
y = A @ x_gt
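# Add white Gaussian noise to the projection data.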
np.random.seed(12345)
y = y + np.random.normal(size=y.shape).astype(np.float32)
"""
Compute inverse Abel transform solution.
"""
x_inv = A.inverse(y)
"""
Set up the problem to be solved. Anisotropic TV, which gives slightly
better performance than isotropic TV for this problem, is used here.
"""
f = loss.SquaredL2Loss(y=y, A=A)
λ = 2.35e1 # L1 norm regularization parameter
g = λ * functional.L1Norm() # Note the use of anisotropic TV
C = linop.FiniteDifference(input_shape=x_gt.shape)
"""
Set up ADMM solver object.
"""
ρ = 1.03e2 # ADMM penalty parameter
maxiter = 100 # number of ADMM iterations
cg_tol = 1e-4 # CG relative tolerance
cg_maxiter = 25 # maximum CG iterations per ADMM iteration
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=snp.clip(x_inv, 0.0, 1.0),
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}),
itstat_options={"display": True, "period": 10},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
solver.solve()
hist = solver.itstat_object.history(transpose=True)
x_tv = snp.clip(solver.x, 0.0, 1.0)
"""
Show results.
"""
norm = plot.matplotlib.colors.Normalize(vmin=-0.1, vmax=1.2)
fig, ax = plot.subplots(nrows=2, ncols=2, figsize=(12, 12))
plot.imview(x_gt, title="Ground Truth", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 0], norm=norm)
plot.imview(y, title="Measurement", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 1])
plot.imview(
x_inv,
title="Inverse Abel: %.2f (dB)" % metric.psnr(x_gt, x_inv),
cmap=plot.cm.Blues,
fig=fig,
ax=ax[1, 0],
norm=norm,
)
plot.imview(
x_tv,
title="TV-Regularized Inversion: %.2f (dB)" % metric.psnr(x_gt, x_tv),
cmap=plot.cm.Blues,
fig=fig,
ax=ax[1, 1],
norm=norm,
)
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/ct_abel_tv_admm.py | 0.922426 | 0.939969 | ct_abel_tv_admm.py | pypi |
r"""
Non-negative Poisson Loss Reconstruction (APGM)
===============================================
This example demonstrates the use of class
[pgm.PGMStepSize](../_autosummary/scico.optimize.pgm.rst#scico.optimize.pgm.PGMStepSize)
to solve the non-negative reconstruction problem with Poisson negative
log likelihood loss
$$\mathrm{argmin}_{\mathbf{x}} \; \frac{1}{2} \left ( A(\mathbf{x}) -
\mathbf{y} \log\left( A(\mathbf{x}) \right) + \log(\mathbf{y}!) \right
) + I(\mathbf{x}^{(0)} \geq 0) \;,$$
where $A$ is the forward operator, $\mathbf{y}$ is the
measurement, $\mathbf{x}$ is the signal reconstruction, and
$I(\mathbf{x}^{(0)} \geq 0)$ is the non-negative indicator.
This example also demonstrates the application of
[numpy.BlockArray](../_autosummary/scico.numpy.rst#scico.numpy.BlockArray),
[functional.SeparableFunctional](../_autosummary/scico.functional.rst#scico.functional.SeparableFunctional),
and
[functional.ZeroFunctional](../_autosummary/scico.functional.rst#scico.functional.ZeroFunctional)
to implement the forward operator
$A(\mathbf{x}) = A_0(\mathbf{x}^{(0)}) + A_1(\mathbf{x}^{(1)})$
and the selective non-negativity constraint that only applies to
$\mathbf{x}^{(0)}$.
"""
import jax
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import scico.numpy as snp
import scico.random
from scico import functional, loss, plot
from scico.numpy import BlockArray
from scico.operator import Operator
from scico.optimize.pgm import (
AcceleratedPGM,
AdaptiveBBStepSize,
BBStepSize,
LineSearchStepSize,
RobustLineSearchStepSize,
)
from scico.typing import Shape
from scico.util import device_info
from scipy.linalg import dft
"""
Construct a dictionary, a reference random reconstruction, and a test
measurement signal consisting of the synthesis of the reference
reconstruction.
"""
m = 1024 # signal size
n = 8 # dictionary size
n0 = 2  # size of first block of coefficients
n1 = n - n0  # size of second block of coefficients
# Create dictionary with bump-like features.
D = ((snp.real(dft(m))[1 : n + 1, :m]) ** 12).T
D0 = D[:, :n0]
D1 = D[:, n0:]
# Define composed operator.
class ForwardOperator(Operator):
"""Toy problem non-linear forward operator with different treatment
of x[0] and x[1].
Attributes:
D0: Matrix multiplying x[0].
D1: Matrix multiplying x[1].
"""
def __init__(self, input_shape: Shape, D0, D1, jit: bool = True):
self.D0 = D0
self.D1 = D1
output_shape = (D0.shape[0],)
super().__init__(
input_shape=input_shape,
input_dtype=snp.complex64,
output_dtype=snp.complex64,
output_shape=output_shape,
jit=jit,
)
def _eval(self, x: BlockArray) -> BlockArray:
return 10 * snp.exp(-D0 @ x[0]) + 5 * snp.exp(-D1 @ x[1])
x_gt, key = scico.random.uniform(((n0,), (n1,)), seed=12345) # true coefficients
A = ForwardOperator(x_gt.shape, D0, D1)
lam = A(x_gt)
y, key = scico.random.poisson(lam, shape=lam.shape, key=key) # synthetic signal
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
y = jax.device_put(y) # convert to jax array, push to GPU
"""
Set up the loss function and the regularization.
"""
f = loss.PoissonLoss(y=y, A=A)
g0 = functional.NonNegativeIndicator()
g1 = functional.ZeroFunctional()
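# Combine the per-block functionals so that the non-negativity constraint is
# applied only to x[0].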
g = functional.SeparableFunctional([g0, g1])
"""
Define common setup: maximum number of iterations and initial estimate of the
solution.
"""
maxiter = 50
x0, key = scico.random.uniform(((n0,), (n1,)), key=key)
x0 = jax.device_put(x0) # Initial solution estimate
"""
Define plotting functionality.
"""
def plot_results(hist, str_ss, L0, xsol, xgt, Aop):
# Plot signal, coefficients and convergence statistics.
fig = plot.figure(
figsize=(12, 6),
tight_layout=True,
)
gs = gridspec.GridSpec(nrows=2, ncols=3)
fig.suptitle(
"Results for PGM Solver and " + str_ss + r" ($L_0$: " + "{:4.2f}".format(L0) + ")",
fontsize=16,
)
ax0 = fig.add_subplot(gs[0, 0])
plot.plot(
hist.Objective,
ptyp="semilogy",
title="Objective",
xlbl="Iteration",
fig=fig,
ax=ax0,
)
ax1 = fig.add_subplot(gs[0, 1])
plot.plot(
hist.Residual,
ptyp="semilogy",
title="Residual",
xlbl="Iteration",
fig=fig,
ax=ax1,
)
ax2 = fig.add_subplot(gs[0, 2])
plot.plot(
hist.L,
ptyp="semilogy",
title="L",
xlbl="Iteration",
fig=fig,
ax=ax2,
)
ax3 = fig.add_subplot(gs[1, 0])
plt.stem(snp.concatenate((xgt[0], xgt[1])), linefmt="C1-", markerfmt="C1o", basefmt="C1-")
plt.stem(snp.concatenate((xsol[0], xsol[1])), linefmt="C2-", markerfmt="C2x", basefmt="C1-")
plt.legend(["Ground Truth", "Recovered"])
plt.xlabel("Index")
plt.title("Coefficients")
ax4 = fig.add_subplot(gs[1, 1:])
plot.plot(
snp.vstack((y, Aop(xgt), Aop(xsol))).T,
title="Fit",
xlbl="Index",
lgnd=("y", "A(x_gt)", "A(x)"),
fig=fig,
ax=ax4,
)
fig.show()
"""
Use the default PGMStepSize object, set L0 to a value specifically chosen to
ensure convergence, and set up the AcceleratedPGM solver object. Run the
solver and plot the reconstructed signal and convergence statistics.
"""
L0 = 1e3  # initial reciprocal of gradient descent step size
str_L0 = "(Specifically chosen so that convergence occurs)"
solver = AcceleratedPGM(
f=f,
g=g,
L0=L0,
x0=x0,
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
)
str_ss = type(solver.step_size).__name__
print(f"Solving on {device_info()}\n")
print("============================================================")
print("Running solver with step size of class: ", str_ss)
print("L0 " + str_L0 + ": ", L0, "\n")
x = solver.solve() # Run the solver.
hist = solver.itstat_object.history(transpose=True)
plot_results(hist, str_ss, L0, x, x_gt, A)
"""
Use the BBStepSize object, set L0 to an arbitrary initial value, and set up
the AcceleratedPGM solver object. Run the solver and plot the reconstructed
signal and convergence statistics.
"""
L0 = 90.0 # initial reciprocal of gradient descent step size
str_L0 = "(Arbitrary Initialization)"
solver = AcceleratedPGM(
f=f,
g=g,
L0=L0,
x0=x0,
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
step_size=BBStepSize(),
)
str_ss = type(solver.step_size).__name__
print("===================================================")
print("Running solver with step size of class: ", str_ss)
print("L0 " + str_L0 + ": ", L0, "\n")
x = solver.solve() # Run the solver.
hist = solver.itstat_object.history(transpose=True)
plot_results(hist, str_ss, L0, x, x_gt, A)
"""
Use the AdaptiveBBStepSize object, set L0 to an arbitrary initial value, and
set up the AcceleratedPGM solver object. Run the solver and plot the
reconstructed signal and convergence statistics.
"""
L0 = 90.0 # initial reciprocal of gradient descent step size
str_L0 = "(Arbitrary Initialization)"
solver = AcceleratedPGM(
f=f,
g=g,
L0=L0,
x0=x0,
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
step_size=AdaptiveBBStepSize(kappa=0.75),
)
str_ss = type(solver.step_size).__name__
print("===========================================================")
print("Running solver with step size of class: ", str_ss)
print("L0 " + str_L0 + ": ", L0, "\n")
x = solver.solve() # Run the solver.
hist = solver.itstat_object.history(transpose=True)
plot_results(hist, str_ss, L0, x, x_gt, A)
"""
Use the LineSearchStepSize object, set L0 to an arbitrary initial value, and
set up the AcceleratedPGM solver object. Run the solver and plot the
reconstructed signal and convergence statistics.
"""
L0 = 90.0 # initial reciprocal of gradient descent step size
str_L0 = "(Arbitrary Initialization)"
solver = AcceleratedPGM(
f=f,
g=g,
L0=L0,
x0=x0,
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
step_size=LineSearchStepSize(),
)
str_ss = type(solver.step_size).__name__
print("===========================================================")
print("Running solver with step size of class: ", str_ss)
print("L0 " + str_L0 + ": ", L0, "\n")
x = solver.solve() # Run the solver.
hist = solver.itstat_object.history(transpose=True)
plot_results(hist, str_ss, L0, x, x_gt, A)
"""
Use the RobustLineSearchStepSize object, set L0 to an arbitrary initial
value, and set up the AcceleratedPGM solver object. Run the solver and plot
the reconstructed signal and convergence statistics.
"""
L0 = 90.0 # initial reciprocal of gradient descent step size
str_L0 = "(Arbitrary Initialization)"
solver = AcceleratedPGM(
f=f,
g=g,
L0=L0,
x0=x0,
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
step_size=RobustLineSearchStepSize(),
)
str_ss = type(solver.step_size).__name__
print("=================================================================")
print("Running solver with step size of class: ", str_ss)
print("L0 " + str_L0 + ": ", L0, "\n")
x = solver.solve() # Run the solver.
hist = solver.itstat_object.history(transpose=True)
plot_results(hist, str_ss, L0, x, x_gt, A)
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/sparsecode_poisson_pgm.py | 0.944228 | 0.903847 | sparsecode_poisson_pgm.py | pypi |
r"""
CT Reconstruction with CG and PCG
=================================
This example demonstrates a simple iterative CT reconstruction using
conjugate gradient (CG) and preconditioned conjugate gradient (PCG)
algorithms to solve the problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 \;,$$
where $A$ is the Radon transform, $\mathbf{y}$ is the sinogram, and
$\mathbf{x}$ is the reconstructed image.
"""
from time import time
import numpy as np
import jax
import jax.numpy as jnp
from xdesign import Foam, discrete_phantom
from scico import loss, plot
from scico.linop import CircularConvolve
from scico.linop.radon_astra import TomographicProjector
from scico.solver import cg
"""
Create a ground truth image.
"""
N = 256 # phantom size
x_gt = discrete_phantom(Foam(size_range=[0.075, 0.0025], gap=1e-3, porosity=1), size=N)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
"""
Configure a CT projection operator and generate synthetic measurements.
"""
n_projection = N # matches the phantom size so this is not few-view CT
angles = np.linspace(0, np.pi, n_projection) # evenly spaced projection angles
A = 1 / N * TomographicProjector(x_gt.shape, 1, N, angles) # Radon transform operator
y = A @ x_gt # sinogram
r"""
Forward and back project a single pixel (Kronecker delta) to compute
an approximate impulse response for $\mathbf{A}^T \mathbf{A}$.
"""
H = CircularConvolve.from_operator(A.T @ A)
r"""
Invert in the Fourier domain to form a preconditioner $\mathbf{M}
\approx (\mathbf{A}^T \mathbf{A})^{-1}$. See
:cite:`clinthorne-1993-preconditioning` Section V.A. for more details.
"""
# γ limits the gain of the preconditioner; higher gives a weaker filter.
γ = 1e-2
# The imaginary part comes from numerical errors in A.T and needs to be
# removed to ensure H is symmetric, positive definite.
frequency_response = np.real(H.h_dft)
inv_frequency_response = 1 / (frequency_response + γ)
# Using circular convolution without padding is sufficient here because
# M is approximate anyway.
M = CircularConvolve(inv_frequency_response, x_gt.shape, h_is_dft=True)
r"""
Check that $\mathbf{M}$ does approximately invert $\mathbf{A}^T \mathbf{A}$.
"""
plot_args = dict(norm=plot.matplotlib.colors.Normalize(vmin=0, vmax=1.5))
fig, axes = plot.subplots(nrows=1, ncols=3, figsize=(12, 4.5))
plot.imview(x_gt, title="Ground truth, $x_{gt}$", fig=fig, ax=axes[0], **plot_args)
plot.imview(
A.T @ A @ x_gt, title=r"$\mathbf{A}^T \mathbf{A} x_{gt}$", fig=fig, ax=axes[1], **plot_args
)
plot.imview(
M @ A.T @ A @ x_gt,
title=r"$\mathbf{M} \mathbf{A}^T \mathbf{A} x_{gt}$",
fig=fig,
ax=axes[2],
**plot_args,
)
fig.suptitle(r"$\mathbf{M}$ approximately inverts $\mathbf{A}^T \mathbf{A}$")
fig.tight_layout()
fig.colorbar(
axes[2].get_images()[0],
ax=axes,
location="right",
shrink=1.0,
pad=0.05,
label="Arbitrary Units",
)
fig.show()
"""
Reconstruct with both standard and preconditioned conjugate gradient.
"""
start_time = time()
x_cg, info_cg = cg(
A.T @ A,
A.T @ y,
jnp.zeros(A.input_shape, dtype=A.input_dtype),
tol=1e-5,
info=True,
)
time_cg = time() - start_time
start_time = time()
x_pcg, info_pcg = cg(
A.T @ A,
A.T @ y,
jnp.zeros(A.input_shape, dtype=A.input_dtype),
tol=2e-5, # preconditioning affects the problem scaling so tol differs between CG and PCG
info=True,
M=M,
)
time_pcg = time() - start_time
"""
Compare CG and PCG in terms of reconstruction time and data fidelity.
"""
f_cg = loss.SquaredL2Loss(y=A.T @ y, A=A.T @ A)  # loss based on normal equations residual
f_data = loss.SquaredL2Loss(y=y, A=A)  # loss based on data fidelity residual
print(
f"{'Method':10s}{'Iterations':>15s}{'Time (s)':>15s}{'||ATAx - ATy||':>15s}{'||Ax - y||':>15s}"
)
print(
f"{'CG':10s}{info_cg['num_iter']:>15d}{time_cg:>15.2f}{f_cg(x_cg):>15.2e}{f_data(x_cg):>15.2e}"
)
print(
f"{'PCG':10s}{info_pcg['num_iter']:>15d}{time_pcg:>15.2f}{f_cg(x_pcg):>15.2e}"
f"{f_data(x_pcg):>15.2e}"
)
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/ct_astra_noreg_pcg.py | 0.920222 | 0.973968 | ct_astra_noreg_pcg.py | pypi |
r"""
Complex Total Variation Denoising with PDHG Solver
==================================================
This example demonstrates solution of a problem of the form
$$\argmin_{\mathbf{x}} \; f(\mathbf{x}) + g(C(\mathbf{x})) \;,$$
where $C$ is a nonlinear operator, via non-linear PDHG
:cite:`valkonen-2014-primal`. The example problem represents total
variation (TV) denoising applied to a complex image with piece-wise
smooth magnitude and non-smooth phase. The appropriate TV denoising
formulation for this problem is
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - \mathbf{x}
\|_2^2 + \lambda \| C(\mathbf{x}) \|_{2,1} \;,$$
where $\mathbf{y}$ is the measurement, $\|\cdot\|_{2,1}$ is the
$\ell_{2,1}$ mixed norm, and $C$ is a non-linear operator that applies a
linear difference operator to the magnitude of a complex array. The
standard TV solution, which is also computed for comparison purposes,
gives very poor results since the difference is applied independently to
real and imaginary components of the complex image.
"""
from mpl_toolkits.axes_grid1 import make_axes_locatable
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, metric, operator, plot
from scico.examples import phase_diff
from scico.optimize import PDHG
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 256 # image size
phantom = SiemensStar(16)
x_mag = snp.pad(discrete_phantom(phantom, N - 16), 8) + 1.0
x_mag /= x_mag.max()
# Create reference image with structured magnitude and random phase
x_gt = x_mag * snp.exp(-1j * scico.random.randn(x_mag.shape, seed=0)[0])
"""
Add noise to create a noisy test image.
"""
σ = 0.25 # noise standard deviation
noise, key = scico.random.randn(x_gt.shape, seed=1, dtype=snp.complex64)
y = x_gt + σ * noise
"""
Denoise with standard total variation.
"""
λ_tv = 6e-2
f = loss.SquaredL2Loss(y=y)
g = λ_tv * functional.L21Norm()
# The append=0 option makes the results of horizontal and vertical finite
# differences the same shape, which is required for the L21Norm.
C = linop.FiniteDifference(input_shape=x_gt.shape, input_dtype=snp.complex64, append=0)
solver_tv = PDHG(
f=f,
g=g,
C=C,
tau=4e-1,
sigma=4e-1,
maxiter=200,
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
x_tv = solver_tv.solve()
hist_tv = solver_tv.itstat_object.history(transpose=True)
"""
Denoise with total variation applied to the magnitude of a complex image.
"""
λ_nltv = 2e-1
g = λ_nltv * functional.L21Norm()
# Redefine C for real input (now applied to magnitude of a complex array)
C = linop.FiniteDifference(input_shape=x_gt.shape, input_dtype=snp.float32, append=0)
# Operator computing differences of absolute values
D = C @ operator.Abs(input_shape=x_gt.shape, input_dtype=snp.complex64)
solver_nltv = PDHG(
f=f,
g=g,
C=D,
tau=4e-1,
sigma=4e-1,
maxiter=200,
itstat_options={"display": True, "period": 10},
)
x_nltv = solver_nltv.solve()
hist_nltv = solver_nltv.itstat_object.history(transpose=True)
"""
Plot results.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, sharex=True, sharey=False, figsize=(27, 6))
plot.plot(
snp.vstack((hist_tv.Objective, hist_nltv.Objective)).T,
ptyp="semilogy",
title="Objective function",
xlbl="Iteration",
lgnd=("PDHG", "NL-PDHG"),
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist_tv.Prml_Rsdl, hist_nltv.Prml_Rsdl)).T,
ptyp="semilogy",
title="Primal residual",
xlbl="Iteration",
lgnd=("PDHG", "NL-PDHG"),
fig=fig,
ax=ax[1],
)
plot.plot(
snp.vstack((hist_tv.Dual_Rsdl, hist_nltv.Dual_Rsdl)).T,
ptyp="semilogy",
title="Dual residual",
xlbl="Iteration",
lgnd=("PDHG", "NL-PDHG"),
fig=fig,
ax=ax[2],
)
fig.show()
fig, ax = plot.subplots(nrows=2, ncols=4, figsize=(20, 10))
norm = plot.matplotlib.colors.Normalize(
vmin=min(snp.abs(x_gt).min(), snp.abs(y).min(), snp.abs(x_tv).min(), snp.abs(x_nltv).min()),
vmax=max(snp.abs(x_gt).max(), snp.abs(y).max(), snp.abs(x_tv).max(), snp.abs(x_nltv).max()),
)
plot.imview(snp.abs(x_gt), title="Ground truth", cbar=None, fig=fig, ax=ax[0, 0], norm=norm)
plot.imview(
snp.abs(y),
title="Measured: PSNR %.2f (dB)" % metric.psnr(snp.abs(x_gt), snp.abs(y)),
cbar=None,
fig=fig,
ax=ax[0, 1],
norm=norm,
)
plot.imview(
snp.abs(x_tv),
title="TV: PSNR %.2f (dB)" % metric.psnr(snp.abs(x_gt), snp.abs(x_tv)),
cbar=None,
fig=fig,
ax=ax[0, 2],
norm=norm,
)
plot.imview(
snp.abs(x_nltv),
title="NL-TV: PSNR %.2f (dB)" % metric.psnr(snp.abs(x_gt), snp.abs(x_nltv)),
cbar=None,
fig=fig,
ax=ax[0, 3],
norm=norm,
)
divider = make_axes_locatable(ax[0, 3])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[0, 3].get_images()[0], cax=cax)
norm = plot.matplotlib.colors.Normalize(
vmin=min(snp.angle(x_gt).min(), snp.angle(x_tv).min(), snp.angle(x_nltv).min()),
vmax=max(snp.angle(x_gt).max(), snp.angle(x_tv).max(), snp.angle(x_nltv).max()),
)
plot.imview(
snp.angle(x_gt),
title="Ground truth",
cbar=None,
fig=fig,
ax=ax[1, 0],
norm=norm,
)
plot.imview(
snp.angle(y),
title="Measured: Mean phase diff. %.2f" % phase_diff(snp.angle(x_gt), snp.angle(y)).mean(),
cbar=None,
fig=fig,
ax=ax[1, 1],
norm=norm,
)
plot.imview(
snp.angle(x_tv),
title="TV: Mean phase diff. %.2f" % phase_diff(snp.angle(x_gt), snp.angle(x_tv)).mean(),
cbar=None,
fig=fig,
ax=ax[1, 2],
norm=norm,
)
plot.imview(
snp.angle(x_nltv),
title="NL-TV: Mean phase diff. %.2f" % phase_diff(snp.angle(x_gt), snp.angle(x_nltv)).mean(),
cbar=None,
fig=fig,
ax=ax[1, 3],
norm=norm,
)
divider = make_axes_locatable(ax[1, 3])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[1, 3].get_images()[0], cax=cax)
ax[0, 0].set_ylabel("Magnitude")
ax[1, 0].set_ylabel("Phase")
fig.tight_layout()
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/denoise_cplx_tv_pdhg.py | 0.930387 | 0.929055 | denoise_cplx_tv_pdhg.py | pypi |
r"""
Complex Total Variation Denoising with NLPADMM Solver
=====================================================
This example demonstrates solution of a problem of the form
$$\argmin_{\mb{x}} \; f(\mb{x}) + g(\mb{z}) \; \text{such that}\;
H(\mb{x}, \mb{z}) = 0 \;,$$
where $H$ is a nonlinear function, via a variant of the proximal ADMM
algorithm for problems with a non-linear operator constraint
:cite:`benning-2016-preconditioned`. The example problem represents
total variation (TV) denoising applied to a complex image with
piece-wise smooth magnitude and non-smooth phase. (This example is rather
contrived, and was not constructed to represent a specific real imaging
problem, but it does have some properties in common with synthetic
aperture radar single look complex data in which the magnitude has much
more discernible structure than the phase.) The appropriate TV denoising
formulation for this problem is
$$\argmin_{\mb{x}} \; (1/2) \| \mb{y} - \mb{x} \|_2^2 + \lambda
\| C(\mb{x}) \|_{2,1} \;,$$
where $\mb{y}$ is the measurement, $\|\cdot\|_{2,1}$ is the
$\ell_{2,1}$ mixed norm, and $C$ is a non-linear operator consisting of
a linear difference operator applied to the magnitude of a complex array.
This problem is represented in the form above by taking $H(\mb{x},
\mb{z}) = C(\mb{x}) - \mb{z}$. The standard TV solution, which is
also computed for comparison purposes, gives very poor results since
the difference is applied independently to real and imaginary
components of the complex image.
"""
from mpl_toolkits.axes_grid1 import make_axes_locatable
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import function, functional, linop, loss, metric, operator, plot
from scico.examples import phase_diff
from scico.optimize import NonLinearPADMM, ProximalADMM
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 256 # image size
phantom = SiemensStar(16)
x_mag = snp.pad(discrete_phantom(phantom, N - 16), 8) + 1.0
x_mag /= x_mag.max()
# Create reference image with structured magnitude and random phase
x_gt = x_mag * snp.exp(-1j * scico.random.randn(x_mag.shape, seed=0)[0])
"""
Add noise to create a noisy test image.
"""
σ = 0.25 # noise standard deviation
noise, key = scico.random.randn(x_gt.shape, seed=1, dtype=snp.complex64)
y = x_gt + σ * noise
"""
Denoise with standard total variation.
"""
λ_tv = 6e-2
f = loss.SquaredL2Loss(y=y)
g = λ_tv * functional.L21Norm()
# The append=0 option makes the results of horizontal and vertical finite
# differences the same shape, which is required for the L21Norm.
C = linop.FiniteDifference(input_shape=y.shape, input_dtype=snp.complex64, append=0)
solver_tv = ProximalADMM(
f=f,
g=g,
A=C,
rho=1.0,
mu=8.0,
nu=1.0,
maxiter=200,
itstat_options={"display": True, "period": 20},
)
print(f"Solving on {device_info()}\n")
x_tv = solver_tv.solve()
print()
hist_tv = solver_tv.itstat_object.history(transpose=True)
"""
Denoise with total variation applied to the magnitude of a complex image.
"""
λ_nltv = 2e-1
g = λ_nltv * functional.L21Norm()
# Redefine C for real input (now applied to magnitude of a complex array)
C = linop.FiniteDifference(input_shape=y.shape, input_dtype=snp.float32, append=0)
# Operator computing differences of absolute values
D = C @ operator.Abs(input_shape=x_gt.shape, input_dtype=snp.complex64)
# Constraint function imposing the constraint z = D(x)
H = function.Function(
(C.shape[1], C.shape[0]),
output_shape=C.shape[0],
eval_fn=lambda x, z: D(x) - z,
input_dtypes=(snp.complex64, snp.float32),
output_dtype=snp.float32,
)
solver_nltv = NonLinearPADMM(
f=f,
g=g,
H=H,
rho=5.0,
mu=6.0,
nu=1.0,
maxiter=200,
itstat_options={"display": True, "period": 20},
)
x_nltv = solver_nltv.solve()
hist_nltv = solver_nltv.itstat_object.history(transpose=True)
"""
Plot results.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, sharex=True, sharey=False, figsize=(27, 6))
plot.plot(
snp.vstack((hist_tv.Objective, hist_nltv.Objective)).T,
ptyp="semilogy",
title="Objective function",
xlbl="Iteration",
lgnd=("Standard TV", "Magnitude TV"),
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist_tv.Prml_Rsdl, hist_nltv.Prml_Rsdl)).T,
ptyp="semilogy",
title="Primal residual",
xlbl="Iteration",
lgnd=("Standard TV", "Magnitude TV"),
fig=fig,
ax=ax[1],
)
plot.plot(
snp.vstack((hist_tv.Dual_Rsdl, hist_nltv.Dual_Rsdl)).T,
ptyp="semilogy",
title="Dual residual",
xlbl="Iteration",
lgnd=("Standard TV", "Magnitude TV"),
fig=fig,
ax=ax[2],
)
fig.show()
fig, ax = plot.subplots(nrows=2, ncols=4, figsize=(20, 10))
norm = plot.matplotlib.colors.Normalize(
vmin=min(snp.abs(x_gt).min(), snp.abs(y).min(), snp.abs(x_tv).min(), snp.abs(x_nltv).min()),
vmax=max(snp.abs(x_gt).max(), snp.abs(y).max(), snp.abs(x_tv).max(), snp.abs(x_nltv).max()),
)
plot.imview(snp.abs(x_gt), title="Ground truth", cbar=None, fig=fig, ax=ax[0, 0], norm=norm)
plot.imview(
snp.abs(y),
title="Measured: PSNR %.2f (dB)" % metric.psnr(snp.abs(x_gt), snp.abs(y)),
cbar=None,
fig=fig,
ax=ax[0, 1],
norm=norm,
)
plot.imview(
snp.abs(x_tv),
title="Standard TV: PSNR %.2f (dB)" % metric.psnr(snp.abs(x_gt), snp.abs(x_tv)),
cbar=None,
fig=fig,
ax=ax[0, 2],
norm=norm,
)
plot.imview(
snp.abs(x_nltv),
title="Magnitude TV: PSNR %.2f (dB)" % metric.psnr(snp.abs(x_gt), snp.abs(x_nltv)),
cbar=None,
fig=fig,
ax=ax[0, 3],
norm=norm,
)
divider = make_axes_locatable(ax[0, 3])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[0, 3].get_images()[0], cax=cax)
norm = plot.matplotlib.colors.Normalize(
vmin=min(snp.angle(x_gt).min(), snp.angle(x_tv).min(), snp.angle(x_nltv).min()),
vmax=max(snp.angle(x_gt).max(), snp.angle(x_tv).max(), snp.angle(x_nltv).max()),
)
plot.imview(
snp.angle(x_gt),
title="Ground truth",
cbar=None,
fig=fig,
ax=ax[1, 0],
norm=norm,
)
plot.imview(
snp.angle(y),
title="Measured: Mean phase diff. %.2f" % phase_diff(snp.angle(x_gt), snp.angle(y)).mean(),
cbar=None,
fig=fig,
ax=ax[1, 1],
norm=norm,
)
plot.imview(
snp.angle(x_tv),
title="Standard TV: Mean phase diff. %.2f"
% phase_diff(snp.angle(x_gt), snp.angle(x_tv)).mean(),
cbar=None,
fig=fig,
ax=ax[1, 2],
norm=norm,
)
plot.imview(
snp.angle(x_nltv),
title="Magnitude TV: Mean phase diff. %.2f"
% phase_diff(snp.angle(x_gt), snp.angle(x_nltv)).mean(),
cbar=None,
fig=fig,
ax=ax[1, 3],
norm=norm,
)
divider = make_axes_locatable(ax[1, 3])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[1, 3].get_images()[0], cax=cax)
ax[0, 0].set_ylabel("Magnitude")
ax[1, 0].set_ylabel("Phase")
fig.tight_layout()
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/denoise_cplx_tv_nlpadmm.py | 0.944523 | 0.931711 | denoise_cplx_tv_nlpadmm.py | pypi |
r"""
TV-Regularized Sparse-View CT Reconstruction
============================================
This example demonstrates solution of a sparse-view CT reconstruction
problem with isotropic total variation (TV) regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $A$ is the Radon transform, $\mathbf{y}$ is the sinogram, $C$ is
a 2D finite difference operator, and $\mathbf{x}$ is the desired
image.
"""
import numpy as np
import jax
from mpl_toolkits.axes_grid1 import make_axes_locatable
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.linop.radon_astra import TomographicProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 512 # phantom size
np.random.seed(1234)
x_gt = discrete_phantom(Foam(size_range=[0.075, 0.0025], gap=1e-3, porosity=1), size=N)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
"""
Configure CT projection operator and generate synthetic measurements.
"""
n_projection = 45 # number of projections
angles = np.linspace(0, np.pi, n_projection) # evenly spaced projection angles
A = TomographicProjector(x_gt.shape, 1, N, angles) # Radon transform operator
y = A @ x_gt # sinogram
"""
Set up ADMM solver object.
"""
λ = 2e0 # L1 norm regularization parameter
ρ = 5e0 # ADMM penalty parameter
maxiter = 25 # number of ADMM iterations
cg_tol = 1e-4 # CG relative tolerance
cg_maxiter = 25 # maximum CG iterations per ADMM iteration
# The append=0 option makes the results of horizontal and vertical
# finite differences the same shape, which is required for the L21Norm,
# which is used so that g(Cx) corresponds to isotropic TV.
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
g = λ * functional.L21Norm()
f = loss.SquaredL2Loss(y=y, A=A)
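# Initial solution estimate: filtered back projection (FBP) reconstruction,
# clipped to [0, 1].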
x0 = snp.clip(A.fbp(y), 0, 1.0)
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=x0,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}),
itstat_options={"display": True, "period": 5},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
solver.solve()
hist = solver.itstat_object.history(transpose=True)
x_reconstruction = snp.clip(solver.x, 0, 1.0)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(x_gt, title="Ground truth", cbar=None, fig=fig, ax=ax[0])
plot.imview(
x0,
    title="FBP Reconstruction\nSNR: %.2f (dB), MAE: %.3f"
% (metric.snr(x_gt, x0), metric.mae(x_gt, x0)),
cbar=None,
fig=fig,
ax=ax[1],
)
plot.imview(
x_reconstruction,
title="TV Reconstruction\nSNR: %.2f (dB), MAE: %.3f"
% (metric.snr(x_gt, x_reconstruction), metric.mae(x_gt, x_reconstruction)),
fig=fig,
ax=ax[2],
)
divider = make_axes_locatable(ax[2])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[2].get_images()[0], cax=cax, label="arbitrary units")
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/ct_astra_tv_admm.py | 0.895323 | 0.928959 | ct_astra_tv_admm.py | pypi |
r"""
Deconvolution Microscopy (All Channels)
=======================================
This example partially replicates a [GlobalBioIm
example](https://biomedical-imaging-group.github.io/GlobalBioIm/examples.html)
using the [microscopy data](http://bigwww.epfl.ch/deconvolution/bio/)
provided by the EPFL Biomedical Imaging Group.
The deconvolution problem is solved using class
[admm.ADMM](../_autosummary/scico.optimize.rst#scico.optimize.ADMM) to
solve an image deconvolution problem with isotropic total variation (TV)
regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| M (\mathbf{y} - A \mathbf{x})
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} +
\iota_{\mathrm{NN}}(\mathbf{x}) \;,$$
where $M$ is a mask operator, $A$ is circular convolution,
$\mathbf{y}$ is the blurred image, $C$ is a convolutional gradient
operator, $\iota_{\mathrm{NN}}$ is the indicator function of the
non-negativity constraint, and $\mathbf{x}$ is the desired image.
"""
import numpy as np
import jax
import ray
import scico.numpy as snp
from scico import functional, linop, loss, plot
from scico.examples import downsample_volume, epfl_deconv_data, tile_volume_slices
from scico.optimize.admm import ADMM, CircularConvolveSolver
"""
Get and preprocess data. We downsample the data for the purposes of the
example. Reducing the downsampling rate will make the example slower
and more memory-intensive. To run this example on a GPU it may be
necessary to set environment variables
`XLA_PYTHON_CLIENT_ALLOCATOR=platform` and
`XLA_PYTHON_CLIENT_PREALLOCATE=false`. If your GPU does not have enough
memory, you can try setting the environment variable
`JAX_PLATFORM_NAME=cpu` to run on CPU.
"""
downsampling_rate = 2
y_list = []
y_pad_list = []
psf_list = []
for channel in range(3):
y, psf = epfl_deconv_data(channel, verbose=True) # get data
y = downsample_volume(y, downsampling_rate) # downsample
psf = downsample_volume(psf, downsampling_rate)
y -= y.min() # normalize y
y /= y.max()
psf /= psf.sum() # normalize psf
if channel == 0:
padding = [[0, p] for p in snp.array(psf.shape) - 1]
mask = snp.pad(snp.ones_like(y), padding)
y_pad = snp.pad(y, padding) # zero-padded version of y
y_list.append(y)
y_pad_list.append(y_pad)
psf_list.append(psf)
y = snp.stack(y_list, axis=-1)
yshape = y.shape
del y_list
"""
Define problem and algorithm parameters.
"""
λ = 2e-6 # ℓ1 norm regularization parameter
ρ0 = 1e-3 # ADMM penalty parameter for first auxiliary variable
ρ1 = 1e-3 # ADMM penalty parameter for second auxiliary variable
ρ2 = 1e-3 # ADMM penalty parameter for third auxiliary variable
maxiter = 100 # number of ADMM iterations
"""
Initialize ray, determine available computing resources, and put large arrays
in object store.
"""
ray.init()
ngpu = 0
ar = ray.available_resources()
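# Divide the available CPUs and GPUs across the three parallel channel solves.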
ncpu = max(int(ar["CPU"]) // 3, 1)
if "GPU" in ar:
ngpu = int(ar["GPU"]) // 3
print(f"Running on {ncpu} CPUs and {ngpu} GPUs per process")
y_pad_list = ray.put(y_pad_list)
psf_list = ray.put(psf_list)
mask_store = ray.put(mask)
"""
Define ray remote function for parallel solves.
"""
@ray.remote(num_cpus=ncpu, num_gpus=ngpu)
def deconvolve_channel(channel):
"""Deconvolve a single channel."""
y_pad = jax.device_put(ray.get(y_pad_list)[channel])
psf = jax.device_put(ray.get(psf_list)[channel])
mask = jax.device_put(ray.get(mask_store))
M = linop.Diagonal(mask)
C0 = linop.CircularConvolve(
h=psf, input_shape=mask.shape, h_center=snp.array(psf.shape) / 2 - 0.5 # forward operator
)
C1 = linop.FiniteDifference(input_shape=mask.shape, circular=True) # gradient operator
C2 = linop.Identity(mask.shape) # identity operator
g0 = loss.SquaredL2Loss(y=y_pad, A=M) # loss function (forward model)
g1 = λ * functional.L21Norm() # TV penalty (when applied to gradient)
g2 = functional.NonNegativeIndicator() # non-negativity constraint
if channel == 0:
print("Displaying solver status for channel 0")
display = True
else:
display = False
solver = ADMM(
f=None,
g_list=[g0, g1, g2],
C_list=[C0, C1, C2],
rho_list=[ρ0, ρ1, ρ2],
maxiter=maxiter,
itstat_options={"display": display, "period": 10, "overwrite": False},
x0=y_pad,
subproblem_solver=CircularConvolveSolver(),
)
x_pad = solver.solve()
x = x_pad[: yshape[0], : yshape[1], : yshape[2]]
return (x, solver.itstat_object.history(transpose=True))
"""
Solve problems for all three channels in parallel and extract results.
"""
ray_return = ray.get([deconvolve_channel.remote(channel) for channel in range(3)])
x = snp.stack([t[0] for t in ray_return], axis=-1)
solve_stats = [t[1] for t in ray_return]
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(14, 7))
plot.imview(tile_volume_slices(y), title="Blurred measurements", fig=fig, ax=ax[0])
plot.imview(tile_volume_slices(x), title="Deconvolved image", fig=fig, ax=ax[1])
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(18, 5))
plot.plot(
np.stack([s.Objective for s in solve_stats]).T,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
lgnd=("CY3", "DAPI", "FITC"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.stack([s.Prml_Rsdl for s in solve_stats]).T,
ptyp="semilogy",
title="Primal Residual",
xlbl="Iteration",
lgnd=("CY3", "DAPI", "FITC"),
fig=fig,
ax=ax[1],
)
plot.plot(
np.stack([s.Dual_Rsdl for s in solve_stats]).T,
ptyp="semilogy",
title="Dual Residual",
xlbl="Iteration",
lgnd=("CY3", "DAPI", "FITC"),
fig=fig,
ax=ax[2],
)
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_microscopy_allchn_tv_admm.py | 0.931346 | 0.900573 | deconv_microscopy_allchn_tv_admm.py | pypi |
r"""
Circulant Blur Image Deconvolution with TV Regularization
=========================================================
This example demonstrates the solution of an image deconvolution problem
with isotropic total variation (TV) regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $A$ is a circular convolution operator, $\mathbf{y}$ is the blurred
image, $C$ is a 2D finite difference operator, and $\mathbf{x}$ is the
deconvolved image.
"""
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, metric, plot
from scico.optimize.admm import ADMM, CircularConvolveSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
phantom = SiemensStar(32)
N = 256 # image size
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
"""
Set up the forward operator and create a test signal consisting of a
blurred signal with additive Gaussian noise.
"""
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n)) / (n * n)
A = linop.CircularConvolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
noise, key = scico.random.randn(Ax.shape, seed=0)
y = Ax + σ * noise
"""
Set up an ADMM solver object.
"""
λ = 2e-2 # L21 norm regularization parameter
ρ = 5e-1 # ADMM penalty parameter
maxiter = 50 # number of ADMM iterations
f = loss.SquaredL2Loss(y=y, A=A)
# Penalty parameters must be accounted for in the gi functions, not as
# additional inputs.
g = λ * functional.L21Norm() # regularization functionals gi
C = linop.FiniteDifference(x_gt.shape, circular=True)
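# A circular finite difference operator is used so that the ADMM linear
# subproblem can be solved efficiently by CircularConvolveSolver.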
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=A.adj(y),
maxiter=maxiter,
subproblem_solver=CircularConvolveSolver(),
itstat_options={"display": True, "period": 10},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0])
plot.imview(y, title="Blurred, noisy image: %.2f (dB)" % metric.psnr(x_gt, y), fig=fig, ax=ax[1])
plot.imview(x, title="Deconvolved image: %.2f (dB)" % metric.psnr(x_gt, x), fig=fig, ax=ax[2])
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_circ_tv_admm.py | 0.932122 | 0.933613 | deconv_circ_tv_admm.py | pypi |
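r"""
TV-Regularized CT Reconstruction (Multiple Algorithms)
========================================================
This example demonstrates the solution of a CT reconstruction problem with
isotropic total variation (TV) regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_W^2 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $A$ is the SVMBIR tomographic projector, $W$ is a diagonal weighting
matrix, $\mathbf{y}$ is the sinogram, $C$ is a 2D finite difference operator,
and $\mathbf{x}$ is the desired image, comparing the ADMM, Linearized ADMM,
and PDHG optimization algorithms.
"""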
import numpy as np
import jax
import matplotlib.pyplot as plt
import svmbir
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, metric, plot
from scico.linop import Diagonal
from scico.linop.radon_svmbir import SVMBIRSquaredL2Loss, TomographicProjector
from scico.optimize import PDHG, LinearizedADMM
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Generate a ground truth image.
"""
N = 256 # image size
density = 0.025 # attenuation density of the image
np.random.seed(1234)
x_gt = discrete_phantom(Foam(size_range=[0.05, 0.02], gap=0.02, porosity=0.3), size=N - 10)
x_gt = x_gt / np.max(x_gt) * density
x_gt = np.pad(x_gt, 5)
x_gt[x_gt < 0] = 0
"""
Generate tomographic projector and sinogram.
"""
num_angles = int(N / 2)
num_channels = N
angles = snp.linspace(0, snp.pi, num_angles, dtype=snp.float32)
A = TomographicProjector(x_gt.shape, angles, num_channels)
sino = A @ x_gt
"""
Impose Poisson noise on sinogram. Higher max_intensity means less noise.
"""
max_intensity = 2000
expected_counts = max_intensity * np.exp(-sino)
noisy_counts = np.random.poisson(expected_counts).astype(np.float32)
noisy_counts[noisy_counts == 0] = 1 # deal with 0s
y = -np.log(noisy_counts / max_intensity)
"""
Reconstruct using the default prior of SVMBIR :cite:`svmbir-2020`.
"""
weights = svmbir.calc_weights(y, weight_type="transmission")  # transmission noise model weights
x_mrf = svmbir.recon(
np.array(y[:, np.newaxis]),
np.array(angles),
weights=weights[:, np.newaxis],
num_rows=N,
num_cols=N,
positivity=True,
verbose=0,
)[0]
"""
Set up problem.
"""
# Use the MRF reconstruction as the initial solution estimate.
y, x0, weights = jax.device_put([y, x_mrf, weights])
λ = 1e-1 # L1 norm regularization parameter
f = SVMBIRSquaredL2Loss(y=y, A=A, W=Diagonal(weights), scale=0.5)
g = λ * functional.L21Norm() # regularization functional
# The append=0 option makes the results of horizontal and vertical finite
# differences the same shape, which is required for the L21Norm.
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
"""
Solve via ADMM.
"""
solve_admm = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[2e1],
x0=x0,
maxiter=50,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-4, "maxiter": 10}),
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
x_admm = solve_admm.solve()
hist_admm = solve_admm.itstat_object.history(transpose=True)
print(f"PSNR: {metric.psnr(x_gt, x_admm):.2f} dB\n")
"""
Solve via Linearized ADMM.
"""
solver_ladmm = LinearizedADMM(
f=f,
g=g,
C=C,
mu=3e-2,
nu=2e-1,
x0=x0,
maxiter=50,
itstat_options={"display": True, "period": 10},
)
x_ladmm = solver_ladmm.solve()
hist_ladmm = solver_ladmm.itstat_object.history(transpose=True)
print(f"PSNR: {metric.psnr(x_gt, x_ladmm):.2f} dB\n")
"""
Solve via PDHG.
"""
solver_pdhg = PDHG(
f=f,
g=g,
C=C,
tau=2e-2,
sigma=8e0,
x0=x0,
maxiter=50,
itstat_options={"display": True, "period": 10},
)
x_pdhg = solver_pdhg.solve()
hist_pdhg = solver_pdhg.itstat_object.history(transpose=True)
print(f"PSNR: {metric.psnr(x_gt, x_pdhg):.2f} dB\n")
"""
Show the recovered images.
"""
norm = plot.matplotlib.colors.Normalize(vmin=-0.1 * density, vmax=1.2 * density)
fig, ax = plt.subplots(1, 2, figsize=[10, 5])
plot.imview(img=x_gt, title="Ground Truth Image", cbar=True, fig=fig, ax=ax[0], norm=norm)
plot.imview(
img=x_mrf,
title=f"MRF (PSNR: {metric.psnr(x_gt, x_mrf):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[1],
norm=norm,
)
fig.show()
fig, ax = plt.subplots(1, 3, figsize=[15, 5])
plot.imview(
img=x_admm,
title=f"TV ADMM (PSNR: {metric.psnr(x_gt, x_admm):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[0],
norm=norm,
)
plot.imview(
img=x_ladmm,
title=f"TV LinADMM (PSNR: {metric.psnr(x_gt, x_ladmm):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[1],
norm=norm,
)
plot.imview(
img=x_pdhg,
title=f"TV PDHG (PSNR: {metric.psnr(x_gt, x_pdhg):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[2],
norm=norm,
)
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, sharex=True, sharey=False, figsize=(27, 6))
plot.plot(
snp.vstack((hist_admm.Objective, hist_ladmm.Objective, hist_pdhg.Objective)).T,
ptyp="semilogy",
title="Objective function",
xlbl="Iteration",
lgnd=("ADMM", "LinADMM", "PDHG"),
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist_admm.Prml_Rsdl, hist_ladmm.Prml_Rsdl, hist_pdhg.Prml_Rsdl)).T,
ptyp="semilogy",
title="Primal residual",
xlbl="Iteration",
lgnd=("ADMM", "LinADMM", "PDHG"),
fig=fig,
ax=ax[1],
)
plot.plot(
snp.vstack((hist_admm.Dual_Rsdl, hist_ladmm.Dual_Rsdl, hist_pdhg.Dual_Rsdl)).T,
ptyp="semilogy",
title="Dual residual",
xlbl="Iteration",
lgnd=("ADMM", "LinADMM", "PDHG"),
fig=fig,
ax=ax[2],
)
fig.show()
fig, ax = plot.subplots(nrows=1, ncols=3, sharex=True, sharey=False, figsize=(27, 6))
plot.plot(
snp.vstack((hist_admm.Objective, hist_ladmm.Objective, hist_pdhg.Objective)).T,
snp.vstack((hist_admm.Time, hist_ladmm.Time, hist_pdhg.Time)).T,
ptyp="semilogy",
title="Objective function",
xlbl="Time (s)",
lgnd=("ADMM", "LinADMM", "PDHG"),
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist_admm.Prml_Rsdl, hist_ladmm.Prml_Rsdl, hist_pdhg.Prml_Rsdl)).T,
snp.vstack((hist_admm.Time, hist_ladmm.Time, hist_pdhg.Time)).T,
ptyp="semilogy",
title="Primal residual",
xlbl="Time (s)",
lgnd=("ADMM", "LinADMM", "PDHG"),
fig=fig,
ax=ax[1],
)
plot.plot(
snp.vstack((hist_admm.Dual_Rsdl, hist_ladmm.Dual_Rsdl, hist_pdhg.Dual_Rsdl)).T,
snp.vstack((hist_admm.Time, hist_ladmm.Time, hist_pdhg.Time)).T,
ptyp="semilogy",
title="Dual residual",
xlbl="Time (s)",
lgnd=("ADMM", "LinADMM", "PDHG"),
fig=fig,
ax=ax[2],
)
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/ct_svmbir_tv_multi.py | 0.772144 | 0.544378 | ct_svmbir_tv_multi.py | pypi |
r"""
Deconvolution Microscopy (Single Channel)
=========================================
This example partially replicates a [GlobalBioIm
example](https://biomedical-imaging-group.github.io/GlobalBioIm/examples.html)
using the [microscopy data](http://bigwww.epfl.ch/deconvolution/bio/)
provided by the EPFL Biomedical Imaging Group.
The deconvolution problem is solved using class
[admm.ADMM](../_autosummary/scico.optimize.rst#scico.optimize.ADMM) to
solve an image deconvolution problem with isotropic total variation (TV)
regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| M (\mathbf{y} - A \mathbf{x})
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} +
\iota_{\mathrm{NN}}(\mathbf{x}) \;,$$
where $M$ is a mask operator, $A$ is circular convolution,
$\mathbf{y}$ is the blurred image, $C$ is a convolutional gradient
operator, $\iota_{\mathrm{NN}}$ is the indicator function of the
non-negativity constraint, and $\mathbf{x}$ is the desired image.
"""
import scico.numpy as snp
from scico import functional, linop, loss, plot, util
from scico.examples import downsample_volume, epfl_deconv_data, tile_volume_slices
from scico.optimize.admm import ADMM, CircularConvolveSolver
"""
Get and preprocess data. We downsample the data for the purposes of the
example. Reducing the downsampling rate will make the example slower
and more memory-intensive. To run this example on a GPU it may be
necessary to set environment variables
`XLA_PYTHON_CLIENT_ALLOCATOR=platform` and
`XLA_PYTHON_CLIENT_PREALLOCATE=false`. If your GPU does not have enough
memory, you can try setting the environment variable
`JAX_PLATFORM_NAME=cpu` to run on CPU.
"""
channel = 0
downsampling_rate = 2
y, psf = epfl_deconv_data(channel, verbose=True)
y = downsample_volume(y, downsampling_rate)
psf = downsample_volume(psf, downsampling_rate)
y -= y.min()
y /= y.max()
psf /= psf.sum()
"""
Pad data and create mask.
"""
padding = [[0, p] for p in snp.array(psf.shape) - 1]
y_pad = snp.pad(y, padding)
mask = snp.pad(snp.ones_like(y), padding)
"""
Define problem and algorithm parameters.
"""
λ = 2e-6 # ℓ1 norm regularization parameter
ρ0 = 1e-3 # ADMM penalty parameter for first auxiliary variable
ρ1 = 1e-3 # ADMM penalty parameter for second auxiliary variable
ρ2 = 1e-3 # ADMM penalty parameter for third auxiliary variable
maxiter = 100 # number of ADMM iterations
"""
Create operators.
"""
M = linop.Diagonal(mask)  # mask operator (selects the valid, unpadded region)
# Forward operator: circular convolution with the PSF
C0 = linop.CircularConvolve(h=psf, input_shape=mask.shape, h_center=snp.array(psf.shape) / 2 - 0.5)
C1 = linop.FiniteDifference(input_shape=mask.shape, circular=True)  # gradient operator
C2 = linop.Identity(mask.shape)  # identity operator
"""
Create functionals.
"""
g0 = loss.SquaredL2Loss(y=y_pad, A=M) # loss function (forward model)
g1 = λ * functional.L21Norm() # TV penalty (when applied to gradient)
g2 = functional.NonNegativeIndicator() # non-negativity constraint
"""
Set up ADMM solver object and solve problem.
"""
solver = ADMM(
f=None,
g_list=[g0, g1, g2],
C_list=[C0, C1, C2],
rho_list=[ρ0, ρ1, ρ2],
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
x0=y_pad,
subproblem_solver=CircularConvolveSolver(),
)
print("Solving on %s\n" % util.device_info())
solver.solve()
solve_stats = solver.itstat_object.history(transpose=True)
x_pad = solver.x
x = x_pad[: y.shape[0], : y.shape[1], : y.shape[2]]
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(14, 7))
plot.imview(tile_volume_slices(y), title="Blurred measurements", fig=fig, ax=ax[0])
plot.imview(tile_volume_slices(x), title="Deconvolved image", fig=fig, ax=ax[1])
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
solve_stats.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((solve_stats.Prml_Rsdl, solve_stats.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_microscopy_tv_admm.py | 0.917261 | 0.936168 | deconv_microscopy_tv_admm.py | pypi |
import numpy as np
import jax
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot, random
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
np.random.seed(1234)
N = 512 # image size
x_gt = discrete_phantom(Foam(size_range=[0.075, 0.0025], gap=1e-3, porosity=1), size=N)
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
"""
Set up the forward operator and create a test signal consisting of a blurred
signal with additive Gaussian noise.
"""
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n)) / (n * n)
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
noise, key = random.randn(Ax.shape)
y = Ax + σ * noise
"""
Set up ADMM solver.
"""
f = loss.SquaredL2Loss(y=y, A=A)
C = linop.Identity(x_gt.shape)
λ = 20.0 / 255 # BM3D regularization strength
g = λ * functional.BM3D()
ρ = 1.0 # ADMM penalty parameter
maxiter = 10 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=A.T @ y,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 100}),
itstat_options={"display": True},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
x = snp.clip(x, 0, 1)
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0])
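# Crop the convolution boundary from the blurred image so that its shape
# matches that of the ground truth for the PSNR computation.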
nc = n // 2
yc = snp.clip(y[nc:-nc, nc:-nc], 0, 1)
plot.imview(y, title="Blurred, noisy image: %.2f (dB)" % metric.psnr(x_gt, yc), fig=fig, ax=ax[1])
plot.imview(x, title="Deconvolved image: %.2f (dB)" % metric.psnr(x_gt, x), fig=fig, ax=ax[2])
fig.show()
"""
Plot convergence statistics.
"""
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
)
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_ppp_bm3d_admm.py | 0.81372 | 0.526282 | deconv_ppp_bm3d_admm.py | pypi |
r"""
3D TV-Regularized Sparse-View CT Reconstruction
===============================================
This example demonstrates solution of a sparse-view, 3D CT
reconstruction problem with isotropic total variation (TV)
regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $A$ is the Radon transform, $\mathbf{y}$ is the sinogram, $C$ is
a 3D finite difference operator, and $\mathbf{x}$ is the desired
image.
"""
import numpy as np
import jax
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scico import functional, linop, loss, metric, plot
from scico.examples import create_tangle_phantom
from scico.linop.radon_astra import TomographicProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image and projector.
"""
Nx = 128
Ny = 256
Nz = 64
tangle = create_tangle_phantom(Nx, Ny, Nz)
tangle = jax.device_put(tangle)
n_projection = 10 # number of projections
angles = np.linspace(0, np.pi, n_projection) # evenly spaced projection angles
A = TomographicProjector(
tangle.shape, [1.0, 1.0], [Nz, max(Nx, Ny)], angles
) # Radon transform operator
y = A @ tangle # sinogram
"""
Set up ADMM solver object.
"""
λ = 2e0 # L1 norm regularization parameter
ρ = 5e0 # ADMM penalty parameter
maxiter = 25 # number of ADMM iterations
cg_tol = 1e-4 # CG relative tolerance
cg_maxiter = 25 # maximum CG iterations per ADMM iteration
# The append=0 option makes the results of horizontal and vertical
# finite differences the same shape, which is required for the L21Norm,
# which is used so that g(Cx) corresponds to isotropic TV.
C = linop.FiniteDifference(input_shape=tangle.shape, append=0)
g = λ * functional.L21Norm()
f = loss.SquaredL2Loss(y=y, A=A)
x0 = A.T(y)
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=x0,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}),
itstat_options={"display": True, "period": 5},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
solver.solve()
hist = solver.itstat_object.history(transpose=True)
tangle_recon = solver.x
print(
"TV Restruction\nSNR: %.2f (dB), MAE: %.3f"
% (metric.snr(tangle, tangle_recon), metric.mae(tangle, tangle_recon))
)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(7, 5))
plot.imview(tangle[32], title="Ground truth (central slice)", cbar=None, fig=fig, ax=ax[0])
plot.imview(
tangle_recon[32],
title="TV Reconstruction (central slice)\nSNR: %.2f (dB), MAE: %.3f"
% (metric.snr(tangle, tangle_recon), metric.mae(tangle, tangle_recon)),
fig=fig,
ax=ax[1],
)
divider = make_axes_locatable(ax[1])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[1].get_images()[0], cax=cax, label="arbitrary units")
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/ct_astra_3d_tv_admm.py | 0.920348 | 0.948775 | ct_astra_3d_tv_admm.py | pypi |
r"""
Video Decomposition via Robust PCA
==================================
This example demonstrates video foreground/background separation via a
variant of the Robust PCA problem
$$\mathrm{argmin}_{\mathbf{x}_0, \mathbf{x}_1} \; (1/2) \| \mathbf{x}_0
+ \mathbf{x}_1 - \mathbf{y} \|_2^2 + \lambda_0 \| \mathbf{x}_0 \|_*
+ \lambda_1 \| \mathbf{x}_1 \|_1 \;,$$
where $\mathbf{x}_0$ and $\mathbf{x}_1$ are respectively low-rank and
sparse components, $\| \cdot \|_*$ denotes the nuclear norm, and
$\| \cdot \|_1$ denotes the $\ell_1$ norm.
Note: while video foreground/background separation is not an example of
the scientific and computational imaging problems that are the focus of
SCICO, it provides a convenient demonstration of Robust PCA, which does
have potential application in scientific imaging problems.
"""
import imageio
import scico.numpy as snp
from scico import functional, linop, loss, plot
from scico.examples import rgb2gray
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Load example video.
"""
reader = imageio.get_reader("imageio:newtonscradle.gif")
nfrm = reader.get_length()
frmlst = []
for i, frm in enumerate(reader):
frmlst.append(rgb2gray(frm[..., 0:3].astype(snp.float32) / 255.0))
vid = snp.stack(frmlst, axis=2)
"""
Construct matrix with each column consisting of a vectorised video frame.
"""
y = vid.reshape((-1, vid.shape[-1]))
"""
Define functional for Robust PCA problem.
"""
A = linop.Sum(axis=0, input_shape=(2,) + y.shape)
f = loss.SquaredL2Loss(y=y, A=A)
C0 = linop.Slice(idx=0, input_shape=(2,) + y.shape)
g0 = functional.NuclearNorm()
C1 = linop.Slice(idx=1, input_shape=(2,) + y.shape)
g1 = functional.L1Norm()
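r"""
As a small sketch (not part of the original formulation above), the roles
of these operators can be illustrated on a toy stacked array: the solver
works with a single variable of shape `(2,) + y.shape`, the `Sum`
operator adds its two components, so that $A$ applied to the stacked
variable corresponds to $\mathbf{x}_0 + \mathbf{x}_1$, and each `Slice`
operator extracts one component for its regularizer. The array and
operator names below are purely illustrative.
"""
xs = snp.stack((snp.full((2, 3), 1.0), snp.full((2, 3), 2.0)))
Asum = linop.Sum(axis=0, input_shape=xs.shape)
assert snp.allclose(Asum(xs), snp.full((2, 3), 3.0))  # sum of the two components
assert snp.allclose(linop.Slice(idx=1, input_shape=xs.shape)(xs), snp.full((2, 3), 2.0))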
"""
Set up an ADMM solver object.
"""
λ0 = 1e1 # nuclear norm regularization parameter
λ1 = 3e1 # l1 norm regularization parameter
ρ0 = 2e1 # ADMM penalty parameter
ρ1 = 2e1 # ADMM penalty parameter
maxiter = 50 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=[λ0 * g0, λ1 * g1],
C_list=[C0, C1],
rho_list=[ρ0, ρ1],
x0=A.adj(y),
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(),
itstat_options={"display": True, "period": 10},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
"""
Reshape low-rank component as background video sequence and sparse component
as foreground video sequence.
"""
xlr = C0(x)
xsp = C1(x)
vbg = xlr.reshape(vid.shape)
vfg = xsp.reshape(vid.shape)
"""
Display original video frames and corresponding background and foreground frames.
"""
fig, ax = plot.subplots(nrows=4, ncols=3, figsize=(10, 10))
ax[0][0].set_title("Original")
ax[0][1].set_title("Background")
ax[0][2].set_title("Foreground")
for n, fn in enumerate(range(1, 9, 2)):
plot.imview(vid[..., fn], fig=fig, ax=ax[n][0])
plot.imview(vbg[..., fn], fig=fig, ax=ax[n][1])
plot.imview(vfg[..., fn], fig=fig, ax=ax[n][2])
ax[n][0].set_ylabel("Frame %d" % fn, labelpad=5, rotation=90, size="large")
fig.tight_layout()
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/video_rpca_admm.py | 0.89197 | 0.965803 | video_rpca_admm.py | pypi |
import numpy as np
import jax
from bm3d import bm3d_rgb
from colour_demosaicing import demosaicing_CFA_Bayer_Menon2007
import scico
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, metric, plot
from scico.data import kodim23
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Read a ground truth image.
"""
img = kodim23(asfloat=True)[160:416, 60:316]
img = jax.device_put(img) # convert to jax type, push to GPU
"""
Define demosaicing forward operator and its transpose.
"""
def Afn(x):
"""Map an RGB image to a single channel image with each pixel
representing a single colour according to the colour filter array.
"""
y = snp.zeros(x.shape[0:2])
y = y.at[1::2, 1::2].set(x[1::2, 1::2, 0])
y = y.at[0::2, 1::2].set(x[0::2, 1::2, 1])
y = y.at[1::2, 0::2].set(x[1::2, 0::2, 1])
y = y.at[0::2, 0::2].set(x[0::2, 0::2, 2])
return y
def ATfn(x):
"""Back project a single channel raw image to an RGB image with zeros
at the locations of undefined samples.
"""
y = snp.zeros(x.shape + (3,))
y = y.at[1::2, 1::2, 0].set(x[1::2, 1::2])
y = y.at[0::2, 1::2, 1].set(x[0::2, 1::2])
y = y.at[1::2, 0::2, 1].set(x[1::2, 0::2])
y = y.at[0::2, 0::2, 2].set(x[0::2, 0::2])
return y
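r"""
Since `ATfn` is passed to `linop.LinearOperator` below as the adjoint of
`Afn`, it is worth confirming that the pair satisfies the adjoint
identity $\langle A \mathbf{x}, \mathbf{u} \rangle = \langle \mathbf{x},
A^T \mathbf{u} \rangle$. The check below is a minimal sketch, not part
of the original example; the test array shapes and tolerance are
arbitrary illustrative choices.
"""
xt, _ = scico.random.randn((8, 8, 3), seed=1)
ut, _ = scico.random.randn((8, 8), seed=2)
assert snp.allclose(snp.sum(Afn(xt) * ut), snp.sum(xt * ATfn(ut)), rtol=1e-4)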
"""
Define a baseline demosaicing function based on the demosaicing
algorithm of :cite:`menon-2007-demosaicing` from package
[colour_demosaicing](https://github.com/colour-science/colour-demosaicing).
"""
def demosaic(cfaimg):
"""Apply baseline demosaicing."""
return demosaicing_CFA_Bayer_Menon2007(cfaimg, pattern="BGGR").astype(np.float32)
"""
Create a test image by color filter array sampling and adding Gaussian
white noise.
"""
s = Afn(img)
rgbshp = s.shape + (3,) # shape of reconstructed RGB image
σ = 2e-2 # noise standard deviation
noise, key = scico.random.randn(s.shape, seed=0)
sn = s + σ * noise
"""
Compute a baseline demosaicing solution.
"""
imgb = jax.device_put(bm3d_rgb(demosaic(sn), 3 * σ).astype(np.float32))
"""
Set up an ADMM solver object. Note the use of the baseline solution
as an initializer. We use BM3D :cite:`dabov-2008-image` as the
denoiser, using the [code](https://pypi.org/project/bm3d) released
with :cite:`makinen-2019-exact`.
"""
A = linop.LinearOperator(input_shape=rgbshp, output_shape=s.shape, eval_fn=Afn, adj_fn=ATfn)
f = loss.SquaredL2Loss(y=sn, A=A)
C = linop.Identity(input_shape=rgbshp)
g = 1.8e-1 * 6.1e-2 * functional.BM3D(is_rgb=True)
ρ = 1.8e-1 # ADMM penalty parameter
maxiter = 12 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=imgb,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 100}),
itstat_options={"display": True},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Show reference and demosaiced images.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, sharex=True, sharey=True, figsize=(21, 7))
plot.imview(img, title="Reference", fig=fig, ax=ax[0])
plot.imview(imgb, title="Baseline demoisac: %.2f (dB)" % metric.psnr(img, imgb), fig=fig, ax=ax[1])
plot.imview(x, title="PPP demoisac: %.2f (dB)" % metric.psnr(img, x), fig=fig, ax=ax[2])
fig.show()
"""
Plot convergence statistics.
"""
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
)
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/demosaic_ppp_bm3d_admm.py | 0.865835 | 0.614654 | demosaic_ppp_bm3d_admm.py | pypi |
import numpy as np
import jax
import matplotlib.pyplot as plt
import svmbir
from matplotlib.ticker import MaxNLocator
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
from scico import metric, plot
from scico.functional import BM3D, NonNegativeIndicator
from scico.linop import Diagonal, Identity
from scico.linop.radon_svmbir import (
SVMBIRExtendedLoss,
SVMBIRSquaredL2Loss,
TomographicProjector,
)
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Generate a ground truth image.
"""
N = 256 # image size
density = 0.025 # attenuation density of the image
np.random.seed(1234)
x_gt = discrete_phantom(Foam(size_range=[0.05, 0.02], gap=0.02, porosity=0.3), size=N - 10)
x_gt = x_gt / np.max(x_gt) * density
x_gt = np.pad(x_gt, 5)
x_gt[x_gt < 0] = 0
"""
Generate tomographic projector and sinogram.
"""
num_angles = int(N / 2)
num_channels = N
angles = snp.linspace(0, snp.pi, num_angles, endpoint=False, dtype=snp.float32)
A = TomographicProjector(x_gt.shape, angles, num_channels)
sino = A @ x_gt
"""
Impose Poisson noise on sinogram. Higher max_intensity means less noise.
"""
max_intensity = 2000
expected_counts = max_intensity * np.exp(-sino)
noisy_counts = np.random.poisson(expected_counts).astype(np.float32)
noisy_counts[noisy_counts == 0] = 1 # deal with 0s
y = -np.log(noisy_counts / max_intensity)
"""
Reconstruct using default prior of SVMBIR :cite:`svmbir-2020`.
"""
weights = svmbir.calc_weights(y, weight_type="transmission")
x_mrf = svmbir.recon(
np.array(y[:, np.newaxis]),
np.array(angles),
weights=weights[:, np.newaxis],
num_rows=N,
num_cols=N,
positivity=True,
verbose=0,
)[0]
"""
Push arrays to device.
"""
y, x0, weights = jax.device_put([y, x_mrf, weights])
"""
Set problem parameters and BM3D pseudo-functional.
"""
ρ = 10 # ADMM penalty parameter
σ = density * 0.26 # denoiser sigma
g0 = σ * ρ * BM3D()
"""
Set up problem using `SVMBIRSquaredL2Loss` and `NonNegativeIndicator`.
"""
f_l2loss = SVMBIRSquaredL2Loss(
y=y, A=A, W=Diagonal(weights), scale=0.5, prox_kwargs={"maxiter": 5, "ctol": 0.0}
)
g1 = NonNegativeIndicator()
solver_l2loss = ADMM(
f=None,
g_list=[f_l2loss, g0, g1],
C_list=[Identity(x_mrf.shape), Identity(x_mrf.shape), Identity(x_mrf.shape)],
rho_list=[ρ, ρ, ρ],
x0=x0,
maxiter=20,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 100}),
itstat_options={"display": True},
)
"""
Run the ADMM solver.
"""
print(f"Solving on {device_info()}\n")
x_l2loss = solver_l2loss.solve()
hist_l2loss = solver_l2loss.itstat_object.history(transpose=True)
"""
Set up problem using `SVMBIRExtendedLoss`, without need for `NonNegativeIndicator`.
"""
f_extloss = SVMBIRExtendedLoss(
y=y,
A=A,
W=Diagonal(weights),
scale=0.5,
positivity=True,
prox_kwargs={"maxiter": 5, "ctol": 0.0},
)
solver_extloss = ADMM(
f=None,
g_list=[f_extloss, g0],
C_list=[Identity(x_mrf.shape), Identity(x_mrf.shape)],
rho_list=[ρ, ρ],
x0=x0,
maxiter=20,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 100}),
itstat_options={"display": True},
)
"""
Run the ADMM solver.
"""
print()
x_extloss = solver_extloss.solve()
hist_extloss = solver_extloss.itstat_object.history(transpose=True)
"""
Show the recovered images.
"""
norm = plot.matplotlib.colors.Normalize(vmin=-0.1 * density, vmax=1.2 * density)
fig, ax = plt.subplots(2, 2, figsize=(15, 15))
plot.imview(img=x_gt, title="Ground Truth Image", cbar=True, fig=fig, ax=ax[0, 0], norm=norm)
plot.imview(
img=x_mrf,
title=f"MRF (PSNR: {metric.psnr(x_gt, x_mrf):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[0, 1],
norm=norm,
)
plot.imview(
img=x_l2loss,
title=f"SquaredL2Loss + non-negativity (PSNR: {metric.psnr(x_gt, x_l2loss):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[1, 0],
norm=norm,
)
plot.imview(
img=x_extloss,
title=f"ExtendedLoss (PSNR: {metric.psnr(x_gt, x_extloss):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[1, 1],
norm=norm,
)
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
plot.plot(
snp.vstack((hist_l2loss.Prml_Rsdl, hist_l2loss.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals (SquaredL2Loss + non-negativity)",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[0],
)
ax[0].set_ylim([5e-3, 1e0])
ax[0].xaxis.set_major_locator(MaxNLocator(integer=True))
plot.plot(
snp.vstack((hist_extloss.Prml_Rsdl, hist_extloss.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals (ExtendedLoss)",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
ax[1].set_ylim([5e-3, 1e0])
ax[1].xaxis.set_major_locator(MaxNLocator(integer=True))
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/ct_svmbir_ppp_bm3d_admm_prox.py | 0.793466 | 0.509032 | ct_svmbir_ppp_bm3d_admm_prox.py | pypi |
r"""
Convolutional Sparse Coding with Mask Decoupling (ADMM)
=======================================================
This example demonstrates the solution of a convolutional sparse coding
problem
$$\mathrm{argmin}_{\mathbf{x}} \; \frac{1}{2} \Big\| \mathbf{y} -
B \Big( \sum_k \mathbf{h}_k \ast \mathbf{x}_k \Big) \Big\|_2^2 +
\lambda \sum_k ( \| \mathbf{x}_k \|_1 - \| \mathbf{x}_k \|_2 ) \;,$$
where the $\mathbf{h}_k$ are a set of filters comprising the dictionary,
the $\mathbf{x}_k$ are a corresponding set of coefficient maps,
$\mathbf{y}$ is the signal to be represented, and $B$ is a cropping
operator that allows the boundary artifacts resulting from circular
convolution to be avoided. Following the mask decoupling approach
:cite:`almeida-2013-deconvolving`, the problem is posed in ADMM form
as
$$\mathrm{argmin}_{\mathbf{x}, \mathbf{z}_0, \mathbf{z}_1} \; (1/2) \|
\mathbf{y} - B \mathbf{z}_0 \|_2^2 + \lambda \sum_k ( \| \mathbf{z}_{1,k}
\|_1 - \| \mathbf{z}_{1,k} \|_2 ) \\ \;\; \text{s.t.} \;\;
\mathbf{z}_0 = \sum_k \mathbf{h}_k \ast \mathbf{x}_k \;\;
\mathbf{z}_{1,k} = \mathbf{x}_k \;.$$
The most computationally expensive step in the ADMM algorithm is solved
using the frequency-domain approach proposed in
:cite:`wohlberg-2014-efficient`.
"""
import numpy as np
import jax
import scico.numpy as snp
from scico import plot
from scico.examples import create_conv_sparse_phantom
from scico.functional import L1MinusL2Norm, ZeroFunctional
from scico.linop import CircularConvolve, Crop, Identity, Sum
from scico.loss import SquaredL2Loss
from scico.optimize.admm import ADMM, G0BlockCircularConvolveSolver
from scico.util import device_info
"""
Set problem size and create random convolutional dictionary (a set of
filters) and a corresponding sparse random set of coefficient maps.
"""
N = 121 # image size
Nnz = 128 # number of non-zeros in coefficient maps
h, x0 = create_conv_sparse_phantom(N, Nnz)
"""
Normalize dictionary filters and scale coefficient maps accordingly.
"""
hnorm = np.sqrt(np.sum(h**2, axis=(1, 2), keepdims=True))
h /= hnorm
x0 *= hnorm
"""
Convert numpy arrays to jax arrays.
"""
h = jax.device_put(h)
x0 = jax.device_put(x0)
"""
Set up required padding and corresponding crop operator.
"""
h_center = (h.shape[1] // 2, h.shape[2] // 2)
pad_width = ((0, 0), (h_center[0], h_center[0]), (h_center[1], h_center[1]))
x0p = snp.pad(x0, pad_width=pad_width)
B = Crop(pad_width[1:], input_shape=x0p.shape[1:])
"""
Set up sum-of-convolutions forward operator.
"""
C = CircularConvolve(h, input_shape=x0p.shape, ndims=2, h_center=h_center)
S = Sum(input_shape=C.output_shape, axis=0)
A = S @ C
"""
Construct test image from dictionary $\mathbf{h}$ and padded version of
coefficient maps $\mathbf{x}_0$.
"""
y = B(A(x0p))
"""
Set functional and solver parameters.
"""
λ = 1e0 # l1-l2 norm regularization parameter
ρ0 = 1e0 # ADMM penalty parameters
ρ1 = 3e0
maxiter = 200 # number of ADMM iterations
"""
Define loss function and regularization. Note the use of the
$\ell_1 - \ell_2$ norm, which has been found to provide slightly better
performance than the $\ell_1$ norm in this type of problem
:cite:`wohlberg-2021-psf`.
"""
f = ZeroFunctional()
g0 = SquaredL2Loss(y=y, A=B)
g1 = λ * L1MinusL2Norm()
C0 = A
C1 = Identity(input_shape=x0p.shape)
"""
Initialize ADMM solver.
"""
solver = ADMM(
f=f,
g_list=[g0, g1],
C_list=[C0, C1],
rho_list=[ρ0, ρ1],
alpha=1.8,
maxiter=maxiter,
subproblem_solver=G0BlockCircularConvolveSolver(check_solve=True),
itstat_options={"display": True, "period": 10},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x1 = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered coefficient maps.
"""
fig, ax = plot.subplots(nrows=2, ncols=3, figsize=(12, 8.6))
plot.imview(x0[0], title="Coef. map 0", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 0])
ax[0, 0].set_ylabel("Ground truth")
plot.imview(x0[1], title="Coef. map 1", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 1])
plot.imview(x0[2], title="Coef. map 2", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 2])
plot.imview(x1[0], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 0])
ax[1, 0].set_ylabel("Recovered")
plot.imview(x1[1], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 1])
plot.imview(x1[2], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 2])
fig.tight_layout()
fig.show()
"""
Show test image and reconstruction from recovered coefficient maps. Note
the absence of the wrap-around effects at the boundary that can be seen
in the corresponding images in the [related example](sparsecode_conv_admm.rst).
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 6))
plot.imview(y, title="Test image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[0])
plot.imview(B(A(x1)), title="Reconstructed image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[1])
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/sparsecode_conv_md_admm.py | 0.954542 | 0.972727 | sparsecode_conv_md_admm.py | pypi |
r"""
TV-Regularized 3D DiffuserCam Reconstruction
============================================
This example demonstrates reconstruction of a 3D DiffuserCam
:cite:`antipa-2018-diffusercam`
[dataset](https://github.com/Waller-Lab/DiffuserCam/tree/master/example_data).
The inverse problem can be written as
$$\mathrm{argmin}_{\mathbf{x}} \; \frac{1}{2} \Big\| \mathbf{y} -
M \Big( \sum_k \mathbf{h}_k \ast \mathbf{x}_k \Big) \Big\|_2^2 +
\lambda_0 \sum_k \| D \mathbf{x}_k \|_{2,1} +
\lambda_1 \sum_k \| \mathbf{x}_k \|_1 \;,$$
where the $\mathbf{h}_k$ are the components of the PSF stack, the
$\mathbf{x}_k$ are the corresponding components of the reconstructed
volume, $\mathbf{y}$ is the measured image, and $M$ is a cropping
operator that allows the boundary artifacts resulting from circular
convolution to be avoided. Following the mask decoupling approach
:cite:`almeida-2013-deconvolving`, the problem is posed in ADMM form
as
$$\mathrm{argmin}_{\mathbf{x}, \mathbf{z}_0, \mathbf{z}_1,
\mathbf{z}_2} \; \frac{1}{2} \| \mathbf{y} - M \mathbf{z}_0 \|_2^2 +
\lambda_0 \sum_k \| \mathbf{z}_{1,k} \|_{2,1} +
\lambda_1 \sum_k \| \mathbf{z}_{2,k}
\|_1 \\ \;\; \text{s.t.} \;\; \mathbf{z}_0 = \sum_k \mathbf{h}_k \ast
\mathbf{x}_k \qquad \mathbf{z}_{1,k} = D \mathbf{x}_k \qquad
\mathbf{z}_{2,k} = \mathbf{x}_k \;.$$
The most computationally expensive step in the ADMM algorithm is solved
using the frequency-domain approach proposed in
:cite:`wohlberg-2014-efficient`.
"""
import numpy as np
import jax
import scico.numpy as snp
from scico import plot
from scico.examples import ucb_diffusercam_data
from scico.functional import L1Norm, L21Norm, ZeroFunctional
from scico.linop import CircularConvolve, Crop, FiniteDifference, Identity, Sum
from scico.loss import SquaredL2Loss
from scico.optimize.admm import ADMM, G0BlockCircularConvolveSolver
from scico.util import device_info
"""
Load the DiffuserCam PSF stack and measured image. The computational cost
of the reconstruction is reduced slightly by removing parts of the PSF
stack that don't make a significant contribution to the reconstruction.
"""
y, psf = ucb_diffusercam_data()
psf = psf[..., 1:-7]
"""
To avoid boundary artifacts, the measured image is padded by half the PSF
width/height and then cropped within the data fidelity term. This padding
is implicit in that the reconstruction volume is computed at the padded
size, but the actual measured image is never explicitly padded since it is
used at the original (unpadded) size within the data fidelity term due to
the cropping operation. The PSF axis order is modified to put the stack
axis at index 0, as required by components of the ADMM solver to be used.
Finally, each PSF in the stack is individually normalized.
"""
half_psf = np.array(psf.shape[0:2]) // 2
pad_spec = ((half_psf[0],) * 2, (half_psf[1],) * 2)
y_pad_shape = tuple(np.array(y.shape) + np.array(pad_spec).sum(axis=1))
x_shape = (psf.shape[-1],) + y_pad_shape
psf = psf.transpose((2, 0, 1))
psf /= np.sqrt(np.sum(psf**2, axis=(1, 2), keepdims=True))
"""
Convert the image and PSF stack to JAX arrays with `float32` dtype since
JAX by default does not support double-precision floating point
arithmetic. This limited precision leads to relatively poor, but still
acceptable accuracy within the ADMM solver x-step. To experiment with the
effect of higher numerical precision, set the environment variable
`JAX_ENABLE_X64=True` and change `dtype` below to `np.float64`.
"""
dtype = np.float32
y = jax.device_put(y.astype(dtype))
psf = jax.device_put(psf.astype(dtype))
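"""
As an alternative to the `JAX_ENABLE_X64` environment variable mentioned
above, double precision can also be enabled programmatically via the JAX
configuration interface. The lines below are a sketch only, left
disabled so that this script retains its original single-precision
behavior; note that the configuration call must be made before any JAX
arrays are created in order to take effect.
"""
if False:  # sketch only: enable (before the data loading above) for a double-precision run
    jax.config.update("jax_enable_x64", True)
    dtype = np.float64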
"""
Define problem and algorithm parameters.
"""
λ0 = 3e-3 # TV regularization parameter
λ1 = 1e-2 # ℓ1 norm regularization parameter
ρ0 = 1e0 # ADMM penalty parameter for first auxiliary variable
ρ1 = 5e0 # ADMM penalty parameter for second auxiliary variable
ρ2 = 1e1 # ADMM penalty parameter for third auxiliary variable
maxiter = 100 # number of ADMM iterations
"""
Create operators.
"""
C = CircularConvolve(psf, input_shape=x_shape, input_dtype=dtype, h_center=half_psf, ndims=2)
S = Sum(input_shape=x_shape, input_dtype=dtype, axis=0)
M = Crop(pad_spec, input_shape=y_pad_shape, input_dtype=dtype)
"""
Create functionals.
"""
g0 = SquaredL2Loss(y=y, A=M)
g1 = λ0 * L21Norm()
g2 = λ1 * L1Norm()
C0 = S @ C
C1 = FiniteDifference(input_shape=x_shape, input_dtype=dtype, axes=(-2, -1), circular=True)
C2 = Identity(input_shape=x_shape, input_dtype=dtype)
"""
Set up ADMM solver object and solve problem.
"""
solver = ADMM(
f=ZeroFunctional(),
g_list=[g0, g1, g2],
C_list=[C0, C1, C2],
rho_list=[ρ0, ρ1, ρ2],
alpha=1.4,
maxiter=maxiter,
nanstop=True,
subproblem_solver=G0BlockCircularConvolveSolver(ndims=2, check_solve=True),
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Show the measured image and samples from the PSF stack.
"""
plot.imview(y, cmap=plot.plt.cm.Blues, cbar=True, title="Measured Image")
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(14, 7))
plot.imview(psf[0], title="Nearest PSF", cmap=plot.plt.cm.Blues, fig=fig, ax=ax[0])
plot.imview(psf[-1], title="Furthest PSF", cmap=plot.plt.cm.Blues, fig=fig, ax=ax[1])
fig.show()
"""
Show the recovered volume with depth indicated by color.
"""
XCrop = Crop(((0, 0),) + pad_spec, input_shape=x_shape, input_dtype=dtype)
xm = np.array(XCrop(x[..., ::-1]))
xmr = xm.transpose((1, 2, 0))[..., np.newaxis] / xm.max()
cmap = plot.plt.cm.viridis_r
cmval = cmap(np.arange(0, xm.shape[0]).reshape(1, 1, -1) / (xm.shape[0] - 1))
xms = np.sum(cmval * xmr, axis=2)[..., 0:3]
plot.imview(xms, cmap=cmap, cbar=True, title="Recovered Volume")
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/diffusercam_tv_admm.py | 0.932776 | 0.972779 | diffusercam_tv_admm.py | pypi |
r"""
TV-Regularized Low-Dose CT Reconstruction
=========================================
This example demonstrates solution of a low-dose CT reconstruction problem
with isotropic total variation (TV) regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_W^2 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $A$ is the Radon transform, $\mathbf{y}$ is the sinogram, the norm
weighting $W$ is chosen so that the weighted norm is an approximation to
the Poisson negative log likelihood :cite:`sauer-1993-local`, $C$ is
a 2D finite difference operator, and $\mathbf{x}$ is the desired
image.
"""
import numpy as np
import jax
from xdesign import Soil, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.linop.radon_astra import TomographicProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 512 # phantom size
np.random.seed(0)
x_gt = discrete_phantom(Soil(porosity=0.80), size=384)
x_gt = np.ascontiguousarray(np.pad(x_gt, (64, 64)))
x_gt = np.clip(x_gt, 0, np.inf) # clip to positive values
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
"""
Configure CT projection operator and generate synthetic measurements.
"""
n_projection = 360 # number of projections
Io = 1e3 # source flux
𝛼 = 1e-2 # attenuation coefficient
angles = np.linspace(0, 2 * np.pi, n_projection) # evenly spaced projection angles
A = TomographicProjector(x_gt.shape, 1.0, N, angles) # Radon transform operator
y_c = A @ x_gt # sinogram
r"""
Add Poisson noise to projections according to
$$\mathrm{counts} \sim \mathrm{Poi}\left(I_0 \exp\left\{- \alpha A
\mathbf{x} \right\}\right)$$
$$\mathbf{y} = - \frac{1}{\alpha} \log\left(\mathrm{counts} /
I_0\right).$$
We use the NumPy random functionality so we can generate using 64-bit
numbers.
"""
counts = np.random.poisson(Io * snp.exp(-𝛼 * A @ x_gt))
counts = np.clip(counts, a_min=1, a_max=np.inf) # replace any 0s count with 1
y = -1 / 𝛼 * np.log(counts / Io)
y = jax.device_put(y) # convert back to float32
"""
Set up post processing. For this example, we clip all reconstructions
to the range of the ground truth.
"""
def postprocess(x):
return snp.clip(x, 0, snp.max(x_gt))
"""
Compute an FBP reconstruction as an initial guess.
"""
x0 = postprocess(A.fbp(y))
r"""
Set up and solve the un-weighted reconstruction problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} \;.$$
"""
# Note that rho and lambda were selected via a parameter sweep (not
# shown here).
ρ = 2.5e3 # ADMM penalty parameter
lambda_unweighted = 3e2 # regularization strength
maxiter = 100 # number of ADMM iterations
cg_tol = 1e-5 # CG relative tolerance
cg_maxiter = 10 # maximum CG iterations per ADMM iteration
f = loss.SquaredL2Loss(y=y, A=A)
admm_unweighted = ADMM(
f=f,
g_list=[lambda_unweighted * functional.L21Norm()],
C_list=[linop.FiniteDifference(x_gt.shape, append=0)],
rho_list=[ρ],
x0=x0,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}),
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
admm_unweighted.solve()
x_unweighted = postprocess(admm_unweighted.x)
r"""
Set up and solve the weighted reconstruction problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_W^2 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where
$$W = \mathrm{diag}\left\{ \mathrm{counts} / I_0 \right\} \;.$$
The data fidelity term in this formulation follows
:cite:`sauer-1993-local` (9) except for the scaling by $I_0$, which we
use to maintain balance between the data and regularization terms if
$I_0$ changes.
"""
lambda_weighted = 5e1
weights = jax.device_put(counts / Io)
f = loss.SquaredL2Loss(y=y, A=A, W=linop.Diagonal(weights))
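r"""
As a small illustration (a sketch, not part of the original example) of
what this weighted data fidelity computes: with a diagonal weighting
$W = \mathrm{diag}\{\mathbf{w}\}$, the term $(1/2) \| \mathbf{y} - A
\mathbf{x} \|_W^2$ is simply a weighted sum of squared residuals,
$(1/2) \sum_i w_i (\mathbf{y} - A \mathbf{x})_i^2$, as verified below on
an arbitrary toy residual vector.
"""
r_demo = snp.linspace(-1.0, 1.0, 16)
w_demo = snp.linspace(0.5, 2.0, 16)
assert snp.allclose(
    0.5 * r_demo @ (linop.Diagonal(w_demo) @ r_demo), 0.5 * snp.sum(w_demo * r_demo**2)
)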
admm_weighted = ADMM(
f=f,
g_list=[lambda_weighted * functional.L21Norm()],
C_list=[linop.FiniteDifference(x_gt.shape, append=0)],
rho_list=[ρ],
maxiter=maxiter,
x0=x0,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}),
itstat_options={"display": True, "period": 10},
)
admm_weighted.solve()
x_weighted = postprocess(admm_weighted.x)
"""
Show recovered images.
"""
def plot_recon(x, title, ax):
"""Plot an image with title indicating error metrics."""
plot.imview(
x,
title=f"{title}\nSNR: {metric.snr(x_gt, x):.2f} (dB), MAE: {metric.mae(x_gt, x):.3f}",
fig=fig,
ax=ax,
)
fig, ax = plot.subplots(nrows=2, ncols=2, figsize=(11, 10))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0, 0])
plot_recon(x0, "FBP Reconstruction", ax=ax[0, 1])
plot_recon(x_unweighted, "Unweighted TV Reconstruction", ax=ax[1, 0])
plot_recon(x_weighted, "Weighted TV Reconstruction", ax=ax[1, 1])
for ax_ in ax.ravel():
ax_.set_xlim(64, 448)
ax_.set_ylim(64, 448)
fig.subplots_adjust(left=0.1, right=0.99, top=0.95, bottom=0.05, wspace=0.2, hspace=0.01)
fig.colorbar(
ax[0, 0].get_images()[0], ax=ax, location="right", shrink=0.9, pad=0.05, label="arbitrary units"
)
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/ct_astra_weighted_tv_admm.py | 0.908544 | 0.914023 | ct_astra_weighted_tv_admm.py | pypi |
import numpy as np
import jax
import matplotlib.pyplot as plt
import svmbir
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
from scico import metric, plot
from scico.functional import BM3D, NonNegativeIndicator
from scico.linop import Diagonal, Identity
from scico.linop.radon_svmbir import SVMBIRSquaredL2Loss, TomographicProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Generate a ground truth image.
"""
N = 256 # image size
density = 0.025 # attenuation density of the image
np.random.seed(1234)
x_gt = discrete_phantom(Foam(size_range=[0.05, 0.02], gap=0.02, porosity=0.3), size=N - 10)
x_gt = x_gt / np.max(x_gt) * density
x_gt = np.pad(x_gt, 5)
x_gt[x_gt < 0] = 0
"""
Generate tomographic projector and sinogram.
"""
num_angles = int(N / 2)
num_channels = N
angles = snp.linspace(0, snp.pi, num_angles, endpoint=False, dtype=snp.float32)
A = TomographicProjector(x_gt.shape, angles, num_channels)
sino = A @ x_gt
"""
Impose Poisson noise on sinogram. Higher max_intensity means less noise.
"""
max_intensity = 2000
expected_counts = max_intensity * np.exp(-sino)
noisy_counts = np.random.poisson(expected_counts).astype(np.float32)
noisy_counts[noisy_counts == 0] = 1 # deal with 0s
y = -np.log(noisy_counts / max_intensity)
"""
Reconstruct using default prior of SVMBIR :cite:`svmbir-2020`.
"""
weights = svmbir.calc_weights(y, weight_type="transmission")
x_mrf = svmbir.recon(
np.array(y[:, np.newaxis]),
np.array(angles),
weights=weights[:, np.newaxis],
num_rows=N,
num_cols=N,
positivity=True,
verbose=0,
)[0]
"""
Set up an ADMM solver.
"""
y, x0, weights = jax.device_put([y, x_mrf, weights])
ρ = 15 # ADMM penalty parameter
σ = density * 0.18 # denoiser sigma
f = SVMBIRSquaredL2Loss(y=y, A=A, W=Diagonal(weights), scale=0.5)
g0 = σ * ρ * BM3D()
g1 = NonNegativeIndicator()
solver = ADMM(
f=f,
g_list=[g0, g1],
C_list=[Identity(x_mrf.shape), Identity(x_mrf.shape)],
rho_list=[ρ, ρ],
x0=x0,
maxiter=20,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-4, "maxiter": 100}),
itstat_options={"display": True, "period": 1},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x_bm3d = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered image.
"""
norm = plot.matplotlib.colors.Normalize(vmin=-0.1 * density, vmax=1.2 * density)
fig, ax = plt.subplots(1, 3, figsize=[15, 5])
plot.imview(img=x_gt, title="Ground Truth Image", cbar=True, fig=fig, ax=ax[0], norm=norm)
plot.imview(
img=x_mrf,
title=f"MRF (PSNR: {metric.psnr(x_gt, x_mrf):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[1],
norm=norm,
)
plot.imview(
img=x_bm3d,
title=f"BM3D (PSNR: {metric.psnr(x_gt, x_bm3d):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[2],
norm=norm,
)
fig.show()
"""
Plot convergence statistics.
"""
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
)
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/ct_svmbir_ppp_bm3d_admm_cg.py | 0.768125 | 0.616301 | ct_svmbir_ppp_bm3d_admm_cg.py | pypi |
import numpy as np
import jax
import matplotlib.pyplot as plt
import svmbir
from matplotlib.ticker import MaxNLocator
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
from scico import metric, plot
from scico.functional import BM3D
from scico.linop import Diagonal, Identity
from scico.linop.radon_svmbir import SVMBIRExtendedLoss, TomographicProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Generate a ground truth image.
"""
N = 256 # image size
density = 0.025 # attenuation density of the image
np.random.seed(1234)
pad_len = 5
x_gt = discrete_phantom(Foam(size_range=[0.05, 0.02], gap=0.02, porosity=0.3), size=N - 2 * pad_len)
x_gt = x_gt / np.max(x_gt) * density
x_gt = np.pad(x_gt, pad_len)
x_gt[x_gt < 0] = 0
"""
Generate tomographic projector and sinogram for fan beam and parallel beam.
For fan beam, use view angles spanning 2π since unlike parallel beam, views
at 0 and π are not equivalent.
"""
num_angles = int(N / 2)
num_channels = N
# Use angles in the range [0, 2*pi] for fan beam
angles = snp.linspace(0, 2 * snp.pi, num_angles, endpoint=False, dtype=snp.float32)
dist_source_detector = 1500.0
magnification = 1.2
A_fan = TomographicProjector(
x_gt.shape,
angles,
num_channels,
geometry="fan-curved",
dist_source_detector=dist_source_detector,
magnification=magnification,
)
A_parallel = TomographicProjector(
x_gt.shape,
angles,
num_channels,
geometry="parallel",
)
sino_fan = A_fan @ x_gt
"""
Impose Poisson noise on sinograms. Higher max_intensity means less noise.
"""
def add_poisson_noise(sino, max_intensity):
expected_counts = max_intensity * np.exp(-sino)
noisy_counts = np.random.poisson(expected_counts).astype(np.float32)
noisy_counts[noisy_counts == 0] = 1 # deal with 0s
y = -np.log(noisy_counts / max_intensity)
return y
y_fan = add_poisson_noise(sino_fan, max_intensity=500)
"""
Reconstruct using default prior of SVMBIR :cite:`svmbir-2020`.
"""
weights_fan = svmbir.calc_weights(y_fan, weight_type="transmission")
x_mrf_fan = svmbir.recon(
np.array(y_fan[:, np.newaxis]),
np.array(angles),
weights=weights_fan[:, np.newaxis],
num_rows=N,
num_cols=N,
positivity=True,
verbose=0,
stop_threshold=0.0,
geometry="fan-curved",
dist_source_detector=dist_source_detector,
magnification=magnification,
delta_channel=1.0,
delta_pixel=1.0 / magnification,
)[0]
x_mrf_parallel = svmbir.recon(
np.array(y_fan[:, np.newaxis]),
np.array(angles),
weights=weights_fan[:, np.newaxis],
num_rows=N,
num_cols=N,
positivity=True,
verbose=0,
stop_threshold=0.0,
geometry="parallel",
)[0]
"""
Push arrays to device.
"""
y_fan, x0_fan, weights_fan = jax.device_put([y_fan, x_mrf_fan, weights_fan])
x0_parallel = jax.device_put(x_mrf_parallel)
"""
Set problem parameters and BM3D pseudo-functional.
"""
ρ = 10 # ADMM penalty parameter
σ = density * 0.6 # denoiser sigma
g0 = σ * ρ * BM3D()
"""
Set up problem using `SVMBIRExtendedLoss`.
"""
f_extloss_fan = SVMBIRExtendedLoss(
y=y_fan,
A=A_fan,
W=Diagonal(weights_fan),
scale=0.5,
positivity=True,
prox_kwargs={"maxiter": 5, "ctol": 0.0},
)
f_extloss_parallel = SVMBIRExtendedLoss(
y=y_fan,
A=A_parallel,
W=Diagonal(weights_fan),
scale=0.5,
positivity=True,
prox_kwargs={"maxiter": 5, "ctol": 0.0},
)
solver_extloss_fan = ADMM(
f=None,
g_list=[f_extloss_fan, g0],
C_list=[Identity(x_mrf_fan.shape), Identity(x_mrf_fan.shape)],
rho_list=[ρ, ρ],
x0=x0_fan,
maxiter=20,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 100}),
itstat_options={"display": True},
)
solver_extloss_parallel = ADMM(
f=None,
g_list=[f_extloss_parallel, g0],
C_list=[Identity(x_mrf_parallel.shape), Identity(x_mrf_parallel.shape)],
rho_list=[ρ, ρ],
x0=x0_parallel,
maxiter=20,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 100}),
itstat_options={"display": True},
)
"""
Run the ADMM solvers.
"""
print(f"Solving on {device_info()}\n")
x_extloss_fan = solver_extloss_fan.solve()
hist_extloss_fan = solver_extloss_fan.itstat_object.history(transpose=True)
print()
x_extloss_parallel = solver_extloss_parallel.solve()
hist_extloss_parallel = solver_extloss_parallel.itstat_object.history(transpose=True)
"""
Show the recovered images. The parallel beam reconstruction is poor because
the parallel beam is a poor approximation of the specific fan beam geometry
used here.
"""
norm = plot.matplotlib.colors.Normalize(vmin=-0.1 * density, vmax=1.2 * density)
fig, ax = plt.subplots(1, 3, figsize=(20, 7))
plot.imview(img=x_gt, title="Ground Truth Image", cbar=True, fig=fig, ax=ax[0], norm=norm)
plot.imview(
img=x_mrf_parallel,
title=f"Parallel-beam MRF (PSNR: {metric.psnr(x_gt, x_mrf_parallel):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[1],
norm=norm,
)
plot.imview(
img=x_extloss_parallel,
title=f"Parallel-beam Extended Loss (PSNR: {metric.psnr(x_gt, x_extloss_parallel):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[2],
norm=norm,
)
fig.show()
fig, ax = plt.subplots(1, 3, figsize=(20, 7))
plot.imview(img=x_gt, title="Ground Truth Image", cbar=True, fig=fig, ax=ax[0], norm=norm)
plot.imview(
img=x_mrf_fan,
title=f"Fan-beam MRF (PSNR: {metric.psnr(x_gt, x_mrf_fan):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[1],
norm=norm,
)
plot.imview(
img=x_extloss_fan,
title=f"Fan-beam Extended Loss (PSNR: {metric.psnr(x_gt, x_extloss_fan):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[2],
norm=norm,
)
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plt.subplots(1, 2, figsize=(15, 6))
plot.plot(
snp.vstack((hist_extloss_parallel.Prml_Rsdl, hist_extloss_parallel.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals for parallel-beam reconstruction",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[0],
)
ax[0].set_ylim([5e-3, 1e0])
ax[0].xaxis.set_major_locator(MaxNLocator(integer=True))
plot.plot(
snp.vstack((hist_extloss_fan.Prml_Rsdl, hist_extloss_fan.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals for fan-beam reconstruction",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
ax[1].set_ylim([5e-3, 1e0])
ax[1].xaxis.set_major_locator(MaxNLocator(integer=True))
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/ct_fan_svmbir_ppp_bm3d_admm_prox.py | 0.806853 | 0.555073 | ct_fan_svmbir_ppp_bm3d_admm_prox.py | pypi |
r"""
Convolutional Sparse Coding (ADMM)
==================================
This example demonstrates the solution of a simple convolutional sparse
coding problem
$$\mathrm{argmin}_{\mathbf{x}} \; \frac{1}{2} \Big\| \mathbf{y} -
\sum_k \mathbf{h}_k \ast \mathbf{x}_k \Big\|_2^2 + \lambda \sum_k
( \| \mathbf{x}_k \|_1 - \| \mathbf{x}_k \|_2 ) \;,$$
where the $\mathbf{h}_k$ are a set of filters comprising the dictionary,
the $\mathbf{x}_k$ are a corresponding set of coefficient maps, and
$\mathbf{y}$ is the signal to be represented. The problem is solved via
an ADMM algorithm using the frequency-domain approach proposed in
:cite:`wohlberg-2014-efficient`.
"""
import numpy as np
import jax
import scico.numpy as snp
from scico import plot
from scico.examples import create_conv_sparse_phantom
from scico.functional import L1MinusL2Norm
from scico.linop import CircularConvolve, Identity, Sum
from scico.loss import SquaredL2Loss
from scico.optimize.admm import ADMM, FBlockCircularConvolveSolver
from scico.util import device_info
"""
Set problem size and create random convolutional dictionary (a set of
filters) and a corresponding sparse random set of coefficient maps.
"""
N = 128 # image size
Nnz = 128 # number of non-zeros in coefficient maps
h, x0 = create_conv_sparse_phantom(N, Nnz)
"""
Normalize dictionary filters and scale coefficient maps accordingly.
"""
hnorm = np.sqrt(np.sum(h**2, axis=(1, 2), keepdims=True))
h /= hnorm
x0 *= hnorm
"""
Convert numpy arrays to jax arrays.
"""
h = jax.device_put(h)
x0 = jax.device_put(x0)
"""
Set up sum-of-convolutions forward operator.
"""
C = CircularConvolve(h, input_shape=x0.shape, ndims=2)
S = Sum(input_shape=C.output_shape, axis=0)
A = S @ C
"""
Construct test image from dictionary $\mathbf{h}$ and coefficient maps
$\mathbf{x}_0$.
"""
y = A(x0)
"""
Set functional and solver parameters.
"""
λ = 1e0 # l1-l2 norm regularization parameter
ρ = 2e0 # ADMM penalty parameter
maxiter = 200 # number of ADMM iterations
"""
Define loss function and regularization. Note the use of the
$\ell_1 - \ell_2$ norm, which has been found to provide slightly better
performance than the $\ell_1$ norm in this type of problem
:cite:`wohlberg-2021-psf`.
"""
f = SquaredL2Loss(y=y, A=A)
g0 = λ * L1MinusL2Norm()
C0 = Identity(input_shape=x0.shape)
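r"""
As a small sketch (not part of the original example) of what the
$\ell_1 - \ell_2$ functional computes: for a vector $\mathbf{v}$ it
evaluates $\| \mathbf{v} \|_1 - \| \mathbf{v} \|_2$, which is zero for
any vector with a single non-zero entry, so that non-sparse vectors are
penalized more heavily than sparse ones.
"""
v_demo = snp.array([3.0, 0.0, 0.0])
assert snp.allclose(L1MinusL2Norm()(v_demo), 0.0)  # ||v||_1 = ||v||_2 = 3
v_demo = snp.array([3.0, 4.0, 0.0])
assert snp.allclose(L1MinusL2Norm()(v_demo), 2.0)  # ||v||_1 - ||v||_2 = 7 - 5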
"""
Initialize ADMM solver.
"""
solver = ADMM(
f=f,
g_list=[g0],
C_list=[C0],
rho_list=[ρ],
alpha=1.8,
maxiter=maxiter,
subproblem_solver=FBlockCircularConvolveSolver(check_solve=True),
itstat_options={"display": True, "period": 10},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x1 = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered coefficient maps.
"""
fig, ax = plot.subplots(nrows=2, ncols=3, figsize=(12, 8.6))
plot.imview(x0[0], title="Coef. map 0", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 0])
ax[0, 0].set_ylabel("Ground truth")
plot.imview(x0[1], title="Coef. map 1", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 1])
plot.imview(x0[2], title="Coef. map 2", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 2])
plot.imview(x1[0], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 0])
ax[1, 0].set_ylabel("Recovered")
plot.imview(x1[1], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 1])
plot.imview(x1[2], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 2])
fig.tight_layout()
fig.show()
"""
Show test image and reconstruction from recovered coefficient maps.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 6))
plot.imview(y, title="Test image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[0])
plot.imview(A(x1), title="Reconstructed image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[1])
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/sparsecode_conv_admm.py | 0.944817 | 0.938181 | sparsecode_conv_admm.py | pypi |
r"""
Total Variation Denoising (ADMM)
================================
This example compares denoising via isotropic and anisotropic total
variation (TV) regularization :cite:`rudin-1992-nonlinear`
:cite:`goldstein-2009-split`. It solves the denoising problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - \mathbf{x}
\|_2^2 + \lambda R(\mathbf{x}) \;,$$
where $R$ is either the isotropic or anisotropic TV regularizer.
In SCICO, switching between these two regularizers is a one-line
change: replacing an
[L1Norm](../_autosummary/scico.functional.rst#scico.functional.L1Norm)
with an
[L21Norm](../_autosummary/scico.functional.rst#scico.functional.L21Norm).
Note that the isotropic version exhibits fewer block-like artifacts on
edges that are not vertical or horizontal.
"""
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, plot
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 256 # image size
phantom = SiemensStar(16)
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
x_gt = x_gt / x_gt.max()
"""
Add noise to create a noisy test image.
"""
σ = 0.75 # noise standard deviation
noise, key = scico.random.randn(x_gt.shape, seed=0)
y = x_gt + σ * noise
"""
Denoise with isotropic total variation.
"""
λ_iso = 1.4e0
f = loss.SquaredL2Loss(y=y)
g_iso = λ_iso * functional.L21Norm()
# The append=0 option makes the results of horizontal and vertical finite
# differences the same shape, which is required for the L21Norm.
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
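r"""
As a small sketch (not part of the original example) of the difference
between the two regularizers when applied to a stacked gradient array of
shape `(2, ...)` such as the output of `C`: the `L21Norm` sums the
Euclidean norms of the per-pixel gradient vectors, while the `L1Norm`
sums the absolute values of all entries.
"""
g_demo = snp.array([[[3.0]], [[4.0]]])  # single pixel with gradient (3, 4)
assert snp.allclose(functional.L21Norm()(g_demo), 5.0)  # sqrt(3**2 + 4**2)
assert snp.allclose(functional.L1Norm()(g_demo), 7.0)  # |3| + |4|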
solver = ADMM(
f=f,
g_list=[g_iso],
C_list=[C],
rho_list=[1e1],
x0=y,
maxiter=100,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 20}),
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
solver.solve()
x_iso = solver.x
print()
"""
Denoise with anisotropic total variation for comparison.
"""
# Tune the weight to give the same data fidelity as the isotropic case.
λ_aniso = 1.2e0
g_aniso = λ_aniso * functional.L1Norm()
solver = ADMM(
f=f,
g_list=[g_aniso],
C_list=[C],
rho_list=[1e1],
x0=y,
maxiter=100,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 20}),
itstat_options={"display": True, "period": 10},
)
solver.solve()
x_aniso = solver.x
print()
"""
Compute and print the data fidelity.
"""
for x, name in zip((x_iso, x_aniso), ("Isotropic", "Anisotropic")):
df = f(x)
print(f"Data fidelity for {name} TV was {df:.2e}")
"""
Plot results.
"""
plt_args = dict(norm=plot.matplotlib.colors.Normalize(vmin=0, vmax=1.5))
fig, ax = plot.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(11, 10))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0, 0], **plt_args)
plot.imview(y, title="Noisy version", fig=fig, ax=ax[0, 1], **plt_args)
plot.imview(x_iso, title="Isotropic TV denoising", fig=fig, ax=ax[1, 0], **plt_args)
plot.imview(x_aniso, title="Anisotropic TV denoising", fig=fig, ax=ax[1, 1], **plt_args)
fig.subplots_adjust(left=0.1, right=0.99, top=0.95, bottom=0.05, wspace=0.2, hspace=0.01)
fig.colorbar(
ax[0, 0].get_images()[0], ax=ax, location="right", shrink=0.9, pad=0.05, label="Arbitrary Units"
)
fig.suptitle("Denoising comparison")
fig.show()
# zoomed version
fig, ax = plot.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(11, 10))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0, 0], **plt_args)
plot.imview(y, title="Noisy version", fig=fig, ax=ax[0, 1], **plt_args)
plot.imview(x_iso, title="Isotropic TV denoising", fig=fig, ax=ax[1, 0], **plt_args)
plot.imview(x_aniso, title="Anisotropic TV denoising", fig=fig, ax=ax[1, 1], **plt_args)
ax[0, 0].set_xlim(N // 4, N // 4 + N // 2)
ax[0, 0].set_ylim(N // 4, N // 4 + N // 2)
fig.subplots_adjust(left=0.1, right=0.99, top=0.95, bottom=0.05, wspace=0.2, hspace=0.01)
fig.colorbar(
ax[0, 0].get_images()[0], ax=ax, location="right", shrink=0.9, pad=0.05, label="Arbitrary Units"
)
fig.suptitle("Denoising comparison (zoomed)")
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/denoise_tv_admm.py | 0.89241 | 0.828176 | denoise_tv_admm.py | pypi |
r"""
Training of DnCNN for Denoising
===============================
This example demonstrates the training and application of the DnCNN model
from :cite:`zhang-2017-dncnn` to denoise images that have been corrupted
with additive Gaussian noise.
"""
import os
from time import time
import numpy as np
import jax
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scico import flax as sflax
from scico import metric, plot
from scico.flax.examples import load_image_data
"""
Prepare parallel processing. Set an arbitrary processor count (only
applies if GPU is not available).
"""
os.environ["XLA_FLAGS"] = "--xla_force_host_platform_device_count=8"
platform = jax.lib.xla_bridge.get_backend().platform
print("Platform: ", platform)
"""
Read data from cache or generate if not available.
"""
size = 40 # patch size
train_nimg = 400 # number of training images
test_nimg = 16 # number of testing images
nimg = train_nimg + test_nimg
gray = True # use gray scale images
data_mode = "dn" # Denoising problem
noise_level = 0.1 # Standard deviation of noise
noise_range = False # Use fixed noise level
stride = 23 # Stride to sample multiple patches from each image
train_ds, test_ds = load_image_data(
train_nimg,
test_nimg,
size,
gray,
data_mode,
verbose=True,
noise_level=noise_level,
noise_range=noise_range,
stride=stride,
)
"""
Define configuration dictionary for model and training loop.
Parameters have been selected for demonstration purposes and relatively
short training. The depth of the model has been reduced to 6, instead of
the 17 of the original model. The suggested settings can be found in the
original paper.
"""
# model configuration
model_conf = {
"depth": 6,
"num_filters": 64,
}
# training configuration
train_conf: sflax.ConfigDict = {
"seed": 0,
"opt_type": "ADAM",
"batch_size": 128,
"num_epochs": 50,
"base_learning_rate": 1e-3,
"warmup_epochs": 0,
"log_every_steps": 5000,
"log": True,
}
"""
Construct DnCNN model.
"""
channels = train_ds["image"].shape[-1]
model = sflax.DnCNNNet(
depth=model_conf["depth"],
channels=channels,
num_filters=model_conf["num_filters"],
)
"""
Run training loop.
"""
workdir = os.path.join(os.path.expanduser("~"), ".cache", "scico", "examples", "dncnn_out")
train_conf["workdir"] = workdir
print(f"{'JAX process: '}{jax.process_index()}{' / '}{jax.process_count()}")
print(f"{'JAX local devices: '}{jax.local_devices()}")
trainer = sflax.BasicFlaxTrainer(
train_conf,
model,
train_ds,
test_ds,
)
start_time = time()
modvar, stats_object = trainer.train()
time_train = time() - start_time
"""
Evaluate on testing data.
"""
test_patches = 720
start_time = time()
fmap = sflax.FlaxMap(model, modvar)
output = fmap(test_ds["image"][:test_patches])
time_eval = time() - start_time
output = np.clip(output, a_min=0, a_max=1.0)
"""
Compare trained model in terms of reconstruction time and data fidelity.
"""
snr_eval = metric.snr(test_ds["label"][:test_patches], output)
psnr_eval = metric.psnr(test_ds["label"][:test_patches], output)
print(
f"{'DnCNNNet training':18s}{'epochs:':2s}{train_conf['num_epochs']:>5d}"
f"{'':21s}{'time[s]:':10s}{time_train:>7.2f}"
)
print(
f"{'DnCNNNet testing':18s}{'SNR:':5s}{snr_eval:>5.2f}{' dB'}{'':3s}"
f"{'PSNR:':6s}{psnr_eval:>5.2f}{' dB'}{'':3s}{'time[s]:':10s}{time_eval:>7.2f}"
)
"""
Plot comparison. Note that since the patches are small, the plots may
correspond to unidentifiable image fragments.
"""
np.random.seed(123)
indx = np.random.randint(0, high=test_patches)
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(test_ds["label"][indx, ..., 0], title="Ground truth", cbar=None, fig=fig, ax=ax[0])
plot.imview(
test_ds["image"][indx, ..., 0],
title="Noisy: \nSNR: %.2f (dB), PSNR: %.2f"
% (
metric.snr(test_ds["label"][indx, ..., 0], test_ds["image"][indx, ..., 0]),
metric.psnr(test_ds["label"][indx, ..., 0], test_ds["image"][indx, ..., 0]),
),
cbar=None,
fig=fig,
ax=ax[1],
)
plot.imview(
output[indx, ..., 0],
title="DnCNNNet Reconstruction\nSNR: %.2f (dB), PSNR: %.2f"
% (
metric.snr(test_ds["label"][indx, ..., 0], output[indx, ..., 0]),
metric.psnr(test_ds["label"][indx, ..., 0], output[indx, ..., 0]),
),
fig=fig,
ax=ax[2],
)
divider = make_axes_locatable(ax[2])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[2].get_images()[0], cax=cax, label="arbitrary units")
fig.show()
"""
Plot convergence statistics. Statistics only generated if a training
cycle was done (i.e. not reading final epoch results from checkpoint).
"""
if stats_object is not None:
hist = stats_object.history(transpose=True)
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
np.vstack((hist.Train_Loss, hist.Eval_Loss)).T,
x=hist.Epoch,
ptyp="semilogy",
title="Loss function",
xlbl="Epoch",
ylbl="Loss value",
lgnd=("Train", "Test"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.vstack((hist.Train_SNR, hist.Eval_SNR)).T,
x=hist.Epoch,
title="Metric",
xlbl="Epoch",
ylbl="SNR (dB)",
lgnd=("Train", "Test"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/denoise_dncnn_train_bsds.py | 0.920994 | 0.649051 | denoise_dncnn_train_bsds.py | pypi |
import numpy as np
import jax
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot, random
from scico.optimize import ProximalADMM
from scico.util import device_info
"""
Create a ground truth image.
"""
np.random.seed(1234)
N = 512 # image size
x_gt = discrete_phantom(Foam(size_range=[0.075, 0.0025], gap=1e-3, porosity=1), size=N)
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
"""
Set up forward operator $A$ and test signal consisting of blurred signal with
additive Gaussian noise.
"""
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n)) / (n * n)
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
noise, key = random.randn(Ax.shape)
y = Ax + σ * noise
"""
Set up the problem to be solved. We want to minimize the functional
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + R(\mathbf{x}) \;$$
where $R(\cdot)$ is a pseudo-functional having the DnCNN denoiser as its
proximal operator. A slightly unusual variable splitting is used,
including setting the $f$ functional to the $R(\cdot)$ term and the $g$
functional to the data fidelity term to allow the use of proximal ADMM,
which avoids the need for conjugate gradient sub-iterations in the solver
steps.
"""
f = functional.DnCNN(variant="17M")
g = loss.SquaredL2Loss(y=y)
"""
Set up proximal ADMM solver.
"""
ρ = 0.2 # ADMM penalty parameter
maxiter = 10 # number of proximal ADMM iterations
mu, nu = ProximalADMM.estimate_parameters(A)
solver = ProximalADMM(
f=f,
g=g,
A=A,
rho=ρ,
mu=mu,
nu=nu,
x0=A.T @ y,
maxiter=maxiter,
itstat_options={"display": True},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
x = snp.clip(x, 0, 1)
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0])
nc = n // 2
yc = snp.clip(y[nc:-nc, nc:-nc], 0, 1)
plot.imview(y, title="Blurred, noisy image: %.2f (dB)" % metric.psnr(x_gt, yc), fig=fig, ax=ax[1])
plot.imview(x, title="Deconvolved image: %.2f (dB)" % metric.psnr(x_gt, x), fig=fig, ax=ax[2])
fig.show()
"""
Plot convergence statistics.
"""
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
)
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_ppp_dncnn_padmm.py | 0.81637 | 0.684989 | deconv_ppp_dncnn_padmm.py | pypi |
r"""
Comparison of Optimization Algorithms for Total Variation Denoising
===================================================================
This example compares the performance of alternating direction method of
multipliers (ADMM), linearized ADMM, proximal ADMM, and primal–dual
hybrid gradient (PDHG) in solving the isotropic total variation (TV)
denoising problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - \mathbf{x}
\|_2^2 + \lambda R(\mathbf{x}) \;,$$
where $R$ is the isotropic TV: the sum of the norms of the gradient
vectors at each point in the image $\mathbf{x}$.
"""
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, plot
from scico.optimize import PDHG, LinearizedADMM, ProximalADMM
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
phantom = SiemensStar(32)
N = 256 # image size
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
"""
Add noise to create a noisy test image.
"""
σ = 1.0 # noise standard deviation
noise, key = scico.random.randn(x_gt.shape, seed=0)
y = x_gt + σ * noise
"""
Construct operators and functionals and set regularization parameter.
"""
# The append=0 option makes the results of horizontal and vertical
# finite differences the same shape, which is required for the L21Norm.
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
f = loss.SquaredL2Loss(y=y)
λ = 1e0
g = λ * functional.L21Norm()
"""
The first step of the first-run solver is much slower than the
following steps, presumably due to just-in-time compilation of
relevant operators on first use. The code below performs a preliminary
solver step, the result of which is discarded, to reduce this bias in
the timing results. The precise cause of the remaining differences in
time required to compute the first step of each algorithm is unknown,
but it is worth noting that this difference becomes negligible when
just-in-time compilation is disabled (e.g. via the JAX_DISABLE_JIT
environment variable).
"""
solver_admm = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[1e1],
x0=y,
maxiter=1,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"maxiter": 1}),
)
solver_admm.solve(); # fmt: skip
# trailing semi-colon suppresses output in notebook
"""
Solve via ADMM with a maximum of 2 CG iterations.
"""
solver_admm = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[1e1],
x0=y,
maxiter=200,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"maxiter": 2}),
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
print("ADMM solver")
solver_admm.solve()
hist_admm = solver_admm.itstat_object.history(transpose=True)
"""
Solve via Linearized ADMM.
"""
solver_ladmm = LinearizedADMM(
f=f,
g=g,
C=C,
mu=1e-2,
nu=1e-1,
x0=y,
maxiter=200,
itstat_options={"display": True, "period": 10},
)
print("\nLinearized ADMM solver")
solver_ladmm.solve()
hist_ladmm = solver_ladmm.itstat_object.history(transpose=True)
"""
Solve via Proximal ADMM.
"""
mu, nu = ProximalADMM.estimate_parameters(C)
solver_padmm = ProximalADMM(
f=f,
g=g,
A=C,
rho=1e0,
mu=mu,
nu=nu,
x0=y,
maxiter=200,
itstat_options={"display": True, "period": 10},
)
print("\nProximal ADMM solver")
solver_padmm.solve()
hist_padmm = solver_padmm.itstat_object.history(transpose=True)
"""
Solve via PDHG.
"""
tau, sigma = PDHG.estimate_parameters(C, factor=1.5)
solver_pdhg = PDHG(
f=f,
g=g,
C=C,
tau=tau,
sigma=sigma,
maxiter=200,
itstat_options={"display": True, "period": 10},
)
print("\nPDHG solver")
solver_pdhg.solve()
hist_pdhg = solver_pdhg.itstat_object.history(transpose=True)
"""
Plot results. It is worth noting that:
1. PDHG outperforms ADMM both with respect to iterations and time.
2. Proximal ADMM has similar performance to PDHG with respect to iterations,
but is slightly inferior with respect to time.
3. ADMM greatly outperforms Linearized ADMM with respect to iterations.
4. ADMM slightly outperforms Linearized ADMM with respect to time. This is
possible because the ADMM $\mathbf{x}$-update can be solved relatively
cheaply, with only 2 CG iterations. If more CG iterations were required,
the time comparison would be favorable to Linearized ADMM.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, sharex=True, sharey=False, figsize=(27, 6))
plot.plot(
snp.vstack(
(hist_admm.Objective, hist_ladmm.Objective, hist_padmm.Objective, hist_pdhg.Objective)
).T,
ptyp="semilogy",
title="Objective function",
xlbl="Iteration",
lgnd=("ADMM", "LinADMM", "ProxADMM", "PDHG"),
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack(
(hist_admm.Prml_Rsdl, hist_ladmm.Prml_Rsdl, hist_padmm.Prml_Rsdl, hist_pdhg.Prml_Rsdl)
).T,
ptyp="semilogy",
title="Primal residual",
xlbl="Iteration",
lgnd=("ADMM", "LinADMM", "ProxADMM", "PDHG"),
fig=fig,
ax=ax[1],
)
plot.plot(
snp.vstack(
(hist_admm.Dual_Rsdl, hist_ladmm.Dual_Rsdl, hist_padmm.Dual_Rsdl, hist_pdhg.Dual_Rsdl)
).T,
ptyp="semilogy",
title="Dual residual",
xlbl="Iteration",
lgnd=("ADMM", "LinADMM", "ProxADMM", "PDHG"),
fig=fig,
ax=ax[2],
)
fig.show()
fig, ax = plot.subplots(nrows=1, ncols=3, sharex=True, sharey=False, figsize=(27, 6))
plot.plot(
snp.vstack(
(hist_admm.Objective, hist_ladmm.Objective, hist_padmm.Objective, hist_pdhg.Objective)
).T,
snp.vstack((hist_admm.Time, hist_ladmm.Time, hist_padmm.Time, hist_pdhg.Time)).T,
ptyp="semilogy",
title="Objective function",
xlbl="Time (s)",
lgnd=("ADMM", "LinADMM", "ProxADMM", "PDHG"),
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack(
(hist_admm.Prml_Rsdl, hist_ladmm.Prml_Rsdl, hist_padmm.Prml_Rsdl, hist_pdhg.Prml_Rsdl)
).T,
snp.vstack((hist_admm.Time, hist_ladmm.Time, hist_padmm.Time, hist_pdhg.Time)).T,
ptyp="semilogy",
title="Primal residual",
xlbl="Time (s)",
lgnd=("ADMM", "LinADMM", "ProxADMM", "PDHG"),
fig=fig,
ax=ax[1],
)
plot.plot(
snp.vstack(
(hist_admm.Dual_Rsdl, hist_ladmm.Dual_Rsdl, hist_padmm.Dual_Rsdl, hist_pdhg.Dual_Rsdl)
).T,
snp.vstack((hist_admm.Time, hist_ladmm.Time, hist_padmm.Time, hist_pdhg.Time)).T,
ptyp="semilogy",
title="Dual residual",
xlbl="Time (s)",
lgnd=("ADMM", "LinADMM", "ProxADMM", "PDHG"),
fig=fig,
ax=ax[2],
)
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/denoise_tv_multi.py | 0.907091 | 0.909385 | denoise_tv_multi.py | pypi |
r"""
Parameter Tuning for TV-Regularized Abel Inversion
==================================================
This example demonstrates the use of
[scico.ray.tune](../_autosummary/scico.ray.tune.rst) to tune
parameters for the companion [example script](ct_abel_tv_admm.rst). The
`ray.tune` class API is used in this example.
This script is hard-coded to run on CPU only to avoid the large number of
warnings that are emitted when GPU resources are requested but not available,
and due to the difficulty of suppressing these warnings in a way that does
not force use of the CPU only. To enable GPU usage, comment out the
`os.environ` statements near the beginning of the script, and change the
value of the "gpu" entry in the `resources` dict from 0 to 1. Note that
two environment variables are set to suppress the warnings because
`JAX_PLATFORMS` was intended to replace `JAX_PLATFORM_NAME` but this change
has yet to be correctly implemented
(see [google/jax#6805](https://github.com/google/jax/issues/6805) and
[google/jax#10272](https://github.com/google/jax/pull/10272)).
"""
# isort: off
import os
os.environ["JAX_PLATFORM_NAME"] = "cpu"
os.environ["JAX_PLATFORMS"] = "cpu"
import numpy as np
import jax
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.examples import create_circular_phantom
from scico.linop.abel import AbelProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.ray import tune
"""
Create a ground truth image.
"""
N = 256 # image size
x_gt = create_circular_phantom((N, N), [0.4 * N, 0.2 * N, 0.1 * N], [1, 0, 0.5])
"""
Set up the forward operator and create a test measurement.
"""
A = AbelProjector(x_gt.shape)
y = A @ x_gt
np.random.seed(12345)
y = y + np.random.normal(size=y.shape).astype(np.float32)
"""
Compute inverse Abel transform solution for use as initial solution.
"""
x_inv = A.inverse(y)
x0 = snp.clip(x_inv, 0.0, 1.0)
"""
Define performance evaluation class.
"""
class Trainable(tune.Trainable):
"""Parameter evaluation class."""
def setup(self, config, x_gt, x0, y):
"""This method initializes a new parameter evaluation object. It
is called once when a new parameter evaluation object is created.
The `config` parameter is a dict of specific parameters for
evaluation of a single parameter set (a pair of parameters in
this case). The remaining parameters are objects that are passed
to the evaluation function via the ray object store.
"""
# Put main arrays on jax device.
self.x_gt, self.x0, self.y = jax.device_put([x_gt, x0, y])
# Set up problem to be solved.
self.A = AbelProjector(self.x_gt.shape)
self.f = loss.SquaredL2Loss(y=self.y, A=self.A)
self.C = linop.FiniteDifference(input_shape=self.x_gt.shape)
self.reset_config(config)
def reset_config(self, config):
"""This method is only required when `scico.ray.tune.Tuner` is
initialized with `reuse_actors` set to ``True`` (the default). In
this case, a set of parameter evaluation processes and
corresponding objects are created once (including initialization
via a call to the `setup` method), and this method is called when
switching to evaluation of a different parameter configuration.
If `reuse_actors` is set to ``False``, then a new process and
object are created for each parameter configuration, and this
method is not used.
"""
# Extract solver parameters from config dict.
λ, ρ = config["lambda"], config["rho"]
# Set up parameter-dependent functional.
g = λ * functional.L1Norm()
# Define solver.
cg_tol = 1e-4
cg_maxiter = 25
self.solver = ADMM(
f=self.f,
g_list=[g],
C_list=[self.C],
rho_list=[ρ],
x0=self.x0,
maxiter=10,
subproblem_solver=LinearSubproblemSolver(
cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}
),
)
return True
def step(self):
"""This method is called for each step in the evaluation of a
single parameter configuration. The maximum number of times it
can be called is controlled by the `num_iterations` parameter
in the initialization of a `scico.ray.tune.Tuner` object.
"""
# Perform 10 solver steps for every ray.tune step
x_tv = snp.clip(self.solver.solve(), 0.0, 1.0)
return {"psnr": float(metric.psnr(self.x_gt, x_tv))}
"""
Define parameter search space and resources per trial.
"""
config = {"lambda": tune.loguniform(1e0, 1e2), "rho": tune.loguniform(1e1, 1e3)}
resources = {"gpu": 0, "cpu": 1} # gpus per trial, cpus per trial
"""
Run parameter search.
"""
tuner = tune.Tuner(
tune.with_parameters(Trainable, x_gt=x_gt, x0=x0, y=y),
param_space=config,
resources=resources,
metric="psnr",
mode="max",
num_samples=100, # perform 100 parameter evaluations
num_iterations=10, # perform at most 10 steps for each parameter evaluation
)
results = tuner.fit()
"""
Display best parameters and corresponding performance.
"""
best_result = results.get_best_result()
best_config = best_result.config
print(f"Best PSNR: {best_result.metrics['psnr']:.2f} dB")
print("Best config: " + ", ".join([f"{k}: {v:.2e}" for k, v in best_config.items()]))
"""
Plot parameter values visited during parameter search. Marker sizes are
proportional to number of iterations run at each parameter pair. The best
point in the parameter space is indicated in red.
"""
fig = plot.figure(figsize=(8, 8))
trials = results.get_dataframe()
for t in trials.iloc:
n = t["training_iteration"]
plot.plot(
t["config/lambda"],
t["config/rho"],
ptyp="loglog",
lw=0,
ms=(0.5 + 1.5 * n),
marker="o",
mfc="blue",
mec="blue",
fig=fig,
)
plot.plot(
best_config["lambda"],
best_config["rho"],
ptyp="loglog",
title="Parameter search sampling locations\n(marker size proportional to number of iterations)",
xlbl=r"$\rho$",
ylbl=r"$\lambda$",
lw=0,
ms=5.0,
marker="o",
mfc="red",
mec="red",
fig=fig,
)
ax = fig.axes[0]
ax.set_xlim([config["rho"].lower, config["rho"].upper])
ax.set_ylim([config["lambda"].lower, config["lambda"].upper])
fig.show()
"""
Plot parameter values visited during parameter search and corresponding
reconstruction PSNRs. The best point in the parameter space is indicated
in red.
"""
𝜌 = [t["config/rho"] for t in trials.iloc]
𝜆 = [t["config/lambda"] for t in trials.iloc]
psnr = [t["psnr"] for t in trials.iloc]
minpsnr = min(max(psnr), 20.0)
𝜌, 𝜆, psnr = zip(*filter(lambda x: x[2] >= minpsnr, zip(𝜌, 𝜆, psnr)))
fig, ax = plot.subplots(figsize=(10, 8))
sc = ax.scatter(𝜌, 𝜆, c=psnr, cmap=plot.cm.plasma_r)
fig.colorbar(sc)
plot.plot(
best_config["lambda"],
best_config["rho"],
ptyp="loglog",
lw=0,
ms=12.0,
marker="2",
mfc="red",
mec="red",
fig=fig,
ax=ax,
)
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel(r"$\rho$")
ax.set_ylabel(r"$\lambda$")
ax.set_title("PSNR at each sample location\n(values below 20 dB omitted)")
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/ct_abel_tv_admm_tune.py | 0.925626 | 0.755997 | ct_abel_tv_admm_tune.py | pypi |
r"""
Parameter Tuning for Image Deconvolution with TV Regularization (ADMM Solver)
=============================================================================
This example demonstrates the use of
[scico.ray.tune](../_autosummary/scico.ray.tune.rst) to tune parameters
for the companion [example script](deconv_tv_admm.rst). The `ray.tune`
function API is used in this example.
This script is hard-coded to run on CPU only to avoid the large number of
warnings that are emitted when GPU resources are requested but not available,
and due to the difficulty of suppressing these warnings in a way that does
not force use of the CPU only. To enable GPU usage, comment out the
`os.environ` statements near the beginning of the script, and change the
value of the "gpu" entry in the `resources` dict from 0 to 1. Note that
two environment variables are set to suppress the warnings because
`JAX_PLATFORMS` was intended to replace `JAX_PLATFORM_NAME` but this change
has yet to be correctly implemented
(see [google/jax#6805](https://github.com/google/jax/issues/6805) and
[google/jax#10272](https://github.com/google/jax/pull/10272)).
"""
# isort: off
import os
os.environ["JAX_PLATFORM_NAME"] = "cpu"
os.environ["JAX_PLATFORMS"] = "cpu"
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, metric, plot
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.ray import report, tune
"""
Create a ground truth image.
"""
phantom = SiemensStar(32)
N = 256 # image size
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
"""
Set up the forward operator and create a test signal consisting of a
blurred signal with additive Gaussian noise.
"""
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n)) / (n * n)
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
noise, key = scico.random.randn(Ax.shape, seed=0)
y = Ax + σ * noise
"""
Define performance evaluation function.
"""
def eval_params(config, x_gt, psf, y):
"""Parameter evaluation function. The `config` parameter is a
dict of specific parameters for evaluation of a single parameter
set (a pair of parameters in this case). The remaining parameters
are objects that are passed to the evaluation function via the
ray object store.
"""
# Extract solver parameters from config dict.
λ, ρ = config["lambda"], config["rho"]
# Put main arrays on jax device.
x_gt, psf, y = jax.device_put([x_gt, psf, y])
# Set up problem to be solved.
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
f = loss.SquaredL2Loss(y=y, A=A)
g = λ * functional.L21Norm()
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
# Define solver.
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=A.adj(y),
maxiter=10,
subproblem_solver=LinearSubproblemSolver(),
)
# Perform 50 iterations, reporting performance to ray.tune every 10 iterations.
for step in range(5):
x_admm = solver.solve()
report({"psnr": float(metric.psnr(x_gt, x_admm))})
"""
Define parameter search space and resources per trial.
"""
config = {"lambda": tune.loguniform(1e-3, 1e-1), "rho": tune.loguniform(1e-2, 1e0)}
resources = {"cpu": 4, "gpu": 0} # cpus per trial, gpus per trial
"""
Run parameter search.
"""
tuner = tune.Tuner(
tune.with_parameters(eval_params, x_gt=x_gt, psf=psf, y=y),
param_space=config,
resources=resources,
metric="psnr",
mode="max",
num_samples=100, # perform 100 parameter evaluations
)
results = tuner.fit()
"""
Display best parameters and corresponding performance.
"""
best_result = results.get_best_result()
best_config = best_result.config
print(f"Best PSNR: {best_result.metrics['psnr']:.2f} dB")
print("Best config: " + ", ".join([f"{k}: {v:.2e}" for k, v in best_config.items()]))
"""
Plot parameter values visited during parameter search. Marker sizes are
proportional to number of iterations run at each parameter pair. The best
point in the parameter space is indicated in red.
"""
fig = plot.figure(figsize=(8, 8))
trials = results.get_dataframe()
for t in trials.iloc:
n = t["training_iteration"]
plot.plot(
t["config/lambda"],
t["config/rho"],
ptyp="loglog",
lw=0,
ms=(0.5 + 1.5 * n),
marker="o",
mfc="blue",
mec="blue",
fig=fig,
)
plot.plot(
best_config["lambda"],
best_config["rho"],
ptyp="loglog",
title="Parameter search sampling locations\n(marker size proportional to number of iterations)",
xlbl=r"$\rho$",
ylbl=r"$\lambda$",
lw=0,
ms=5.0,
marker="o",
mfc="red",
mec="red",
fig=fig,
)
ax = fig.axes[0]
ax.set_xlim([config["rho"].lower, config["rho"].upper])
ax.set_ylim([config["lambda"].lower, config["lambda"].upper])
fig.show()
"""
Plot parameter values visited during parameter search and corresponding
reconstruction PSNRs. The best point in the parameter space is indicated
in red.
"""
𝜌 = [t["config/rho"] for t in trials.iloc]
𝜆 = [t["config/lambda"] for t in trials.iloc]
psnr = [t["psnr"] for t in trials.iloc]
minpsnr = min(max(psnr), 18.0)
𝜌, 𝜆, psnr = zip(*filter(lambda x: x[2] >= minpsnr, zip(𝜌, 𝜆, psnr)))
fig, ax = plot.subplots(figsize=(10, 8))
sc = ax.scatter(𝜌, 𝜆, c=psnr, cmap=plot.cm.plasma_r)
fig.colorbar(sc)
plot.plot(
best_config["lambda"],
best_config["rho"],
ptyp="loglog",
lw=0,
ms=12.0,
marker="2",
mfc="red",
mec="red",
fig=fig,
ax=ax,
)
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel(r"$\rho$")
ax.set_ylabel(r"$\lambda$")
ax.set_title("PSNR at each sample location\n(values below 18 dB omitted)")
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_tv_admm_tune.py | 0.917043 | 0.814828 | deconv_tv_admm_tune.py | pypi |
r"""
Image Deconvolution with TV Regularization (Proximal ADMM Solver)
=================================================================
This example demonstrates the solution of an image deconvolution problem
with isotropic total variation (TV) regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - C \mathbf{x}
\|_2^2 + \lambda \| D \mathbf{x} \|_{2,1} \;,$$
where $C$ is a convolution operator, $\mathbf{y}$ is the blurred image,
$D$ is a 2D finite difference operator, and $\mathbf{x}$ is the
deconvolved image.
In this example the problem is solved via proximal ADMM, while standard
ADMM is used in a [companion example](deconv_tv_admm.rst).
"""
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, metric, plot
from scico.optimize import ProximalADMM
from scico.util import device_info
"""
Create a ground truth image.
"""
phantom = SiemensStar(32)
N = 256 # image size
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
"""
Set up the forward operator and create a test signal consisting of a
blurred signal with additive Gaussian noise.
"""
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n)) / (n * n)
C = linop.Convolve(h=psf, input_shape=x_gt.shape)
Cx = C(x_gt) # blurred image
noise, key = scico.random.randn(Cx.shape, seed=0)
y = Cx + σ * noise
r"""
Set up the problem to be solved. We want to minimize the functional
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - C \mathbf{x}
\|_2^2 + \lambda \| D \mathbf{x} \|_{2,1} \;,$$
where $C$ is the convolution operator and $D$ is a finite difference
operator. This problem can be expressed as
$$\mathrm{argmin}_{\mathbf{x}, \mathbf{z}} \; (1/2) \| \mathbf{y} -
\mathbf{z}_0 \|_2^2 + \lambda \| \mathbf{z}_1 \|_{2,1} \;\;
\text{such that} \;\; \mathbf{z}_0 = C \mathbf{x} \;\; \text{and} \;\;
\mathbf{z}_1 = D \mathbf{x} \;,$$
which can be written in the form of a standard ADMM problem
$$\mathrm{argmin}_{\mathbf{x}, \mathbf{z}} \; f(\mathbf{x}) + g(\mathbf{z})
\;\; \text{such that} \;\; A \mathbf{x} + B \mathbf{z} = \mathbf{c}$$
with
$$f = 0 \quad g = g_0 + g_1$$
$$g_0(\mathbf{z}_0) = (1/2) \| \mathbf{y} - \mathbf{z}_0 \|_2^2 \quad
g_1(\mathbf{z}_1) = \lambda \| \mathbf{z}_1 \|_{2,1}$$
$$A = \left( \begin{array}{c} C \\ D \end{array} \right) \quad
B = \left( \begin{array}{cc} -I & 0 \\ 0 & -I \end{array} \right) \quad
\mathbf{c} = \left( \begin{array}{c} 0 \\ 0 \end{array} \right) \;.$$
This is a more complex splitting than that used in the
[companion example](deconv_tv_admm.rst), but it allows the use of a
proximal ADMM solver in a way that avoids the need for the conjugate
gradient sub-iterations used by the ADMM solver in the
[companion example](deconv_tv_admm.rst).
"""
f = functional.ZeroFunctional()
g0 = loss.SquaredL2Loss(y=y)
λ = 2.0e-2 # L1 norm regularization parameter
g1 = λ * functional.L21Norm()
g = functional.SeparableFunctional((g0, g1))
D = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
A = linop.VerticalStack((C, D))
"""
Set up a proximal ADMM solver object.
"""
ρ = 1.0e-1 # ADMM penalty parameter
maxiter = 50 # number of ADMM iterations
mu, nu = ProximalADMM.estimate_parameters(D)
solver = ProximalADMM(
f=f,
g=g,
A=A,
B=None,
rho=ρ,
mu=mu,
nu=nu,
x0=C.adj(y),
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0])
nc = n // 2
yc = y[nc:-nc, nc:-nc]
plot.imview(y, title="Blurred, noisy image: %.2f (dB)" % metric.psnr(x_gt, yc), fig=fig, ax=ax[1])
plot.imview(
solver.x, title="Deconvolved image: %.2f (dB)" % metric.psnr(x_gt, solver.x), fig=fig, ax=ax[2]
)
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_tv_padmm.py | 0.927388 | 0.964355 | deconv_tv_padmm.py | pypi |
r"""
Total Variation Denoising with Constraint (APGM)
================================================
This example demonstrates the solution of the isotropic total variation
(TV) denoising problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - \mathbf{x}
\|_2^2 + \lambda R(\mathbf{x}) + \iota_C(\mathbf{x}) \;,$$
where $R$ is a TV regularizer, $\iota_C(\cdot)$ is the indicator function
of constraint set $C$, and $C = \{ \mathbf{x} \, | \, x_i \in [0, 1] \}$,
i.e. the set of vectors with components constrained to be in the interval
$[0, 1]$. The problem is solved separately with $R$ taken as isotropic
and anisotropic TV regularization.
The solution via APGM is based on the approach in :cite:`beck-2009-tv`,
which involves constructing a dual for the constrained denoising problem.
The APGM solution minimizes the resulting dual. In this case, switching
between the two regularizers corresponds to switching between two
different projectors.
"""
from typing import Callable, Optional, Union
import jax
import jax.numpy as jnp
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, operator, plot
from scico.numpy import Array, BlockArray
from scico.numpy.util import ensure_on_device
from scico.optimize.pgm import AcceleratedPGM, RobustLineSearchStepSize
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 256 # image size
phantom = SiemensStar(16)
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
x_gt = x_gt / x_gt.max()
"""
Add noise to create a noisy test image.
"""
σ = 0.75 # noise standard deviation
noise, key = scico.random.randn(x_gt.shape, seed=0)
y = x_gt + σ * noise
"""
Define finite difference operator and adjoint.
"""
# The append=0 option appends 0 to the input along the axis
# prior to performing the difference to make the results of
# horizontal and vertical finite differences the same shape.
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
A = C.adj
"""
Define a zero array as initial estimate.
"""
x0 = jnp.zeros(C(y).shape)
"""
Define the dual of the total variation denoising problem.
"""
class DualTVLoss(loss.Loss):
def __init__(
self,
y: Union[Array, BlockArray],
A: Optional[Union[Callable, operator.Operator]] = None,
lmbda: float = 0.5,
):
y = ensure_on_device(y)
self.functional = functional.SquaredL2Norm()
super().__init__(y=y, A=A, scale=1.0)
self.lmbda = lmbda
def __call__(self, x: Union[Array, BlockArray]) -> float:
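        # Evaluate the dual objective: with xint = y - λ A(x), the value is
        # ||xint||_2^2 minus the squared distance of xint from the [0, 1] box
        # (cf. the dual construction in :cite:`beck-2009-tv`).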
xint = self.y - self.lmbda * self.A(x)
return -1.0 * self.functional(xint - jnp.clip(xint, 0.0, 1.0)) + self.functional(xint)
"""
Denoise with isotropic total variation. Define projector for isotropic
total variation.
"""
# Evaluation of functional set to zero.
class IsoProjector(functional.Functional):
has_eval = True
has_prox = True
def __call__(self, x: Union[Array, BlockArray]) -> float:
return 0.0
def prox(self, v: Array, lam: float, **kwargs) -> Array:
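        # Project each two-component difference vector onto the unit l2 ball,
        # then re-project the boundary entries (last column of the first
        # component, last row of the second) individually onto [-1, 1],
        # presumably because those dual entries have no matching partner.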
norm_v_ptp = jnp.sqrt(jnp.sum(jnp.abs(v) ** 2, axis=0))
x_out = v / jnp.maximum(jnp.ones(v.shape), norm_v_ptp)
out1 = v[0, :, -1] / jnp.maximum(jnp.ones(v[0, :, -1].shape), jnp.abs(v[0, :, -1]))
x_out = x_out.at[0, :, -1].set(out1)
out2 = v[1, -1, :] / jnp.maximum(jnp.ones(v[1, -1, :].shape), jnp.abs(v[1, -1, :]))
x_out = x_out.at[1, -1, :].set(out2)
return x_out
"""
Use RobustLineSearchStepSize object and set up AcceleratedPGM solver
object. Run the solver.
"""
reg_weight_iso = 1.4e0
f_iso = DualTVLoss(y=y, A=A, lmbda=reg_weight_iso)
g_iso = IsoProjector()
solver_iso = AcceleratedPGM(
f=f_iso,
g=g_iso,
L0=16.0 * f_iso.lmbda**2,
x0=x0,
maxiter=100,
itstat_options={"display": True, "period": 10},
step_size=RobustLineSearchStepSize(),
)
# Run the solver.
print(f"Solving on {device_info()}\n")
x = solver_iso.solve()
hist_iso = solver_iso.itstat_object.history(transpose=True)
# Project to constraint set.
x_iso = jnp.clip(y - f_iso.lmbda * f_iso.A(x), 0.0, 1.0)
"""
Denoise with anisotropic total variation for comparison. Define
projector for anisotropic total variation.
"""
# Evaluation of functional set to zero.
class AnisoProjector(functional.Functional):
has_eval = True
has_prox = True
def __call__(self, x: Union[Array, BlockArray]) -> float:
return 0.0
def prox(self, v: Array, lam: float, **kwargs) -> Array:
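        # For anisotropic TV the dual constraint decouples, so each entry is
        # simply projected onto the interval [-1, 1].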
return v / jnp.maximum(jnp.ones(v.shape), jnp.abs(v))
"""
Use RobustLineSearchStepSize object and set up AcceleratedPGM solver
object. The weight was tuned to give the same data fidelity as the
isotropic case. Run the solver.
"""
reg_weight_aniso = 1.2e0
f = DualTVLoss(y=y, A=A, lmbda=reg_weight_aniso)
g = AnisoProjector()
solver = AcceleratedPGM(
f=f,
g=g,
L0=16.0 * f.lmbda**2,
x0=x0,
maxiter=100,
itstat_options={"display": True, "period": 10},
step_size=RobustLineSearchStepSize(),
)
# Run the solver.
print()
x = solver.solve()
# Project to constraint set.
x_aniso = jnp.clip(y - f.lmbda * f.A(x), 0.0, 1.0)
"""
Compute the data fidelity.
"""
df = hist_iso.Objective[-1]
print(f"\nData fidelity for isotropic TV was {df:.2e}")
hist = solver.itstat_object.history(transpose=True)
df = hist.Objective[-1]
print(f"Data fidelity for anisotropic TV was {df:.2e}")
"""
Plot results.
"""
plt_args = dict(norm=plot.matplotlib.colors.Normalize(vmin=0, vmax=1.5))
fig, ax = plot.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(11, 10))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0, 0], **plt_args)
plot.imview(y, title="Noisy version", fig=fig, ax=ax[0, 1], **plt_args)
plot.imview(x_iso, title="Isotropic TV denoising", fig=fig, ax=ax[1, 0], **plt_args)
plot.imview(x_aniso, title="Anisotropic TV denoising", fig=fig, ax=ax[1, 1], **plt_args)
fig.subplots_adjust(left=0.1, right=0.99, top=0.95, bottom=0.05, wspace=0.2, hspace=0.01)
fig.colorbar(
ax[0, 0].get_images()[0], ax=ax, location="right", shrink=0.9, pad=0.05, label="Arbitrary Units"
)
fig.suptitle("Denoising comparison")
fig.show()
# zoomed version
fig, ax = plot.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(11, 10))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0, 0], **plt_args)
plot.imview(y, title="Noisy version", fig=fig, ax=ax[0, 1], **plt_args)
plot.imview(x_iso, title="Isotropic TV denoising", fig=fig, ax=ax[1, 0], **plt_args)
plot.imview(x_aniso, title="Anisotropic TV denoising", fig=fig, ax=ax[1, 1], **plt_args)
ax[0, 0].set_xlim(N // 4, N // 4 + N // 2)
ax[0, 0].set_ylim(N // 4, N // 4 + N // 2)
fig.subplots_adjust(left=0.1, right=0.99, top=0.95, bottom=0.05, wspace=0.2, hspace=0.01)
fig.colorbar(
ax[0, 0].get_images()[0], ax=ax, location="right", shrink=0.9, pad=0.05, label="Arbitrary Units"
)
fig.suptitle("Denoising comparison (zoomed)")
fig.show()
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/denoise_tv_pgm.py | 0.963265 | 0.883739 | denoise_tv_pgm.py | pypi |
import numpy as np
import jax
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot, random
from scico.examples import create_3d_foam_phantom, downsample_volume, tile_volume_slices
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
np.random.seed(1234)
N = 128 # phantom size
Nx, Ny, Nz = N, N, N // 4
upsamp = 2
x_gt_hires = create_3d_foam_phantom((upsamp * Nz, upsamp * Ny, upsamp * Nx), N_sphere=100)
x_gt = downsample_volume(x_gt_hires, upsamp)
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
"""
Set up forward operator and test signal consisting of blurred signal with
additive Gaussian noise.
"""
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n, n)) / (n**3)
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
noise, key = random.randn(Ax.shape)
y = Ax + σ * noise
"""
Set up ADMM solver.
"""
f = loss.SquaredL2Loss(y=y, A=A)
C = linop.Identity(x_gt.shape)
λ = 40.0 / 255 # BM4D regularization strength
g = λ * functional.BM4D()
ρ = 1.0 # ADMM penalty parameter
maxiter = 10 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=A.T @ y,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 100}),
itstat_options={"display": True},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
x = snp.clip(x, 0, 1)
hist = solver.itstat_object.history(transpose=True)
"""
Show slices of the recovered 3D volume.
"""
show_id = Nz // 2
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(tile_volume_slices(x_gt), title="Ground truth", fig=fig, ax=ax[0])
nc = n // 2
yc = y[nc:-nc, nc:-nc, nc:-nc]
yc = snp.clip(yc, 0, 1)
plot.imview(
tile_volume_slices(yc),
title="Slices of blurred, noisy volume: %.2f (dB)" % metric.psnr(x_gt, yc),
fig=fig,
ax=ax[1],
)
plot.imview(
tile_volume_slices(x),
title="Slices of deconvolved volume: %.2f (dB)" % metric.psnr(x_gt, x),
fig=fig,
ax=ax[2],
)
fig.show()
"""
Plot convergence statistics.
"""
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
)
input("\nWaiting for input to close figures and exit") | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_ppp_bm4d_admm.py | 0.803868 | 0.543893 | deconv_ppp_bm4d_admm.py | pypi |
import math
import numbers
from cerberus import Validator
from scidash_api.exceptions import ScidashClientValidatorException
class ValidatorExtended(Validator):
def _validate_isnan(self, isnan, field, value):
"""
        Check whether the value is NaN.
The rule's arguments are validated against this schema:
{'type': 'boolean'}
"""
if not isinstance(value, numbers.Number):
return
if not isnan and math.isnan(value):
self._error(field, "Value can't be NaN")
class ScidashClientDataValidator():
errors = None
# Validation schema for raw score
SCORE_SCHEMA = {
'_class': {
'type': 'dict',
'schema': {
'url': {
'type': 'string',
'required': True
},
'name': {
'type': 'string',
'required': True
}
}
},
'model': {
'type': 'dict',
'schema': {
'_class': {
'type': 'dict',
'schema': {
'name': {
'type': 'string'
},
'url': {
'type': 'string',
'required': True
}
}
},
'attrs': {
'type': 'dict',
'required': False
},
'hash': {
'type': 'string',
'required': True
},
'_id': {
'type': 'number',
'required': True
},
'capabilities': {
'type': 'list',
'required': True,
'schema': {
'type': 'string'
}
},
'name': {
'type': 'string',
'required': True
},
'run_params': {
'type': 'dict',
'required': False
},
'url': {
'type': 'string',
'required': True
}
}
},
'observation': {
'type': 'dict',
'required': True
},
'prediction': {
'type': ['number', 'dict'],
'required': True,
'isnan': False
},
'raw': {
'type': 'string',
'required': True
},
'score': {
            'type': ['number', 'boolean'],
'isnan': False,
'required': True
},
'score_type': {
'type': 'string'
},
'sort_key': {
'type': 'number',
'isnan': False,
'required': False
},
'norm_score': {
'type': 'number',
'isnan': False,
'required': False
},
'summary': {
'type': 'string',
'required': True
},
'hash': {
'type': 'string',
'required': True
},
'_id': {
'type': 'number',
'required': True
},
'test': {
'type': 'dict',
'schema': {
'_class': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'required': True
},
'url': {
'type': 'string',
'required': True
}
},
'required': True
},
'description': {
'type': 'string',
'nullable': True,
'required': True
},
'hash': {
'type': 'string',
'required': True
},
'_id': {
'type': 'number',
'required': True
},
'name': {
'type': 'string',
'required': True
},
'observation': {
'type': 'dict',
'required': True
},
'verbose': {
'type': 'number',
'isnan': False,
'required': True
}
}
}
}
def validate_score(self, raw_data):
"""
        Check whether the raw score data is valid and can be processed.
:raw_data: raw data dictionary
:returns: boolean
"""
validator = ValidatorExtended(self.SCORE_SCHEMA)
validator.allow_unknown = True
valid = validator.validate(raw_data)
if not valid:
self.errors = validator.errors
if not raw_data.get('sort_key', False):
if not raw_data.get('norm_score', False):
raise ScidashClientValidatorException("sort_key or norm_score"
"not found")
return valid
def get_errors(self):
"""
Returns errors from last validation procedure, if any
"""
return self.errors
def validate_suite(self, raw_data):
raise NotImplementedError("Not implemented yet") | /scidash-api-1.3.0.tar.gz/scidash-api-1.3.0/scidash_api/validator.py | 0.564098 | 0.182881 | validator.py | pypi |
import boto3
import itertools
import os
import os.path
import pandas
import pyarrow
import scidbpy
from .driver import Driver
from .coord import coord2delta, delta2coord
__version__ = '19.11.6'
type_map_pyarrow = dict(
[(t.__str__(), t) for t in (pyarrow.binary(),
pyarrow.bool_(),
pyarrow.int16(),
pyarrow.int32(),
pyarrow.int64(),
pyarrow.int8(),
pyarrow.string(),
pyarrow.uint16(),
pyarrow.uint32(),
pyarrow.uint64(),
pyarrow.uint8())] +
[('char', pyarrow.string()),
('datetime', pyarrow.timestamp('s')),
('double', pyarrow.float64()),
('float', pyarrow.float32())])
class Array(object):
"""Wrapper for SciDB array stored externally
Constructor parameters:
:param string url: URL of the SciDB array. Supported schemas are
``s3://`` and ``file://``.
:param string schema: SciDB array schema for creating a new
array. Can be specified as ``string`` or ``scidbpy.Schema``
"""
def __init__(self,
url,
schema=None,
format='arrow',
compression='lz4',
namespace='public',
index_split=100000):
self.url = url
if schema is None:
self._metadata = None
self._schema = None
else: # Create new array
if type(schema) is scidbpy.Schema:
self._schema = schema
else:
self._schema = scidbpy.Schema.fromstring(schema)
self._metadata = {
'attribute': 'ALL',
'format': format,
'version': '1',
'schema': self._schema.__str__(),
'compression': None if compression == 'none' else compression,
'index_split': index_split,
'namespace': namespace
}
Driver.init_array(url)
Driver.write_metadata(
url,
Array.metadata_to_string(self._metadata.copy()))
def __iter__(self):
return (i for i in (self.url, ))
    def __eq__(self, other):
        return tuple(self) == tuple(other)
def __repr__(self):
return ('{}(url={!r})').format(type(self).__name__, *self)
def __str__(self):
return self.url
@property
def metadata(self):
if self._metadata is None:
self._metadata = Array.metadata_from_string(
Driver.read_metadata(self.url))
return self._metadata
@property
def schema(self):
if self._schema is None:
self._schema = scidbpy.Schema.fromstring(
self.metadata['schema'])
return self._schema
def delete(self):
        # Delete the metadata file first; deleting large arrays can take some time
Driver.delete('{}/metadata'.format(self.url))
Driver.delete_all(self.url)
def read_index(self):
# Read index as Arrow Table
tables = []
for index_url in Driver.list('{}/index'.format(self.url)):
tables.append(
Driver.read_table(index_url,
Driver.index_format,
Driver.index_compression))
if len(tables):
table = pyarrow.concat_tables(tables)
# Convert Arrow Table index to Pandas DataFrame
index = table.to_pandas(split_blocks=True, self_destruct=True)
# https://arrow.apache.org/docs/python/pandas.html#reducing-
# memory-use-i
del table
index.sort_values(by=list(index.columns),
inplace=True,
ignore_index=True)
return index
return pandas.DataFrame()
def build_index(self):
dims = self.schema.dims
index = pandas.DataFrame.from_records(
map(lambda x: Array.url_to_coords(x, dims),
Driver.list('{}/chunks'.format(self.url))),
columns=[d.name for d in dims])
index.sort_values(by=list(index.columns),
inplace=True,
ignore_index=True)
return index
def write_index(self, index, split_size=None):
# Check for a DataFrame
if not isinstance(index, pandas.DataFrame):
raise Exception("Value provided as argument " +
"is not a Pandas DataFrame")
        # Check that the index columns match the array dimensions
dim_names = [d.name for d in self.schema.dims]
if len(index.columns) != len(dim_names):
raise Exception(
("Index columns count {} does not match " +
"array dimensions count {}").format(len(index.columns),
len(dim_names)))
if not (index.columns == dim_names).all():
raise Exception(
("Index columns {} does not match " +
"array dimensions {}").format(index.columns, dim_names))
# Check for coordinates outside chunk boundaries
for dim in self.schema.dims:
vals = index[dim.name]
if any(vals < dim.low_value):
raise Exception("Index values smaller than " +
"lower bound on dimension " + dim.name)
if dim.high_value != '*' and any(vals > dim.high_value):
raise Exception("Index values bigger than " +
"upper bound on dimension " + dim.name)
if (dim.chunk_length != '*'
and any((vals - dim.low_value) % dim.chunk_length != 0)):
raise Exception("Index values misaligned " +
"with chunk size on dimension " + dim.name)
# Check for duplicates
if index.duplicated().any():
raise Exception("Duplicate entries")
index.sort_values(by=list(index.columns),
inplace=True,
ignore_index=True)
if split_size is None:
split_size = int(self.metadata['index_split'])
index_schema = pyarrow.schema(
[(d.name, pyarrow.int64(), False) for d in self.schema.dims])
chunk_size = split_size // len(index.columns)
# Remove existing index
Driver.delete_all('{}/index'.format(self.url))
# Write new index
i = 0
for offset in range(0, len(index), chunk_size):
table = pyarrow.Table.from_pandas(
index.iloc[offset:offset + chunk_size], index_schema)
Driver.write_table(table,
'{}/index/{}'.format(self.url, i),
index_schema,
Driver.index_format,
Driver.index_compression)
i += 1
def get_chunk(self, *argv):
return Chunk(self, *argv)
@staticmethod
def metadata_from_string(input):
res = dict(ln.split('\t') for ln in input.strip().split('\n'))
try:
if res['compression'] == 'none':
res['compression'] = None
except KeyError:
pass
return res
@staticmethod
def metadata_to_string(input):
if input['compression'] is None:
input['compression'] = 'none'
return '\n'.join('{}\t{}'.format(k, v)
for (k, v) in input.items()) + '\n'
@staticmethod
def coords_to_url_suffix(coords, dims):
parts = ['c']
for (coord, dim) in zip(coords, dims):
if (coord < dim.low_value or
dim.high_value != '*' and coord > dim.high_value):
raise Exception(
('Coordinate value, {}, is outside of dimension range, '
'[{}:{}]').format(
coord, dim.low_value, dim.high_value))
part = coord - dim.low_value
if part % dim.chunk_length != 0:
raise Exception(
('Coordinate value, {}, is not a multiple of ' +
'chunk size, {}').format(
coord, dim.chunk_length))
part = part // dim.chunk_length
parts.append(part)
return '_'.join(map(str, parts))
@staticmethod
def url_to_coords(url, dims):
part = url[url.rindex('/') + 1:]
return tuple(
map(lambda x: int(x[0]) * x[1].chunk_length + x[1].low_value,
zip(part.split('_')[1:], dims)))
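# Illustration of the chunk-naming scheme implemented by
# Array.coords_to_url_suffix() and Array.url_to_coords() above (values are
# illustrative): for two dimensions with low value 0 and chunk lengths 10
# and 20, the chunk starting at coordinates (30, 40) maps to the URL suffix
# "c_3_2", and that suffix maps back to (30, 40).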
class Chunk(object):
"""Wrapper for SciDB array chunk stored externally"""
def __init__(self, array, *argv):
        self.array = array
        # Accept the coordinates passed as a single Pandas Series as well
        if (len(argv) == 1 and
                type(argv[0]) is pandas.core.series.Series):
            argv = tuple(argv[0])
        self.coords = argv
dims = self.array.schema.dims
if len(argv) != len(dims):
raise Exception(
('Number of arguments, {}, does not match the number of ' +
                 'dimensions, {}. Please specify one start coordinate for ' +
'each dimension.').format(len(argv),
len(self.array.schema.dims)))
part = Array.coords_to_url_suffix(self.coords, dims)
self.url = '{}/chunks/{}'.format(self.array.url, part)
self._table = None
def __iter__(self):
return (i for i in (self.array, self.url))
def __eq__(self, other):
return tuple(self) == tuple(other)
def __repr__(self):
return ('{}(array={!r}, url={!r})').format(
type(self).__name__, *self)
def __str__(self):
return self.url
@property
def table(self):
if self._table is None:
self._table = Driver.read_table(
self.url,
format=self.array.metadata['format'],
compression=self.array.metadata['compression'])
return self._table
def to_pandas(self):
return delta2coord(
self.table.to_pandas(), self.array.schema, self.coords)
def from_pandas(self, pd):
# Check for a DataFrame
if not isinstance(pd, pandas.DataFrame):
raise Exception("Value provided as argument " +
"is not a Pandas DataFrame")
# Check for empty DataFrame
if pd.empty:
raise Exception("Pandas DataFrame is empty. " +
"Nothing to do.")
# Check that columns match array schema
dims = [d.name for d in self.array.schema.dims]
columns = [a.name for a in self.array.schema.atts] + dims
if len(pd.columns) != len(columns):
raise Exception(
("Argument columns count {} do not match " +
"array attributes plus dimensions count {}").format(
len(pd.columns), len(columns)))
if sorted(list(pd.columns)) != sorted(columns):
raise Exception(
("Argument columns {} does not match " +
"array schema {}").format(pd.columns, columns))
# Use schema order
pd = pd[columns]
# Sort by dimensions
pd = pd.sort_values(by=dims, ignore_index=True)
# Check for duplicates
if pd.duplicated(subset=dims).any():
raise Exception("Duplicate coordinates")
# Check for coordinates outside chunk boundaries
for (coord, dim) in zip(self.coords, self.array.schema.dims):
vals = pd[dim.name]
if (vals.iloc[0] < coord or
vals.iloc[-1] >= coord + dim.chunk_length):
raise Exception("Coordinates outside chunk boundaries")
# Build schema
schema = pyarrow.schema(
[(a.name, type_map_pyarrow[a.type_name], not a.not_null)
for a in self.array.schema.atts] +
[('@delta', pyarrow.int64(), False)])
pd['@delta'] = coord2delta(pd, self.array.schema.dims, self.coords)
self._table = pyarrow.Table.from_pandas(pd, schema)
self._table = self._table.replace_schema_metadata()
def save(self):
Driver.write_table(self._table,
self.url,
self._table.schema,
self.array.metadata['format'],
self.array.metadata['compression']) | /scidb-bridge-19.11.6.tar.gz/scidb-bridge-19.11.6/scidbbridge/__init__.py | 0.603231 | 0.236913 | __init__.py | pypi |
import os
import json
import abc
import shutil
from zipfile import ZipFile
from click import Path as ClickPath, UsageError
from clint.textui import progress
from typing import Dict, List
from pathlib import Path
import pprint
import requests
from loguru import logger
from .utils import option, command, Cli, setup_logger as default_setup_logger
from .models import FileRef, Output
class BaseModule(abc.ABC, Cli):
OUTPUT_FILENAME: str = os.getenv("OUTPUT_FILENAME", "outputs.json")
CHUNK_SIZE: int = 2391975
@abc.abstractmethod
def run_job_logic(self, parameters: dict, files: Dict[str, FileRef]) -> Output:
"""
        This is the custom implementation of what will become an interface. It does the
        necessary setup to execute the existing module code. This method should represent
        90% or more of the custom code required to create a module from pre-existing logic.
        Arguments:
            parameters {dict} -- job parameters for this run
            files {Dict[str, FileRef]} -- input file references, keyed by file id
        Returns:
            Output -- the job output (output JSON plus any output files)
"""
pass
@classmethod
def setup_logger(cls):
default_setup_logger()
def create_artifacts(
self, output: Output, artifact_path: str = "./", zip: bool = False
):
logger.info("Creating job artifacts")
if artifact_path != "./":
Path(artifact_path).mkdir(parents=True, exist_ok=True)
outfile_path = os.path.join(artifact_path, self.OUTPUT_FILENAME)
with open(outfile_path, "w") as outfile:
outfile.write(output.output_json + "\n")
logger.info(f"Output JSON saved to {outfile_path}")
if output.files is not None:
to_zip = []
logger.info(f"Ensuring output files are in correct folder: {artifact_path}")
for _file in output.files:
target = Path(os.path.join(artifact_path, f"{_file.name}"))
if not target.exists() and _file.path is not None:
logger.info(f"Moving {_file.path} to {target}")
shutil.move(_file.path, target)
to_zip.append({"path": str(target), "name": f"{_file.name}"})
if zip:
zip_path = os.path.join(artifact_path, "files.zip")
logger.info(f"Creating output files zip: {zip_path}")
with ZipFile(zip_path, "w") as zipObj:
for zf in to_zip:
zipObj.write(zf["path"], zf["name"])
logger.info(f"Added {zf['name']} to {zip_path}")
def download_files(
self, file_refs: List[dict], files_path: str = "./"
) -> Dict[str, FileRef]:
output_file_refs = {}
for _fr in file_refs:
file_ref = FileRef(**_fr)
            if file_ref.url is None:
raise ValueError(
f"File Ref {file_ref.name} has no url to download the file"
)
r = requests.get(file_ref.url, stream=True) # type: ignore
target_path = Path(os.path.join(files_path, f"{file_ref.name}"))
target_path.parent.mkdir(parents=True, exist_ok=True)
with open(target_path, "wb") as _file:
length = r.headers.get("content-length")
total_length = None
if length is not None:
total_length = int(length)
logger.info(
f"Downloading {file_ref.name} Size: {length} to {target_path}"
)
if total_length is not None:
for ch in progress.bar(
r.iter_content(chunk_size=self.CHUNK_SIZE),
expected_size=(total_length / 1024) + 1,
):
if ch:
_file.write(ch)
else:
for ch in r.iter_content(chunk_size=self.CHUNK_SIZE):
_file.write(ch)
file_ref.path = str(target_path)
output_file_refs[file_ref.id] = file_ref
return output_file_refs
@command("run-job")
@option(
"params_path",
"--params-path",
default=None,
envvar="PARAMS_PATH",
type=ClickPath(exists=True),
)
@option(
"params_json", "--params-json", default=None, envvar="PARAMS_JSON", type=str
)
@option(
"file_refs_json",
"--files-json",
default=None,
envvar="FILE_REFS_JSON",
type=str,
)
@option(
"file_refs_path",
"--files-path",
default=None,
envvar="FILE_REFS_PATH",
type=ClickPath(exists=True),
)
@option(
"input_path",
"--input",
default="input",
envvar="FILES_IN_PATH",
type=ClickPath(),
)
@option(
"output_path",
"--output",
default="output",
envvar="OUTPUT_PATH",
type=ClickPath(),
)
@option("--zip", is_flag=True)
def run_job(
self,
params_path,
params_json,
file_refs_json,
file_refs_path,
input_path,
output_path,
zip,
):
self.setup_logger()
if params_json:
parameters = json.loads(params_json)
elif params_path:
with open(params_path) as json_file:
parameters = json.load(json_file)
else:
err_str = "One of either --params-json or --params-path is required"
logger.error(err_str)
raise UsageError(err_str)
logger.info(f"--- Using Parameters --- \n {pprint.pformat(parameters)}")
file_refs = None
if file_refs_json:
file_refs = json.loads(file_refs_json)
elif file_refs_path:
with open(file_refs_path) as json_file:
file_refs = json.load(json_file)
# Download inputs
if file_refs is not None:
file_refs = self.download_files(file_refs, input_path)
if file_refs is not None:
logger.info("--- Using Files ---")
for fr in file_refs.keys():
logger.info(f"{fr} - Path: {file_refs[fr].path}")
else:
logger.info("--- No Input Files ---")
output = self.run_job_logic(parameters, file_refs)
# Package up outputs
self.create_artifacts(output, output_path, zip) | /scidra_module_utils-0.2.1-py3-none-any.whl/scidra/module_utils/base_module.py | 0.512205 | 0.164785 | base_module.py | pypi |
# Clea
This project is an XML front matter metadata reader for documents
that *almost* follow the [SciELO Publishing Schema],
extracting and sanitizing the values regarding the affiliations.
## Installation
One can install Clea with either:
```
pip install scielo-clea # Minimal
pip install scielo-clea[cli] # Clea with CLI (recommended)
pip install scielo-clea[server] # Clea with the testing/example server
pip install scielo-clea[all] # Clea with both CLI and the server
```
Actually, all these commands install everything;
only the dependencies differ.
The first is an installation with minimal requirements,
intended for use within Python, as an imported package.
## Running the command line interface
The CLI is a way to use Clea as an article XML to JSONL converter
(one JSON output line for each XML input):
```
clea -o output.jsonl article1.xml article2.xml article3.xml
```
The same can be done with ``python -m clea`` instead of ``clea``.
When ``-o`` is not given, the output is written to the standard output stream.
See ``clea --help`` for more information.
## Running the testing server
You can run the development server using the flask CLI.
For example, for listening at 8080 from every host:
```
FLASK_APP=clea.server flask run -h 0.0.0.0 -p 8080
```
In a production server with 4 worker processes for handling requests,
you can, for example:
- Install gunicorn (it's not a dependency)
- Run `gunicorn -b 0.0.0.0:8080 -w 4 clea.server:app`
## Clea as a library
A simple example to see all the extracted data is:
```python
from clea import Article
from pprint import pprint
art = Article("some_file.xml")
pprint(art.data_full)
```
That's a dictionary of lists with all the "raw" extracted data.
The keys of that dictionary can be directly accessed,
so one can avoid extracting everything from the XML
by getting just the specific items/attributes
(e.g. `art["journal_meta"][0].data_full`
or `art.journal_meta[0].data_full`
instead of `art.data_full["journal_meta"][0]`).
These items/attributes are always lists, for example:
* `art["aff"]`: List of `clea.core.Branch` instances
* `art["sub_article"]`: List of `clea.core.SubArticle` instances
* `art["contrib"][0]["contrib_name"]`: List of strings
Where the `art["contrib"][0]` is a `Branch` instance,
and all such instances behave in the same way
(there are no nested branches).
That can be seen as another way to navigate in the former dictionary,
the last example should return the same list one would get with
`art.data_full["contrib"][0]["contrib_name"]`,
but without extracting everything else
that appears in the `art.data_full` dictionary.
More simple stuff that can be done:
```python
len(art.aff) # Number of <aff> entries
len(art.sub_article) # Number of <sub-article>
art.contrib[0].data_full # Data from the first contributor as a dict
# Something like {"type": ["translation"], "lang": ["en"]},
# the content from <sub-article> attributes
art["sub_article"][0]["article"][0].data_full
# A string with the article title, accessing just the desired content
art["article_meta"][0]["article_title"][0]
```
All `SubArticle`, `Article` and `Branch` instances
have the `data_full` property and the `get` method,
the latter being internally used for item/attribute getting.
Their behavior is:
* `Branch.get` always returns a list of strings
* `Article.get("sub_article")` returns a list of `SubArticle`
* `Article.get(...)` returns a list of `Branch`
* `SubArticle` behaves like `Article`
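A small illustration of these rules
(the output values here are hypothetical):
```python
art.get("article_meta")[0].get("article_title")
# e.g. ["Some article title"] -- always a list of strings
art.get("contrib")[0].get("contrib_name")
# e.g. ["Some Author Name"]
```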
The extracted information is not exhaustive!
Its result should not be seen as a replacement of the raw XML.
One of the goals of this library was
to help on creating a tabular data from a given XML
with as many rows as required
to have a pair of a matching `<aff>` and `<contrib>` in each row.
These are the `Article` methods/properties that do that matching:
* `art.aff_contrib_inner_gen()`
* `art.aff_contrib_full_gen()`
* `art.aff_contrib_inner`
* `art.aff_contrib_full`
* `art.aff_contrib_inner_indices`
* `art.aff_contrib_full_indices`
The most useful ones are probably the last ones,
which return a list of pairs (tuples) of indices (ints),
so one can use a `(ai, ci)` result
to access the `(art.aff[ai], art.contrib[ci])` pair,
unless the index is `-1` (not found).
The ones with the `_gen` suffix are generator functions
that yield a tuple with two `Branch` entries (or `None`),
the ones without a suffix return a list of merged dictionaries
in an almost tabular format (dictionary of lists of strings).
Each list for these specific elements
should usually have at most one string,
but that's not always the case even for these specific elements,
so one should be careful when using the `data` property.
The `inner` and `full` in the names
regards to `INNER JOIN` and `FULL OUTER JOIN` from SQL,
meaning the unmatched elements
(all `<aff>` and `<contrib>` unreferred nodes)
are discarded in the former strategy,
whereas they're forcefully matched with `None` in the latter.
To print all the extracted data from a XML
including the indices of matching `<aff>` and `<contrib>` pairs
performed in the `FULL OUTER JOIN` sense,
similar to the test server response:
```python
pprint({
**article.data_full,
"aff_contrib_pairs": article.aff_contrib_full_indices,
})
```
[SciELO Publishing Schema]: http://docs.scielo.org/projects/scielo-publishing-schema
| /scielo-clea-0.4.4.tar.gz/scielo-clea-0.4.4/README.md | 0.427038 | 0.92597 | README.md | pypi |
from .misc import get_lev
def aff_contrib_inner_gen(article):
"""Generator of matching <aff> and <contrib> of an article
as pairs of Branch instances,
using a strategy based on SQL's INNER JOIN."""
affs_ids = [get_lev(aff.node, "id") for aff in article.aff]
contrib_rids = [[get_lev(xref, "rid")
for xref in contrib.get_field_nodes("xref_aff")]
for contrib in article.contrib]
for aff_id, aff in zip(affs_ids, article.aff):
for rid_list, contrib in zip(contrib_rids, article.contrib):
for rid in rid_list:
if rid == aff_id:
yield aff, contrib
def aff_contrib_full_gen(article):
"""Generator of matching <aff> and <contrib> of an article
as pairs of Branch instances,
using a strategy based on SQL's FULL OUTER JOIN."""
affs_ids = [get_lev(aff.node, "id") for aff in article.aff]
contrib_rids = [[get_lev(xref, "rid")
for xref in contrib.get_field_nodes("xref_aff")]
for contrib in article.contrib]
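    # Indices of contributors not yet matched to any <aff>; following the FULL
    # OUTER JOIN strategy, leftovers are yielded at the end paired with None.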
contrib_missing = set(range(len(article.contrib)))
for aff_id, aff in zip(affs_ids, article.aff):
amiss = True
for cidx, (rid_list, contrib) in enumerate(zip(contrib_rids,
article.contrib)):
for rid in rid_list:
if rid == aff_id:
yield aff, contrib
amiss = False
contrib_missing.discard(cidx)
if amiss:
yield aff, None
for cidx in sorted(contrib_missing):
yield None, article.contrib[cidx]
def aff_contrib_inner(article):
"""Inner join list of matching <aff> and <contrib> entries."""
return [{**aff.data_full, **contrib.data_full}
for aff, contrib in aff_contrib_inner_gen(article)]
def aff_contrib_full(article):
"""Full outer join list of matching <aff> and <contrib> entries."""
return [{**(aff.data_full if aff else {}),
**(contrib.data_full if contrib else {}),
} for aff, contrib in aff_contrib_full_gen(article)]
def aff_contrib_inner_indices(article):
"""List of ``(ia, ic)`` tuples of indices for all matching
``(article["aff"][ia], article["contrib"][ic])`` pairs,
using a strategy based on SQL's INNER JOIN.
"""
affs = [None] + article["aff"]
contribs = [None] + article["contrib"]
return [(affs.index(aff) - 1, contribs.index(contrib) - 1)
for aff, contrib in aff_contrib_inner_gen(article)]
def aff_contrib_full_indices(article):
"""List of ``(ia, ic)`` tuples of indices for all matching
``(article["aff"][ia], article["contrib"][ic])`` pairs,
using a strategy based on SQL's FULL OUTER JOIN.
"""
affs = [None] + article["aff"]
contribs = [None] + article["contrib"]
return [(affs.index(aff) - 1, contribs.index(contrib) - 1)
for aff, contrib in aff_contrib_full_gen(article)] | /scielo-clea-0.4.4.tar.gz/scielo-clea-0.4.4/clea/join.py | 0.637934 | 0.391813 | join.py | pypi |
import json
import re
from accessstats.client import ThriftClient
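# Identifier formats: a bare ISSN (journal), ISSN + year + issue order (issue),
# and an "S"-prefixed article PID.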
REGEX_ISSN = re.compile("^[0-9]{4}-[0-9]{3}[0-9xX]$")
REGEX_ISSUE = re.compile("^[0-9]{4}-[0-9]{3}[0-9xX][0-2][0-9]{3}[0-9]{4}$")
REGEX_ARTICLE = re.compile("^S[0-9]{4}-[0-9]{3}[0-9xX][0-2][0-9]{3}[0-9]{4}[0-9]{5}$")
def _code_type(code):
if not code:
return None
if REGEX_ISSN.match(code):
return 'issn'
if REGEX_ISSUE.match(code):
return 'issue'
if REGEX_ARTICLE.match(code):
return 'pid'
def _compute_downloads_per_year(query_result):
result = []
for item in query_result['aggregations']['access_year']['buckets']:
result.append(
(item['key'], int(item['access_total']['value']))
)
return result
def downloads_per_year(collection, code, raw=False):
"""
    This method retrieves the total number of downloads per year.
    arguments
    collection: SciELO 3-letter acronym
    code: (Journal ISSN, Issue PID or Article PID)
    return
    [
        ("2017", 20101),
        ("2016", 11201),
        ("2015", 12311),
        ...
    ]
"""
tc = ThriftClient()
body = {"query": {"filtered": {}}}
fltr = {}
query = {
"query": {
"bool": {
"must": [
{
"match": {
"collection": collection
}
}
]
}
}
}
aggs = {
"aggs": {
"access_year": {
"terms": {
"field": "access_year",
"size": 0,
"order": {
"_term": "asc"
}
},
"aggs": {
"access_total": {
"sum": {
"field": "access_total"
}
}
}
}
}
}
body['query']['filtered'].update(fltr)
body['query']['filtered'].update(query)
body.update(aggs)
code_type = _code_type(code)
if code_type:
query["query"]["bool"]["must"].append({
"match": {
code_type: code
}
})
query_parameters = [
('size', '0')
]
query_result = tc.search(json.dumps(body), query_parameters)
return query_result if raw is True else _compute_downloads_per_year(query_result) | /scielo_accessstatsapi-1.1.0.tar.gz/scielo_accessstatsapi-1.1.0/accessstats/queries.py | 0.400984 | 0.208642 | queries.py | pypi |
import logging
import sys
import re
import numpy as np
import string
from six import string_types
from unidecode import unidecode
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import WhitespaceTokenizer
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import normalize
from .vectorizer import LogEntropyVectorizer
from .recommend import build_nearest_neighbors, get_rocchio_topic
logger = logging.getLogger('scienceconcierge')
logger.addHandler(logging.StreamHandler())
stemmer = PorterStemmer()
w_tokenizer = WhitespaceTokenizer()
punct_re = re.compile('[{}]'.format(re.escape(string.punctuation)))
def set_log_level(verbose):
"""Convenience function for setting the log level.
Parameters
----------
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
"""
if isinstance(verbose, bool):
if verbose is True:
verbose = 'INFO'
else:
verbose = 'WARNING'
if isinstance(verbose, str):
verbose = verbose.upper()
logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
WARNING=logging.WARNING, ERROR=logging.ERROR,
CRITICAL=logging.CRITICAL)
if verbose not in logging_types:
raise ValueError('verbose must be of a valid type')
verbose = logging_types[verbose]
logger.setLevel(verbose)
class ScienceConcierge:
"""Science Concierge
Recommendation class using Latent Semantic Analysis on list of abstracts
Process workflow are as follows
- Word tokenize and stemming (optional)
- Create tf-idf matrix, unigram or bigram recommended
    - Latent Semantic Analysis (LSA), i.e. reduce dimensionality using
truncated SVD
- Nearest neighbor assignment for recommendation
Parameters
----------
* parameters for preprocessing
    stemming: boolean, if True it will apply the Porter stemmer as a preprocessor
        to the abstracts, default: True
    parallel: boolean, if True multiprocessing will be used to apply preprocessing
        to the abstract text, default: True
    * parameters for term frequency weighting scheme
    weighting: str, options from ['count', 'tfidf', 'entropy']
    min_df: int or float [0.0, 1.0], ignore terms that appear less than min_df or have
        weight less than min_df, default: 3
    max_df: int or float [0.0, 1.0], ignore terms that appear more than max_df or have
        weight greater than max_df, default: 0.8
ngram_range: tuple, parameter for tfidf transformation
(1, 1) for unigram, (1, 2) for bigram, default (1, 2) i.e. bigram
norm: 'l2', 'l1' or None, default: 'l2'
* parameters for dimensionality reduction
algorithm: str, 'arpack' or 'randomized', default 'arpack'
n_components: int, number of components of reduced dimension vector in LSA,
default=200
n_iter: int, iteration for LSA
* For recommendation
    w_like: weight term for liked documents (called alpha in the literature)
    w_dislike: weight term for disliked documents
    n_recommend: total number of documents to recommend, if None it will be
        set to the total number of documents
TO DO
-----
- update nearest neighbor model so that it allows larger scale of documents
- print logging output for preprocessing step
"""
def __init__(self, stemming=True, parallel=True,
weighting='tfidf', strip_accents='unicode',
norm='l2', lowercase=True,
min_df=3, max_df=0.8, ngram_range=(1,2),
algorithm='arpack',
n_components=200, n_iter=150,
n_recommend=None, save=False,
verbose=False):
self.docs = None
self.docs_preprocess = None
self.stemming = stemming
self.parallel = parallel
self.weighting = weighting
self.strip_accents = strip_accents
self.min_df = min_df
self.max_df = max_df
self.ngram_range = ngram_range
self.analyzer = 'word'
self.token_pattern = r'\w{1,}'
self.stop_words = 'english'
self.lowercase = lowercase
self.norm = norm
self.n_components = int(n_components)
self.n_iter = int(n_iter)
self.algorithm = algorithm
self.vectors = None
self.nbrs_model = None # holder for nearest neighbor model
self.n_recommend = n_recommend
self.save = False
set_log_level(verbose)
def preprocess(self, text):
"""
Apply Porter stemmer to input string
Parameters
----------
text: str, input string
Returns
-------
text_preprocess: str, output stemming string
"""
if isinstance(text, (type(None), float)):
text_preprocess = ''
else:
text = unidecode(text).lower()
text = punct_re.sub(' ', text) # remove punctuation
if self.stemming:
text_preprocess = [stemmer.stem(token) for token in w_tokenizer.tokenize(text)]
else:
text_preprocess = w_tokenizer.tokenize(text)
text_preprocess = ' '.join(text_preprocess)
return text_preprocess
def preprocess_docs(self, docs):
"""
Preprocess string or list of strings
"""
if isinstance(docs, string_types):
docs = [docs]
if self.stemming is True:
if not self.parallel:
logger.info('preprocess %i documents without multiprocessing' % len(docs))
docs_preprocess = list(map(self.preprocess, docs))
else:
if sys.version_info[0] == 3:
from multiprocessing import Pool
pool = Pool()
n_processes = pool._processes
docs_preprocess = pool.map(self.preprocess, docs)
logger.info('preprocess %i documents with %i workers' % (len(docs), n_processes))
else:
logger.info('using simple map for preprocessing abstracts')
docs_preprocess = list(map(self.preprocess, docs))
else:
            logger.info('no preprocess function applied')
docs_preprocess = docs
return docs_preprocess
def fit_document_matrix(self, X):
"""
Reduce dimension of sparse matrix X
using Latent Semantic Analysis and
        build nearest neighbor model
Parameters
----------
X: sparse csr matrix, sparse term frequency matrix or
others weighting matrix from documents
"""
n_components = self.n_components
n_iter = self.n_iter
algorithm = self.algorithm
lsa_model = TruncatedSVD(n_components=n_components,
n_iter=n_iter,
algorithm=algorithm)
# reduce dimension using Latent Semantic Analysis
vectors = lsa_model.fit_transform(X)
self.vectors = vectors
# build nearest neighbor model
nbrs_model = build_nearest_neighbors(vectors, n_recommend=self.n_recommend)
self.nbrs_model = nbrs_model
return self
def fit(self, docs):
"""
Create recommendation vectors and nearest neighbor model
from list of documents
Parameters
----------
docs: list of string, list of documents' text or abstracts from papers or
publications or posters
"""
# parameters from class
weighting = self.weighting
strip_accents = self.strip_accents
token_pattern = self.token_pattern
lowercase = self.lowercase
min_df = self.min_df
max_df = self.max_df
norm = self.norm
ngram_range = self.ngram_range
analyzer = self.analyzer
stop_words = self.stop_words
# preprocess text
docs_preprocess = self.preprocess_docs(docs)
self.docs = docs
if self.save:
self.docs_preprocess = docs_preprocess
# weighting documents
if self.weighting == 'count':
model = CountVectorizer(min_df=min_df, max_df=max_df,
lowercase=lowercase,
strip_accents=strip_accents, analyzer=analyzer,
token_pattern=token_pattern, ngram_range=ngram_range,
stop_words=stop_words)
elif self.weighting == 'tfidf':
model = TfidfVectorizer(min_df=min_df, max_df=max_df,
lowercase=lowercase, norm=norm,
strip_accents=strip_accents, analyzer=analyzer,
token_pattern=token_pattern, ngram_range=ngram_range,
use_idf=True, smooth_idf=True, sublinear_tf=True,
stop_words=stop_words)
elif self.weighting == 'entropy':
model = LogEntropyVectorizer(min_df=min_df, max_df=max_df,
lowercase=lowercase, norm=norm,
token_pattern=token_pattern,
ngram_range=ngram_range, analyzer=analyzer,
smooth_idf=False,
stop_words=stop_words)
else:
logger.error('choose one weighting scheme from count, tfidf or entropy')
# text transformation and latent-semantic-analysis
logger.info('apply %s weighting to documents' % self.weighting)
X = model.fit_transform(docs_preprocess)
# fit documents matrix from sparse matrix
logger.info('perform Latent Semantic Analysis with %i components' % self.n_components)
self.fit_document_matrix(X)
return self
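    # Minimal usage sketch (illustrative only; kept as comments so that nothing
    # executes at import time). It assumes `docs` is a list of abstract strings:
    #
    #   engine = ScienceConcierge(stemming=True, weighting='tfidf', n_components=30)
    #   engine.fit(docs)
    #   suggestions = engine.recommend(likes=[0, 5], dislikes=[2])
    #
    # `suggestions` is an array of document indices, closest topics first.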
def recommend(self, likes=list(), dislikes=list(), w_like=1.8, w_dislike=0.2):
"""
Apply Rocchio algorithm and nearest neighbor to
recommend related documents:
x_pref = w_like * mean(x_likes) - w_dislike * mean(x_dislikes)
see article on how to cross-validate parameters. Use recommend
after fit method
Parameters
----------
likes: list, list of index of liked documents
dislikes: list, list of index of disliked documents
w_like: float, weight for liked documents, default 1.8 (from cross-validation)
w_dislike: float, weight for disliked documents, default 0.2
(we got 0.0 from cross-validation)
Returns
-------
recommend_index: 1d array, array of recommended index from documents
"""
self.w_like = w_like
self.w_dislike = w_dislike
# compute preference vector
topic_pref = get_rocchio_topic(self.vectors, likes, dislikes, w_like, w_dislike)
# nearest neighbor to suggest related abstract with close topic
_, recommend_index = self.nbrs_model.kneighbors(topic_pref)
return recommend_index.flatten() | /science_concierge-0.1.tar.gz/science_concierge-0.1/science_concierge/science_concierge.py | 0.649245 | 0.298453 | science_concierge.py | pypi |
import json
from typing import Dict, List
from pathlib import Path
import abc
class JSONObject:
def to_json(self) -> str:
return json.dumps(self, default=lambda o: o.__dict__(),
sort_keys=True,
indent=4)
class Author(JSONObject):
def __init__(self,
author_id: int,
name: str) -> None:
self._author_id = author_id
self._name = name
def __dict__(self):
return {
"id": self._author_id,
"name": self._name
}
def __str__(self) -> str:
return "{:d} {:s}".format(self._author_id,
self._name)
@staticmethod
def from_json(json_data: Dict) -> Dict[int, "Author"]:
content = list(json_data.items())
return dict(map(lambda item: (int(item[0]), Author(item[1]["id"], item[1]["name"])), content))
class Meta(JSONObject):
def __init__(self,
path: Path) -> None:
self._path = path
self._authors = {} # type: Dict[int, Author]
def __dict__(self) -> Dict:
result = {
"path": str(self._path),
"authors": self._authors
}
return result
def write(self) -> None:
output_line = self.to_json()
self._path.write_text(output_line)
@staticmethod
def read(path: Path) -> "Meta":
with path.open("r") as content:
text = content.read()
return Meta.from_json(path, json.loads(text))
@staticmethod
def from_json(path: Path, json_data: Dict) -> "Meta":
meta = Meta(path)
meta.authors = Author.from_json(json_data["authors"])
return meta
@property
def authors(self) -> Dict[int, Author]:
return self._authors
@authors.setter
def authors(self,
authors: Dict[int, Author]) -> None:
self._authors = authors
    def __str__(self) -> str:
line = "{:s} authors = ".format(str(self._path))
line += str(self._authors)
return line
def remove(self) -> None:
self._path.unlink() | /science_data_structure-0.0.4.tar.gz/science_data_structure-0.0.4/science_data_structure/descriptions.py | 0.637031 | 0.168309 | descriptions.py | pypi |
from pathlib import Path
from typing import List
from author import Author
from core import JSONObject
from logger import LogEntry
import uuid
import json
from typing import Dict
import abc
from datetime import datetime
class NodeProperty(JSONObject):
@abc.abstractproperty
def name(self):
raise NotImplementedError("Must override the property name")
class Meta(JSONObject):
id_counter = 0
def __init__(self,
path: Path,
dataset_id: int,
branch_id: int,
description: str = "",
authors: List[Author] = [],
log: Dict[int, LogEntry] = {},
additional_properties: Dict[str, NodeProperty] = {}):
self._path = path
self._dataset_id = dataset_id
self._branch_id = branch_id
self._description = description
self._authors = authors
self._log = log
self._additional_properties = additional_properties
def write(self):
self.path.write_text(self.to_json())
def __str__(self):
line = "meta information \n"
line += "dataset id \t {:d} \n".format(self._dataset_id)
line += "branch id \t {:d} \n".format(self._branch_id)
line += "description \t {:s} \n".format(self._description)
line += "\n"
line += "authors: \n"
for author in self.authors:
line += "{:s} \n \n".format(str(author))
line += "\n"
for name in self._additional_properties.keys():
line += "{:s}\n".format(str(self._additional_properties[name]))
return line
def __dict__(self):
base_dict = {
"dataset_id": self._dataset_id,
"branch_id": self._branch_id,
"authors": self._authors,
"description": self._description,
"log": self._log
}
for property_name in self._additional_properties.keys():
base_dict[property_name] = self._additional_properties[property_name].__dict__()
return base_dict
@property
def path(self):
return self._path
@path.setter
def path(self, path):
self._path = path
@property
def dataset_id(self):
return self._dataset_id
@property
def branch_id(self):
return self._branch_id
@property
def authors(self):
return self._authors
@property
def description(self):
return self._description
@description.setter
def description(self, description: str):
self._description = description
@staticmethod
def create_top_level_meta(path: Path,
author: Author,
description: str = ""):
# create a uuid for the dataset
dataset_id = uuid.uuid4().int
branch_id = 0
Meta.id_counter += 1
meta = Meta(path,
dataset_id,
branch_id,
description,
[author])
return meta
@staticmethod
def create_meta(top_level_meta: "Meta",
path):
dataset_id = top_level_meta.dataset_id
branch_id = Meta.id_counter
Meta.id_counter += 1
meta = Meta(path / ".meta.json", dataset_id, branch_id)
return meta
@staticmethod
def from_json(path: Path) -> "Meta":
text = path.read_text()
json_data = json.loads(text)
authors = list(map(lambda author_content: Author.from_dict(author_content), json_data["authors"]))
return Meta(path, int(json_data["dataset_id"]),
int(json_data["branch_id"]),
json_data["description"], authors)
def add_property(self, node_property: NodeProperty):
self._additional_properties[node_property.name] = node_property
def __getitem__(self, name: str) -> NodeProperty:
return self._additional_properties[name]
def add_log_entry(self, log_entry):
self._log[log_entry.log_id] = log_entry
class FileProperty(NodeProperty):
def __init__(self):
# properties
self._size = None # type: int
self._n_childs = None # type: int
@property
def size(self) -> int:
return self._size
@size.setter
def size(self, size):
self._size = size
@property
def n_childs(self) -> int:
return self._n_childs
@n_childs.setter
def n_childs(self, n_childs):
self._n_childs = n_childs
@staticmethod
def from_dict(content: Dict) -> "FileProperty":
file_property = FileProperty()
file_property.size = int(content["size"])
file_property.n_childs = int(content["n_childs"])
return file_property
def __dict__(self):
return {
"size": self._size,
"n_childs": self._n_childs
}
@property
def name(self) -> str:
return "file_properties" | /science_data_structure-0.0.4.tar.gz/science_data_structure-0.0.4/science_data_structure/meta.py | 0.786623 | 0.158597 | meta.py | pypi |
import numpy as np
from typing import List
class Variable:
"""Class for optimization variables.
"""
# attributes
_x_min = None # variables
_x_max = None # variables
_x_type = None # variables' type
def __init__(self, x_min: np.ndarray, x_max: np.ndarray, x_type: List[str]=None):
"""Constructor of variables.
Args:
x_min : (np.ndarray) (n x 1)-array with lower bounds.
x_max : (np.ndarray) (n x 1)-array with upper bounds.
            x_type: (List[str]) (n)-list with variables' type ('c': continuous or 'd': discrete).
"""
# set bounds
self.x_min = x_min
self.x_max = x_max
self.x_type = x_type
# getters
@property
def x_min(self):
return self._x_min
@property
def x_max(self):
return self._x_max
@property
def x_type(self):
return self._x_type
# setters
@x_min.setter
def x_min(self, x_lb):
"""Setter of x_min.
Args:
x_lb: (n x 1)-numpy array
"""
# check numpy
if not isinstance(x_lb, np.ndarray):
raise ValueError("x_min must be a numpy array!")
# check dimension
if not x_lb.shape[1]:
raise ValueError("x_min must be a (n x 1)-numpy array!")
# check consistency
if self._x_min is not None:
n = self._x_min.shape[0]
if n != x_lb.shape[0] and n > 0:
raise ValueError("x_min must be a ({} x 1)-numpy array!".format(n))
# set
self._x_min = x_lb
@x_max.setter
def x_max(self, x_ub):
"""Setter of x_max.
Args:
x_ub: (n x 1)-numpy array
"""
# check numpy
if not isinstance(x_ub, np.ndarray):
raise ValueError("x_max must be a numpy array!")
# check dimension
if not x_ub.shape[1]:
raise ValueError("x_max must be a (n x 1)-numpy array!")
# check dimension consistency
n = self._x_min.shape[0]
if n != x_ub.shape[0] and n > 0:
raise ValueError("x_max must be a ({} x 1)-numpy array!".format(n))
# check range consistency
if np.any((x_ub - self._x_min) < 0):
raise ValueError("x_max must be greater than or equal x_min!")
# set
self._x_max = x_ub
@x_type.setter
def x_type(self, x_type):
"""Setter of x_min.
Args:
x_type: (n )-list
"""
if x_type is not None:
# check numpy
if not isinstance(x_type, list):
raise ValueError("x_type must be a list!")
# check consistency
n = self._x_min.shape[0]
if n != len(x_type) and n > 0:
raise ValueError("x_type must be a list of {} elements!".format(n))
# check values
if (x_type.count('c') + x_type.count('d')) != n:
raise ValueError("x_type must be either 'c' or 'd'.")
self._x_type = x_type
else:
self.x_type = ['c'] * self.x_min.shape[0]
def dimension(self):
"""Return variable dimension."""
return self.x_min.shape[0] | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/builder/variable.py | 0.912089 | 0.510802 | variable.py | pypi |
from science_optimization.solvers.pareto_samplers import BaseParetoSamplers
from science_optimization.solvers import OptimizationResults
from science_optimization.builder import OptimizationProblem
from science_optimization.function import GenericFunction, LinearFunction
from typing import Any
import numpy as np
from copy import deepcopy
class LambdaSampler(BaseParetoSamplers):
"""p-lambda Pareto front sampler."""
def __init__(self,
optimization_problem: OptimizationProblem,
algorithm: Any = None,
n_samples: int = None):
"""Constructor of optimizer class.
Args:
optimization_problem: (OptimizationProblem) optimization problem instance.
algorithm : (Any) an algorithm instance.
            n_samples : (int) number of samples.
"""
# instantiate super class
super().__init__(optimization_problem, algorithm, n_samples)
def sample_aux(self) -> OptimizationResults:
""" p-lambda sampler.
Returns:
output: (OptimizationResults) optimization results.
"""
# cardinalities
n = self.optimization_problem.variables.dimension()
o = self.optimization_problem.objective.objectives.n_functions
# verify
if self.optimization_problem.objective.objectives.n_functions != 2:
raise ValueError("Sampler only implemented for bi-objective optimization problems.")
# generate lambda values from [0, 1]
l = np.linspace(0, 1, self.n_samples) # remove vertices
# sample
x = np.zeros((n, 0))
fx = np.zeros((o, 0))
for k in range(self.n_samples):
# p-lambda optimization problem
op = self.op_lambda(l[k])
# optimize
o = self.algorithm.optimize(optimization_problem=op, debug=False)
x = np.hstack((x, o.x))
fx = np.hstack((fx, self.optimization_problem.objective.objectives.eval(o.x)))
# output
output = OptimizationResults()
output.x = x
output.fx = fx
return output
def op_lambda(self, l):
""" Builds a p-lambda optimization problem.
Args:
l : used in the weighted sum of two objectives.
Returns:
op: optimization problem.
"""
# copy of optimization problem
op = deepcopy(self.optimization_problem)
obj = deepcopy(self.optimization_problem.objective)
# nonparametric functions
w = np.array([[1-l, l]])
if not obj.objectives.is_linear():
def fo(x):
return obj.objectives.eval(x, composition='series', weights=w)
# delete original objectives and evaluate
op.objective.objectives.clear() # delete functions
op.objective.objectives.add(GenericFunction(func=fo, n=op.variables.dimension()))
else:
# new objective parameters
c = w @ obj.C()
# delete original objectives and evaluate
op.objective.objectives.clear() # delete functions
op.objective.objectives.add(LinearFunction(c=c.T))
return op | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/solvers/pareto_samplers/lambda_sampler.py | 0.942593 | 0.587174 | lambda_sampler.py | pypi |
import numpy as np
from science_optimization.builder import OptimizationProblem
from science_optimization.function import GenericFunction
from science_optimization.solvers import Optimizer
from science_optimization.problems import SeparableResourceAllocation
from science_optimization.algorithms.decomposition import DualDecomposition
def decomposition_example():
"""Decomposition problem example.
Solve problem:
min f_1(x_1) + f_2(x_2), f_i(x_i) = e^(-2*x_i)
s.t. x_1 + x_2 - 10 <= 0
2 <= x_i <= 6
"""
# dimension
n = 2
# objective functions
def f_1(x):
return np.exp(-2*x[0, :]) + 0 * x[1, :]
def f_2(x):
return np.exp(-2*x[1, :]) + 0 * x[0, :]
# inequality constraints functions
def g_1(x):
return x[0, :] - 10
def g_2(x):
return x[1, :]
# input lists
f_i = [GenericFunction(func=f_1, n=n), GenericFunction(func=f_2, n=n)] # f_i list
g_i = [GenericFunction(func=g_1, n=n), GenericFunction(func=g_2, n=n)] # g_i list
# bounds
x_min = np.array([2, 2]).reshape(-1, 1) # lower
x_max = np.array([6, 6]).reshape(-1, 1) # upper
x_bounds = np.hstack((x_min, x_max))
# build generic problem instance
generic = OptimizationProblem(builder=SeparableResourceAllocation(f_i=f_i,
coupling_eq_constraints=[],
coupling_ineq_constraints=g_i,
x_bounds=x_bounds
))
# starting point
x0 = np.array([0, 0]).reshape(-1, 1)
# builder optimization
optimizer = Optimizer(opt_problem=generic, algorithm=DualDecomposition(x0=x0))
results = optimizer.optimize()
# result
results.info()
if __name__ == "__main__":
# run example
decomposition_example() | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/examples/decomposition_example.py | 0.779196 | 0.557243 | decomposition_example.py | pypi |
import numpy as np
from science_optimization.builder import OptimizationProblem
from science_optimization.function import QuadraticFunction
from science_optimization.solvers.pareto_samplers import NonDominatedSampler, EpsilonSampler, LambdaSampler, MuSampler
from science_optimization.problems import GenericProblem
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def pareto_sampling_cs0(s):
"""Multiobjective problem example.
Args:
        s: (int) sampler choice (0: epsilon, 1: non-dominated, 2: mu, otherwise: lambda).
"""
# parameters objective function 1
Q = np.array([[1, 0], [0, 1]])
c1 = np.array([[0], [0]])
d1 = np.array([0])
# parameters objective function 2
c2 = np.array([[-2], [-2]])
d2 = np.array([2])
# objectives
f1 = QuadraticFunction(Q=Q, c=c1, d=d1)
f2 = QuadraticFunction(Q=Q, c=c2, d=d2)
f = [f1, f2]
# constraints
ineq_cons = []
eq_cons = []
# bounds
x_min = np.array([-5, -5]).reshape(-1, 1) # lower
x_max = np.array([5, 5]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
# build generic problem instance
generic = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=eq_cons, ineq_cons=ineq_cons, x_bounds=x_lim))
# builder pareto sampler
if s == 0:
sampler = EpsilonSampler(optimization_problem=generic)
elif s == 1:
sampler = NonDominatedSampler(optimization_problem=generic)
elif s == 2:
sampler = MuSampler(optimization_problem=generic)
else:
sampler = LambdaSampler(optimization_problem=generic)
results = sampler.sample()
# contour
delta = 0.02
x = np.arange(-5, 5, delta)
y = np.arange(-5, 5, delta)
X, Y = np.meshgrid(x, y)
XY = np.vstack((X.reshape(1, -1), Y.reshape(1, -1)))
f1eval = np.reshape(f1.eval(XY), X.shape)
f2eval = np.reshape(f2.eval(XY), X.shape)
# contour plot of individual functions
fig, ax = plt.subplots()
ax.contour(X, Y, f1eval, 17, colors='k', linewidths=.8)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
# contour plot of individual functions
fig, ax = plt.subplots()
ax.contour(X, Y, f2eval, 17, colors='k', linewidths=.8)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
# contour plot of functions and solution
fig, ax = plt.subplots()
ax.contour(X, Y, f1eval, 17, colors='k', linewidths=.8)
ax.contour(X, Y, f2eval, 17, colors='r', linewidths=.8)
plt.scatter(results.x[0, :], results.x[1, :], s=8)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
# pareto front plot
plt.figure()
plt.scatter(results.fx[0, :], results.fx[1, :], s=8)
plt.xlabel(r'$f_1$')
plt.ylabel(r'$f_2$')
plt.show()
if __name__ == "__main__":
# run example
pareto_sampling_cs0(s=2) | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/examples/pareto_sampling_cs0.py | 0.847858 | 0.60542 | pareto_sampling_cs0.py | pypi |
import numpy as np
from science_optimization.solvers import Optimizer
from science_optimization.builder import OptimizationProblem
from science_optimization.function import GenericFunction
from science_optimization.problems import Quadratic, GenericProblem
from science_optimization.algorithms.derivative_free import NelderMead
def generate_grid(x_min, x_max, n):
coords = []
for i in range(n):
coords.append(np.arange(x_min[i][0], x_max[i][0]+1, 5))
g = np.meshgrid(*coords)
for i in range(n):
coords[i] = g[i].reshape((np.prod(g[i].shape), )).reshape(-1, 1)
return np.hstack(coords)
def quadratic(Q, c, d):
# bounds
x_min = np.array([-10, -10]).reshape(-1, 1) # lower bound
x_max = np.array([10, 10]).reshape(-1, 1) # upper bound
x_bounds = np.hstack((x_min, x_max))
# builder quadratic problem instance
quadratic = OptimizationProblem(builder=Quadratic(Q=Q, c=c, d=d, x_bounds=x_bounds))
# builder optimization
x0 = np.array([[5], [6]])
delta_r = 1.0
delta_e = 2.0
delta_ic = 0.5
delta_oc = 0.5
delta_s = 0.5
optimizer = Optimizer(
opt_problem=quadratic,
algorithm=NelderMead(x0, delta_r, delta_e, delta_ic, delta_oc, delta_s)
)
results = optimizer.optimize()
# result
results.info()
def generic_fun(f, x0, x_lim, ineq_cons, eq_cons):
delta_r = 1.0
delta_e = 2.0
delta_ic = 0.5
delta_oc = 0.5
delta_s = 0.5
generic = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=eq_cons, ineq_cons=ineq_cons, x_bounds=x_lim))
optimizer = Optimizer(
opt_problem=generic,
algorithm=NelderMead(x0, delta_r, delta_e, delta_ic, delta_oc, delta_s)
)
optimizer.algorithm.n_max = 500
results = optimizer.optimize(debug=True)
results.info()
return results
def get_bm_1_problem(n):
def obj_func(x):
a = [10 for i in range(n)]
b = [100 for i in range(n)]
s = 0
for i in range(n):
s += a[i] * np.abs(x[i][0] / b[i])
return s
def c_1(x):
c = 4
s = 0
for i in range(n):
s += np.power(x[i][0], 3)
s -= c
return s
def c_2(x):
d = 1 / np.pi
s = 0
for i in range(n):
s += np.power(-1 + x[i][0], 2)
s -= d
return s
def c_3(x):
d = 3
m = 100
s = 0
for i in range(n):
s += x[i][0]
s = d - m * np.sqrt(s)
return s
x_min = np.full((n, 1), -10) # lower
x_max = np.full((n, 1), 10) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=obj_func, n=n)]
ineq_cons = [
GenericFunction(func=c_1, n=n),
GenericFunction(func=c_2, n=n),
GenericFunction(func=c_3, n=n)
]
eq_cons = []
return f, ineq_cons, eq_cons, x_min, x_max, x_lim
def get_bm_2_problem(n):
def obj_func(x):
s = 1
for i in range(n):
s *= x[i][0]
s *= -1 * np.power(np.sqrt(n), n)
return s
def c_1(x):
s = 0
for i in range(n):
s += x[i][0]
return s - 1
return obj_func, c_1
def get_bm_3_problem():
def obj_func(x):
s = np.sum(x[0:4, ])
s -= np.sum(np.power(x[0:4, ], 2))
s -= np.sum(x[4:13, ])
return s
def c_1(x):
return 2*x[0][0] + 2*x[1][0] + x[9][0] + x[10][0] - 10
def c_2(x):
return 2*x[0][0] + 2*x[2][0] + x[9][0] + x[11][0] - 10
def c_3(x):
return 2*x[0][0] + 2*x[2][0] + x[10][0] + x[11][0] - 10
def c_4(x):
return -8 * x[0][0] + x[9][0]
def c_5(x):
return -8 * x[1][0] + x[10][0]
def c_6(x):
return -8 * x[2][0] + x[11][0]
def c_7(x):
return -2 * x[3][0] - x[4][0] + x[9][0]
def c_8(x):
return -2 * x[5][0] - x[6][0] + x[10][0]
def c_9(x):
return -2 * x[7][0] - x[8][0] + x[11][0]
x_min = np.zeros((13, 1))
x_max = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 100, 100, 100, 1]).reshape(-1, 1)
x_bounds = np.hstack((x_min, x_max))
x0 = np.array([.5, .5, .5, .5, .5, .5, .5, .5, .5, 3, 3, 3, .5]).reshape(-1, 1)
f = [GenericFunction(obj_func, 13)]
ineq_cons = [
GenericFunction(func=c_1, n=13),
GenericFunction(func=c_2, n=13),
GenericFunction(func=c_3, n=13),
GenericFunction(func=c_4, n=13),
GenericFunction(func=c_5, n=13),
GenericFunction(func=c_6, n=13),
GenericFunction(func=c_7, n=13),
GenericFunction(func=c_8, n=13),
GenericFunction(func=c_9, n=13)
]
eq_cons = []
return x0, x_bounds, f, ineq_cons, eq_cons
def get_bm_4_problem():
def obj_func(x):
a = np.sum(np.power(np.cos(x), 4))
b = np.prod(np.power(np.cos(x), 2))
c = np.sqrt(np.sum(np.arange(1, 21).reshape(-1, 1) * np.power(x, 2)))
s = np.abs((a - 2*b)/c)
return s
def c_1(x):
return 0.75 - np.prod(x)
def c_2(x):
return np.sum(x) - 7.5 * x.shape[0]
x_min = np.zeros((20, 1))
x_max = np.full((20, 1), 10)
x_bounds = np.hstack((x_min, x_max))
x0 = np.full((20, 1), 5)
f = [GenericFunction(func=obj_func, n=20)]
ineq_cons = [
GenericFunction(func=c_1, n=20),
GenericFunction(func=c_2, n=20)
]
eq_cons = []
return x0, x_bounds, f, ineq_cons, eq_cons
def get_bm_5_problem():
def obj_func(x):
return np.abs(np.power(x[0][0], 2) + np.power(x[1][0], 2)) + np.abs(np.sin(x[0][0])) + np.abs(np.cos(x[1][0]))
def c_1(x):
c = 4
s = 0
for i in range(2):
s += np.power(x[i][0], 3)
s -= c
return s
def c_2(x):
d = 1 / np.pi
s = 0
for i in range(2):
s += np.power(-1 + x[i][0], 2)
s -= d
return s
def c_3(x):
d = 3
m = 100
s = 0
for i in range(2):
s += x[i][0]
s = d - m * np.sqrt(s)
return s
# bounds
x_min = np.array([-10, -10]).reshape(-1, 1) # lower
x_max = np.array([10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=obj_func, n=2)]
ineq_cons = [
GenericFunction(func=c_1, n=2),
GenericFunction(func=c_2, n=2),
GenericFunction(func=c_3, n=2)
]
eq_cons = []
x0 = np.array([[5.0], [1.0]])
return x0, x_lim, f, ineq_cons, eq_cons
def neldermead_example(problem=1):
"""
Args:
problem:
Returns:
"""
np.set_printoptions(precision=9, suppress=True)
if problem == 1:
# Problem: (x[0]-1)^2 + 4.0*x[1]^2
Q = np.array([[1, 0], [0, 4]])
c = np.array([-2, 0]).reshape(-1, 1)
d = 1
quadratic(Q, c, d)
elif problem == 2:
# Problem: x[0]^2 + 3.0*x[1]^2
Q = np.array([[1, 0], [0, 3]])
c = np.array([0, 0]).reshape(-1, 1)
d = 0
quadratic(Q, c, d)
elif problem == 3:
def f_obj(x): return np.max(np.abs(x * (.5 + 1e-2) - .5 * np.sin(x) * np.cos(x)), axis=0)
# bounds
x_min = np.array([-5, -5]).reshape(-1, 1) # lower
x_max = np.array([10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=f_obj, n=2)]
ineq_cons = []
eq_cons = []
x0 = np.array([[2], [2]])
generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
elif problem == 4:
def f_obj(x): return x[0][0]*x[0][0] + x[1][0]*x[1][0] - x[0][0]*x[1][0]
# bounds
x_min = np.array([-5, -5]).reshape(-1, 1) # lower
x_max = np.array([10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=f_obj, n=2)]
ineq_cons = []
eq_cons = []
x0 = np.array([[2], [2]])
generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
elif problem == 5:
def f_obj(x): return 200 * x[0][0]*x[0][0] + x[1][0]*x[1][0]
# bounds
x_min = np.array([-10, -10]).reshape(-1, 1) # lower
x_max = np.array([10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=f_obj, n=2)]
ineq_cons = []
eq_cons = []
x0 = np.array([[10], [10]])
generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
elif problem == 6:
def f_obj(x): return 100 * np.square((x[1][0] - np.square(x[0][0]))) + np.square(1 - x[0][0])
# bounds
x_min = np.array([-5, -5]).reshape(-1, 1) # lower
x_max = np.array([10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=f_obj, n=2)]
ineq_cons = []
eq_cons = []
x0 = np.array([[-2], [1]])
generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
elif problem == 7:
def f_obj(x):
return np.square(x[0][0] + 10 * x[1][0]) + 5 * np.square(x[2][0] - x[3][0]) + \
np.power((x[1][0] - 2 * x[2][0]), 4) + 10 * np.power(x[0][0] - x[3][0], 4)
# bounds
x_min = np.array([-5, -5, -5, -5]).reshape(-1, 1) # lower
x_max = np.array([10, 10, 10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=f_obj, n=4)]
ineq_cons = []
eq_cons = []
x0 = np.array([[3], [-1], [0], [1]])
generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
elif problem == 8:
n = 5
f, ineq_cons, eq_cons, x_min, x_max, x_lim = get_bm_1_problem(n)
x0 = np.full((n, 1), 1.0)
generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
elif problem == 9:
x0, x_lim, obj_func, ineq_cons, eq_cons = get_bm_3_problem()
generic_fun(obj_func, x0, x_lim, ineq_cons, eq_cons)
elif problem == 10:
x0, x_lim, obj_func, ineq_cons, eq_cons = get_bm_4_problem()
generic_fun(obj_func, x0, x_lim, ineq_cons, eq_cons)
elif problem == 11:
x0, x_bounds, f, ineq_cons, eq_cons = get_bm_5_problem()
generic_fun(f, x0, x_bounds, ineq_cons, eq_cons)
else:
raise Warning("Undefined problem example.")
if __name__ == '__main__':
neldermead_example(problem=1) | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/examples/neldermead_example.py | 0.687945 | 0.544922 | neldermead_example.py | pypi |
import numpy as np
from science_optimization.solvers import Optimizer
from science_optimization.builder import OptimizationProblem
from science_optimization.function import GenericFunction
from science_optimization.problems import Quadratic, GenericProblem
from science_optimization.algorithms.derivative_free import NelderMead
def generic_fun(f, x0, x_lim, ineq_cons, eq_cons):
delta_r = 1.0
delta_e = 2.0
delta_ic = 0.5
delta_oc = 0.5
delta_s = 0.5
generic = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=eq_cons, ineq_cons=ineq_cons, x_bounds=x_lim))
optimizer = Optimizer(
opt_problem=generic,
algorithm=NelderMead(x0, delta_r, delta_e, delta_ic, delta_oc, delta_s)
)
optimizer.algorithm.n_max = 2000
results = optimizer.optimize(debug=False)
# results.info()
return results
def generate_points(x_min, x_max, dim, n=30):
points = []
for i in range(n):
p = x_min + np.random.random_sample((dim, 1)) * (x_max - x_min)
points.append(p)
return points
def get_bm_1_problem(n):
def obj_func(x):
a = [10 for i in range(n)]
b = [100 for i in range(n)]
s = 0
for i in range(n):
s += a[i] * np.abs(x[i][0] / b[i])
return s
def c_1(x):
c = 4
s = 0
for i in range(n):
s += np.power(x[i][0], 3)
s -= c
return s
def c_2(x):
d = 1 / np.pi
s = 0
for i in range(n):
s += np.power(-1 + x[i][0], 2)
s -= d
return s
def c_3(x):
d = 3
m = 100
s = 0
for i in range(n):
s += x[i][0]
s = d - m * np.sqrt(s)
return s
x_min = np.full((n, 1), -10) # lower
x_max = np.full((n, 1), 10) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=obj_func, n=n)]
ineq_cons = [
GenericFunction(func=c_1, n=n),
GenericFunction(func=c_2, n=n),
GenericFunction(func=c_3, n=n)
]
eq_cons = []
return f, ineq_cons, eq_cons, x_min, x_max, x_lim
def write_x0_result(dim, x0, fx, n_evals, stop_crit):
with open(str(dim) + "_dim_x0_results.txt", "a+") as fp:
fp.write(str(x0.T[0].tolist()) + "\t" + str(fx) + "\t" + str(n_evals) + stop_crit)
fp.write("\n")
def write_dim_result(dim, fx_min, fx_median, fx_std, fx_max, n_evals_mean):
with open("results.txt", "a+") as fp:
fp.write(
str(dim) + "\t" +
str(fx_min) + "\t" +
str(fx_median) + "\t" +
str(fx_std) + "\t" +
str(fx_max) + "\t" +
str(n_evals_mean)
)
fp.write("\n")
def run_tests():
for dim in range(11, 16):
fx = []
n_evals = []
f, ineq_cons, eq_cons, x_min, x_max, x_lim = get_bm_1_problem(dim)
initial_points = generate_points(x_min, x_max, dim, n=30)
for p in range(len(initial_points)):
x0 = initial_points[p].reshape(-1, 1)
results = generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
n_evals.append(results.n_function_evaluations)
fx.append(results.fx)
with open(str(dim) + "_dim_x0_results.txt", "a+") as fp:
fp.write(str(x0.T[0].tolist()) + "\t" + str(results.fx) + "\t" + str(results.n_function_evaluations))
fp.write("\n")
# print(x0.T[0].tolist(), results.fx, results.n_function_evaluations)
fx = np.array(fx)
n_evals = np.array(n_evals)
n_data = [np.min(fx), np.median(fx), np.std(fx), np.max(fx), np.mean(n_evals)]
with open("results.txt", "a+") as fp:
fp.write(
str(dim) + "\t" +
str(np.min(fx)) + "\t" +
str(np.median(fx)) + "\t" +
str(np.std(fx)) + "\t" +
str(np.max(fx)) + "\t" +
str(np.mean(n_evals))
)
fp.write("\n")
print(n_data)
if __name__ == "__main__":
run_tests() | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/examples/neldermead_article_example.py | 0.486819 | 0.462473 | neldermead_article_example.py | pypi |
import numpy as np
from science_optimization.builder import OptimizationProblem
from science_optimization.function import QuadraticFunction
from science_optimization.function import GenericFunction
from science_optimization.solvers.pareto_samplers import NonDominatedSampler, EpsilonSampler, LambdaSampler, MuSampler
from science_optimization.problems import GenericProblem
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def pareto_sampling_cs1(s):
"""Multiobjective problem example.
Args:
s: nondominated_sampler.
"""
# objective function 1
def f_obj1(x): return np.max(np.abs(x * (.5 + 1e-2) - .5 * np.sin(x) * np.cos(x)), axis=0)
# parameters objective function 2
Q = np.array([[10, 9], [9, 10]])
c = np.array([[-90], [-100]])
d = np.array([250])
# objectives
f1 = GenericFunction(func=f_obj1, n=2)
f2 = QuadraticFunction(Q=Q, c=c, d=d)
f = [f1, f2]
# constraints
ineq_cons = []
eq_cons = []
# bounds
x_min = np.array([-5, -5]).reshape(-1, 1) # lower
x_max = np.array([10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
# build generic problem instance
generic = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=eq_cons, ineq_cons=ineq_cons, x_bounds=x_lim))
# builder sampler
if s == 0:
sampler = EpsilonSampler(optimization_problem=generic, n_samples=13)
elif s == 1:
sampler = NonDominatedSampler(optimization_problem=generic, n_samples=13)
elif s == 2:
sampler = MuSampler(optimization_problem=generic, n_samples=13)
else:
sampler = LambdaSampler(optimization_problem=generic, n_samples=13)
results = sampler.sample()
# contour
delta = 0.02
x = np.arange(-5, 10, delta)
y = np.arange(-5, 10, delta)
X, Y = np.meshgrid(x, y)
XY = np.vstack((X.reshape(1, -1), Y.reshape(1, -1)))
f1eval = np.reshape(f_obj1(XY), X.shape)
f2eval = np.reshape(f2.eval(XY), X.shape)
# contour plot of individual functions
fig, ax = plt.subplots()
ax.contour(X, Y, f1eval, 17, colors='k', linewidths=.8)
ax.xaxis.set_major_locator(ticker.MultipleLocator(5))
ax.yaxis.set_major_locator(ticker.MultipleLocator(5))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
# contour plot of individual functions
fig, ax = plt.subplots()
ax.contour(X, Y, f2eval, 17, colors='k', linewidths=.8)
ax.xaxis.set_major_locator(ticker.MultipleLocator(5))
ax.yaxis.set_major_locator(ticker.MultipleLocator(5))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
# contour plot of functions and solution
fig, ax = plt.subplots()
ax.contour(X, Y, f1eval, 17, colors='k', linewidths=.8)
ax.contour(X, Y, f2eval, 17, colors='r', linewidths=.8)
plt.scatter(results.x[0, :], results.x[1, :], s=8)
ax.xaxis.set_major_locator(ticker.MultipleLocator(5))
ax.yaxis.set_major_locator(ticker.MultipleLocator(5))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
# pareto front plot
plt.figure()
plt.scatter(results.fx[0, :], results.fx[1, :], s=8)
plt.xlabel(r'$f_1$')
plt.ylabel(r'$f_2$')
plt.show()
if __name__ == "__main__":
# run example
s = 1
pareto_sampling_cs1(s) | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/examples/pareto_sampling_cs1.py | 0.832169 | 0.574335 | pareto_sampling_cs1.py | pypi |
import numpy as np
from science_optimization.builder import OptimizationProblem
from science_optimization.function import QuadraticFunction
from science_optimization.solvers import Optimizer
from science_optimization.problems import GenericProblem
from science_optimization.algorithms.cutting_plane import EllipsoidMethod
def multiobjective_example():
"""Multiobjective problem example.
"""
# objective functions
xf = np.array([1, 1, 1]).reshape(-1, 1)
Af = 2 * np.identity(3)
bf = -np.matmul(Af, xf)
cf = .5 * np.matmul(np.transpose(xf), np.matmul(Af, xf))
xf2 = np.array([-1, -1, -1]).reshape(-1, 1)
Af2 = np.diag([1, 2, 4])
bf2 = -np.matmul(Af2, xf2)
cf2 = .5 * np.matmul(np.transpose(xf2), np.matmul(Af2, xf2))
f = [QuadraticFunction(Q=.5*Af, c=bf, d=cf), QuadraticFunction(Q=.5*Af2, c=bf2, d=cf2)]
# inequality constraints
Ag = 2 * np.identity(3)
bg = np.zeros((3, 1))
cg = -1
xg2 = np.array([1, 1, 1]).reshape(-1, 1)
Ag2 = 2 * np.identity(3)
bg2 = -np.matmul(Ag2, xg2)
cg2 = .5 * np.matmul(np.transpose(xg2), np.matmul(Ag2, xg2)) - 1
ineq_cons = [QuadraticFunction(Q=.5*Ag, c=bg, d=cg), QuadraticFunction(Q=.5*Ag2, c=bg2, d=cg2)]
# equality constraints
eq_cons = []
# bounds
x_min = np.array([-10, -10, -10]).reshape(-1, 1) # lower
x_max = np.array([10, 10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
# build generic problem instance
generic = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=eq_cons, ineq_cons=ineq_cons, x_bounds=x_lim))
# starting point
x0 = np.array([20, 20, 20]).reshape(-1, 1)
# cut option
shallow_cut = 0
# builder optimization
optimizer = Optimizer(opt_problem=generic, algorithm=EllipsoidMethod(x0=x0, shallow_cut=shallow_cut))
results = optimizer.optimize(debug=True, n_step=5)
# result
results.info()
if __name__ == "__main__":
# run example
multiobjective_example() | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/examples/multiobjective_example.py | 0.770206 | 0.634656 | multiobjective_example.py | pypi |
import numpy as np
from science_optimization.algorithms import BaseAlgorithms
from science_optimization.builder import OptimizationProblem
from science_optimization.problems import GenericProblem
from science_optimization.function import GenericFunction, FunctionsComposite
from science_optimization.solvers import OptimizationResults
from science_optimization.algorithms.unidimensional import GoldenSection
import copy
class DualDecomposition(BaseAlgorithms):
"""Dual decomposition method.
"""
# attributes
_x0 = None
def __init__(self,
x0: np.ndarray=np.array([[]]).reshape(-1, 1),
n_max: int=None,
eps: float=None):
"""Dual decomposition method constructor.
Args:
x0 : (np.ndarray) initial point
n_max: (int) maximum number of iterations for stop criterion
eps : (float) maximum uncertainty for stop criterion
"""
# parameters
self.x0 = 1.0 * x0
if n_max is not None:
self.n_max = n_max
if eps is not None:
self.eps = eps
# getters
@property
def x0(self):
return self._x0
# setters
@x0.setter
def x0(self, x0):
if x0.shape[1] == 1:
self._x0 = x0
else:
raise ValueError("Initial point must be a column vector.")
def optimize(self, optimization_problem, debug=False, n_step=5):
"""Optimization core of Decomposition method.
Args:
optimization_problem: (OptimizationProblem) an optimization problem.
debug : (bool) debug option indicator.
n_step : (int) iterations steps to store optimization results.
Returns:
optimization_results: (OptimizationResults) optimization results.
"""
# optimization parameters
f = optimization_problem.objective.objectives
x_bounds = np.hstack((optimization_problem.variables.x_min, optimization_problem.variables.x_max))
# check whether inequality of inequality
if not optimization_problem.constraints.inequality_constraints.functions:
g = optimization_problem.constraints.equality_constraints
constraint_type = 1
else:
g = optimization_problem.constraints.inequality_constraints
constraint_type = 0
# instantiate sub-problem and its solver
sp_solver = GoldenSection(eps=self.eps, n_max=int(self.n_max / 2))
sp = OptimizationProblem(builder=GenericProblem(f=[GenericFunction(func=lambda: 1, n=1)],
eq_cons=[],
ineq_cons=[],
x_bounds=np.zeros((0, 2))))
# solve master problem evaluator
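        # The master problem maximizes the dual function over the price nu; the
        # minus sign below turns that maximization into a minimization for the solver.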
def f_master(n):
return -self.master_eval(f=f, g=g, nu=n, x_bounds=x_bounds, op=sp, solver=sp_solver)[1]
# master problem bounds (nu bounds)
if constraint_type:
# equality constraint
x_bounds_master = np.array([[-self.eps**-1, self.eps**-1]])
else:
# inequality constraint
x_bounds_master = np.array([[0, self.eps**-1]])
# optimization parameters
nu = 1.
x = self.x0
k = 0
k_max = int(self.n_max / 10)
stop = False
# master problem and solver
mp = OptimizationProblem(builder=GenericProblem(f=[GenericFunction(func=f_master, n=1)],
eq_cons=[],
ineq_cons=[],
x_bounds=x_bounds_master))
# main loop
mp_solver = GoldenSection(eps=self.eps, n_max=self.n_max)
results = OptimizationResults()
while not stop and k < k_max:
# run algorithm
output = mp_solver.optimize(optimization_problem=mp, debug=False)
# new price (nu)
nu_new = output.x
nu_diff = np.abs(nu - nu_new)
nu = copy.copy(nu_new)
# evaluate master problem
x, fx, gx = self.master_eval(f=f, g=g, nu=nu, x_bounds=x_bounds, op=sp, solver=sp_solver)
# update nu: bounds of master problem
h = 2
x_lb = nu-h*np.abs(nu) if constraint_type else np.maximum(0, nu-h*np.abs(nu))
x_bounds_master = np.array([[x_lb, nu+h*np.abs(nu)]])
# update problem bounds
mp.variables.x_min = x_bounds_master[:, 0].reshape(-1, 1)
mp.variables.x_max = x_bounds_master[:, 1].reshape(-1, 1)
# stop criteria
stop = (np.abs(gx) < self.eps and constraint_type) or (np.abs(nu) < self.eps) or \
(np.diff(x_bounds_master) < self.eps) or (nu_diff < self.eps and k > 0)
# update counter
k += 1
# output
results.x = x
results.fx = f.eval(x)
results.parameter = {'nu': nu}
results.n_iterations = k
return results
@staticmethod
def master_eval(f: FunctionsComposite,
g: FunctionsComposite,
nu: float,
x_bounds: np.ndarray,
op: OptimizationProblem,
solver: GoldenSection):
""" Evaluates master problem.
Args:
f : (FunctionsComposite) objective functions.
g : (FunctionsComposite) constraints.
nu : (float) allocation factor.
x_bounds: (np.ndarray) bounds.
op : (OptimizationProblem) optimization problem.
solver : (GoldenSection) algorithm solver
Returns:
x : (np.ndarray) sub-problems' solution.
fx_master: (np.ndarray) objective evaluation at x.
gx : (np.ndarray) constraint evaluation at x.
"""
# build and solve sub-problems
n = x_bounds.shape[0] # number of variables
x_out = np.zeros((n, 1))
# build generic problem instance
for i in range(f.n_functions):
# sub-problem
def f_i(x):
y = np.zeros((n, 1))
y[i, :] = x
return f.functions[i].eval(y) + nu * g.functions[i].eval(y)
# update problem objective
op.objective.objectives.remove()
op.objective.objectives.add(GenericFunction(func=f_i, n=1))
# update problem bounds
op.variables.x_min = x_bounds[i, 0].reshape(-1, 1)
op.variables.x_max = x_bounds[i, 1].reshape(-1, 1)
output = solver.optimize(optimization_problem=op, debug=False)
x_out[i, 0] = output.x
# master eval
gx = g.eval(x_out, composition='series')
fx_master = f.eval(x_out, composition='series') + nu * gx
return x_out, fx_master, gx | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/algorithms/decomposition/dual_decomposition.py | 0.889042 | 0.45641 | dual_decomposition.py | pypi |
import numpy as np
from science_optimization.algorithms.derivative_free import NelderMead
from science_optimization.algorithms import BaseAlgorithms
from science_optimization.algorithms.search_direction import QuasiNewton, GradientAlgorithm, NewtonAlgorithm
from science_optimization.builder import OptimizationProblem
from science_optimization.function.lagrange_function import AugmentedLagrangeFunction
from science_optimization.problems import GenericProblem
from science_optimization.solvers import OptimizationResults
from typing import Tuple, Any
class AugmentedLagrangian(BaseAlgorithms):
"""
Augmented Lagrangian algorithm
"""
def __init__(self,
x0: np.ndarray,
n_max: int = None,
eps: float = None,
randx: bool = False,
algorithm: Any = None,
c: float = 1.1):
"""Algorithm constructor.
Args:
x0 : (np.ndarray) initial point
n_max: (int) maximum number of iterations for stop criterion
eps : (float) maximum uncertainty for stop criterion
randx: (bool) True to use a different initial point in each Lagrangian iteration
            algorithm: (Any) algorithm instance used to solve the unconstrained subproblem
                (e.g. QuasiNewton, GradientAlgorithm, NewtonAlgorithm or NelderMead); defaults to QuasiNewton
c: (float) parameter used to update the rho value
"""
# parameters
self.x0 = x0
if n_max is not None:
self.n_max = n_max
if eps is not None:
self.eps = eps
self.randx = randx
if algorithm is not None:
self.algorithm = algorithm
else:
self.algorithm = QuasiNewton(x0=x0)
if c <= 1:
raise Exception('Invalid value, must be greater than one')
self.c = c
# getters
@property
def algorithm(self):
return self._algorithm
@algorithm.setter
def algorithm(self, algorithm):
# verify instances
if issubclass(type(algorithm), QuasiNewton) or issubclass(type(algorithm), GradientAlgorithm) \
or issubclass(type(algorithm), NewtonAlgorithm) or issubclass(type(algorithm), NelderMead):
self._algorithm = algorithm
else:
raise Warning("Invalid algorithm, must solve constrained problems")
def optimize(self, optimization_problem, debug=False, n_step=5):
"""Optimization core of Augmented Lagrangian method
Args:
optimization_problem: (OptimizationProblem) an optimization problem.
debug : (bool) debug option indicator.
n_step : (int) iterations steps to store optimization results.
Returns:
optimization_results: (OptimizationResults) optimization results.
"""
optimization_results = OptimizationResults()
optimization_results.message = 'Stop by maximum number of iterations.'
f_obj = optimization_problem.objective.objectives
x_bounds = np.hstack((optimization_problem.variables.x_min, optimization_problem.variables.x_max))
n = len(optimization_problem.variables.x_type)
h = optimization_problem.constraints.equality_constraints
g = optimization_problem.constraints.inequality_constraints
x0 = self.x0
la_function = AugmentedLagrangeFunction(f_obj=f_obj, g=g, h=h, rho=1, c=self.c)
# only parameter that changes through the iterations is f
op_generic = OptimizationProblem(builder=GenericProblem(f=[la_function],
eq_cons=[], ineq_cons=[], x_bounds=x_bounds))
stop_criteria = False
k = 0
prev_x = x0
x_hist = np.array(x0)
f_hist = [f_obj.eval(x0)]
while k < self.n_max and not stop_criteria:
self.algorithm.x0 = x0
results = self.algorithm.optimize(optimization_problem=op_generic, debug=False)
x_new = results.x
if debug:
x_hist = np.hstack((x_hist, x_new))
f_hist.append(results.fx)
# update Lagrange multipliers
la_function.update_multipliers(x_new)
k += 1
if np.linalg.norm(x_new - prev_x) < self.eps:
optimization_results.message = 'Stop by unchanged x value.'
stop_criteria = True
prev_x = x_new
if self.randx:
x0 = np.random.uniform(x_bounds[:, 0], x_bounds[:, 1], (1, n)).transpose()
else:
x0 = x_new
if debug:
optimization_results.x = x_hist
optimization_results.fx = np.array(f_hist)
else:
optimization_results.x = prev_x
optimization_results.fx = f_obj.eval(prev_x)
optimization_results.n_iterations = k
optimization_results.parameter = {'lambda': la_function.lag_eq, 'mu': la_function.lag_ineq}
return optimization_results | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/algorithms/lagrange/augmented_lagrangian.py | 0.910468 | 0.551151 | augmented_lagrangian.py | pypi |
import copy
import numpy as np
from science_optimization.algorithms.utils import box_constraints
from science_optimization.solvers import OptimizationResults
from science_optimization.builder import OptimizationProblem
from science_optimization.function import BaseFunction
from science_optimization.algorithms import BaseAlgorithms
class NelderMead(BaseAlgorithms):
"""
    Derivative-free Nelder-Mead simplex algorithm for minimizing nonlinear functions.
"""
# starting point
_x0 = None
_x_min = None
_x_max = None
_x_bounds = None
_x_min_norm = None
_x_max_norm = None
    # problem dimension
_dim = None
# function
_f = None
# constraint
_g = None
# function values
_fx = None
_gx = None
# algorithm constants
_delta_r = None
_delta_e = None
_delta_ic = None
_delta_oc = None
_delta_s = None
# simplex point lists
_simplex = None
def __init__(self, x0, delta_r=1.0, delta_e=2.0, delta_ic=0.5, delta_oc=0.5, delta_s=0.5):
"""
Args:
x0:
delta_r:
delta_e:
delta_ic:
delta_oc:
delta_s:
"""
self.x0 = x0
self.dim = x0.shape[0]
self.x_min_norm = np.zeros((self.dim, 1))
self.x_max_norm = np.full((self.dim, 1), 100)
self.delta_r = delta_r
self.delta_e = delta_e
self.delta_ic = delta_ic
self.delta_oc = delta_oc
self.delta_s = delta_s
self.simplex = []
self.fx = None
self.gx = None
self.x_min = None
self.x_max = None
self.x_bounds = None
@property
def x0(self):
return self._x0
@property
def x_min(self):
return self._x_min
@property
def x_max(self):
return self._x_max
@property
def x_bounds(self):
return self._x_bounds
@property
def x_min_norm(self):
return self._x_min_norm
@property
def x_max_norm(self):
return self._x_max_norm
@property
def dim(self):
return self._dim
@property
def f(self):
return self._f
@property
def g(self):
return self._g
@property
def fx(self):
return self._fx
@property
def gx(self):
return self._gx
@property
def delta_r(self):
return self._delta_r
@property
def delta_e(self):
return self._delta_e
@property
def delta_ic(self):
return self._delta_ic
@property
def delta_oc(self):
return self._delta_oc
@property
def delta_s(self):
return self._delta_s
@property
def simplex(self):
return self._simplex
@x0.setter
def x0(self, value):
self._x0 = value
@x_min.setter
def x_min(self, value):
self._x_min = value
@x_max.setter
def x_max(self, value):
self._x_max = value
@x_bounds.setter
def x_bounds(self, value):
self._x_bounds = value
@x_min_norm.setter
def x_min_norm(self, value):
self._x_min_norm = value
@x_max_norm.setter
def x_max_norm(self, value):
self._x_max_norm = value
@dim.setter
def dim(self, value):
self._dim = value
@f.setter
def f(self, value):
if not isinstance(value, BaseFunction):
raise Exception("The function must be an instance of BaseFunction!")
self._f = value
@g.setter
def g(self, value):
if not isinstance(value, BaseFunction):
raise Exception("The function must be an instance of BaseFunction!")
self._g = value
@fx.setter
def fx(self, value):
self._fx = value
@gx.setter
def gx(self, value):
self._gx = value
@delta_r.setter
def delta_r(self, value):
self._delta_r = value
@delta_e.setter
def delta_e(self, value):
self._delta_e = value
@delta_ic.setter
def delta_ic(self, value):
self._delta_ic = value
@delta_oc.setter
def delta_oc(self, value):
self._delta_oc = value
@delta_s.setter
def delta_s(self, value):
self._delta_s = value
@simplex.setter
def simplex(self, value):
self._simplex = value
def initialize_fminsearch(self):
"""
Args:
dim:
Returns:
"""
simplex = [self.x0]
for i in range(self.dim):
e_i = np.eye(1, self.dim, i).reshape(self.dim, 1)
h_i = 0.05 if self.x0[i][0] != 0 else 0.00025
simplex.append(box_constraints(self.x0 + h_i * e_i, self.x_bounds))
self.simplex = simplex
def initialize_simplex_size(self, size):
"""
Args:
size:
Returns:
"""
dim = self.dim
simplex = [self.x0]
p = size / (dim * np.sqrt(2))
p = p * ((np.sqrt(dim+1)) + dim - 1)
q = size / (dim * np.sqrt(2))
q = q * ((np.sqrt(dim + 1)) - 1)
e = np.identity(dim)
for i in range(1, dim+1):
point_sum = np.zeros((dim, 1))
p_sign = 1
e[i - 1][i - 1] = 0
for j in range(dim):
if self.x0[j][0] > (self.x_min_norm[j][0] + self.x_max_norm[j][0]) / 2:
point_sum += -1 * q * e[:, j].reshape(dim, 1)
else:
point_sum += q * e[:, j].reshape(dim, 1)
e[i - 1][i - 1] = 1
            if self.x0[i - 1][0] > (self.x_min_norm[i - 1][0] + self.x_max_norm[i - 1][0]) / 2:
p_sign = -1
new_point = self.x0 + p_sign * p * e[i - 1].reshape(dim, 1) + point_sum
simplex.append(new_point)
self.simplex = simplex
def centroid(self, xw_index):
"""
Args:
xw_index:
Returns:
"""
simplex = copy.deepcopy(self.simplex)
del(simplex[xw_index])
return np.mean(simplex, axis=0)
def reflect(self, x_centroid, xw_index):
"""
Args:
x_centroid:
Returns:
"""
return x_centroid + self.delta_r * (x_centroid - self.simplex[xw_index])
def expand(self, x_centroid, x_reflect):
"""
Args:
x_centroid:
x_reflect:
Returns:
"""
return x_centroid + self.delta_e * (x_reflect - x_centroid)
def inside_contraction(self, x_centroid, x_reflect):
"""
Args:
x_centroid:
x_reflect:
Returns:
"""
return x_centroid - self.delta_ic * (x_reflect - x_centroid)
def outside_contraction(self, x_centroid, x_reflect):
"""
Args:
x_centroid:
x_reflect:
Returns:
"""
return x_centroid + self.delta_oc * (x_reflect - x_centroid)
def shrink(self, x_best):
"""
Args:
x_best:
Returns:
"""
for j in range(1, len(self.simplex)):
x_new = x_best + self.delta_s * (self.simplex[j] - x_best)
fx_new, gx_new = self.eval_fg(self.norm2real(x_new))
self.replace_point(idx=j, x=x_new, fx=fx_new, gx=gx_new)
def box_feasible(self, x):
"""
Args:
x:
Returns:
"""
return not(any(np.less(x, self.x_min_norm)) or any(np.greater(x, self.x_max_norm)))
@staticmethod
def is_less_than(fx_1, gx_1, fx_2, gx_2):
"""
Args:
fx_1:
gx_1:
fx_2:
gx_2:
Returns:
"""
if gx_1 > 0 and gx_2 > 0:
return gx_1 < gx_2
elif gx_1 <= 0 and gx_2 <= 0:
return fx_1 < fx_2
else:
return gx_1 <= 0
def norm2real(self, x_norm):
"""
Args:
x_norm:
Returns:
"""
x = 0.01 * x_norm
x = (self.x_max - self.x_min) * x
x = x + self.x_min
return x
def real2norm(self, x):
"""
Args:
x:
Returns:
"""
x_norm = (x - self.x_min) / (self.x_max - self.x_min)
x_norm = x_norm * 100
return x_norm
def constraint_sum(self, x):
"""
Args:
x:
Returns:
"""
if self.g is not None:
gx_eval = self.g.eval(x)
return np.sum(gx_eval[np.where(gx_eval > self.eps)])
else:
return 0
def eval_fg(self, x):
"""
Args:
x:
Returns:
"""
fx = self.f.eval(x)
gx = self.constraint_sum(x=x)
return fx, gx
def replace_point(self, idx, x, fx, gx):
"""
Args:
idx:
x:
fx:
gx:
Returns:
"""
self.simplex[idx] = x
self.fx[idx] = fx
self.gx[idx] = gx
def min(self, x, y):
"""
Args:
x:
y:
Returns:
"""
x_real = self.norm2real(x)
y_real = self.norm2real(y)
fx, gx = self.eval_fg(x_real)
fy, gy = self.eval_fg(y_real)
if self.is_less_than(fx, gx, fy, gy):
return x
return y
def sort_simplex(self):
"""
Returns:
"""
index = [x for x in range(len(self.fx))]
gx_fx_idx = [(x, y, z) for x, y, z in zip(self.gx, self.fx, index)]
result = [t[2] for t in sorted(gx_fx_idx)]
return result
def optimize(self, optimization_problem, debug=False, n_step=10):
"""
Args:
optimization_problem:
debug:
n_step:
Returns:
"""
if not isinstance(optimization_problem, OptimizationProblem):
raise Exception("Optimize must have and OptimizationProblem instance as argument!")
if optimization_problem.objective.objectives.n_functions != 1:
raise Exception("Method able to optimize only one function.")
optimization_results = OptimizationResults()
optimization_results.message = 'Stop by maximum number of iterations.'
self.f = optimization_problem.objective.objectives.functions[0]
if optimization_problem.has_inequality_constraints():
self.g = optimization_problem.constraints.inequality_constraints
self.x_min = optimization_problem.variables.x_min
self.x_max = optimization_problem.variables.x_max
self.x_bounds = np.hstack((optimization_problem.variables.x_min, optimization_problem.variables.x_max))
self.x0 = box_constraints(self.x0, self.x_bounds)
self.x0 = self.real2norm(self.x0)
self.initialize_simplex_size(size=10)
self.fx = np.array([self.f.eval(self.norm2real(x)) for x in self.simplex])
optimization_results.n_function_evaluations += len(self.simplex)
if self.g is not None:
gx = []
for x in self.simplex:
gx.append(self.constraint_sum(x=self.norm2real(x)))
self.gx = np.array(gx)
else:
self.gx = np.zeros(len(self.simplex))
index = self.sort_simplex()
b = index[0]
s = index[-2]
w = index[-1]
stop = False
while optimization_results.n_iterations < self.n_max and not stop:
x_c = self.centroid(xw_index=w)
x_r = self.reflect(x_c, w)
x_b = self.simplex[b]
x_s = self.simplex[s]
x_w = self.simplex[w]
fx_b, gx_b = self.eval_fg(self.norm2real(x_b))
fx_s, gx_s = self.eval_fg(self.norm2real(x_s))
fx_w, gx_w = self.eval_fg(self.norm2real(x_w))
optimization_results.n_function_evaluations += 3
if self.box_feasible(x_r):
fx_r, gx_r = self.eval_fg(self.norm2real(x_r))
optimization_results.n_function_evaluations += 1
if self.is_less_than(fx_r, gx_r, fx_b, gx_b):
x_e = self.expand(x_centroid=x_c, x_reflect=x_r)
use_reflection = True
if self.box_feasible(x_e):
fx_e, gx_e = self.eval_fg(self.norm2real(x_e))
optimization_results.n_function_evaluations += 1
if self.is_less_than(fx_e, gx_e, fx_r, gx_r):
self.replace_point(idx=w, x=x_e, fx=fx_e, gx=gx_e)
use_reflection = False
if debug:
print("expansion")
if use_reflection:
self.replace_point(idx=w, x=x_r, fx=fx_r, gx=gx_r)
if debug:
print("reflection e")
elif self.is_less_than(fx_r, gx_r, fx_s, gx_s):
self.replace_point(idx=w, x=x_r, fx=fx_r, gx=gx_r)
if debug:
print("reflection r")
elif self.is_less_than(fx_r, gx_r, fx_w, gx_w):
x_oc = self.outside_contraction(x_centroid=x_c, x_reflect=x_r)
use_reflection = True
if self.box_feasible(x_oc):
fx_oc, gx_oc = self.eval_fg(self.norm2real(x_oc))
optimization_results.n_function_evaluations += 1
if self.is_less_than(fx_oc, gx_oc, fx_r, gx_r):
self.replace_point(idx=w, x=x_oc, fx=fx_oc, gx=gx_oc)
use_reflection = False
if debug:
print("outside contract")
if use_reflection:
self.replace_point(idx=w, x=x_r, fx=fx_r, gx=gx_r)
if debug:
print("reflection oc")
else:
x_ic = self.inside_contraction(x_centroid=x_c, x_reflect=x_r)
use_shrink = True
if self.box_feasible(x_ic):
fx_ic, gx_ic = self.eval_fg(self.norm2real(x_ic))
optimization_results.n_function_evaluations += 1
if self.is_less_than(fx_ic, gx_ic, fx_r, gx_r):
self.replace_point(idx=w, x=x_ic, fx=fx_ic, gx=gx_ic)
use_shrink = False
if debug:
print("inside contract")
if use_shrink:
self.shrink(x_best=x_b)
optimization_results.n_function_evaluations += self.dim
if debug:
print("shrink")
else:
x_oc = self.outside_contraction(x_centroid=x_c, x_reflect=x_r)
x_ic = self.inside_contraction(x_centroid=x_c, x_reflect=x_r)
fx_ic, gx_ic = self.eval_fg(self.norm2real(x_ic))
if debug:
print("xr infeasible")
if self.box_feasible(x_oc):
x_new = self.min(x_oc, self.min(x_ic, x_w))
optimization_results.n_function_evaluations += 4
if not all(np.equal(x_new, x_w)):
fx_new, gx_new = self.eval_fg(x_new)
optimization_results.n_function_evaluations += 1
self.replace_point(idx=w, x=x_new, fx=fx_new, gx=gx_new)
else:
self.shrink(x_best=x_b)
optimization_results.n_function_evaluations += self.dim
elif self.is_less_than(fx_ic, gx_ic, fx_w, gx_w):
self.replace_point(idx=w, x=x_ic, fx=fx_ic, gx=gx_ic)
else:
self.shrink(x_best=x_b)
optimization_results.n_function_evaluations += self.dim
index = self.sort_simplex()
b = index[0]
s = index[-2]
w = index[-1]
x_norms = [np.linalg.norm(x - self.simplex[b], ord=np.inf, axis=0) for x in self.simplex]
if max(x_norms) < self.eps:
optimization_results.message = "Stop by norm of the max edge of the simplex less than " + str(self.eps)
stop = True
fx_norms = [np.abs(self.f.eval(x) - self.f.eval(self.simplex[b])) for x in self.simplex]
if max(fx_norms) < self.eps:
optimization_results.message = "Stop by norm of the max image of the simplex points less than " +\
str(self.eps)
stop = True
optimization_results.n_iterations += 1
optimization_results.x = self.norm2real(self.simplex[b])
optimization_results.fx = self.fx[b]
return optimization_results
def print_simplex(self):
simplex = np.array(self.simplex)
print(simplex, '\n') | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/algorithms/derivative_free/nelder_mead.py | 0.787646 | 0.533944 | nelder_mead.py | pypi |
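

# ---------------------------------------------------------------------------
# Usage sketch (not part of the library API): minimizing a simple quadratic
# with the derivative-free Nelder-Mead simplex. The objective, bounds and
# starting point below are illustrative assumptions only.
if __name__ == "__main__":
    from science_optimization.function import GenericFunction
    from science_optimization.problems import GenericProblem

    # f(x) = (x1 - 1)^2 + (x2 + 2)^2, one evaluation point per column
    f = [GenericFunction(func=lambda x: (x[0, :] - 1) ** 2 + (x[1, :] + 2) ** 2, n=2)]

    x_bounds = np.array([[-5.0, 5.0],
                         [-5.0, 5.0]])
    x0 = np.array([[4.0], [4.0]])

    problem = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=[], ineq_cons=[], x_bounds=x_bounds))
    results = NelderMead(x0=x0).optimize(optimization_problem=problem, debug=False)
    print(results.x, results.fx)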
import nlpalg
import numpy as np
from science_optimization.algorithms import BaseAlgorithms
from science_optimization.solvers import OptimizationResults
from science_optimization.builder import OptimizationProblem
class EllipsoidMethod(BaseAlgorithms):
"""Ellipsoid algorithm method.
"""
# attributes
_x0 = None
_Q0 = None
_max_cuts = None
_shallow_cut = None
_decomposition = None
_memory = None
def __init__(self,
x0: np.ndarray=np.array([[]]).reshape(-1, 1),
Q0: np.ndarray=np.array([[]]),
max_cuts: int=32,
shallow_cut: float=0,
decomposition: bool=True,
memory: bool=True,
n_max: int=None,
eps: float=None):
"""Ellipsoid algorithm constructor.
Args:
x0 : (np.ndarray) initial point.
Q0 : (np.ndarray) initial inverse ellipsoid matrix.
max_cuts : (int) maximum number of ellipsoid cuts per iteration.
shallow_cut : (float) shallow cut option [0, 1].
decomposition: (bool) is matrix decomposition indicator (True: sqrt decomposition).
memory : (bool) cut memory indicator.
n_max : (int) maximum number of iterations for stop criterion.
eps : (float) maximum uncertainty for stop criterion.
"""
# parameters
self.x0 = 1.0 * x0
self.Q0 = Q0
self.max_cuts = max_cuts
self.shallow_cut = shallow_cut
self.decomposition = decomposition
self.memory = memory
if n_max is not None:
self.n_max = n_max
if eps is not None:
self.eps = eps
# getters
@property
def x0(self):
return self._x0
@property
def Q0(self):
return self._Q0
@property
def max_cuts(self):
return self._max_cuts
@property
def shallow_cut(self):
return self._shallow_cut
@property
def decomposition(self):
return self._decomposition
@property
def memory(self):
return self._memory
# setters
@x0.setter
def x0(self, x0):
if x0.shape[1] == 1:
self._x0 = x0
else:
raise ValueError("Initial point must be a column vector.")
@Q0.setter
def Q0(self, Q0):
# check if input is numpy
if not isinstance(Q0, np.ndarray):
raise Warning("x must be a numpy array!")
else:
self._Q0 = Q0
@max_cuts.setter
def max_cuts(self, k):
if k > 0:
self._max_cuts = k
else:
raise ValueError("Maximum number of cuts must be a positive number!")
@shallow_cut.setter
def shallow_cut(self, s):
if 0 <= s <= 1:
self._shallow_cut = s
else:
raise ValueError("Shallow cut must be in [0, 1).")
@decomposition.setter
def decomposition(self, d):
# check if input is numpy
if not isinstance(d, bool):
raise Warning("Decomposition must be a boolean!")
else:
self._decomposition = d
@memory.setter
def memory(self, m):
# check if input is numpy
if not isinstance(m, bool):
raise Warning("Memory must be a boolean!")
else:
self._memory = m
def optimize(self,
optimization_problem: OptimizationProblem,
debug: bool=True,
n_step: int=5) -> OptimizationResults:
"""Optimization core of Ellipsoid method.
Args:
optimization_problem: (OptimizationProblem) an optimization problem.
debug : (bool) debug option indicator.
n_step : (int) iterations steps to store optimization results.
Returns:
optimization_results: (OptimizationResults) optimization results.
"""
# get input arguments
f, df, _, _, g, dg, A, b, Aeq, beq, x_min, x_max, _ = optimization_problem.op_arguments()
# optimization results
optimization_results = OptimizationResults()
# call method
if not debug:
# method output
xb, fxb, _, _, _, stop = nlpalg.ellipsoidmethod(f, df, g, dg, A, b, Aeq, beq, x_min, x_max, self.x0,
self.Q0, self.eps, self.n_max, self.max_cuts,
self.shallow_cut, self.decomposition, self.memory, debug)
# results
optimization_results.x = xb
optimization_results.fx = fxb
else:
# TODO (matheus): implement iterative run
_, _, x, fx, Qi, stop = nlpalg.ellipsoidmethod(f, df, g, dg, A, b, Aeq, beq, x_min, x_max, self.x0, self.Q0,
self.eps, self.n_max, self.max_cuts, self.shallow_cut,
self.decomposition, self.memory, debug)
# optimization results
optimization_results.n_iterations = x.shape[1] # number of iterations
optimization_results.x = x[:, 0::n_step]
optimization_results.fx = fx[:, 0::n_step]
optimization_results.parameter = {'Q': Qi[..., 0::n_step]}
# stop criteria
if stop == 0:
optimization_results.message = 'Stop by maximum number of iterations.'
elif stop == 1:
optimization_results.message = 'Stop by ellipsoid volume reduction.'
elif stop == 2:
optimization_results.message = 'Stop by empty localizing set.'
elif stop == 3:
optimization_results.message = 'Stop by degenerate ellipsoid.'
else:
optimization_results.message = 'Unknown termination cause.'
return optimization_results | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/algorithms/cutting_plane/ellipsoid_method.py | 0.82151 | 0.428592 | ellipsoid_method.py | pypi |
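

# ---------------------------------------------------------------------------
# Usage sketch (illustrative assumptions only): the initial ellipsoid is taken
# as the ball Q0 = r^2 * I, chosen large enough to cover the feasible box, and
# the compiled nlpalg backend imported above performs the actual iterations.
if __name__ == "__main__":
    from science_optimization.function import GenericFunction
    from science_optimization.problems import GenericProblem

    f = [GenericFunction(func=lambda x: (x[0, :] - 1) ** 2 + (x[1, :] + 2) ** 2, n=2)]
    x_bounds = np.array([[-5.0, 5.0],
                         [-5.0, 5.0]])
    problem = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=[], ineq_cons=[], x_bounds=x_bounds))

    x0 = np.zeros((2, 1))
    Q0 = 100.0 * np.eye(2)  # radius-10 ball contains the whole box
    results = EllipsoidMethod(x0=x0, Q0=Q0).optimize(optimization_problem=problem, debug=False)
    print(results.x, results.fx)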
import abc
import numpy as np
from science_optimization.algorithms import BaseAlgorithms
from science_optimization.algorithms.unidimensional import GoldenSection, MultimodalGoldenSection
from science_optimization.solvers import OptimizationResults
from science_optimization.algorithms.utils import hypercube_intersection
from science_optimization.algorithms.utils import box_constraints
from science_optimization.function import GenericFunction, BaseFunction
from science_optimization.problems import GenericProblem
from science_optimization.builder import OptimizationProblem
from typing import Tuple
class BaseSearchDirection(BaseAlgorithms):
"""Base class for search direction algorithms.
"""
# attributes
_x0 = None
_x_bounds = None
_uni_dimensional_opt_strategy = None
_fun = None
def __init__(self,
x0: np.ndarray,
n_max: int = None,
eps: float = None,
line_search_method: str='gs'):
"""Constructor of search direction algorithms.
Args:
x0 : (np.ndarray) initial point.
n_max : (int) maximum number of iterations.
eps : (float) maximum uncertainty for stop criterion.
line_search_method: (str) line search strategy ('gs': golden section or 'mgs' multimodal gs).
"""
self.x0 = 1.0 * x0
self.uni_dimensional_opt_strategy = line_search_method
if n_max is not None:
self.n_max = n_max
if eps is not None:
self.eps = eps
# attributes interface
@property
def x0(self):
return self._x0
@property
def x_bounds(self):
return self._x_bounds
@property
def uni_dimensional_opt_strategy(self):
return self._uni_dimensional_opt_strategy
@property
def fun(self):
return self._fun
# setters
@x0.setter
def x0(self, x0):
if x0.shape[1] == 1:
self._x0 = x0
else:
raise ValueError("Initial point must be a column vector.")
@x_bounds.setter
def x_bounds(self, x_bounds):
if x_bounds.shape[1] == 2:
self._x_bounds = x_bounds
else:
raise ValueError("x_bounds must be a nx2-array.")
@fun.setter
def fun(self, fun):
self._fun = fun
@uni_dimensional_opt_strategy.setter
def uni_dimensional_opt_strategy(self, uni_d_strategy):
self._uni_dimensional_opt_strategy = uni_d_strategy
def correct_direction_by_box(self, d: np.ndarray, x: np.ndarray, alpha):
"""
check for values too near the box limits, and avoid the direction to go that way
Args:
d: current direction
x: current x value
alpha: previous value of alpha (unidimensional optimization)
Returns:
"""
for i, d_each in enumerate(d):
if x[i] + d_each * alpha > self.x_bounds[i][1] + self.eps:
d[i] = self.eps ** 2
d = d / np.linalg.norm(d, 2)
if x[i] + d_each * alpha < self.x_bounds[i][0] + self.eps:
d[i] = self.eps ** 2
d = d / np.linalg.norm(d, 2)
# methods
def optimize(self,
optimization_problem: OptimizationProblem,
debug: bool,
n_step: int=5):
"""Optimization core of Search direction methods.
Args:
optimization_problem: (OptimizationProblem) an optimization problem.
debug : (bool) debug option indicator.
n_step : (int) iterations steps to store optimization results.
Returns:
optimization_results: (OptimizationResults) optimization results.
"""
# instantiate results
optimization_results = OptimizationResults()
optimization_results.message = 'Stop by maximum number of iterations.'
# define functions
self.fun = optimization_problem.objective.objectives
# initial point
x = self.x0
# bounds
self.x_bounds = np.hstack((optimization_problem.variables.x_min,
optimization_problem.variables.x_max))
# correct x to bounds
x = box_constraints(x, self.x_bounds)
# initial results
nf = optimization_problem.objective.objectives.n_functions # number of functions
fx = np.zeros((nf, 0))
optimization_results.x = np.zeros((x.shape[0], 0))
# store parameters in debug option
debug = False # TODO(Matheus): debug
if debug:
optimization_results.parameter = {'alpha': np.zeros((0,))}
alpha = 1
# main loop
stop = False
while optimization_results.n_iterations < self.n_max and not stop:
# compute search direction
d = self._search_direction(fun=self.fun, x=x)
self.correct_direction_by_box(d, x, alpha)
# compute search interval
interval = self._search_interval(x=x, d=d)
# uni-dimensional optimization
alpha, nfe = self._uni_dimensional_optimization(x=x, d=d, fun=self.fun, interval=interval,
strategy=self.uni_dimensional_opt_strategy, debug=debug)
if debug:
alpha = alpha[:, -1]
# update function evaluation count
optimization_results.n_function_evaluations += nfe
# step towards search direction
y = x + alpha*d
fx_x = self.fun.eval(x)
fx_y = self.fun.eval(y)
# stop criteria: stalled
if np.linalg.norm(x-y, 2) < self.eps:
optimization_results.message = 'Stop by stalled search.'
stop = True
# stop criteria: unchanged function value
if np.abs(fx_x - fx_y) < self.eps:
optimization_results.message = 'Stop by unchanged function value.'
stop = True
# stop criteria: null gradient
if np.linalg.norm(self.fun.gradient(y), 2) < self.eps:
optimization_results.message = 'Stop by null gradient.'
stop = True
# update x
x = y.copy()
fx_x = fx_y.copy()
# update results
if debug and not (optimization_results.n_iterations + 1) % n_step:
optimization_results.x = np.hstack((optimization_results.x, x))
fx = np.hstack((fx, fx_x))
optimization_results.fx = fx
optimization_results.parameter['alpha'] = np.hstack((optimization_results.parameter['alpha'],
np.array(alpha)))
if not debug:
optimization_results.x = x
optimization_results.fx = fx_x
# update count
optimization_results.n_iterations += 1
return optimization_results
@abc.abstractmethod
def _search_direction(self, **kwargs) -> np.ndarray:
"""Abstract search direction."""
pass
@staticmethod
def _uni_dimensional_optimization(x: np.ndarray,
d: np.ndarray,
fun: BaseFunction,
interval: list,
strategy: str,
debug: bool) -> Tuple[np.ndarray, int]:
"""Unidimensional optimization.
Args:
x : (np.ndarray) current point.
d : (np.ndarray) search direction.
fun : (BaseFunction) function object.
interval: (list) interval of search [a, b].
strategy: (str) which uni-dimensional strategy to use.
debug : (bool) debug option indicator.
Returns:
alpha: optimal step
nfe : number of function evaluations
"""
# objective function
def line_search_function(a):
return fun.eval(x + a*d)
# function encapsulation
f = [GenericFunction(func=line_search_function, n=1)]
interval = np.array(interval).reshape(1, -1)
# build problem
op = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=[], ineq_cons=[], x_bounds=interval))
# instantiate uni-dimensional optimization class
if strategy == "gs":
op_result = GoldenSection()
elif strategy == 'mgs':
op_result = MultimodalGoldenSection(all_minima=False)
else:
raise Warning("Unknown unidimensional optimization strategy.")
# optimize
output = op_result.optimize(optimization_problem=op, debug=debug)
alpha = output.x
nfe = output.n_function_evaluations
return alpha, nfe
def _search_interval(self, x: np.ndarray, d: np.ndarray) -> list:
"""Determination of search interval.
Args:
x: (np.ndarray) current point.
d: (np.ndarray) search direction.
Returns:
interval: (list) [a, b] search interval.
"""
# interval
a = 0
if np.linalg.norm(d) < self.eps:
b = a
else:
b, _ = hypercube_intersection(x=x, d=d, x_bounds=self.x_bounds) # maximum step
interval = [a, b]
return interval | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/algorithms/search_direction/base_search_direction.py | 0.809803 | 0.443902 | base_search_direction.py | pypi |
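

# ---------------------------------------------------------------------------
# Minimal concrete subclass sketch (illustrative only, not the package's own
# implementation): a steepest-descent direction showing what
# _search_direction is expected to return for the optimize() loop above.
class _SteepestDescentSketch(BaseSearchDirection):
    """Example search-direction algorithm: normalized negative gradient."""

    def _search_direction(self, fun: BaseFunction, x: np.ndarray) -> np.ndarray:
        # descend along the negative gradient, normalized to unit length
        d = -fun.gradient(x)
        norm = np.linalg.norm(d, 2)
        return d / norm if norm > 0 else d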
from science_optimization.algorithms import BaseAlgorithms
from science_optimization.solvers import OptimizationResults
from science_optimization.function import LinearFunction
from science_optimization.builder import OptimizationProblem
from scipy.optimize import linprog
import numpy as np
class ScipyBaseLinear(BaseAlgorithms):
"""Base scipy linear method.
"""
# parameters
_method = None
def __init__(self, method=None, n_max=None):
"""Constructor.
Args:
method: 'simplex' or 'interior-point'.
n_max: maximum number of iterations.
"""
if n_max is not None:
self.n_max = n_max
if method is not None:
self.method = method
# get
@property
def method(self):
"""Gets method."""
return self._method
# sets
@method.setter
def method(self, method):
"""Sets method."""
if method == 'simplex' or method == 'interior-point':
self._method = method
else:
raise ValueError("method must be either 'simplex' or 'interior-point'!")
# optimize method
def optimize(self,
optimization_problem: OptimizationProblem,
debug: bool,
n_step: int) -> OptimizationResults:
"""Optimization core.
Args:
optimization_problem: (OptimizationProblem) an optimization problem.
debug : (bool) debug option indicator.
n_step : (int) iterations steps to store optimization results.
Returns:
optimization_results: (OptimizationResults) optimization results.
"""
# optimization problem check
self.input(optimization_problem)
# get input arguments
_, _, c, d, _, _, A, b, Aeq, beq, x_min, x_max, _ = optimization_problem.op_arguments()
# output
optimization_results = OptimizationResults()
output = linprog(c.ravel(), method=self.method, A_ub=A, b_ub=b, A_eq=Aeq, b_eq=beq,
bounds=np.hstack((x_min, x_max)), options={'maxiter': self.n_max})
optimization_results.x = output.x.reshape(-1, 1) if isinstance(output.x, np.ndarray) else output.x
optimization_results.fx = output.fun
optimization_results.message = output.message
optimization_results.n_iterations = output.nit
return optimization_results
@staticmethod
def input(op: OptimizationProblem):
"""Optimization problem input.
Args:
op: (OptimizationProblem) an optimization problem instance.
"""
# number of functions test
if op.objective.objectives.n_functions > 1:
            raise ValueError('Multiobjective linear programming is not yet implemented.')
# linear objective function test
if not isinstance(op.objective.objectives.functions[0], LinearFunction):
raise ValueError('Objective function must be linear!')
if op.nonlinear_functions_indices(op.constraints.inequality_constraints.functions) \
or op.nonlinear_functions_indices(op.constraints.equality_constraints.functions):
raise ValueError('Constraints must be linear.') | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/algorithms/linear_programming/scipy_base_linear.py | 0.944382 | 0.388241 | scipy_base_linear.py | pypi |
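

# ---------------------------------------------------------------------------
# Usage sketch (illustrative data): min -x1 - 2*x2 s.t. x1 + x2 <= 4 and
# 0 <= x <= 3, solved with scipy's 'simplex' backend. The MIP builder import
# and the numbers below are example assumptions, not part of this module.
if __name__ == "__main__":
    from science_optimization.problems import MIP

    c = np.array([[-1.0], [-2.0]])
    A = np.array([[1.0, 1.0]])
    b = np.array([[4.0]])
    x_bounds = np.array([[0.0, 3.0],
                         [0.0, 3.0]])

    op = OptimizationProblem(builder=MIP(c=c, A=A, b=b, x_bounds=x_bounds))
    results = ScipyBaseLinear(method='simplex').optimize(op, debug=False, n_step=1)
    print(results.x, results.fx)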
from science_optimization.algorithms import BaseAlgorithms
from science_optimization.solvers import OptimizationResults
from science_optimization.function import LinearFunction
from science_optimization.builder import OptimizationProblem
from ortools.linear_solver import pywraplp
import numpy as np
class Glop(BaseAlgorithms):
"""Interface to Google GLOP solver (https://developers.google.com/optimization/install/)."""
# parameters
_t_max = None
def __init__(self, t_max: float=5):
"""Constructor of glop optimization solver.
Args:
t_max: (float) time limit in seconds.
"""
self.t_max = t_max
# get
@property
def t_max(self):
"""Gets method."""
return self._t_max
# sets
@t_max.setter
def t_max(self, t_max):
"""Sets method."""
        self._t_max = int(t_max * 1e3)  # pywraplp's SetTimeLimit expects milliseconds
# optimize method
def optimize(self,
optimization_problem: OptimizationProblem,
debug: bool = False,
n_step: int = 0) -> OptimizationResults:
"""Optimization core.
Args:
optimization_problem: (OptimizationProblem) an optimization problem.
debug : (bool) debug option indicator.
n_step : (int) iterations steps to store optimization results.
Returns:
optimization_results: (OptimizationResults) optimization results.
"""
# optimization problem check
self.input(optimization_problem)
# get input arguments
_, _, c, d, _, _, A, b, Aeq, beq, x_min, x_max, x_type = optimization_problem.op_arguments()
# instantiate solver object
if 'd' in x_type:
problem_type = 'MIP'
problem_solver = pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING
else:
problem_type = 'LP'
problem_solver = pywraplp.Solver.GLOP_LINEAR_PROGRAMMING
solver = pywraplp.Solver(problem_type, problem_solver)
# create variables
n = x_min.shape[0]
x = []
for i in range(n):
if x_type[i] == 'c':
x.append(solver.NumVar(float(x_min[i, 0]), float(x_max[i, 0]), "x_"+str(i)))
elif x_type[i] == 'd':
x.append(solver.IntVar(float(x_min[i, 0]), float(x_max[i, 0]), "x_"+str(i)))
else:
raise ValueError("Variable type must be either 'c' or 'd'.")
# create inequality constraints (A*x <= b)
mi = A.shape[0]
ic = [[]] * mi
for i in range(mi):
ic[i] = solver.Constraint(-solver.infinity(), float(b[i, 0]))
for j in range(n):
ic[i].SetCoefficient(x[j], float(A[i, j]))
# create equality constraints (Aeq*x = beq)
me = Aeq.shape[0] if Aeq is not None else 0
ec = [[]] * me
for i in range(me):
ec[i] = solver.Constraint(float(beq[i, 0]), float(beq[i, 0]))
for j in range(n):
ec[i].SetCoefficient(x[j], float(Aeq[i, j]))
# set objective function
objective = solver.Objective()
for i in range(n):
objective.SetCoefficient(x[i], float(c[0, i]))
objective.SetMinimization()
# set time limit
solver.SetTimeLimit(self.t_max)
# solver
solver.Solve()
# output
op_results = OptimizationResults()
xb = np.zeros((n, 1))
for i in range(n):
xb[i, 0] = x[i].solution_value()
op_results.x = xb
op_results.fx = np.array([solver.Objective().Value()])
return op_results
@staticmethod
def input(op: OptimizationProblem):
"""Optimization problem input.
Args:
            op: (OptimizationProblem) an optimization problem instance.
"""
# number of functions test
if op.objective.objectives.n_functions > 1:
            raise ValueError('Multiobjective linear programming is not yet implemented.')
# linear objective function test
if not isinstance(op.objective.objectives.functions[0], LinearFunction):
raise ValueError('Objective function must be linear!')
if op.nonlinear_functions_indices(op.constraints.inequality_constraints.functions) \
or op.nonlinear_functions_indices(op.constraints.equality_constraints.functions):
raise ValueError('Constraints must be linear.') | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/algorithms/linear_programming/glop.py | 0.932176 | 0.500854 | glop.py | pypi |
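

# ---------------------------------------------------------------------------
# Usage sketch (illustrative data): a small mixed-integer problem
# min -x1 - 2*x2 s.t. x1 + x2 <= 4, 0 <= x <= 3 with x2 integer. The MIP
# builder import and the data below are example assumptions.
if __name__ == "__main__":
    from science_optimization.problems import MIP

    c = np.array([[-1.0], [-2.0]])
    A = np.array([[1.0, 1.0]])
    b = np.array([[4.0]])
    x_bounds = np.array([[0.0, 3.0],
                         [0.0, 3.0]])

    op = OptimizationProblem(builder=MIP(c=c, A=A, b=b, x_bounds=x_bounds, x_type=['c', 'd']))
    results = Glop(t_max=10).optimize(op)
    print(results.x, results.fx)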
import numpy as np
from .base_function import BaseFunction
class PolynomialFunction(BaseFunction):
"""
Class that implements a polynomial function
"""
_flag_num_g = False # this function uses analytical gradient
def __init__(self, exponents, coefficients):
"""The constructor for the polynomial function instance.
Args:
exponents: A matrix with the exponents of the function in order of the variables
for each element of the function
coefficients: A vector with the coefficients of each element of the function
Example:
For the function ax² + bxy + cy²:
exponents : [[2,0],[1,1],[0,2]]
coefficients : [a, b, c]
"""
# parameters check
self.numpy_check(exponents, coefficients)
self.parameters = {'e': exponents,
'c': coefficients}
@staticmethod
    def aux_eval(f, i, x):
        """Evaluates polynomial f at column i of x: each row of the exponent
        matrix defines a monomial, which is weighted by its coefficient and summed."""
        return ((np.tile((x[:, i]).transpose(), (f.parameters['e'].shape[0], 1)) ** f.parameters['e']).prod(axis=1)
                * f.parameters['c']).sum(axis=0)
    def aux_grad_j(self, i, j, x, dfdx):
        """Partial derivative with respect to variable j at evaluation point i
        (column i of x), stored in dfdx[j, i]: each monomial's exponent of
        variable j is decremented and used as a multiplying factor."""
C = np.copy(self.parameters['e'])
val = np.copy(C[:, j])
d = np.where(val > 0)
C[d, j] = C[d, j] - 1
dfdx[j, i] = ((np.tile((x[:, i]).transpose(), (self.parameters['e'].shape[0], 1)) ** C).prod(axis=1) * val *
self.parameters['c']).sum(axis=0)
    def aux_grad_i(self, i, j, x, dfdx):
        """Fills column i of dfdx (the gradient at evaluation point i) by
        vectorizing aux_grad_j over the variable index j."""
grad_j_vec = np.vectorize(PolynomialFunction.aux_grad_j, excluded=['self', 'i', 'x', 'dfdx'], otypes=[float])
grad_j_vec(self, i=i, j=j, x=x, dfdx=dfdx)
def dimension(self):
return len(self.parameters['e'][0])
def eval(self, x):
""" Polynomial function evaluation.
Args:
x: A matrix with the evaluation points, the structure of the matrix should have the tuples in the
columns, so each column is an evaluation point
Returns:
aux: Returns a vector with the evaluation value in each point (the index of the value matches the index
of the column of the evaluation point)
Example:
For the function ax² + bxy + cy²:
With x = [[1,2,3],[3,2,1]]
Returns: [a + 3b + 9c, 4a + 4b + 4c, 9a + 3b + c]
For the function x³ + y³ + z³
With x = [[1],[2],[3]]
Returns: [36]
"""
# input check
self.input_check(x)
# eval
num = x.shape[1]
fx = np.arange(start=0, stop=num, step=1)
eval_vec = np.vectorize(self.aux_eval, excluded=['f', 'x'])
fx = eval_vec(f=self, i=fx, x=x)
return fx
def gradient(self, x):
"""Polynomial gradient evaluation.
Args:
x: A matrix with the evaluation points, the structure of the matrix should have the tuples in the
columns, so each column is an evaluation point
Returns:
dfdx: Returns a matrix with the gradient vector in each point (the index of the row where the gradient is
matches the index of the column of the evaluation point)
Example:
For the function ax² + bxy + cy²:
With x = [[1,2,3],[3,2,1]]
The gradient should be : [2ax + by, 2cy + bx]
Returns:[[2a + 3b, 6c + b],[4a + 2b, 4c + 2b],[6a + b, 2c + 3b]]
For the function x³ + y³ + z³
With x = [[1],[2],[3]]
The gradient should be : [3x²,3y²,3z²]
Returns: [3, 12, 27]
"""
# input check
self.input_check(x)
# gradient
rows, columns = x.shape
if self.parameters['c'].size <= 1:
dfdx = np.zeros((rows, columns))
else:
dfdx = np.zeros((rows, columns))
auxi = np.arange(start=0, stop=columns, step=1)
auxj = np.arange(start=0, stop=rows, step=1)
grad_i_vec = \
np.vectorize(PolynomialFunction.aux_grad_i, excluded=['self', 'j', 'x', 'dfdx'], otypes={object})
np.array(grad_i_vec(self, i=auxi, j=auxj, x=x, dfdx=dfdx))
return dfdx
def aux_hes_k(self, i, j, k, x, hfdx):
C = np.copy(self.parameters['e'])
valj = np.copy(C[:, j])
d = np.where(valj > 0) # PolynomialFunction.indices(valj, lambda x: x > 0)
for a in d:
C[a, j] = C[a, j] - 1
valk = np.copy(C[:, k])
d = np.where(valk > 0) # PolynomialFunction.indices(valk, lambda x: x > 0)
for a in d:
C[a, k] = C[a, k] - 1
hfdx[j, k, i] = ((np.tile((x[:, i]).transpose(), (self.parameters['e'].shape[0], 1)) ** C).prod(
axis=1) * valj * valk * self.parameters['c']).sum(axis=0)
hfdx[k, j, i] = hfdx[j, k, i]
return hfdx
def aux_hes_j(self, i, j, k, x, hfdx):
C = np.copy(self.parameters['e'])
val = np.copy(C[:, j])
val = val * (val - 1)
d = np.where(val > 1) # PolynomialFunction.indices(val, lambda x: x > 1)
for a in d:
C[a, j] = C[a, j] - 2
hfdx[j, j, i] = ((np.tile((x[:, i]).transpose(), (self.parameters['e'].shape[0], 1)) ** C).prod(axis=1) * val *
self.parameters['c']).sum(axis=0)
grad_hes_k = np.vectorize(PolynomialFunction.aux_hes_k, excluded=['i', 'j', 'x', 'hfdx'], otypes={object})
grad_hes_k(self, i=i, j=j, k=k, x=x, hfdx=hfdx)
def aux_hes_i(self, i, j, k, x, hfdx):
grad_hes_j = np.vectorize(PolynomialFunction.aux_hes_j, excluded=['i', 'k', 'x', 'hfdx'], otypes={object})
grad_hes_j(self, i=i, j=j, k=k, x=x, hfdx=hfdx)
def hessian(self, x):
"""Polynomial hessian evaluation.
Args:
x: A matrix with the evaluation points, the structure of the matrix should have the tuples in the
columns, so each column is an evaluation point
Returns:
hfdx: Returns a vector of matrices with the hessian matrix in each point (the index of the row where
the hessian is matches the index of the column of the evaluation point)
Example:
For the function ax² + bxy + cy²:
With x = [[1,2,3],[3,2,1]]
The gradient should be : [2ax + by, 2cy + bx]
So the hessian should be : [[2a,b],[b,2c]]
Returns:[[[2a,b],[b,2c]],[[2a,b],[b,2c]],[[2a,b],[b,2c]]]
For the function x³ + y³ + z³
With x = [[1],[2],[3]]
The gradient should be : [3x²,3y²,3z²]
So the hessian should be : [[6x,0,0],[0,6y,0],[0,0,6z]]
Returns: [[6,0,0],[0,12,0],[0,0,18]]
"""
# input check
self.input_check(x)
# hessian
rows, columns = x.shape
if self.parameters['c'].size < rows:
hfdx = np.zeros((rows, rows, columns))
else:
hfdx = np.zeros((rows, rows, columns))
auxi = np.arange(start=0, stop=columns, step=1)
auxj = np.arange(start=0, stop=rows, step=1)
auxk = np.arange(start=0, stop=rows, step=1)
hes_i_vec = np.vectorize(PolynomialFunction.aux_hes_i, excluded=['self', 'j', 'k', 'x', 'hfdx'],
otypes={object})
np.array(hes_i_vec(self, i=auxi, j=auxj, k=auxk, x=x, hfdx=hfdx))
return hfdx.transpose()
def input_check(self, x):
"""Check input dimension.
Args:
x: point to be evaluated.
Returns:
indicator: indicator if input os consistent
"""
# check if input is numpy
self.numpy_check(x)
# check dimension
x_dim = x.shape
param_dim = len(self.parameters['e'][0])
if len(x_dim) == 1:
raise Warning("x must be a {}xm (m>0) array!".format(param_dim))
if not x_dim[0] == param_dim:
raise Warning("x must be a {}xm array!".format(param_dim))
if not all(len(e) == param_dim for e in self.parameters['e']):
raise Warning("List of exponents must have the same dimension!") | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/function/polynomial_function.py | 0.818845 | 0.755997 | polynomial_function.py | pypi |
from .base_function import BaseFunction
class GenericFunction(BaseFunction):
"""Class to convert a python function to a BaseFunction instance."""
def __init__(self, func, n, grad_func=None):
"""Constructor of a generic function.
Args:
func : (callable) instance of a python function for function evaluation
n : (int) number of function arguments
grad_func: (callable) instance of a python function for gradient evaluation
"""
# check if object is a function
if not callable(func):
raise Warning("func must be callable.")
if grad_func is not None and not callable(grad_func):
raise Warning("grad_func must be callable.")
if grad_func is not None:
self.flag_num_g = False
# set parameters
self.parameters = {'func': func,
'n': n,
'grad_func': grad_func}
def dimension(self):
return self.parameters['n']
def eval(self, x):
"""Evaluates generic function
Args:
x: (numpy array) evaluation point.
Returns:
fx: (numpy array) function evaluation at point x.
"""
# input check
self.input_check(x)
# function evaluation
f = self.parameters['func']
fx = f(x)
return fx
def gradient(self, x):
"""Gradient of generic function
Args:
x: (numpy array) evaluation point.
Returns:
dfx: (numpy array) function evaluation at point x.
"""
# gradient evaluation
df = self.parameters['grad_func']
# input check
self.input_check(x)
if df is not None:
# evaluate
dfx = df(x)
# check dimension
if dfx.shape[0] != self.parameters['n']:
raise ValueError('Callable grad_func must return a {}xm array'.format(self.parameters['n']))
else:
dfx = self.numerical_gradient(x)
return dfx
def input_check(self, x):
"""Check input dimension.
Args:
x: (numpy array) point to be evaluated.
"""
# check if input is numpy
self.numpy_check(x)
if not x.shape[0] == self.parameters['n']:
raise Warning("Point x must have {} dimensions.".format(self.parameters['n'])) | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/function/generic_function.py | 0.91501 | 0.437944 | generic_function.py | pypi |
import numpy as np
import numpy.matlib
from .base_function import BaseFunction
class QuadraticFunction(BaseFunction):
"""
Class that implements a quadratic function
"""
_flag_num_g = False # this function uses analytical gradient
def __init__(self, Q, c, d=0):
""" Set parameters for x'Qx + c'x + d.
Args:
Q: quadratic coefficients of equations (n x n)-matrix
c: scaling n-vector coefficients of equations
d: constants of equations
"""
# parameters check
self.numpy_check(Q, c)
# set parameters
self.parameters = {'Q': Q,
'c': c,
'd': d}
def dimension(self):
return self.parameters['Q'].shape[0]
def eval(self, x):
""" Quadratic function evaluation.
Args:
x: evaluation point
Returns:
fx: evaluates the point value in the function
"""
# input check
self.input_check(x)
# define parameters
Q = self.parameters['Q']
c = self.parameters['c']
d = self.parameters['d']
# evaluates the point
fx = np.sum(x*(np.dot(Q, x)), axis=0) + np.dot(c.T, x) + d
return fx
def gradient(self, x):
"""Derivative relative to input.
Args:
x: evaluation point
Returns:
dfdx: derivative at evaluation points
"""
# input check
self.input_check(x)
# define parameters
Q = self.parameters['Q']
c = self.parameters['c']
# quadratic function gradient
dfdx = np.matlib.repmat(c, 1, x.shape[1])
dfdx = dfdx + np.dot((Q + Q.T), x)
return dfdx
def hessian(self, x):
"""Second derivative relative to input.
Args:
x: evaluation point
Returns:
hfdx: second derivative at evaluation points
"""
# input check
self.input_check(x)
# define parameters
Q = self.parameters['Q']
# quadratic function hessian
hfdx = np.tile(Q + Q.T, (x.shape[1], 1, 1))
return hfdx
def input_check(self, x):
"""Check input dimension.
Args:
x: point to be evaluated.
Returns:
indicator: indicator if input os consistent
"""
# check if input is numpy
self.numpy_check(x)
# check dimension
x_dim = x.shape
param_dim = self.parameters['Q'].shape[0]
if len(x_dim) == 1:
raise Warning("x must be a {}xm (m>0) array!".format(param_dim))
if not x_dim[0] == param_dim:
raise Warning("x must be a {}xm array!".format(param_dim)) | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/function/quadratic_function.py | 0.880964 | 0.632588 | quadratic_function.py | pypi |
import numpy as np
from science_optimization.function import BaseFunction, LinearFunction, FunctionsComposite
class AugmentedLagrangeFunction(BaseFunction):
"""
Class that deals with the function used in the Augmented Lagrangian method
"""
eq_aux_func = None
ineq_aux_func = None
aux_rho = None
_flag_num_g = False # this function uses analytical gradient
def input_check(self, x):
"""Check input dimension.
Args:
x: (numpy array) point to be evaluated.
"""
# check if input is numpy
self.numpy_check(x)
if not x.shape[0] == self.dimension():
raise Warning("Point x must have {} dimensions.".format(self.parameters['n']))
def eval(self, x):
"""
Args:
x:
Returns:
"""
if self.ineq_aux_func is not None:
aux_max = self.ineq_aux_func.eval(x=x)
aux_max[aux_max < 0] = 0
ineq_part = 0.5 * self.rho * sum(aux_max ** 2)
else:
ineq_part = 0
if self.eq_aux_func is not None:
eq_part = 0.5 * sum((self.aux_rho * (self.eq_aux_func * self.eq_aux_func)).eval(x=x))
else:
eq_part = 0
return self.f_obj.eval(x) + eq_part + ineq_part
def gradient(self, x):
if self.ineq_aux_func is not None:
aux_max = self.ineq_aux_func.eval(x=x)
aux_max[aux_max < 0] = 0
ineq_part = self.rho * np.dot(self.g.gradient(x), aux_max)
else:
ineq_part = 0
if self.eq_aux_func is not None:
eq_part = self.rho * np.dot(self.h.gradient(x), self.eq_aux_func.eval(x))
else:
eq_part = 0
return self.f_obj.gradient(x) + eq_part + ineq_part
def hessian(self, x):
if self.ineq_aux_func is not None:
aux_grad = self.g.gradient(x)
aux_hess = self.g.hessian(x)
aux_max = self.ineq_aux_func.eval(x=x)
aux_max[aux_max < 0] = 0
ineq_part = np.zeros((self.dimension(), self.dimension()))
for i in range(self.g.n_functions):
if aux_max[i] > 0:
ineq_part += (
(aux_hess[i] * aux_max[i]) +
np.dot(aux_grad[0], aux_grad[0].transpose())
)
ineq_part = self.rho * ineq_part
else:
ineq_part = 0
if self.eq_aux_func is not None:
aux_grad = self.h.gradient(x)
aux_hess = self.h.hessian(x)
eq_part = np.zeros((self.dimension(), self.dimension()))
            # TODO (Feres): remove this for loop
for i in range(self.h.n_functions):
eq_part += (
(aux_hess[i] * self.eq_aux_func.eval(x)[i]) +
np.dot(aux_grad[0], aux_grad[0].transpose())
)
eq_part = self.rho * eq_part
else:
eq_part = 0
return self.f_obj.hessian(x) + eq_part + ineq_part
def dimension(self):
return self.f_obj.dimension()
def __init__(self, f_obj, g, h, rho, c):
"""
Initialize functions and multipliers properly
Args:
f_obj: (FunctionsComposite) objective function
g: (FunctionsComposite) inequality constraints
h: (FunctionsComposite) inequality constraints
rho: (float) initial rho value (penalty parameter)
c: (float) constant used to update rho value
"""
self.f_obj = f_obj
self.g = g
self.h = h
self.lag_eq = np.zeros((h.n_functions, 1)) # lagrangian multipliers (equality constraints)
        self.lag_ineq = np.zeros((g.n_functions, 1))  # lagrangian multipliers (inequality constraints)
self.rho = rho
self.c = c
self.update_aux_functions()
def update_aux_functions(self):
"""
Uses current multipliers and rho value to update auxiliary functions use to evaluate function
Returns:
"""
self.aux_rho = LinearFunction(c=np.zeros((self.dimension(), 1)), d=self.rho)
aux_lag_eq = FunctionsComposite()
for aux in self.lag_eq:
aux_lag_eq.add(LinearFunction(
c=np.zeros((self.dimension(), 1)), d=aux
))
aux_lag_ineq = FunctionsComposite()
for aux in self.lag_ineq:
aux_lag_ineq.add(LinearFunction(
c=np.zeros((self.dimension(), 1)), d=aux
))
if self.h.n_functions > 0:
self.eq_aux_func = (self.h + aux_lag_eq / self.aux_rho)
if self.g.n_functions > 0:
self.ineq_aux_func = (self.g + aux_lag_ineq / self.aux_rho)
def update_multipliers(self, x_new):
"""
Uses current point to update lagrange multipliers properly
Args:
x_new: (np array) new point found by the unconstrained optimization
Returns:
"""
h_val = self.h.eval(x_new)
self.lag_eq = self.lag_eq + self.rho * h_val
g_val = self.g.eval(x_new)
self.lag_ineq = self.lag_ineq + self.rho * g_val
self.lag_ineq[self.lag_ineq < 0] = 0
        # TODO (Feres): add a conditional here
self.rho = self.c * self.rho
self.update_aux_functions() | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/function/lagrange_function.py | 0.758421 | 0.443721 | lagrange_function.py | pypi |
import numpy as np
import numpy.matlib
from .base_function import BaseFunction
class LinearFunction(BaseFunction):
"""
Class that implements a linear function
"""
_flag_num_g = False # this function uses analytical gradient
def parameter_check(self, c: np.ndarray, d):
# checking c parameter
self.numpy_check(c)
if len(c.shape) != 2 or c.shape[1] != 1:
raise Exception("Invalid format for 'c' parameter")
# checking d parameter
try:
int(d)
except (ValueError, TypeError):
raise Exception("'d' parameter must be a valid number")
def __init__(self, c, d=0):
""" Linear Function constructor: c'x + d.
Args:
c: scaling n-vector coefficients of equations
d: constants of equations
"""
self.parameter_check(c, d)
# set parameters
self.parameters = {'c': c,
'd': d}
def dimension(self):
"""Linear problem dimension."""
return self.parameters['c'].shape[0]
def eval(self, x):
""" Linear function evaluation.
Args:
x: evaluation point
Returns:
fx: evaluates the point value in the function
"""
# input check
self.input_check(x)
# define parameters
c = self.parameters['c']
d = self.parameters['d']
# evaluates the point
fx = np.dot(c.T, x) + d
return fx
def gradient(self, x):
"""Derivative relative to input.
Args:
x: evaluation point
Returns:
dfdx: derivative at evaluation points
"""
# input check
self.input_check(x)
# define parameters
c = self.parameters['c']
# linear function gradient
dim = x.shape
if len(dim) == 1:
dfdx = c
else:
dfdx = np.matlib.repmat(c, 1, dim[1])
return dfdx
def hessian(self, x):
"""Second derivative relative to input.
Args:
x: evaluation point
Returns:
hfdx: second derivative at evaluation points
"""
# input check
self.input_check(x)
# linear function hessian
dim = x.shape
input_dimension = dim[0]
if len(dim) == 1:
input_number = 1
else:
input_number = dim[1]
hfdx = np.zeros((input_number, input_dimension, input_dimension))
return hfdx
def input_check(self, x):
"""Check input dimension.
Args:
x: point to be evaluated.
Returns:
indicator: indicator if input os consistent
"""
# check if input is numpy
self.numpy_check(x)
# check dimension
x_dim = x.shape
param_dim = self.parameters['c'].shape[0]
if len(x_dim) == 1:
raise Warning("x must be a {}xm (m>0) array!".format(param_dim))
if not x_dim[0] == param_dim:
raise Warning("x must be a {}xm array!".format(param_dim)) | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/function/linear_function.py | 0.861188 | 0.573081 | linear_function.py | pypi |
import numpy as np
from science_optimization.builder import BuilderOptimizationProblem, Objective, Variable, Constraint
from science_optimization.function import BaseFunction, FunctionsComposite
class RosenSuzukiProblem(BuilderOptimizationProblem):
"""Concrete builder implementation.
This class builds the Rosen-Suzuki problem.
"""
def build_objectives(self):
obj_fun = FunctionsComposite()
obj_fun.add(RosenSuzukiFunction(self.n, self.Q0, self.c))
objective = Objective(objective=obj_fun)
return objective
def build_variables(self):
variables = Variable(x_min=self.x_min, x_max=self.x_max)
return variables
def build_constraints(self):
constraints = Constraint(eq_cons=FunctionsComposite(), ineq_cons=RosenSuzukiConstraints(self.n, self.b))
return constraints
def __init__(self, n):
"""
Constructor of Rosen-Suzuki optimization problem.
Args:
n: desired dimension
"""
# Step 1
self.n = n
x_star = []
u_star = []
for i in range(1, self.n):
x_star.append((-1) ** i)
u_star.append((-1) ** i + 1)
x_star.append((-1) ** self.n)
self.x_star = np.array(x_star).reshape((-1, 1))
self.u_star = np.array(u_star).reshape((-1, 1))
self.x_min = np.ones((self.n, 1)) * (-5)
self.x_max = np.ones((self.n, 1)) * 5
# Step 2
mdg = []
b = []
for j in range(1, self.n):
v = []
a = []
for i in range(1, self.n+1):
v.append(2 - (-1) ** (i + j))
a.append(1 + (-1) ** j + (-1) ** i)
a = np.array(a).reshape((-1, 1))
v = np.array(v).reshape((-1, 1))
Q = np.diag(v.transpose()[0])
g_now = np.dot(
np.dot(self.x_star.transpose(), Q), self.x_star
) + np.dot(a.transpose(), self.x_star)
mdg.append(2*np.dot(Q, self.x_star) + a)
if self.u_star[j-1] > 0:
b.append(-g_now)
else:
b.append(-g_now - 1)
self.b = np.array(b).reshape((-1, 1))
mdg = np.array(mdg).transpose()[0]
# Step 3
v = []
for i in range(1, self.n + 1):
v.append(2 - (-1) ** i)
v = np.array(v).reshape((-1, 1))
self.Q0 = np.diag(v.transpose()[0])
df = 2 * np.dot(self.Q0, self.x_star)
self.c = -df - np.dot(mdg, self.u_star)
class RosenSuzukiFunction(BaseFunction):
"""
Rosen-Suzuki objective function
"""
n = None
def __init__(self, n, Q0, c):
# Step 1
self.n = n
self.Q0 = Q0
self.c = c
def dimension(self):
return self.n
def eval(self, x: np.ndarray):
self.input_check(x)
return np.dot(np.dot(x.transpose(), self.Q0), x) + np.dot(self.c.transpose(), x)
def gradient(self, x: np.ndarray):
self.input_check(x)
return 2 * np.dot(self.Q0, x) + self.c
def input_check(self, x):
# check if input is numpy
self.numpy_check(x)
if not x.shape[0] == self.dimension():
raise Warning("Point x must have {} dimensions.".format(self.parameters['n']))
class RosenSuzukiConstraints(FunctionsComposite):
"""
Rosen-Suzuki constraints
"""
def __init__(self, n, b):
super().__init__()
self.n = n
self.n_functions = n-1
self.b = b
def dimension(self):
return self.n
def eval(self, x, idx=None, composition="parallel", weights=None):
# input check
idx, composition, weights, n_functions = self.input_check(idx=idx,
composition=composition,
weights=weights)
g = []
# evaluate
for j in range(1, self.n_functions+1):
v = []
a = []
for i in range(1, self.n+1):
v.append(2 - (-1) ** (i + j))
a.append(1 + (-1) ** j + (-1) ** i)
a = np.array(a).reshape((-1, 1))
v = np.array(v).reshape((-1, 1))
Q = np.diag(v.transpose()[0])
g.append(
np.dot(np.dot(x.transpose(), Q), x)[0] + np.dot(a.transpose(), x)[0] + self.b[j-1]
)
g_return = np.array(g).reshape((-1, 1))
# series composition
if composition == "series":
g_return = np.dot(weights, g_return)
return g_return
def gradient(self, x, idx=None, composition="parallel", weights=None):
# input check
idx, composition, weights, n_functions = self.input_check(idx=idx,
composition=composition,
weights=weights)
mdg = []
# evaluate
for j in range(1, self.n_functions+1):
v = []
a = []
for i in range(1, self.n+1):
v.append(2 - (-1) ** (i + j))
a.append(1 + (-1) ** j + (-1) ** i)
a = np.array(a).reshape((-1, 1))
v = np.array(v).reshape((-1, 1))
Q = np.diag(v.transpose()[0])
mdg.append(2 * np.dot(Q, x) + a)
j_matrix = np.array(mdg).transpose()[0] # jacobian (gradient of each constraint)
# series composition
if composition == "series":
j_matrix = np.dot(weights, j_matrix)
return j_matrix
def hessian(self, x, idx=None, composition="parallel", weights=None):
        # TODO (Feres): implement the analytical hessian
        raise NotImplementedError('Analytical hessian of the constraints is not implemented.')
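

# ---------------------------------------------------------------------------
# Usage sketch: building an n-dimensional Rosen-Suzuki instance as an
# OptimizationProblem; x_star is the known solution the builder generates the
# instance around, so it is handy for checking a solver's answer.
if __name__ == "__main__":
    from science_optimization.builder import OptimizationProblem

    builder = RosenSuzukiProblem(n=4)
    problem = OptimizationProblem(builder=builder)
    print(builder.x_star.ravel())
    print(problem.objective.objectives.eval(builder.x_star))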
from science_optimization.builder import BuilderOptimizationProblem
from science_optimization.builder import Objective
from science_optimization.builder import Variable
from science_optimization.builder import Constraint
from science_optimization.function import FunctionsComposite, LinearFunction
import numpy as np
from typing import List
class MIP(BuilderOptimizationProblem):
"""This class builds a mixed integer linear problem."""
# objective function(s)
_c = None
# inequality constraint matrix
_A = None
# inequality constraint vector
_b = None
# the variables' bounds
_x_bounds = None
# variables' type
_x_type = None
# equality constraint matrix
_Aeq = None
# equality constraint vector
_beq = None
def __init__(self,
c: np.ndarray,
A: np.ndarray,
b: np.ndarray,
x_bounds: np.ndarray=None,
x_type: List[str]=None,
Aeq: np.ndarray=None,
beq: np.ndarray=None):
"""Constructor of a generic mixed-integer linear problem.
min c' @ x
st. A @ x <= b
Aeq @ x == beq
x_min <= x <= x_max
Args:
c : (np.ndarray) (n x 1)-objective function coefficients.
A : (np.ndarray) (m1 x n)-inequality linear constraints matrix.
b : (np.ndarray) (m1 x 1)-inequality linear constraints bounds.
x_bounds: (np.ndarray) (n x 2)-lower bound and upper bounds.
x_type : (List[str]) variables' types ('c' or 'd').
Aeq : (m2 x n)-equality linear constraints matrix.
beq : (m2 x 1)-equality linear constraints bounds.
"""
# set parameters
self.c = c
self.A = A
self.b = b
self.x_bounds = x_bounds
self.x_type = x_type
self.Aeq = Aeq
self.beq = beq
# getters
@property
def c(self):
return self._c
@property
def A(self):
return self._A
@property
def b(self):
return self._b
@property
def Aeq(self):
return self._Aeq
@property
def beq(self):
return self._beq
@property
def x_bounds(self):
return self._x_bounds
@property
def x_type(self):
return self._x_type
# setters
@c.setter
def c(self, value):
self._c = value
@A.setter
def A(self, value):
self._A = value
@b.setter
def b(self, value):
self._b = value
@x_bounds.setter
def x_bounds(self, value):
self._x_bounds = value
@x_type.setter
def x_type(self, value):
self._x_type = value
@Aeq.setter
def Aeq(self, value):
self._Aeq = value
@beq.setter
def beq(self, value):
self._beq = value
def build_objectives(self):
# cardinalities
m, n = self.c.shape
# composition
obj_fun = FunctionsComposite()
# mono-objective problem
if (m > 1 and n == 1) or (m == 1 and n > 1):
# add to function composition
obj_fun.add(LinearFunction(c=self.c.reshape(-1, 1)))
elif m >= 1 and n >= 1:
for i in range(m):
# add to function composition
obj_fun.add(LinearFunction(c=self.c[i, :].reshape(-1, 1)))
else:
raise ValueError("({}x{})-array not supported!".format(m, n))
objective = Objective(objective=obj_fun)
return objective
def build_constraints(self):
# cardinalities
mi = self.A.shape[0]
me = self.Aeq.shape[0] if self.Aeq is not None else 0
# create object
ineq_cons = FunctionsComposite()
eq_cons = FunctionsComposite()
# add linear inequality functions
for i in range(mi):
ineq_cons.add(LinearFunction(c=self.A[i, :].reshape(-1, 1), d=-self.b[i, 0]))
# add linear equality functions
for i in range(me):
eq_cons.add(LinearFunction(c=self.Aeq[i, :].reshape(-1, 1), d=-self.beq[i, 0]))
# set constraints
constraints = Constraint(eq_cons=eq_cons, ineq_cons=ineq_cons)
return constraints
def build_variables(self):
# default unbounded variables
if self.x_bounds is None:
self.x_bounds = np.ones((self.c.shape[0], 2))
self.x_bounds[:, 0] = -np.inf
self.x_bounds[:, 1] = np.inf
# create variables
variables = Variable(x_min=self.x_bounds[:, 0].reshape(-1, 1),
x_max=self.x_bounds[:, 1].reshape(-1, 1),
x_type=self.x_type)
return variables | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/problems/mip.py | 0.95202 | 0.64058 | mip.py | pypi |
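

# ---------------------------------------------------------------------------
# Usage sketch (illustrative data): builder for
#   min  -x1 - 2*x2
#   s.t. x1 + x2 <= 4,  0 <= x <= 3,  x2 integer.
if __name__ == "__main__":
    from science_optimization.builder import OptimizationProblem

    mip = MIP(c=np.array([[-1.0], [-2.0]]),
              A=np.array([[1.0, 1.0]]),
              b=np.array([[4.0]]),
              x_bounds=np.array([[0.0, 3.0], [0.0, 3.0]]),
              x_type=['c', 'd'])
    problem = OptimizationProblem(builder=mip)
    print(problem.variables.x_min.ravel(), problem.variables.x_max.ravel())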
from science_optimization.builder import BuilderOptimizationProblem
from science_optimization.builder import Objective
from science_optimization.builder import Variable
from science_optimization.builder import Constraint
from science_optimization.function import FunctionsComposite
class SeparableResourceAllocation(BuilderOptimizationProblem):
"""Concrete builder implementation.
This class builds a dual decomposition optimization problem.
"""
# objective function(s)
_f_i = None
# equality constraint function(s)
_coupling_eq_constraints = None
# inequality constraint function(s)
_coupling_ineq_constraints = None
# the variables' bounds
_x_bounds = None
def __init__(self, f_i, coupling_eq_constraints, coupling_ineq_constraints, x_bounds):
"""Constructor of a Dual Decomposition problem builder.
Args:
f_i : Objective functions composition with i individual functions.
coupling_eq_constraints : Composition with functions in equality coupling.
coupling_ineq_constraints: Composition with functions in inequality coupling.
x_bounds : Lower bound and upper bounds.
"""
self.f_i = f_i
self.coupling_eq_constraints = coupling_eq_constraints
self.coupling_ineq_constraints = coupling_ineq_constraints
self.x_bounds = x_bounds
# gets
@property
def f_i(self):
return self._f_i
@property
def coupling_eq_constraints(self):
return self._coupling_eq_constraints
@property
def coupling_ineq_constraints(self):
return self._coupling_ineq_constraints
@property
def x_bounds(self):
return self._x_bounds
@f_i.setter
def f_i(self, value):
self._f_i = value
# sets
@coupling_eq_constraints.setter
def coupling_eq_constraints(self, value):
self._coupling_eq_constraints = value
@coupling_ineq_constraints.setter
def coupling_ineq_constraints(self, value):
self._coupling_ineq_constraints = value
@x_bounds.setter
def x_bounds(self, value):
self._x_bounds = value
# methods
def build_objectives(self):
# instantiate composition
obj_fun = FunctionsComposite()
for f in self.f_i:
obj_fun.add(f)
objective = Objective(objective=obj_fun)
return objective
def build_constraints(self):
# instantiate composition
eq_cons = FunctionsComposite()
ineq_cons = FunctionsComposite()
for eq_g in self.coupling_eq_constraints:
eq_cons.add(eq_g)
for ineq_g in self.coupling_ineq_constraints:
ineq_cons.add(ineq_g)
constraints = Constraint(eq_cons=eq_cons, ineq_cons=ineq_cons)
return constraints
def build_variables(self):
# variables
variables = Variable(x_min=self.x_bounds[:, 0].reshape(-1, 1),
x_max=self.x_bounds[:, 1].reshape(-1, 1))
return variables | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/problems/separable_resource_allocation.py | 0.953416 | 0.49823 | separable_resource_allocation.py | pypi |
from science_optimization.builder import BuilderOptimizationProblem
from science_optimization.builder import Objective
from science_optimization.builder import Variable
from science_optimization.builder import Constraint
from science_optimization.function import FunctionsComposite
class GenericProblem(BuilderOptimizationProblem):
"""Concrete builder implementation.
This class builds a generic optimization problem.
"""
# objective function(s)
_f = None
# equality constraint function(s)
_eq_cons = None
# inequality constraint function(s)
_ineq_cons = None
# the variables' bounds
_x_bounds = None
def __init__(self, f, eq_cons, ineq_cons, x_bounds, x_type=None):
"""Constructor of a generic optimization problem.
Args:
f : Objective functions.
eq_cons : Equality constraint functions.
ineq_cons: Inequality constraint functions.
            x_bounds : Lower and upper bounds of the variables.
x_type: (np.ndarray) (n x 1)-list with variables' type ('c': continuous or 'd': discrete).
"""
self.f = f
self.eq_cons = eq_cons
self.ineq_cons = ineq_cons
self.x_bounds = x_bounds
self.x_type = x_type
@property
def f(self):
return self._f
@property
def eq_cons(self):
return self._eq_cons
@property
def ineq_cons(self):
return self._ineq_cons
@property
def x_bounds(self):
return self._x_bounds
@f.setter
def f(self, value):
self._f = value
@eq_cons.setter
def eq_cons(self, value):
self._eq_cons = value
@ineq_cons.setter
def ineq_cons(self, value):
self._ineq_cons = value
@x_bounds.setter
def x_bounds(self, value):
self._x_bounds = value
def build_objectives(self):
obj_fun = FunctionsComposite()
for f in self.f:
obj_fun.add(f)
objective = Objective(objective=obj_fun)
return objective
def build_constraints(self):
eq_cons = FunctionsComposite()
ineq_cons = FunctionsComposite()
for eq_g in self.eq_cons:
eq_cons.add(eq_g)
for ineq_g in self.ineq_cons:
ineq_cons.add(ineq_g)
constraints = Constraint(eq_cons=eq_cons, ineq_cons=ineq_cons)
return constraints
def build_variables(self):
variables = Variable(x_min=self.x_bounds[:, 0].reshape(-1, 1),
x_max=self.x_bounds[:, 1].reshape(-1, 1),
x_type=self.x_type)
return variables | /science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/problems/generic.py | 0.947076 | 0.479686 | generic.py | pypi |
__all__ = ['parse_pdf', 'logger']
# Cell
import logging
from pathlib import Path
from typing import Optional, Dict, Any
import requests
logger = logging.getLogger(__name__)
def parse_pdf(server_address: str, file_path: Path, port: str = '', timeout: int = 60
) -> Optional[Dict[str, Any]]:
'''
    If successful, this function returns the JSON output of the
    Science Parse server as a dictionary. If a Timeout exception
    or any other exception occurs, it returns None and the
    exception is logged as an error.
1. **server_address**: Address of the server e.g. `http://127.0.0.1`
2. **file_path**: Path to the pdf file to be processed.
    3. **port**: The port of the server, e.g. 8080
    4. **timeout**: The maximum amount of time, in seconds, to allow the request to take.
**returns** A dictionary with the following keys:
```python
['abstractText', 'authors', 'id', 'references', 'sections', 'title', 'year']
```
    **Note** not all of these dictionary keys will exist if Science Parse
    cannot detect the relevant information, e.g. if it cannot find any
    references then there will be no `references` key.
**Note** See the example on the main page of the documentation for a
detailed example of this method.
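    **Example** (illustrative sketch only; assumes a Science Parse server
    running locally on port 8080, and `paper.pdf` is a hypothetical path):
    ```python
    from pathlib import Path
    output = parse_pdf('http://127.0.0.1', Path('paper.pdf'), port='8080')
    if output is not None:
        print(output.get('title'))
    ```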
'''
endpoint = "/v1"
if port:
url = f'{server_address}:{port}{endpoint}'
else:
url = f'{server_address}{endpoint}'
file_name = file_path.name
files = {'data-binary': (file_name, file_path.open('rb'), 'application/pdf',
{'Expires': '0'})}
try:
response = requests.post(url, files=files,
headers={'Accept': 'application/json'},
timeout=timeout)
status_code = response.status_code
if status_code != 200:
error_message = (f'URL: {url}. {file_name} failed with a '
f'status code: {status_code}')
logger.error(error_message)
return None
return response.json()
except requests.exceptions.Timeout:
error_message = (f'URL: {url}. {file_name} failed due to a timeout.')
logger.error(error_message)
except Exception as e:
error_message = f'URL: {url}. {file_name} failed due to the following error:'
logger.error(error_message, exc_info=True)
return None | /science_parse_api-1.0.1-py3-none-any.whl/science_parse_api/api.py | 0.890205 | 0.575946 | api.py | pypi |
import logging
import json
import re
import os
import time
import datetime
import feedparser
import dateutil.parser
from os.path import expanduser
from scibot.telebot import telegram_bot_sendtext
from scibot.streamer import listen_stream_and_rt
from schedule import Scheduler
# logging parameters
logger = logging.getLogger("bot logger")
# handler determines where the logs go: stdout/file
file_handler = logging.FileHandler(f"{datetime.date.today()}_scibot.log")
logger.setLevel(logging.DEBUG)
file_handler.setLevel(logging.DEBUG)
fmt_file = (
"%(levelname)s %(asctime)s [%(filename)s: %(funcName)s:%(lineno)d] %(message)s"
)
file_formatter = logging.Formatter(fmt_file)
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
class Settings:
"""Twitter bot application settings.
Enter the RSS feed you want to tweet, or keywords you want to retweet.
"""
IGNORE_ERRORS = [327, 139]
# RSS feeds to read and post tweets from.
feed_urls = [
"https://pubmed.ncbi.nlm.nih.gov/rss/search/1X9MO_201KJGQLdG05NdxtaqKjTZuIPIGlgpiDZr31QjkgZUbj/?limit=300&utm_campaign=pubmed-2&fc=20210922175019",
"https://pubmed.ncbi.nlm.nih.gov/rss/search/1XSES1Yl3kEgnfOg6EStNFyWMogtYXic2VVXS8rpsyNHTjv1HK/?limit=200&utm_campaign=pubmed-2&fc=20210510224301",
"https://pubmed.ncbi.nlm.nih.gov/rss/search/1jAe3RzQKmf7SOUEM-Dt7QQtMWNG2UffuIIo_GGKHPfoKqhY9f/?limit=200&utm_campaign=pubmed-2&fc=20210510224348",
"https://pubmed.ncbi.nlm.nih.gov/rss/search/1bCr63ThlO22Eg5TxBaIQ5mzH02TqtmtM1QIkqa66iqK4SsMJm/?limit=200&utm_campaign=pubmed-2&fc=20210510224551",
"https://pubmed.ncbi.nlm.nih.gov/rss/search/1hEma6JdH30sOOO0DiTP1jZh-6ZgoypoEsw_B9tXZejk_E8QuX/?limit=200&utm_campaign=pubmed-2&fc=20210510230918",
]
    # RSS feed (no date restriction) with older literature on harm reduction and psychedelics
feed_older_literature = feedparser.parse("https://pubmed.ncbi.nlm.nih.gov/rss/search/1h_Yu2rLTrK0AIYDN2V5HLWSksLTr4a6SUZjZzoAPcf-Qk0gCJ/?limit=200&utm_campaign=pubmed-2&fc=20210901021150")["entries"]
pre_combined_feed = [feedparser.parse(url)["entries"] for url in feed_urls]
# (combined_feed)
combined_feed = [item for feed in pre_combined_feed for item in feed]
combined_feed.sort(
key=lambda x: dateutil.parser.parse(x["published"]), reverse=True
)
# Log file to save all tweeted RSS links (one URL per line).
posted_urls_output_file = expanduser("~/drugscibot/publications.json")
# Log file to save all retweeted tweets (one tweetid per line).
posted_retweets_output_file = expanduser("~/drugscibot/posted-retweets.log")
    # Log file to save all faved tweets (one tweet id per line).
faved_tweets_output_file = expanduser("~/drugscibot/faved-tweets.log")
# Log file to save followers list.
users_json_file = expanduser("~/drugscibot/users.json")
# Include tweets with these words when retweeting.
retweet_include_words = [
"drugpolicy",
"drugspolicy",
"transformdrugspolicy",
"transformdrugpolicy",
"drugchecking",
"regulatestimulants",
"regulatedrugs",
"sensibledrugpolicy",
"drugpolicyreform",
"safeconsumption",
"harmreduction",
"druguse",
"regular",
"reduccion de dano",
"dosis minima",
"regulacion",
"droga",
"sicoactiva",
"psicoactiva",
"politica de droga",
# "cion de riesgo",
"legalizacion",
"safesuply",
"safersuply",
]
# Do not include tweets with these words when retweeting.
retweet_exclude_words = [
"sex",
"sexual",
"sexwork",
"sexualwork",
"fuck",
"vaping",
"vape",
"cigarretes",
"nicotine",
"smoke",
"smoking",
"constellationsfest",# to be deleted after the festival
"zigaretten",
]
add_hashtag = [
"psilocybin",
"psilocybine",
"psychedelic",
"hallucinogenic",
"overdose",
"microdosing",
"drug-policy",
"drugspolicy",
"mdma",
"drug checking",
"drugpolicy",
"drug policy",
"ayahuasca",
"psychopharmacology",
"neurogenesis",
"5-meo-dmt",
"serotonergic",
"ketamine",
"psychotherapy",
"harm reduction",
"methadone",
] # trip
# do not retweet if search results include only a single of these keywords
watch_add_hashtag = [
"alzheimer",
"depression",
"anxiety",
"dmt",
"droga",
"lsd",
"therapy",
"psychiatry",
"mentalhealth",
"trip",
"regula",
"regular",
"mental health",
"clinical trial",
"consciousness",
"meta-analysis",
"dopamine",
"serotonin",
"psychological",
"metaanalysis",
"reform",
]
    # id of the Twitter list used for distribution
mylist_id = "1306244304000749569"
class SafeScheduler(Scheduler):
"""
An implementation of Scheduler that catches jobs that fail, logs their
exception tracebacks as errors, optionally reschedules the jobs for their
next run time, and keeps going.
Use this to run jobs that may or may not crash without worrying about
whether other jobs will run or if they'll crash the entire script.
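    Example (illustrative sketch; some_job is a placeholder callable):
        scheduler = SafeScheduler()
        scheduler.every(10).minutes.do(some_job)
        while True:
            scheduler.run_pending()
            time.sleep(1)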
"""
def __init__(self, reschedule_on_failure=True):
"""
Args:
reschedule_on_failure: if is True, jobs will be rescheduled for their
next run as if they had completed successfully. If False, they'll run
on the next run_pending() tick.
"""
self.reschedule_on_failure = reschedule_on_failure
super().__init__()
def _run_job(self, job):
try:
super()._run_job(job)
except Exception as e:
logger.exception(e)
telegram_bot_sendtext(f"[Job Error] {e}")
job.last_run = datetime.datetime.now()
job._schedule_next_run()
def shorten_text(text: str, maxlength: int) -> str:
"""
Truncate text and append three dots (...) at the end if length exceeds
maxlength chars.
Args:
        text: The text to shorten.
maxlength: The maximum character length of the text string.
Returns: Shortened text string.
"""
return (text[:maxlength] + "...") if len(text) > maxlength else text
def insert_hashtag(title: str) -> str:
"""
    Add hashtags to the title for keywords found in Settings.add_hashtag
Args:
title: Text to parse for inserting hash symbols
Returns: Text with inserted hashtags
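    Example (illustrative; "psilocybin" is one of the keywords in Settings.add_hashtag):
        insert_hashtag("Psilocybin research update") -> "#Psilocybin research update"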
"""
for x in Settings.add_hashtag:
if re.search(fr"\b{x}", title.lower()):
pos = (re.search(fr"\b{x}", title.lower())).start()
if " " in x:
title = title[:pos] + "#" + title[pos:].replace(" ", "", 1)
else:
title = title[:pos] + "#" + title[pos:]
return title
def compose_message(item: feedparser.FeedParserDict) -> str:
"""
Compose a tweet from an RSS item (title, link, description)
and return final tweet message.
Args:
item: feedparser.FeedParserDict
An RSS item
    Returns: Message suited for a Twitter status update.
"""
title = insert_hashtag(item["title"])
message = shorten_text(title, maxlength=250) + " 1/5 " + item["link"]
return message
def is_in_logfile(content: str, filename: str) -> bool:
"""
Does the content exist on any line in the log file?
Args:
content: Content to search file for.
filename: Full path to file to search.
Returns: `True` if content is found in file, otherwise `False`.
"""
if os.path.isfile(filename):
with open(filename, "r") as jsonFile:
article_log = json.load(jsonFile)
if content in article_log:
return True
return False
def write_to_logfile(content: dict, filename: str) -> None:
"""
    Write content to the json file, replacing any existing content.
    Args:
        content: Content to write to the file
        filename: Full path to the file that should be written.
Returns: None
"""
try:
with open(filename, "w") as fp:
json.dump(content, fp, indent=4)
except IOError as e:
logger.exception(e)
def scheduled_job(read_rss_and_tweet, retweet_own, search_and_retweet):
# listen_stream_and_rt('#INSIGHT2021')
schedule = SafeScheduler()
# job 1
schedule.every().day.at("22:20").do(read_rss_and_tweet)
schedule.every().day.at("06:20").do(read_rss_and_tweet)
schedule.every().day.at("14:20").do(read_rss_and_tweet)
# job 2
schedule.every().day.at("01:10").do(retweet_own)
schedule.every().day.at("09:10").do(retweet_own)
schedule.every().day.at("17:10").do(retweet_own)
# job 3
schedule.every().day.at("00:20").do(search_and_retweet, "list_search")
schedule.every().day.at("03:20").do(search_and_retweet, "list_search")
schedule.every().day.at("06:20").do(search_and_retweet, "list_search")
schedule.every().day.at("09:20").do(search_and_retweet, "list_search")
schedule.every().day.at("12:20").do(search_and_retweet, "list_search")
schedule.every().day.at("15:20").do(search_and_retweet, "list_search")
schedule.every().day.at("18:20").do(search_and_retweet, "list_search")
schedule.every().day.at("21:20").do(search_and_retweet, "list_search")
schedule.every().day.at("01:25").do(search_and_retweet, "list_search")
schedule.every().day.at("04:25").do(search_and_retweet, "list_search")
schedule.every().day.at("07:25").do(search_and_retweet, "list_search")
schedule.every().day.at("10:25").do(search_and_retweet, "list_search")
schedule.every().day.at("13:25").do(search_and_retweet, "list_search")
schedule.every().day.at("16:25").do(search_and_retweet, "list_search")
schedule.every().day.at("19:25").do(search_and_retweet, "list_search")
schedule.every().day.at("22:25").do(search_and_retweet, "list_search")
# job love
schedule.every(5).minutes.do(search_and_retweet, "give_love")
while 1:
schedule.run_pending()
time.sleep(1) | /scienceBot-0.1.1.1.tar.gz/scienceBot-0.1.1.1/scibot/tools.py | 0.499268 | 0.164852 | tools.py | pypi |
## Creative Commons Attribution 4.0 International
Creative Commons Attribution 4.0 International (CC BY 4.0) URL:
<http://creativecommons.org/licenses/by/4.0/>
Creative Commons Corporation (“Creative Commons”) is not a law firm and does not
provide legal services or legal advice. Distribution of Creative Commons public
licenses does not create a lawyer-client or other relationship. Creative Commons
makes its licenses and related information available on an “as-is” basis.
Creative Commons gives no warranties regarding its licenses, any material
licensed under their terms and conditions, or any related information. Creative
Commons disclaims all liability for damages resulting from their use to the
fullest extent possible.
**Using Creative Commons Public Licenses:** Creative Commons public licenses
provide a standard set of terms and conditions that creators and other rights
holders may use to share original works of authorship and other material subject
to copyright and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.
**Considerations for licensors:** Our public licenses are intended for use by
those authorized to give the public permission to use material in ways otherwise
restricted by copyright and certain other rights. Our licenses are irrevocable.
Licensors should read and understand the terms and conditions of the license
they choose before applying it. Licensors should also secure all rights
necessary before applying our licenses so that the public can reuse the material
as expected. Licensors should clearly mark any material not subject to the
license. This includes other CC-licensed material, or material used under an
exception or limitation to copyright. More considerations for licensors.
**Considerations for the public:** By using one of our public licenses, a
licensor grants the public permission to use the licensed material under
specified terms and conditions. If the licensor’s permission is not necessary
for any reason–for example, because of any applicable exception or limitation to
copyright–then that use is not regulated by the license. Our licenses grant only
permissions under copyright and certain other rights that a licensor has
authority to grant. Use of the licensed material may still be restricted for
other reasons, including because others have copyright or other rights in the
material. A licensor may make special requests, such as asking that all changes
be marked or described. Although not required by our licenses, you are
encouraged to respect those requests where reasonable.
## Creative Commons Attribution 4.0 International Public License
By exercising the Licensed Rights (defined below), You accept and agree to be
bound by the terms and conditions of this Creative Commons Attribution 4.0
International Public License ("Public License"). To the extent this Public
License may be interpreted as a contract, You are granted the Licensed Rights in
consideration of Your acceptance of these terms and conditions, and the Licensor
grants You such rights in consideration of benefits the Licensor receives from
making the Licensed Material available under these terms and conditions.
**Section 1 – Definitions.**
1. **Adapted Material** means material subject to Copyright and Similar Rights
that is derived from or based upon the Licensed Material and in which the
Licensed Material is translated, altered, arranged, transformed, or
otherwise modified in a manner requiring permission under the Copyright and
Similar Rights held by the Licensor. For purposes of this Public License,
where the Licensed Material is a musical work, performance, or sound
recording, Adapted Material is always produced where the Licensed Material
is synched in timed relation with a moving image.
2. **Adapter's License** means the license You apply to Your Copyright and
Similar Rights in Your contributions to Adapted Material in accordance with
the terms and conditions of this Public License.
3. **Copyright and Similar Rights** means copyright and/or similar rights
closely related to copyright including, without limitation, performance,
broadcast, sound recording, and Sui Generis Database Rights, without regard
to how the rights are labeled or categorized. For purposes of this Public
License, the rights specified in
Section [2(b)(1)-(2)](https://creativecommons.org/licenses/by/4.0/legalcode#s2b) are
not Copyright and Similar Rights.
4. **Effective Technological Measures** means those measures that, in the
absence of proper authority, may not be circumvented under laws fulfilling
obligations under Article 11 of the WIPO Copyright Treaty adopted on
December 20, 1996, and/or similar international agreements.
5. **Exceptions and Limitations** means fair use, fair dealing, and/or any
other exception or limitation to Copyright and Similar Rights that applies
to Your use of the Licensed Material.
6. **Licensed Material** means the artistic or literary work, database, or
other material to which the Licensor applied this Public License.
7. **Licensed Rights** means the rights granted to You subject to the terms and
conditions of this Public License, which are limited to all Copyright and
Similar Rights that apply to Your use of the Licensed Material and that the
Licensor has authority to license.
8. **Licensor** means the individual(s) or entity(ies) granting rights under
this Public License.
9. **Share** means to provide material to the public by any means or process
that requires permission under the Licensed Rights, such as reproduction,
public display, public performance, distribution, dissemination,
communication, or importation, and to make material available to the public
including in ways that members of the public may access the material from a
place and at a time individually chosen by them.
10. **Sui Generis Database Rights** means rights other than copyright resulting
from Directive 96/9/EC of the European Parliament and of the Council of 11
March 1996 on the legal protection of databases, as amended and/or
succeeded, as well as other essentially equivalent rights anywhere in the
world.
11. **You** means the individual or entity exercising the Licensed Rights under
this Public License. **Your** has a corresponding meaning.
**Section 2 – Scope.**
1. **License grant**.
1. Subject to the terms and conditions of this Public License, the Licensor
hereby grants You a worldwide, royalty-free, non-sublicensable,
non-exclusive, irrevocable license to exercise the Licensed Rights in
the Licensed Material to:
1. reproduce and Share the Licensed Material, in whole or in part; and
2. produce, reproduce, and Share Adapted Material.
2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions
and Limitations apply to Your use, this Public License does not apply,
and You do not need to comply with its terms and conditions.
3. Term. The term of this Public License is specified in
Section [6(a)](https://creativecommons.org/licenses/by/4.0/legalcode#s6a).
4. Media and formats; technical modifications allowed. The Licensor
authorizes You to exercise the Licensed Rights in all media and formats
whether now known or hereafter created, and to make technical
modifications necessary to do so. The Licensor waives and/or agrees not
to assert any right or authority to forbid You from making technical
modifications necessary to exercise the Licensed Rights, including
technical modifications necessary to circumvent Effective Technological
Measures. For purposes of this Public License, simply making
modifications authorized by this
Section [2(a)(4)](https://creativecommons.org/licenses/by/4.0/legalcode#s2a4) never
produces Adapted Material.
5. Downstream recipients.
1. Offer from the Licensor – Licensed Material. Every recipient of the
Licensed Material automatically receives an offer from the Licensor
to exercise the Licensed Rights under the terms and conditions of
this Public License.
2. No downstream restrictions. You may not offer or impose any
additional or different terms or conditions on, or apply any
Effective Technological Measures to, the Licensed Material if doing
so restricts exercise of the Licensed Rights by any recipient of the
Licensed Material.
6. No endorsement. Nothing in this Public License constitutes or may be
construed as permission to assert or imply that You are, or that Your
use of the Licensed Material is, connected with, or sponsored, endorsed,
or granted official status by, the Licensor or others designated to
receive attribution as provided in
Section [3(a)(1)(A)(i)](https://creativecommons.org/licenses/by/4.0/legalcode#s3a1Ai).
2. **Other rights**.
1. Moral rights, such as the right of integrity, are not licensed under
this Public License, nor are publicity, privacy, and/or other similar
personality rights; however, to the extent possible, the Licensor waives
and/or agrees not to assert any such rights held by the Licensor to the
limited extent necessary to allow You to exercise the Licensed Rights,
but not otherwise.
2. Patent and trademark rights are not licensed under this Public License.
3. To the extent possible, the Licensor waives any right to collect
royalties from You for the exercise of the Licensed Rights, whether
directly or through a collecting society under any voluntary or waivable
statutory or compulsory licensing scheme. In all other cases the
Licensor expressly reserves any right to collect such royalties.
**Section 3 – License Conditions.**
Your exercise of the Licensed Rights is expressly made subject to the following
conditions.
1. **Attribution**.
1. If You Share the Licensed Material (including in modified form), You
must:
1. retain the following if it is supplied by the Licensor with the
Licensed Material:
1. identification of the creator(s) of the Licensed Material and
any others designated to receive attribution, in any reasonable
manner requested by the Licensor (including by pseudonym if
designated);
2. a copyright notice;
3. a notice that refers to this Public License;
4. a notice that refers to the disclaimer of warranties;
5. a URI or hyperlink to the Licensed Material to the extent
reasonably practicable;
2. indicate if You modified the Licensed Material and retain an
indication of any previous modifications; and
3. indicate the Licensed Material is licensed under this Public
License, and include the text of, or the URI or hyperlink to, this
Public License.
2. You may satisfy the conditions in
Section [3(a)(1)](https://creativecommons.org/licenses/by/4.0/legalcode#s3a1) in
any reasonable manner based on the medium, means, and context in which
You Share the Licensed Material. For example, it may be reasonable to
satisfy the conditions by providing a URI or hyperlink to a resource
that includes the required information.
3. If requested by the Licensor, You must remove any of the information
required by
Section [3(a)(1)(A)](https://creativecommons.org/licenses/by/4.0/legalcode#s3a1A) to
the extent reasonably practicable.
4. If You Share Adapted Material You produce, the Adapter's License You
apply must not prevent recipients of the Adapted Material from complying
with this Public License.
**Section 4 – Sui Generis Database Rights.**
> Where the Licensed Rights include Sui Generis Database Rights that apply to
> Your use of the Licensed Material:
1. for the avoidance of doubt,
Section [2(a)(1)](https://creativecommons.org/licenses/by/4.0/legalcode#s2a1) grants
You the right to extract, reuse, reproduce, and Share all or a substantial
portion of the contents of the database;
2. if You include all or a substantial portion of the database contents in a
database in which You have Sui Generis Database Rights, then the database in
which You have Sui Generis Database Rights (but not its individual contents)
is Adapted Material; and
3. You must comply with the conditions in
Section [3(a)](https://creativecommons.org/licenses/by/4.0/legalcode#s3a) if
You Share all or a substantial portion of the contents of the database.
For the avoidance of doubt, this
Section [4](https://creativecommons.org/licenses/by/4.0/legalcode#s4) supplements
and does not replace Your obligations under this Public License where the
Licensed Rights include other Copyright and Similar Rights.
**Section 5 – Disclaimer of Warranties and Limitation of Liability.**
1. **Unless otherwise separately undertaken by the Licensor, to the extent
possible, the Licensor offers the Licensed Material as-is and as-available,
and makes no representations or warranties of any kind concerning the
Licensed Material, whether express, implied, statutory, or other. This
includes, without limitation, warranties of title, merchantability, fitness
for a particular purpose, non-infringement, absence of latent or other
defects, accuracy, or the presence or absence of errors, whether or not
known or discoverable. Where disclaimers of warranties are not allowed in
full or in part, this disclaimer may not apply to You.**
2. **To the extent possible, in no event will the Licensor be liable to You on
any legal theory (including, without limitation, negligence) or otherwise
for any direct, special, indirect, incidental, consequential, punitive,
exemplary, or other losses, costs, expenses, or damages arising out of this
Public License or use of the Licensed Material, even if the Licensor has
been advised of the possibility of such losses, costs, expenses, or damages.
Where a limitation of liability is not allowed in full or in part, this
limitation may not apply to You.**
3. The disclaimer of warranties and limitation of liability provided above
shall be interpreted in a manner that, to the extent possible, most closely
approximates an absolute disclaimer and waiver of all liability.
**Section 6 – Term and Termination.**
1. This Public License applies for the term of the Copyright and Similar Rights
licensed here. However, if You fail to comply with this Public License, then
Your rights under this Public License terminate automatically.
2. Where Your right to use the Licensed Material has terminated under
Section [6(a)](https://creativecommons.org/licenses/by/4.0/legalcode#s6a),
it reinstates:
1. automatically as of the date the violation is cured, provided it is
cured within 30 days of Your discovery of the violation; or
2. upon express reinstatement by the Licensor.
> For the avoidance of doubt, this
> Section [6(b)](https://creativecommons.org/licenses/by/4.0/legalcode#s6b) does
> not affect any right the Licensor may have to seek remedies for Your
> violations of this Public License.
3. For the avoidance of doubt, the Licensor may also offer the Licensed
Material under separate terms or conditions or stop distributing the
Licensed Material at any time; however, doing so will not terminate this
Public License.
4. Sections [1](https://creativecommons.org/licenses/by/4.0/legalcode#s1), [5](https://creativecommons.org/licenses/by/4.0/legalcode#s5), [6](https://creativecommons.org/licenses/by/4.0/legalcode#s6), [7](https://creativecommons.org/licenses/by/4.0/legalcode#s7),
and [8](https://creativecommons.org/licenses/by/4.0/legalcode#s8) survive
termination of this Public License.
**Section 7 – Other Terms and Conditions.**
1. The Licensor shall not be bound by any additional or different terms or
conditions communicated by You unless expressly agreed.
2. Any arrangements, understandings, or agreements regarding the Licensed
Material not stated herein are separate from and independent of the terms
and conditions of this Public License.
**Section 8 – Interpretation.**
1. For the avoidance of doubt, this Public License does not, and shall not be
interpreted to, reduce, limit, restrict, or impose conditions on any use of
the Licensed Material that could lawfully be made without permission under
this Public License.
2. To the extent possible, if any provision of this Public License is deemed
unenforceable, it shall be automatically reformed to the minimum extent
necessary to make it enforceable. If the provision cannot be reformed, it
shall be severed from this Public License without affecting the
enforceability of the remaining terms and conditions.
3. No term or condition of this Public License will be waived and no failure to
comply consented to unless expressly agreed to by the Licensor.
4. Nothing in this Public License constitutes or may be interpreted as a
limitation upon, or waiver of, any privileges and immunities that apply to
the Licensor or You, including from the legal processes of any jurisdiction
or authority.
Creative Commons is not a party to its public licenses. Notwithstanding,
Creative Commons may elect to apply one of its public licenses to material it
publishes and in those instances will be considered the “Licensor.” The text of
the Creative Commons public licenses is dedicated to the public domain under the
CC0 Public Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as otherwise
permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the use of the
trademark “Creative Commons” or any other trademark or logo of Creative Commons
without its prior written consent including, without limitation, in connection
with any unauthorized modifications to any of its public licenses or any other
arrangements, understandings, or agreements concerning use of licensed material.
For the avoidance of doubt, this paragraph does not form part of the public
licenses.
Creative Commons may be contacted at creativecommons.org.
| /sciencebasepy-2.0.13-py3-none-any.whl/sciencebasepy-2.0.13.dist-info/LICENSE.md | 0.692018 | 0.78611 | LICENSE.md | pypi |
# ScienceBeam Alignment
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](LICENSE)
ScienceBeam Alignment provides generic low-level sequence alignment utility functions, similar to Python's [SequenceMatcher](https://docs.python.org/3/library/difflib.html).
This project is currently mainly used for training data generation related to the [ScienceBeam project](https://github.com/elifesciences/sciencebeam). It has no ScienceBeam dependency itself, however, and can be used as a standalone sequence alignment library, albeit one targeted at document-sized sequences rather than massive gene sequences.
## Pre-requisites
- Python 2 or 3
## API
### SequenceMatcher
A mostly drop-in replacement for Python's [SequenceMatcher](https://docs.python.org/3/library/difflib.html)
is provided by [fuzzywuzzy](https://github.com/seatgeek/fuzzywuzzy)'s [StringMatcher](https://github.com/seatgeek/fuzzywuzzy/blob/master/fuzzywuzzy/StringMatcher.py).
In that respect, `sciencebeam-alignment` merely provides a wrapper with a fallback.
### WordSequenceMatcher
A wrapper around the aforementioned `SequenceMatcher`, but matching on word-level tokens only.
It currently only implements `get_matching_blocks`.
The main advantage is that it is much faster for long texts, because it doesn't have to match individual characters. It isn't recommended for short texts, where character-level alignment is probably more desirable.
example match results:
```python
>>> from sciencebeam_alignment.word_sequence_matcher import (
... WordSequenceMatcher
... )
>>> WordSequenceMatcher(a='word1', b='word2').get_matching_blocks()
[]
>>> WordSequenceMatcher(a='a word1 b', b='x word1 y').get_matching_blocks()
[(2, 2, 5)]
```
### GlobalSequenceMatcher and LocalSequenceMatcher
The [GlobalSequenceMatcher and LocalSequenceMatcher](https://github.com/elifesciences/sciencebeam-alignment/blob/develop/sciencebeam_alignment/align.py) implement the [Needleman-Wunsch](https://en.wikipedia.org/wiki/Needleman%E2%80%93Wunsch_algorithm) [global alignment](https://en.wikipedia.org/wiki/Sequence_alignment#Global_and_local_alignments) and the [Smith-Waterman](https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm) local alignment algorithms, respectively. The implementation is somewhat inspired by [python-alignment](https://github.com/eseraygun/python-alignment).
Both implement `get_matching_blocks` to match Python's [SequenceMatcher](https://docs.python.org/3/library/difflib.html).
By passing in a scoring object, the results can be influenced (e.g. gaps can be penalized more than mismatches).
An optimized implementation using [Cython](https://cython.org/) is also provided. The level of optimization depends on the type of the passed-in sequences and the scoring; the fastest path uses integer sequences and simple scoring. Especially with longer sequences, the potential speed-ups can be significant.
```python
>>> from sciencebeam_alignment.align import LocalSequenceMatcher, SimpleScoring
>>> DEFAULT_SCORING = SimpleScoring(match_score=3, mismatch_score=-1, gap_score=-2)
>>> LocalSequenceMatcher(a='a word1 b', b='x word2 y', scoring=DEFAULT_SCORING).get_matching_blocks()
[(1, 1, 5), (7, 7, 1), (9, 9, 0)]
```
In addition, `get_multiple_matching_blocks` can be used to retrieve multiple matching-block variations with the same score:
```python
>>> from sciencebeam_alignment.align import GlobalSequenceMatcher, SimpleScoring
>>> DEFAULT_SCORING = SimpleScoring(match_score=3, mismatch_score=-1, gap_score=-2)
>>> matcher = GlobalSequenceMatcher(a='xyzabc', b='abcxyz', scoring=DEFAULT_SCORING)
>>> list(matcher.get_multiple_matching_blocks(limit=2))
[[(3, 0, 3)], [(0, 3, 3)]]
```
`get_multiple_matching_blocks` returns a generator. The number of variations can be limited using the `limit` argument or by simply stopping early.
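For example, to take only the first variation without computing the rest (a minimal sketch, reusing `matcher` from the example above):
```python
>>> next(iter(matcher.get_multiple_matching_blocks()))
[(3, 0, 3)]
```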
The `GlobalSequenceMatcher` can also be used to calculate the [Levenshtein distance](https://en.wikipedia.org/wiki/Levenshtein_distance) (or _edit distance_). An example is provided in `sciencebeam_alignment.levenshtein`:
```python
>>> from sciencebeam_alignment.levenshtein import get_levenshtein_distance
>>> get_levenshtein_distance('kitten', 'sitting')
3
>>> from sciencebeam_alignment.levenshtein import get_levenshtein_ratio
>>> get_levenshtein_ratio('kitten', 'sitting')
0.5714285714285714
```
Calculating the Levenshtein distance this way is mainly provided as an example; for heavy use you might want to consider [python-Levenshtein](https://github.com/ztane/python-Levenshtein) instead.
To check whether the fast implementation is enabled:
```python
>>> from sciencebeam_alignment.align import native_enabled
>>> native_enabled
True
```
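The native implementation can also be temporarily disabled (or explicitly enabled), e.g. for benchmarking, using the `require_native` context manager. A minimal sketch, reusing `DEFAULT_SCORING` from the examples above:
```python
>>> from sciencebeam_alignment.align import require_native
>>> with require_native(False):  # force the pure Python fallback
...     matching_blocks = LocalSequenceMatcher(
...         a='a word1 b', b='x word1 y', scoring=DEFAULT_SCORING
...     ).get_matching_blocks()
```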
## Development
Development can be done either using Docker (default) or a virtual environment.
All commands are available via `make`.
### Development using Docker
Build and run tests:
```bash
make build test
```
Or intended for CI:
```bash
make ci-build-and-test
```
### Development using a virtual environment
`make` targets with the `dev-` prefix are intended for use with the virtual environment.
This requires that you already have Python installed.
#### Setup (virtual environment)
```bash
make dev-venv
```
To update the dependencies:
```bash
make dev-install
```
#### Cython (virtual environment)
Compile code using Cython:
```bash
make dev-cython-clean dev-cython-compile
```
#### Tests (virtual environment)
```bash
make dev-test
```
Or:
```bash
make dev-watch
```
| /sciencebeam_alignment-0.0.5.tar.gz/sciencebeam_alignment-0.0.5/README.md | 0.635222 | 0.988256 | README.md | pypi |
from __future__ import absolute_import, print_function
import logging
import timeit
import numpy as np
from sciencebeam_alignment.align import (
SimpleScoring,
CustomScoring,
LocalSequenceMatcher,
require_native
)
DEFAULT_MATCH_SCORE = 2
DEFAULT_MISMATCH_SCORE = -1
DEFAULT_GAP_SCORE = -3
DEFAULT_SCORING = SimpleScoring(
DEFAULT_MATCH_SCORE, DEFAULT_MISMATCH_SCORE, DEFAULT_GAP_SCORE
)
CUSTOM_SCORING = CustomScoring(
lambda a, b: DEFAULT_MATCH_SCORE if a == b else DEFAULT_MISMATCH_SCORE,
DEFAULT_GAP_SCORE
)
SHORT_STRING1 = 'abc'
SHORT_STRING2 = 'def'
LONG_STRING1 = 'abcefghijk' * 100
LONG_STRING2 = ''.join(list(reversed(LONG_STRING1)))
def encode_str(s):
return np.array([int(ord(x)) for x in s], dtype=np.int32)
LONG_ENCODED1 = encode_str(LONG_STRING1)
LONG_ENCODED2 = encode_str(LONG_STRING2)
def test_align_with_scoring_fn_py():
with require_native(False):
LocalSequenceMatcher(LONG_STRING1, LONG_STRING2, CUSTOM_SCORING).get_matching_blocks()
def test_align_with_scoring_fn():
with require_native(True):
LocalSequenceMatcher(LONG_STRING1, LONG_STRING2, CUSTOM_SCORING).get_matching_blocks()
def test_align_with_simple_scoring():
with require_native(True):
LocalSequenceMatcher(LONG_STRING1, LONG_STRING2, DEFAULT_SCORING).get_matching_blocks()
def test_align_with_simple_scoring_int():
with require_native(True):
LocalSequenceMatcher(LONG_ENCODED1, LONG_ENCODED2, DEFAULT_SCORING).get_matching_blocks()
def test_align_with_simple_scoring_str():
with require_native(True):
LocalSequenceMatcher(LONG_STRING1, LONG_STRING2, DEFAULT_SCORING).get_matching_blocks()
def report_timing(fn, number=1):
timeit_result_ms = timeit.timeit(
fn + "()",
setup="from __main__ import " + fn,
number=number
) * 1000
print("{} ({}x):\n{:f} ms / it ({:f} ms total)\n".format(
fn,
number,
timeit_result_ms / number,
timeit_result_ms
))
def main():
print("len LONG_STRING1: {}\n".format(len(LONG_STRING1)))
print("len LONG_ENCODED1: {}\n".format(len(LONG_ENCODED1)))
report_timing("test_align_with_scoring_fn_py")
report_timing("test_align_with_scoring_fn", 3)
report_timing("test_align_with_simple_scoring", 3)
report_timing("test_align_with_simple_scoring_int", 3)
report_timing("test_align_with_simple_scoring_str", 3)
if __name__ == "__main__":
logging.basicConfig(level='INFO')
main() | /sciencebeam_alignment-0.0.5.tar.gz/sciencebeam_alignment-0.0.5/sciencebeam_alignment/align_performance.py | 0.577376 | 0.210644 | align_performance.py | pypi |
import logging
import warnings
from collections import deque
from itertools import islice
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
import numpy as np
from six import (
with_metaclass,
string_types,
binary_type
)
try:
from sciencebeam_alignment.align_fast_utils import ( # pylint: disable=no-name-in-module
native_compute_inner_alignment_matrix_simple_scoring_int,
native_compute_inner_alignment_matrix_simple_scoring_any,
native_compute_inner_alignment_matrix_scoring_fn_any,
native_alignment_matrix_single_path_traceback
)
native_enabled = True
except Exception as e: # pylint: disable=broad-except
warnings.warn('fast implementation not available due to: %s' % e)
native_enabled = False
MIN_INT = -2147483647
def get_logger():
return logging.getLogger(__name__)
@contextmanager
def require_native(required=True):
global native_enabled # pylint: disable=W0603
was_enabled = native_enabled
native_enabled = required
yield
native_enabled = was_enabled
def _is_array_of_type(a, dtype):
return np.issubdtype(a.dtype, dtype)
# pylint: disable=too-many-arguments
def compute_inner_alignment_matrix_simple_scoring_py(
scoring_matrix, a, b, match_score, mismatch_score, gap_score, min_score):
"""Pure python fallback implementation.
Calculates the inner alignment matrix for a and b using simple scoring parameters:
match_score, mismatch_score, gap_score, min_score
Arguments:
scoring_matrix {matrix} -- Output matrix (1 + len(a), 1 + len(b))
a {sequence} -- First sequence (string or list)
b {sequence} -- Second sequence (string or list)
match_score {int} -- Score for a match
mismatch_score {int} -- Score for a mismatch
        gap_score {int} -- Score for a gap (increase to penalise gaps between matches)
min_score {int} -- Minimum score (e.g. zero if scores shouldn't be allowed to go negative)
"""
m = len(a) + 1
n = len(b) + 1
for i in range(1, m):
for j in range(1, n):
scoring_matrix[i, j] = max(
min_score,
# Match elements.
scoring_matrix[i - 1, j - 1] +
(match_score if a[i - 1] == b[j - 1] else mismatch_score),
# Gap on sequenceA.
scoring_matrix[i, j - 1] + gap_score,
# Gap on sequenceB.
scoring_matrix[i - 1, j] + gap_score
)
def compute_inner_alignment_matrix_scoring_fn_py(
scoring_matrix, a, b, scoring_fn, gap_score, min_score):
"""Pure python fallback implementation.
Same as compute_inner_alignment_matrix_simple_scoring_py but uses a function
to calculate match / mismatch (may be slower but more flexible).
Arguments:
scoring_matrix {matrix} -- Output matrix (1 + len(a), 1 + len(b))
a {sequence} -- First sequence (string or list)
b {sequence} -- Second sequence (string or list)
scoring_fn {function} -- Function to return the score between two items (e.g. characters)
        gap_score {int} -- Score for a gap (increase to penalise gaps between matches)
min_score {int} -- Minimum score (e.g. zero if scores shouldn't be allowed to go negative)
"""
m = len(a) + 1
n = len(b) + 1
for i in range(1, m):
for j in range(1, n):
scoring_matrix[i, j] = max(
min_score,
# Match elements.
scoring_matrix[i - 1, j - 1] +
scoring_fn(a[i - 1], b[j - 1]),
# Gap on sequenceA.
scoring_matrix[i, j - 1] + gap_score,
# Gap on sequenceB.
scoring_matrix[i - 1, j] + gap_score
)
def compute_inner_alignment_matrix_simple_scoring(
scoring_matrix, a, b, match_score, mismatch_score, gap_score, min_score):
try:
if (
native_enabled and
_is_array_of_type(a, np.int32) and _is_array_of_type(b, np.int32)
):
native_compute_inner_alignment_matrix_simple_scoring_int(
scoring_matrix, a, b,
match_score, mismatch_score, gap_score, min_score
)
return
elif native_enabled:
native_compute_inner_alignment_matrix_simple_scoring_any(
scoring_matrix, a, b,
match_score, mismatch_score, gap_score, min_score
)
return
except AttributeError:
pass
compute_inner_alignment_matrix_simple_scoring_py(
scoring_matrix, a, b,
match_score, mismatch_score, gap_score, min_score
)
def compute_inner_alignment_matrix_custom_scoring(
scoring_matrix, a, b, scoring_fn, gap_score, min_score):
if native_enabled:
native_compute_inner_alignment_matrix_scoring_fn_any(
scoring_matrix, a, b,
scoring_fn, gap_score, min_score
)
else:
compute_inner_alignment_matrix_scoring_fn_py(
scoring_matrix, a, b,
scoring_fn, gap_score, min_score
)
def compute_inner_alignment_matrix(
scoring_matrix, a, b, scoring, min_score):
if isinstance(scoring, CustomScoring):
compute_inner_alignment_matrix_custom_scoring(
scoring_matrix, a, b,
scoring.scoring_fn, scoring.gap_score, min_score
)
else:
compute_inner_alignment_matrix_simple_scoring(
scoring_matrix, a, b,
scoring.match_score, scoring.mismatch_score, scoring.gap_score,
min_score
)
def _next_locs(score_matrix, i, j, is_local):
diag_score = score_matrix[i - 1][j - 1] if (i != 0 and j != 0) else MIN_INT
up_score = score_matrix[i - 1][j] if i != 0 else MIN_INT
left_score = score_matrix[i][j - 1] if j != 0 else MIN_INT
max_score = max(diag_score, up_score, left_score)
if max_score == MIN_INT:
return []
if (max_score == 0 or diag_score == 0) and (is_local or (i == 1 and j == 1)):
return []
if diag_score == max_score:
get_logger().debug('diag_score: %s (%s)', diag_score, max_score)
return [(i - 1, j - 1)]
locs = []
if up_score == max_score:
locs.append((i - 1, j))
if left_score == max_score:
locs.append((i, j - 1))
return locs
def alignment_matrix_traceback_py(score_matrix, start_locs, is_local):
# Using LinkedListNode to cheaply branch off to multiple paths
pending_roots = deque([
LinkedListNode(tuple(loc))
for loc in start_locs
])
while pending_roots:
n = pending_roots.pop()
i, j = n.data
next_locs = _next_locs(score_matrix, i, j, is_local)
get_logger().debug('next_locs: %s', next_locs)
if not next_locs:
yield n
else:
pending_roots.extend([
LinkedListNode(next_loc, n)
for next_loc in next_locs
])
def alignment_matrix_traceback(score_matrix, start_locs, is_local, limit):
if native_enabled and limit == 1:
yield native_alignment_matrix_single_path_traceback(
score_matrix, start_locs[0], 1 if is_local else 0
)
else:
paths = alignment_matrix_traceback_py(
score_matrix, reversed(start_locs), is_local
)
if limit:
paths = islice(paths, limit)
for path in paths:
yield path
class SimpleScoring(object):
def __init__(self, match_score, mismatch_score, gap_score):
self.match_score = match_score
self.mismatch_score = mismatch_score
self.gap_score = gap_score
class CustomScoring(object):
def __init__(self, scoring_fn, gap_score):
self.scoring_fn = scoring_fn
self.gap_score = gap_score
class LinkedListNode(object):
def __init__(self, data, next_node=None):
self.data = data
self.next_node = next_node
def __str__(self):
if self.next_node is not None:
return str(self.data) + ' -> ' + str(self.next_node)
return str(self.data)
def __iter__(self):
yield self.data
next_node = self.next_node
while next_node is not None:
yield next_node.data
next_node = next_node.next_node
def _path_to_matching_blocks(path, a, b):
block_ai = 0
block_bi = 0
block_size = 0
for ai, bi in ((ai_ - 1, bi_ - 1) for ai_, bi_ in path):
if a[ai] == b[bi]:
if block_size and block_ai + block_size == ai and block_bi + block_size == bi:
block_size += 1
else:
if block_size:
yield (block_ai, block_bi, block_size)
block_ai = ai
block_bi = bi
block_size = 1
if block_size:
yield (block_ai, block_bi, block_size)
def _as_np_array(s):
if isinstance(s, binary_type):
return np.frombuffer(s, dtype=np.uint8).astype(np.int32)
if isinstance(s, string_types):
return np.array([ord(c) for c in s], dtype=np.int32)
return np.asarray(s)
wrap_sequence = _as_np_array
class AbstractSequenceMatcher(object, with_metaclass(ABCMeta)):
def __init__(self, a, b, scoring):
self.a = a
self.b = b
self.scoring = scoring
self._alignment_matrix = None
self._a = _as_np_array(a)
self._b = _as_np_array(b)
@abstractmethod
def _computer_alignment_matrix(self):
pass
def _get_alignment_matrix(self):
if self._alignment_matrix is None:
self._alignment_matrix = self._computer_alignment_matrix()
return self._alignment_matrix
@abstractmethod
def get_multiple_matching_blocks(self, limit=None):
pass
def get_matching_blocks(self):
for matching_blocks in self.get_multiple_matching_blocks(limit=1):
return list(matching_blocks) + [(len(self.a), len(self.b), 0)]
return [(len(self.a), len(self.b), 0)]
class LocalSequenceMatcher(AbstractSequenceMatcher):
"""
Local sequence matcher using Smith-Waterman algorithm
"""
def _computer_alignment_matrix(self):
m = len(self._a) + 1
n = len(self._b) + 1
scoring_matrix = np.empty((m, n), dtype=int)
scoring_matrix[:, 0] = 0
scoring_matrix[0, :] = 0
min_score = 0
compute_inner_alignment_matrix(
scoring_matrix,
self._a, self._b,
self.scoring,
min_score
)
return scoring_matrix
def get_multiple_matching_blocks(self, limit=None):
score_matrix = self._get_alignment_matrix()
max_score = score_matrix.max()
max_score_loc = np.argwhere(score_matrix == max_score)
get_logger().debug('max_score_loc: %s', max_score_loc)
is_local = True
paths = alignment_matrix_traceback(score_matrix, max_score_loc, is_local, limit=limit or 0)
return (
list(_path_to_matching_blocks(path, self.a, self.b))
for path in paths
)
class GlobalSequenceMatcher(AbstractSequenceMatcher):
"""
Global sequence matcher using Needleman-Wunsch algorithm
"""
def _computer_alignment_matrix(self):
m = len(self._a) + 1
n = len(self._b) + 1
scoring_matrix = np.empty((m, n), dtype=int)
for i in range(m):
scoring_matrix[i, 0] = self.scoring.gap_score * i
for j in range(n):
scoring_matrix[0, j] = self.scoring.gap_score * j
min_score = MIN_INT
compute_inner_alignment_matrix(
scoring_matrix,
self._a, self._b,
self.scoring,
min_score
)
return scoring_matrix
def get_multiple_matching_blocks(self, limit=None):
score_matrix = self._get_alignment_matrix()
m = len(self._a) + 1
n = len(self._b) + 1
start_locs = [(m - 1, n - 1)]
is_local = False
paths = alignment_matrix_traceback(score_matrix, start_locs, is_local, limit=limit or 0)
return (
list(_path_to_matching_blocks(path, self.a, self.b))
for path in paths
) | /sciencebeam_alignment-0.0.5.tar.gz/sciencebeam_alignment-0.0.5/sciencebeam_alignment/align.py | 0.647464 | 0.344085 | align.py | pypi |
import dataclasses
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import (
Callable,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
cast
)
from typing_extensions import Protocol
from sciencebeam_parser.document.layout_document import (
EMPTY_BLOCK,
LayoutBlock,
LayoutGraphic,
LayoutToken
)
class SemanticContentWrapper(ABC):
def get_text(self) -> str:
return ' '.join((
block.text
for block in self.iter_blocks()
))
def __len__(self) -> int:
return len(list(self.iter_blocks()))
def get_short_semantic_content_repr(self):
return '%s(%r)' % (type(self).__name__, self.get_text())
@abstractmethod
def iter_blocks(self) -> Iterable[LayoutBlock]:
pass
def iter_tokens(self) -> Iterable[LayoutToken]:
return (
token
for block in self.iter_blocks()
for token in block.iter_all_tokens()
)
@property
def merged_block(self) -> LayoutBlock:
return LayoutBlock.merge_blocks(self.iter_blocks())
@dataclass
class SemanticSimpleContentWrapper(SemanticContentWrapper):
content: LayoutBlock = EMPTY_BLOCK
layout_block: dataclasses.InitVar[LayoutBlock] = None
def __post_init__(self, layout_block: Optional[LayoutBlock] = None):
assert isinstance(self.content, LayoutBlock)
if layout_block is not None:
self.add_content(layout_block)
def iter_blocks(self) -> Iterable[LayoutBlock]:
return [self.content]
def add_content(self, block: LayoutBlock):
self.content = LayoutBlock(
lines=self.content.lines + block.lines
)
class SemanticTextContentWrapper(SemanticSimpleContentWrapper):
pass
class SemanticContentFactoryProtocol(Protocol):
def __call__(self, layout_block: LayoutBlock) -> SemanticContentWrapper:
pass
EMPTY_CONTENT = SemanticSimpleContentWrapper()
T_SemanticContentWrapper = TypeVar('T_SemanticContentWrapper', bound=SemanticContentWrapper)
@dataclass
class SemanticMixedContentWrapper(SemanticContentWrapper):
mixed_content: List[SemanticContentWrapper] = field(default_factory=list)
content_id: Optional[str] = None
layout_block: dataclasses.InitVar[LayoutBlock] = None
def __post_init__(self, layout_block: Optional[LayoutBlock] = None):
if layout_block is not None:
self.add_block_content(layout_block)
def __len__(self):
return len(self.mixed_content)
def __iter__(self) -> Iterator[SemanticContentWrapper]:
return iter(self.mixed_content)
def is_empty(self):
return not self.mixed_content
def iter_blocks(self) -> Iterable[LayoutBlock]:
return (
block
for content in self.mixed_content
for block in content.iter_blocks()
)
def add_block_content(self, block: LayoutBlock):
self.add_content(SemanticTextContentWrapper(block))
def add_content(self, content: SemanticContentWrapper):
assert not isinstance(content, LayoutBlock)
self.mixed_content.append(content)
def add_content_and_return_content(
self, content: T_SemanticContentWrapper
) -> T_SemanticContentWrapper:
self.add_content(content)
return content
def iter_by_type(
self, type_: Type[T_SemanticContentWrapper]
) -> Iterable[T_SemanticContentWrapper]:
return (
content for content in self.mixed_content
if isinstance(content, type_)
)
def iter_by_type_recursively(
self, type_: Type[T_SemanticContentWrapper]
) -> Iterable[T_SemanticContentWrapper]:
return iter_by_semantic_type_recursively(self.mixed_content, type_)
def iter_by_types_recursively(
self, types_: Tuple[Type[T_SemanticContentWrapper], ...]
) -> Iterable[SemanticContentWrapper]:
return iter_by_semantic_types_recursively(self.mixed_content, types_)
def iter_parent_by_semantic_type_recursively(
self, type_: Type[T_SemanticContentWrapper]
):
return iter_parent_by_semantic_type_recursively(
self.mixed_content, type_, self
)
def has_type(
self, type_: Type[T_SemanticContentWrapper]
) -> bool:
return next(iter(self.iter_by_type(type_)), None) is not None
def view_by_type(self, type_: Type[T_SemanticContentWrapper]) -> 'SemanticMixedContentWrapper':
return SemanticMixedContentWrapper(list(self.iter_by_type(type_)))
def flat_map_inplace(
self,
fn: Callable[[SemanticContentWrapper], Sequence[SemanticContentWrapper]]
):
self.mixed_content = [
replaced_content
for content in self.mixed_content
for replaced_content in fn(content)
]
def flat_map_inplace_by_type(
self,
type_: Type[T_SemanticContentWrapper],
fn: Callable[[SemanticContentWrapper], Sequence[SemanticContentWrapper]]
):
self.flat_map_inplace(
lambda content: (
fn(content) if isinstance(content, type_)
else [content]
)
)
def get_text_list(self) -> List[str]:
return [content.get_text() for content in self.mixed_content]
def get_text_by_type(self, type_: Type[T_SemanticContentWrapper]) -> str:
return self.view_by_type(type_).get_text()
def iter_parent_by_semantic_type_recursively(
semantic_content_iterable: Iterable[SemanticContentWrapper],
type_: Type[T_SemanticContentWrapper],
parent_content: SemanticContentWrapper
) -> Iterable[SemanticContentWrapper]:
for semantic_content in semantic_content_iterable:
if isinstance(semantic_content, type_):
yield parent_content
return
if isinstance(semantic_content, SemanticMixedContentWrapper):
yield from iter_parent_by_semantic_type_recursively(
semantic_content.mixed_content,
type_=type_,
parent_content=semantic_content
)
def iter_by_semantic_types_recursively(
semantic_content_iterable: Iterable[SemanticContentWrapper],
types_: Union[Type[T_SemanticContentWrapper], Tuple[Type[T_SemanticContentWrapper], ...]]
) -> Iterable[SemanticContentWrapper]:
for semantic_content in semantic_content_iterable:
if isinstance(semantic_content, types_):
yield semantic_content
continue
if isinstance(semantic_content, SemanticMixedContentWrapper):
yield from iter_by_semantic_types_recursively(
semantic_content.mixed_content,
types_=types_
)
def iter_by_semantic_type_recursively(
semantic_content_iterable: Iterable[SemanticContentWrapper],
type_: Type[T_SemanticContentWrapper]
) -> Iterable[T_SemanticContentWrapper]:
return cast(
Iterable[T_SemanticContentWrapper],
iter_by_semantic_types_recursively(
semantic_content_iterable,
type_
)
)
@dataclass
class SemanticNote(SemanticSimpleContentWrapper):
note_type: str = 'other'
@dataclass
class SemanticMixedNote(SemanticMixedContentWrapper):
note_type: str = 'other'
@dataclass
class SemanticOptionalValueSemanticMixedContentWrapper(SemanticMixedContentWrapper):
value: Optional[str] = None
class SemanticHeading(SemanticMixedContentWrapper):
pass
class SemanticParagraph(SemanticMixedContentWrapper):
pass
class SemanticSectionTypes:
BODY = 'BODY'
BACK = 'BACK'
ACKNOWLEDGEMENT = 'ACKNOWLEDGEMENT'
OTHER = 'OTHER'
class SemanticLabel(SemanticSimpleContentWrapper):
pass
class SemanticCaption(SemanticSimpleContentWrapper):
pass
class SemanticTitle(SemanticSimpleContentWrapper):
pass
class SemanticJournal(SemanticSimpleContentWrapper):
pass
class SemanticVolume(SemanticSimpleContentWrapper):
pass
class SemanticIssue(SemanticSimpleContentWrapper):
pass
@dataclass
class SemanticPageRange(SemanticSimpleContentWrapper):
from_page: Optional[str] = None
to_page: Optional[str] = None
class SemanticPublisher(SemanticSimpleContentWrapper):
pass
class SemanticLocation(SemanticSimpleContentWrapper):
pass
@dataclass
class SemanticDate(SemanticSimpleContentWrapper):
year: Optional[int] = None
class SemanticExternalIdentifierTypes:
ARXIV = 'ARXIV'
DOI = 'DOI'
PII = 'PII'
PMCID = 'PMCID'
PMID = 'PMID'
@dataclass
class SemanticExternalIdentifier(SemanticSimpleContentWrapper):
value: Optional[str] = None
external_identifier_type: Optional[str] = None
class SemanticExternalUrl(SemanticOptionalValueSemanticMixedContentWrapper):
pass
class SemanticAbstract(SemanticSimpleContentWrapper):
pass
class SemanticRawNameList(SemanticMixedContentWrapper):
pass
T_SemanticRawNameList = TypeVar('T_SemanticRawNameList', bound=SemanticRawNameList)
class SemanticRawAuthors(SemanticRawNameList):
pass
class SemanticRawEditors(SemanticRawNameList):
pass
class SemanticRawAffiliation(SemanticMixedContentWrapper):
pass
class SemanticRawAddress(SemanticMixedContentWrapper):
pass
class SemanticRawAffiliationAddress(SemanticMixedContentWrapper):
pass
class SemanticMarker(SemanticSimpleContentWrapper):
pass
class SemanticNamePart(SemanticOptionalValueSemanticMixedContentWrapper):
pass
class SemanticNameTitle(SemanticNamePart):
pass
class SemanticNameSuffix(SemanticNamePart):
pass
class SemanticGivenName(SemanticNamePart):
pass
class SemanticMiddleName(SemanticNamePart):
pass
class SemanticSurname(SemanticNamePart):
pass
class SemanticName(SemanticMixedContentWrapper):
@property
def label_text(self) -> str:
return self.view_by_type(SemanticLabel).get_text()
@property
def given_name_text(self) -> str:
return self.view_by_type(SemanticGivenName).get_text()
@property
def surname_text(self) -> str:
return self.view_by_type(SemanticSurname).get_text()
T_SemanticName = TypeVar('T_SemanticName', bound=SemanticName)
class SemanticAuthor(SemanticName):
pass
class SemanticEditor(SemanticName):
pass
class SemanticInstitution(SemanticMixedContentWrapper):
pass
class SemanticDepartment(SemanticMixedContentWrapper):
pass
class SemanticLaboratory(SemanticMixedContentWrapper):
pass
class SemanticAddressField(SemanticMixedContentWrapper):
pass
class SemanticAddressLine(SemanticAddressField):
pass
class SemanticPostCode(SemanticAddressField):
pass
class SemanticPostBox(SemanticAddressField):
pass
class SemanticRegion(SemanticAddressField):
pass
class SemanticSettlement(SemanticAddressField):
pass
class SemanticCountry(SemanticAddressField):
pass
class SemanticAffiliationAddress(SemanticMixedContentWrapper):
pass
class SemanticRawReferenceText(SemanticMixedContentWrapper):
pass
class SemanticRawReference(SemanticMixedContentWrapper):
pass
class SemanticReference(SemanticMixedContentWrapper):
pass
class SemanticInvalidReference(SemanticMixedContentWrapper):
pass
class SemanticReferenceList(SemanticMixedContentWrapper):
pass
class SemanticRawFigure(SemanticMixedContentWrapper):
pass
class SemanticFigure(SemanticMixedContentWrapper):
pass
class SemanticRawTable(SemanticMixedContentWrapper):
pass
class SemanticTable(SemanticMixedContentWrapper):
pass
class SemanticRawEquationContent(SemanticMixedContentWrapper):
pass
class SemanticRawEquation(SemanticMixedContentWrapper):
pass
@dataclass
class SemanticGraphic(SemanticSimpleContentWrapper):
layout_graphic: Optional[LayoutGraphic] = None
relative_path: Optional[str] = None
def get_short_semantic_content_repr(self):
if not self.layout_graphic:
return repr(self)
return '%s(layout_graphic.local_file_path=%r)' % (
type(self).__name__,
self.layout_graphic.local_file_path
)
@dataclass
class SemanticCitation(SemanticSimpleContentWrapper):
target_content_id: Optional[str] = None
class SemanticFigureCitation(SemanticCitation):
pass
class SemanticTableCitation(SemanticCitation):
pass
class SemanticReferenceCitation(SemanticCitation):
pass
class SemanticFront(SemanticMixedContentWrapper):
@property
def authors(self) -> List[SemanticAuthor]:
return list(self.iter_by_type(SemanticAuthor))
def get_raw_authors_text(self) -> str:
return '\n'.join(self.view_by_type(SemanticRawAuthors).get_text_list())
def get_authors_text(self) -> str:
return '\n'.join(self.view_by_type(SemanticAuthor).get_text_list())
@dataclass
class SemanticSection(SemanticMixedContentWrapper):
section_type: str = SemanticSectionTypes.OTHER
@property
def headings(self) -> List[SemanticHeading]:
return list(self.iter_by_type(SemanticHeading))
def get_heading_text(self) -> str:
return '\n'.join(self.view_by_type(SemanticHeading).get_text_list())
@property
def paragraphs(self) -> List[SemanticParagraph]:
return list(self.iter_by_type(SemanticParagraph))
def get_paragraph_text_list(self) -> List[str]:
return self.view_by_type(SemanticParagraph).get_text_list()
def add_heading_block(self, block: LayoutBlock) -> SemanticHeading:
return self.add_content_and_return_content(SemanticHeading(layout_block=block))
def add_new_paragraph(self) -> SemanticParagraph:
return self.add_content_and_return_content(SemanticParagraph())
def add_note(self, block: LayoutBlock, note_type: str) -> SemanticNote:
return self.add_content_and_return_content(
SemanticNote(block, note_type=note_type)
)
def get_notes(self, note_type: str) -> List[SemanticNote]:
return [
note
for note in self.iter_by_type(SemanticNote)
if note.note_type == note_type
]
def get_notes_text_list(self, note_type: str) -> List[str]:
return [note.get_text() for note in self.get_notes(note_type)]
@property
def sections(self) -> List['SemanticSection']:
return list(self.iter_by_type(SemanticSection))
def get_sections(
self,
section_type: Optional[str] = None
) -> List['SemanticSection']:
return [
section
for section in self.iter_by_type(SemanticSection)
if not section_type or section.section_type == section_type
]
def view_by_section_type(self, section_type: str) -> 'SemanticMixedContentWrapper':
return SemanticMixedContentWrapper(
cast(List[SemanticContentWrapper], self.get_sections(section_type))
)
def add_new_section(
self,
section_type: str = SemanticSectionTypes.OTHER
) -> 'SemanticSection':
return self.add_content_and_return_content(
SemanticSection(section_type=section_type)
)
class SemanticDocument(SemanticMixedContentWrapper):
def __init__(self):
self.front = SemanticFront()
self.body_section = SemanticSection(section_type=SemanticSectionTypes.BODY)
self.back_section = SemanticSection(section_type=SemanticSectionTypes.BACK)
super().__init__([self.front, self.body_section, self.back_section]) | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/document/semantic_document.py | 0.851907 | 0.244611 | semantic_document.py | pypi |
import dataclasses
import logging
import itertools
import operator
from dataclasses import dataclass, field
from functools import partial
from typing import Callable, List, Iterable, NamedTuple, Optional, Sequence, Tuple
from sciencebeam_parser.utils.bounding_box import BoundingBox
from sciencebeam_parser.utils.tokenizer import iter_tokenized_tokens, get_tokenized_tokens
LOGGER = logging.getLogger(__name__)
class LayoutFont(NamedTuple):
font_id: str
font_family: Optional[str] = None
font_size: Optional[float] = None
is_bold: Optional[bool] = None
is_italics: Optional[bool] = None
is_subscript: Optional[bool] = None
is_superscript: Optional[bool] = None
EMPTY_FONT = LayoutFont(font_id='_EMPTY')
class LayoutPageCoordinates(NamedTuple):
x: float
y: float
width: float
height: float
page_number: int = 0
@staticmethod
def from_bounding_box(
bounding_box: BoundingBox,
page_number: int = 0
) -> 'LayoutPageCoordinates':
return LayoutPageCoordinates(
x=bounding_box.x,
y=bounding_box.y,
width=bounding_box.width,
height=bounding_box.height,
page_number=page_number
)
@property
def bounding_box(self) -> BoundingBox:
return BoundingBox(x=self.x, y=self.y, width=self.width, height=self.height)
def __bool__(self) -> bool:
return not self.is_empty()
def is_empty(self) -> bool:
return self.width == 0 or self.height == 0
def move_by(self, dx: float = 0, dy: float = 0) -> 'LayoutPageCoordinates':
return LayoutPageCoordinates(
x=self.x + dx, y=self.y + dy, width=self.width, height=self.height,
page_number=self.page_number
)
def get_merged_with(
self,
other: 'LayoutPageCoordinates'
) -> 'LayoutPageCoordinates':
assert self.page_number == other.page_number, \
'cannot merge coordinates on different pages'
x = min(self.x, other.x)
y = min(self.y, other.y)
width = max(self.x + self.width, other.x + other.width) - x
height = max(self.y + self.height, other.y + other.height) - y
return LayoutPageCoordinates(
x=x, y=y, width=width, height=height, page_number=self.page_number
)
def get_merged_coordinates_list(
coordinates_list: Iterable[LayoutPageCoordinates]
) -> List[LayoutPageCoordinates]:
result: List[LayoutPageCoordinates] = []
pending_coordinates: Optional[LayoutPageCoordinates] = None
for coordinates in coordinates_list:
if not pending_coordinates:
pending_coordinates = coordinates
continue
if coordinates.page_number != pending_coordinates.page_number:
result.append(pending_coordinates)
pending_coordinates = coordinates
continue
pending_coordinates = pending_coordinates.get_merged_with(
coordinates
)
if pending_coordinates:
result.append(pending_coordinates)
return result
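# Illustrative example (hypothetical values): consecutive coordinates on the same
# page are merged into their bounding rectangle; a page change starts a new entry.
#
#     get_merged_coordinates_list([
#         LayoutPageCoordinates(x=10, y=10, width=100, height=20, page_number=1),
#         LayoutPageCoordinates(x=10, y=25, width=100, height=20, page_number=1),
#         LayoutPageCoordinates(x=10, y=10, width=100, height=20, page_number=2)
#     ])
#     # -> [LayoutPageCoordinates(x=10, y=10, width=100, height=35, page_number=1),
#     #     LayoutPageCoordinates(x=10, y=10, width=100, height=20, page_number=2)]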
class LayoutPageMeta(NamedTuple):
page_number: int = 0
coordinates: Optional[LayoutPageCoordinates] = None
@staticmethod
def for_coordinates(coordinates: LayoutPageCoordinates) -> 'LayoutPageMeta':
return LayoutPageMeta(page_number=coordinates.page_number, coordinates=coordinates)
DEFAULT_LAYOUT_PAGE_META = LayoutPageMeta()
class LayoutLineMeta(NamedTuple):
line_id: int = -1
page_meta: LayoutPageMeta = DEFAULT_LAYOUT_PAGE_META
DEFAULT_LAYOUT_LINE_META = LayoutLineMeta()
class LayoutToken(NamedTuple):
text: str
font: LayoutFont = EMPTY_FONT
whitespace: str = ' '
coordinates: Optional[LayoutPageCoordinates] = None
line_meta: LayoutLineMeta = DEFAULT_LAYOUT_LINE_META
T_FlatMapLayoutTokensFn = Callable[[LayoutToken], List[LayoutToken]]
def default_get_tokenized_tokens_keep_whitespace(text: str) -> List[str]:
return get_tokenized_tokens(text, keep_whitespace=True)
def get_relative_coordinates(
coordinates: Optional[LayoutPageCoordinates],
text: str,
text_character_offset: int,
total_text_length: int
) -> Optional[LayoutPageCoordinates]:
if not coordinates:
return None
return LayoutPageCoordinates(
page_number=coordinates.page_number,
x=(
coordinates.x
+ coordinates.width * text_character_offset / total_text_length
),
y=coordinates.y,
width=(
coordinates.width
* len(text) / total_text_length
),
height=coordinates.height
)
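# Illustrative example (hypothetical values): the original token's horizontal
# extent is split proportionally by character counts.
#
#     get_relative_coordinates(
#         LayoutPageCoordinates(x=100, y=50, width=80, height=10, page_number=1),
#         text='of', text_character_offset=6, total_text_length=8
#     )
#     # -> LayoutPageCoordinates(x=160.0, y=50, width=20.0, height=10, page_number=1)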
def retokenize_layout_token(
layout_token: LayoutToken,
tokenize_fn: Optional[Callable[[str], List[str]]] = None
) -> List[LayoutToken]:
if not layout_token.text.strip():
return []
if tokenize_fn is None:
tokenize_fn = default_get_tokenized_tokens_keep_whitespace
token_texts = tokenize_fn(layout_token.text)
if token_texts == [layout_token.text]:
return [layout_token]
total_text_length = sum(len(token_text) for token_text in token_texts)
texts_with_whitespace: List[Tuple[str, str, int]] = []
pending_token_text = ''
pending_whitespace = ''
text_character_offset = 0
pending_text_character_offset = 0
for token_text in token_texts:
if not token_text.strip():
pending_whitespace += token_text
text_character_offset += len(token_text)
continue
if pending_token_text:
texts_with_whitespace.append((
pending_token_text,
pending_whitespace,
pending_text_character_offset
))
pending_token_text = token_text
pending_whitespace = ''
pending_text_character_offset = text_character_offset
text_character_offset += len(token_text)
pending_whitespace += layout_token.whitespace
if pending_token_text:
texts_with_whitespace.append((
pending_token_text,
pending_whitespace,
pending_text_character_offset
))
return [
LayoutToken(
text=token_text,
font=layout_token.font,
whitespace=whitespace,
coordinates=get_relative_coordinates(
layout_token.coordinates,
                token_text,
text_character_offset,
total_text_length
),
line_meta=layout_token.line_meta
)
for token_text, whitespace, text_character_offset in texts_with_whitespace
]
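# Illustrative example: a token whose text still contains multiple words is split
# into one LayoutToken per word, whitespace being attached to the preceding token
# (assuming the default tokenizer splits on whitespace):
#
#     retokenize_layout_token(LayoutToken('Hello world', whitespace=' '))
#     # -> [LayoutToken('Hello', whitespace=' ', ...), LayoutToken('world', whitespace=' ', ...)]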
def iter_layout_tokens_for_text(
text: str,
tail_whitespace: str = ' ',
**kwargs
) -> Iterable[LayoutToken]:
pending_text = ''
pending_whitespace = ' '
for token_text in iter_tokenized_tokens(text, keep_whitespace=True):
if not token_text.strip():
pending_whitespace += token_text
continue
if pending_text:
yield LayoutToken(pending_text, whitespace=pending_whitespace, **kwargs)
pending_text = token_text
pending_whitespace = ''
if pending_text:
pending_whitespace += tail_whitespace
yield LayoutToken(pending_text, whitespace=pending_whitespace, **kwargs)
def get_layout_tokens_for_text(*args, **kwargs) -> List[LayoutToken]:
return list(iter_layout_tokens_for_text(*args, **kwargs))
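# Illustrative example: whitespace (including line breaks) is folded into the
# preceding token's whitespace and the final token receives tail_whitespace
# (assuming the tokenizer keeps whitespace tokens):
#
#     get_layout_tokens_for_text('Hello\nworld')
#     # -> [LayoutToken('Hello', whitespace='\n'), LayoutToken('world', whitespace=' ')]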
@dataclass
class LayoutLine:
tokens: List[LayoutToken]
@property
def text(self) -> str:
return join_layout_tokens(self.tokens)
@staticmethod
def for_text(text: str, **kwargs) -> 'LayoutLine':
return LayoutLine(tokens=get_layout_tokens_for_text(text, **kwargs))
def flat_map_layout_tokens(self, fn: T_FlatMapLayoutTokensFn) -> 'LayoutLine':
return LayoutLine(tokens=[
tokenized_token
for token in self.tokens
for tokenized_token in fn(token)
])
@dataclass
class LayoutBlock:
lines: List[LayoutLine]
def __len__(self):
return len(self.lines)
@staticmethod
def for_tokens(tokens: List[LayoutToken]) -> 'LayoutBlock':
if not tokens:
return EMPTY_BLOCK
lines = [
LayoutLine(tokens=list(line_tokens))
for _, line_tokens in itertools.groupby(
tokens, key=operator.attrgetter('line_meta')
)
]
return LayoutBlock(lines=lines)
@staticmethod
def merge_blocks(blocks: Iterable['LayoutBlock']) -> 'LayoutBlock':
return LayoutBlock(lines=[
line
for block in blocks
for line in block.lines
])
@staticmethod
def for_text(text: str, **kwargs) -> 'LayoutBlock':
return LayoutBlock(lines=[LayoutLine.for_text(text, **kwargs)])
def iter_all_tokens(self) -> Iterable[LayoutToken]:
return (
token
for line in self.lines
for token in line.tokens
)
def get_merged_coordinates_list(self) -> List[LayoutPageCoordinates]:
return get_merged_coordinates_list([
token.coordinates
for token in self.iter_all_tokens()
if token.coordinates
])
def flat_map_layout_tokens(self, fn: T_FlatMapLayoutTokensFn) -> 'LayoutBlock':
return LayoutBlock(lines=[
line.flat_map_layout_tokens(fn)
for line in self.lines
])
def remove_empty_lines(self) -> 'LayoutBlock':
return LayoutBlock(lines=[
line
for line in self.lines
if line.tokens
])
@property
def text(self) -> str:
return join_layout_tokens(self.iter_all_tokens())
@property
def whitespace(self) -> str:
if not self.lines or not self.lines[-1].tokens:
return ''
return self.lines[-1].tokens[-1].whitespace
EMPTY_BLOCK = LayoutBlock(lines=[])
class LayoutGraphic(NamedTuple):
local_file_path: Optional[str] = None
coordinates: Optional[LayoutPageCoordinates] = None
graphic_type: Optional[str] = None
related_block: Optional[LayoutBlock] = None
page_meta: LayoutPageMeta = DEFAULT_LAYOUT_PAGE_META
@dataclass
class LayoutPage:
blocks: List[LayoutBlock]
graphics: Sequence[LayoutGraphic] = field(default_factory=list)
meta: LayoutPageMeta = DEFAULT_LAYOUT_PAGE_META
def replace(self, **changes) -> 'LayoutPage':
return dataclasses.replace(self, **changes)
def iter_all_tokens(self) -> Iterable[LayoutToken]:
return (
token
for block in self.blocks
for token in block.iter_all_tokens()
)
def flat_map_layout_tokens(self, fn: T_FlatMapLayoutTokensFn) -> 'LayoutPage':
return LayoutPage(
blocks=[
block.flat_map_layout_tokens(fn)
for block in self.blocks
],
graphics=self.graphics,
meta=self.meta
)
def remove_empty_blocks(self) -> 'LayoutPage':
blocks: List[LayoutBlock] = [
block.remove_empty_lines()
for block in self.blocks
]
return LayoutPage(
blocks=[
block
for block in blocks
if block.lines
],
graphics=self.graphics,
meta=self.meta
)
@dataclass
class LayoutDocument:
pages: List[LayoutPage]
def __len__(self):
return len(self.pages)
@staticmethod
def for_blocks(blocks: List[LayoutBlock]) -> 'LayoutDocument':
return LayoutDocument(pages=[LayoutPage(
blocks=blocks, graphics=[]
)])
def replace(self, **changes) -> 'LayoutDocument':
return dataclasses.replace(self, **changes)
def iter_all_blocks(self) -> Iterable[LayoutBlock]:
return (
block
for page in self.pages
for block in page.blocks
)
def iter_all_lines(self) -> Iterable[LayoutLine]:
return (
line
for block in self.iter_all_blocks()
for line in block.lines
)
def iter_all_tokens(self) -> Iterable[LayoutToken]:
return (
token
for block in self.iter_all_blocks()
for token in block.iter_all_tokens()
)
def iter_all_graphics(self) -> Iterable[LayoutGraphic]:
return (
graphic
for page in self.pages
for graphic in page.graphics
)
def flat_map_layout_tokens(
self, fn: T_FlatMapLayoutTokensFn, **kwargs
) -> 'LayoutDocument':
if kwargs:
fn = partial(fn, **kwargs)
return LayoutDocument(pages=[
page.flat_map_layout_tokens(fn)
for page in self.pages
])
def retokenize(self, **kwargs) -> 'LayoutDocument':
return self.flat_map_layout_tokens(retokenize_layout_token, **kwargs)
def remove_empty_blocks(self, preserve_empty_pages: bool = False) -> 'LayoutDocument':
pages: List[LayoutPage] = [
page.remove_empty_blocks()
for page in self.pages
]
return LayoutDocument(pages=[
page
for page in pages
if page.blocks or preserve_empty_pages
])
class LayoutTokenIndexRange(NamedTuple):
layout_token: LayoutToken
start: int
end: int
class LayoutTokensText:
def __init__(self, layout_block: LayoutBlock) -> None:
self.layout_block = layout_block
text_fragments = []
pending_whitespace = ''
text_offset = 0
token_index_ranges: List[LayoutTokenIndexRange] = []
for line in layout_block.lines:
for token in line.tokens:
if pending_whitespace:
text_fragments.append(pending_whitespace)
text_offset += len(pending_whitespace)
pending_whitespace = ''
token_text = token.text
token_index_ranges.append(LayoutTokenIndexRange(
layout_token=token,
start=text_offset,
end=text_offset + len(token_text)
))
text_fragments.append(token_text)
text_offset += len(token_text)
pending_whitespace += token.whitespace
self.token_index_ranges = token_index_ranges
self.text = ''.join(text_fragments)
def __str__(self):
return self.text
def iter_layout_tokens_between(
self, start: int, end: int
) -> Iterable[LayoutToken]:
for token_index_range in self.token_index_ranges:
if token_index_range.start >= end:
break
if token_index_range.end <= start:
continue
yield token_index_range.layout_token
def get_layout_tokens_between(
self, start: int, end: int
) -> List[LayoutToken]:
return list(self.iter_layout_tokens_between(start, end))
def join_layout_tokens(layout_tokens: Iterable[LayoutToken]) -> str:
layout_tokens = list(layout_tokens)
return ''.join([
(
token.text + token.whitespace
if index < len(layout_tokens) - 1
else token.text
)
for index, token in enumerate(layout_tokens)
])
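# Illustrative example: tokens are joined using each token's own whitespace,
# except after the last token.
#
#     join_layout_tokens([
#         LayoutToken('Hello', whitespace=' '),
#         LayoutToken('world', whitespace=' ')
#     ])
#     # -> 'Hello world'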
def flat_map_layout_document_tokens(
layout_document: LayoutDocument,
fn: T_FlatMapLayoutTokensFn,
**kwargs
) -> LayoutDocument:
return layout_document.flat_map_layout_tokens(fn, **kwargs)
def retokenize_layout_document(
layout_document: LayoutDocument,
**kwargs
) -> LayoutDocument:
return layout_document.retokenize(**kwargs)
def remove_empty_blocks(
layout_document: LayoutDocument,
**kwargs
) -> LayoutDocument:
return layout_document.remove_empty_blocks(**kwargs) | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/document/layout_document.py | 0.870776 | 0.279988 | layout_document.py | pypi |
import logging
from typing import Dict, Iterable, List, Optional, Union
from lxml import etree
from lxml.builder import ElementMaker
from sciencebeam_parser.utils.xml import get_text_content
from sciencebeam_parser.utils.xml_writer import parse_tag_expression
from sciencebeam_parser.document.layout_document import (
LayoutBlock,
LayoutPageCoordinates,
LayoutToken
)
from sciencebeam_parser.document.semantic_document import (
SemanticContentWrapper,
SemanticMixedContentWrapper
)
LOGGER = logging.getLogger(__name__)
XML_NS = 'http://www.w3.org/XML/1998/namespace'
XML_NS_PREFIX = '{%s}' % XML_NS
XML_ID = XML_NS_PREFIX + 'id'
TEI_NS = 'http://www.tei-c.org/ns/1.0'
TEI_NS_PREFIX = '{%s}' % TEI_NS
TEI_NS_MAP = {
'tei': TEI_NS
}
TEI_E = ElementMaker(namespace=TEI_NS, nsmap={
None: TEI_NS
})
def get_or_create_element_at(parent: etree.ElementBase, path: List[str]) -> etree.ElementBase:
if not path:
return parent
child = parent.find(TEI_NS_PREFIX + path[0])
if child is None:
LOGGER.debug('creating element: %s', path[0])
tag_expression = parse_tag_expression(path[0])
child = tag_expression.create_node(
element_maker=TEI_E
)
parent.append(child)
return get_or_create_element_at(child, path[1:])
def tei_xpath(parent: etree.ElementBase, xpath: str) -> List[etree.ElementBase]:
return parent.xpath(xpath, namespaces=TEI_NS_MAP)
def get_tei_xpath_text_content_list(parent: etree.ElementBase, xpath: str) -> List[str]:
return [get_text_content(node) for node in tei_xpath(parent, xpath)]
def get_required_styles(layout_token: LayoutToken) -> List[str]:
required_styles = []
if layout_token.font.is_bold:
required_styles.append('bold')
if layout_token.font.is_italics:
required_styles.append('italic')
if layout_token.font.is_subscript:
required_styles.append('subscript')
if layout_token.font.is_superscript:
required_styles.append('superscript')
return required_styles
def get_element_for_styles(styles: List[str], text: str) -> etree.ElementBase:
if not styles:
return text
child: Optional[etree.ElementBase] = None
for style in reversed(styles):
LOGGER.debug('style: %r, child: %r, text: %r', style, child, text)
if child is not None:
child = TEI_E('hi', {'rend': style}, child)
else:
child = TEI_E('hi', {'rend': style}, text)
return child
def format_coordinates(coordinates: LayoutPageCoordinates) -> str:
return '%d,%.2f,%.2f,%.2f,%.2f' % (
coordinates.page_number,
coordinates.x,
coordinates.y,
coordinates.width,
coordinates.height
)
def format_coordinates_list(coordinates_list: List[LayoutPageCoordinates]) -> str:
return ';'.join((
format_coordinates(coordinates)
for coordinates in coordinates_list
))
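# Illustrative example (hypothetical values): coordinates are serialised as
# "page,x,y,width,height" fragments joined by ";", the format used below for the
# "coords" attribute:
#
#     format_coordinates_list([
#         LayoutPageCoordinates(x=10, y=20.5, width=100, height=30, page_number=1),
#         LayoutPageCoordinates(x=15, y=25, width=50, height=10, page_number=2)
#     ])
#     # -> '1,10.00,20.50,100.00,30.00;2,15.00,25.00,50.00,10.00'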
def get_default_attributes_for_layout_block(
layout_block: LayoutBlock,
enable_coordinates: bool = True
) -> Dict[str, str]:
if enable_coordinates:
formatted_coords = format_coordinates_list(
layout_block.get_merged_coordinates_list()
)
if formatted_coords:
return {'coords': formatted_coords}
return {}
def iter_layout_block_tei_children(
layout_block: LayoutBlock,
enable_coordinates: bool = True
) -> Iterable[Union[str, etree.ElementBase]]:
pending_styles: List[str] = []
pending_text = ''
pending_whitespace = ''
if enable_coordinates:
yield get_default_attributes_for_layout_block(
layout_block=layout_block,
enable_coordinates=enable_coordinates
)
for line in layout_block.lines:
for token in line.tokens:
required_styles = get_required_styles(token)
LOGGER.debug('token: %r, required_styles=%r', token, required_styles)
if required_styles != pending_styles:
if pending_text:
yield get_element_for_styles(
pending_styles,
pending_text
)
pending_text = ''
if pending_whitespace:
yield pending_whitespace
pending_whitespace = ''
pending_styles = required_styles
if pending_whitespace:
pending_text += pending_whitespace
pending_whitespace = ''
pending_text += token.text
pending_whitespace = token.whitespace
if pending_text:
yield get_element_for_styles(
pending_styles,
pending_text
)
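# Illustrative note: consecutive tokens sharing the same style are emitted as a
# single text run wrapped in nested <hi rend="..."> elements; for example a bold
# "Title" token followed by a plain "text" token would yield, roughly:
#     {'coords': ...}, <hi rend="bold">Title</hi>, ' ', 'text'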
def extend_element(
element: etree.ElementBase,
children_or_attributes: Iterable[etree.ElementBase]
):
for item in children_or_attributes:
if isinstance(item, dict):
element.attrib.update(item)
continue
if isinstance(item, str):
try:
previous_element = element[-1]
except IndexError:
previous_element = None
if previous_element is not None:
previous_element.tail = (
(previous_element.tail or '')
+ item
)
else:
element.text = (
(element.text or '')
+ item
)
continue
element.append(item)
def create_tei_note_element(
note_type: str,
layout_block: LayoutBlock
) -> etree.ElementBase:
return TEI_E(
'note',
{'type': note_type},
*iter_layout_block_tei_children(layout_block)
)
def get_default_attributes_for_semantic_content(
semantic_content: SemanticContentWrapper,
**kwargs
) -> Dict[str, str]:
attrib = get_default_attributes_for_layout_block(
semantic_content.merged_block,
**kwargs
)
if isinstance(semantic_content, SemanticMixedContentWrapper):
if semantic_content.content_id:
attrib = {
**attrib,
XML_ID: semantic_content.content_id
}
return attrib
class TeiElementWrapper:
def __init__(self, element: etree.ElementBase):
self.element = element
def xpath_nodes(self, xpath: str) -> List[etree.ElementBase]:
return tei_xpath(self.element, xpath)
def xpath(self, xpath: str) -> List['TeiElementWrapper']:
return [TeiElementWrapper(node) for node in self.xpath_nodes(xpath)]
def get_xpath_text_content_list(self, xpath: str) -> List[str]:
return get_tei_xpath_text_content_list(self.element, xpath)
def get_notes_text_list(self, note_type: str) -> List[str]:
return get_tei_xpath_text_content_list(
self.element,
'//tei:note[@type="%s"]' % note_type,
)
def add_note(self, note_type: str, layout_block: LayoutBlock):
self.element.append(create_tei_note_element(note_type, layout_block))
class TeiElementBuilder:
def __init__(
self,
element: etree.ElementBase,
):
self.element = element
self.builder_by_path_fragment: Dict[str, 'TeiElementBuilder'] = {}
def get_or_create(
self,
path: Optional[List[str]]
) -> 'TeiElementBuilder':
if not path:
return self
key = path[0]
builder = self.builder_by_path_fragment.get(key)
if not builder:
builder = TeiElementBuilder(TEI_E(key))
self.element.append(builder.element)
self.builder_by_path_fragment[key] = builder
return builder.get_or_create(path[1:])
def add_dict(self, attrib: dict):
_attrib = self.element.attrib
for k, v in attrib.items():
_attrib[k] = v
def append(
self,
child: Union[dict, etree.ElementBase]
):
if isinstance(child, dict):
self.add_dict(child)
return
self.element.append(child)
def extend(self, children: List[Union[dict, etree.ElementBase]]):
for child in children:
self.append(child) | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/document/tei/common.py | 0.709523 | 0.154185 | common.py | pypi |
import logging
from typing import (
Dict,
List,
Mapping,
Optional,
Sequence,
Set,
Union
)
from lxml import etree
from sciencebeam_parser.document.semantic_document import (
SemanticAddressField,
SemanticAffiliationAddress,
SemanticAuthor,
SemanticMarker
)
from sciencebeam_parser.document.tei.common import (
TEI_E,
XML_ID
)
from sciencebeam_parser.document.tei.factories import (
TeiElementFactoryContext
)
LOGGER = logging.getLogger(__name__)
def _get_tei_raw_affiliation_element_for_semantic_affiliation_address(
semantic_affiliation_address: SemanticAffiliationAddress,
context: TeiElementFactoryContext
) -> etree.ElementBase:
children: List[Union[str, dict, etree.ElementBase]] = []
children.append({'type': 'raw_affiliation'})
pending_whitespace: str = ''
for semantic_content in semantic_affiliation_address:
merged_block = semantic_content.merged_block
if pending_whitespace:
children.append(pending_whitespace)
if isinstance(semantic_content, SemanticMarker):
children.append(TEI_E(
'label',
*context.iter_layout_block_tei_children(merged_block, enable_coordinates=False)
))
pending_whitespace = merged_block.whitespace
continue
children.extend(
context.iter_layout_block_tei_children(merged_block, enable_coordinates=False)
)
pending_whitespace = merged_block.whitespace
return TEI_E('note', *children)
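# Illustrative note: the helper above produces roughly
#     <note type="raw_affiliation"><label>1</label> Example University ...</note>
# i.e. the affiliation marker is wrapped in a <label> element and the remaining
# affiliation content is kept as plain text (coordinates disabled).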
def get_tei_affiliation_for_semantic_affiliation_address_element(
semantic_affiliation_address: SemanticAffiliationAddress,
context: TeiElementFactoryContext
) -> etree.ElementBase:
LOGGER.debug('semantic_affiliation_address: %s', semantic_affiliation_address)
raw_affiliation = _get_tei_raw_affiliation_element_for_semantic_affiliation_address(
semantic_affiliation_address,
context=context
)
attributes = context.get_default_attributes_for_semantic_content(
semantic_affiliation_address
)
if semantic_affiliation_address.content_id:
attributes = {**attributes, 'key': semantic_affiliation_address.content_id}
if XML_ID in attributes:
del attributes[XML_ID]
children = [
attributes,
raw_affiliation
]
address_semantic_content_list = []
for semantic_content in semantic_affiliation_address:
if isinstance(semantic_content, SemanticAddressField):
address_semantic_content_list.append(semantic_content)
continue
children.extend(context.get_tei_child_elements_for_semantic_content(
semantic_content
))
LOGGER.debug('address_semantic_content_list: %r', address_semantic_content_list)
if address_semantic_content_list:
children.append(TEI_E('address', *[
child
for semantic_content in address_semantic_content_list
for child in context.get_tei_child_elements_for_semantic_content(
semantic_content
)
]))
return TEI_E('affiliation', *children)
def get_tei_author_for_semantic_author_element(
semantic_author: SemanticAuthor,
context: TeiElementFactoryContext,
affiliations_by_marker: Optional[Mapping[str, Sequence[SemanticAffiliationAddress]]] = None
) -> etree.ElementBase:
if affiliations_by_marker is None:
affiliations_by_marker = {}
LOGGER.debug('semantic_author: %s', semantic_author)
pers_name_children = []
for semantic_content in semantic_author:
pers_name_children.extend(context.get_tei_child_elements_for_semantic_content(
semantic_content
))
children = [
TEI_E(
'persName',
context.get_default_attributes_for_semantic_content(semantic_author),
*pers_name_children
)
]
affiliations = []
for marker_text in semantic_author.view_by_type(SemanticMarker).get_text_list():
semantic_affiliations = affiliations_by_marker.get(marker_text)
if not semantic_affiliations:
LOGGER.warning('affiliation not found for marker: %r', marker_text)
continue
for semantic_affiliation in semantic_affiliations:
affiliations.append(get_tei_affiliation_for_semantic_affiliation_address_element(
semantic_affiliation,
context=context
))
children.extend(affiliations)
return TEI_E('author', *children)
def get_dummy_tei_author_for_semantic_affiliations_element(
semantic_affiliations: Sequence[SemanticAffiliationAddress],
context: TeiElementFactoryContext
) -> etree.ElementBase:
children = [
TEI_E('note', {'type': 'dummy_author'}, 'Dummy author for orphan affiliations')
]
children.extend([
get_tei_affiliation_for_semantic_affiliation_address_element(
semantic_affiliation,
context=context
)
for semantic_affiliation in semantic_affiliations
])
return TEI_E('author', *children)
def get_authors_affiliation_markers(authors: List[SemanticAuthor]) -> Set[str]:
return {
marker
for author in authors
for marker in author.view_by_type(SemanticMarker).get_text_list()
}
def get_orphan_affiliations(
affiliations_by_marker: Dict[str, List[SemanticAffiliationAddress]],
authors: List[SemanticAuthor]
) -> List[SemanticAffiliationAddress]:
used_affiliation_markers = get_authors_affiliation_markers(authors)
return [
affiliation
for marker, affiliations in affiliations_by_marker.items()
if marker not in used_affiliation_markers
for affiliation in affiliations
] | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/document/tei/author.py | 0.640523 | 0.189484 | author.py | pypi |
import logging
from typing import (
Iterable,
List,
)
from lxml import etree
from sciencebeam_parser.document.semantic_document import (
SemanticContentWrapper,
SemanticFigure,
SemanticHeading,
SemanticLabel,
SemanticParagraph,
SemanticRawEquation,
SemanticSection,
SemanticSectionTypes,
SemanticTable
)
from sciencebeam_parser.document.tei.common import (
TEI_E,
TeiElementBuilder
)
from sciencebeam_parser.document.tei.factory import (
SingleElementTeiElementFactory,
T_ElementChildrenList,
TeiElementFactory,
TeiElementFactoryContext
)
LOGGER = logging.getLogger(__name__)
class HeadingTeiElementFactory(SingleElementTeiElementFactory):
def get_tei_element_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> etree.ElementBase:
LOGGER.debug('semantic_content: %s', semantic_content)
assert isinstance(semantic_content, SemanticHeading)
semantic_heading = semantic_content
children: T_ElementChildrenList = [
context.get_default_attributes_for_semantic_content(semantic_heading)
]
pending_whitespace = ''
for child_semantic_content in semantic_heading:
if isinstance(child_semantic_content, SemanticLabel):
children.append({'n': child_semantic_content.get_text()})
continue
layout_block = child_semantic_content.merged_block
if pending_whitespace:
children.append(pending_whitespace)
children.extend(context.iter_layout_block_tei_children(
layout_block=layout_block,
enable_coordinates=False
))
pending_whitespace = layout_block.whitespace
return TEI_E('head', *children)
def iter_flat_paragraph_formula(
semantic_paragraph: SemanticParagraph
) -> Iterable[SemanticContentWrapper]:
pending_semantic_content_list: List[SemanticContentWrapper] = []
for semantic_content in semantic_paragraph:
if isinstance(semantic_content, SemanticRawEquation):
if pending_semantic_content_list:
yield SemanticParagraph(pending_semantic_content_list)
pending_semantic_content_list = []
yield semantic_content
continue
pending_semantic_content_list.append(semantic_content)
if pending_semantic_content_list:
yield SemanticParagraph(pending_semantic_content_list)
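# Illustrative note: a paragraph containing a SemanticRawEquation is split into
# alternating paragraph and equation chunks, so that an equation can be rendered
# as its own element rather than inline, e.g.
#     [text1, raw_equation, text2]
#     -> SemanticParagraph([text1]), raw_equation, SemanticParagraph([text2])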
class ParagraphTeiElementFactory(TeiElementFactory):
def get_tei_children_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> List[etree.ElementBase]:
LOGGER.debug('semantic_content: %s', semantic_content)
assert isinstance(semantic_content, SemanticParagraph)
semantic_paragraph = semantic_content
result: List[etree.ElementBase] = []
for flat_parent_semantic_content in iter_flat_paragraph_formula(semantic_paragraph):
if not isinstance(flat_parent_semantic_content, SemanticParagraph):
result.extend(context.get_tei_child_elements_for_semantic_content(
flat_parent_semantic_content
))
continue
children: T_ElementChildrenList = [
context.get_default_attributes_for_semantic_content(flat_parent_semantic_content)
]
pending_whitespace = ''
for child_semantic_content in flat_parent_semantic_content:
pending_whitespace = context.append_tei_children_list_and_get_whitespace(
children,
child_semantic_content,
pending_whitespace=pending_whitespace
)
result.append(TEI_E('p', *children))
return result
class SectionTeiElementFactory(TeiElementFactory):
def get_tei_children_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> List[etree.ElementBase]:
LOGGER.debug('semantic_content: %s', semantic_content)
assert isinstance(semantic_content, SemanticSection)
semantic_section = semantic_content
tei_section = TeiElementBuilder(TEI_E('div'))
for child_semantic_content in semantic_section:
if isinstance(child_semantic_content, (SemanticFigure, SemanticTable,)):
# rendered at parent level
continue
tei_section.extend(context.get_tei_child_elements_for_semantic_content(
child_semantic_content
))
if semantic_content.section_type == SemanticSectionTypes.ACKNOWLEDGEMENT:
tei_section.element.attrib['type'] = 'acknowledgement'
if not list(tei_section.element):
return []
return [tei_section.element] | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/document/tei/section.py | 0.601711 | 0.165627 | section.py | pypi |
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Callable, Sequence
import PIL.Image
from sciencebeam_parser.utils.bounding_box import BoundingBox
from sciencebeam_parser.utils.lazy import LazyLoaded, Preloadable
class ComputerVisionModelInstance(ABC):
@abstractmethod
def get_bounding_box(self) -> BoundingBox:
pass
@abstractmethod
def get_type_name(self) -> str:
pass
@dataclass
class SimpleComputerVisionModelInstance(ComputerVisionModelInstance):
bounding_box: BoundingBox
type_name: str
def get_bounding_box(self) -> BoundingBox:
return self.bounding_box
def get_type_name(self) -> str:
return self.type_name
class ComputerVisionModelResult(ABC):
@abstractmethod
def get_instances_by_type_names(
self,
type_names: Sequence[str]
) -> Sequence[ComputerVisionModelInstance]:
pass
def get_instances_by_type_name(
self,
type_name: str
) -> Sequence[ComputerVisionModelInstance]:
return self.get_instances_by_type_names([type_name])
class ComputerVisionModel(ABC, Preloadable):
@abstractmethod
def predict_single(self, image: PIL.Image.Image) -> ComputerVisionModelResult:
pass
T_ComputerVisionModelFactory = Callable[[], ComputerVisionModel]
class LazyComputerVisionModel(ComputerVisionModel):
def __init__(self, factory: T_ComputerVisionModelFactory) -> None:
super().__init__()
self._lazy_model = LazyLoaded[ComputerVisionModel](factory)
def __repr__(self) -> str:
return '%s(factory=%r, loaded=%r)' % (
type(self).__name__, self._lazy_model.factory, self._lazy_model.is_loaded
)
@property
def cv_model(self) -> ComputerVisionModel:
return self._lazy_model.get()
def preload(self):
self.cv_model.preload()
def predict_single(self, image: PIL.Image.Image) -> ComputerVisionModelResult:
return self.cv_model.predict_single(image) | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/cv_models/cv_model.py | 0.917185 | 0.199152 | cv_model.py | pypi |
import logging
from typing import List, Sequence, Tuple
import PIL.Image
from layoutparser.elements.layout import Layout
from layoutparser.models.auto_layoutmodel import AutoLayoutModel
from layoutparser.models.base_layoutmodel import BaseLayoutModel
from sciencebeam_parser.utils.bounding_box import BoundingBox
from sciencebeam_parser.cv_models.cv_model import (
ComputerVisionModel,
ComputerVisionModelInstance,
ComputerVisionModelResult,
SimpleComputerVisionModelInstance
)
from sciencebeam_parser.utils.lazy import LazyLoaded
LOGGER = logging.getLogger(__name__)
DEFAULT_MODEL_PATH = 'lp://efficientdet/PubLayNet'
DEFAULT_SCORE_THRESHOLD = 0.0
def load_model(model_path: str) -> BaseLayoutModel:
LOGGER.info('loading model: %r', model_path)
return AutoLayoutModel(model_path)
def get_bounding_box_for_layout_parser_coordinates(
coordinates: Tuple[float, float, float, float]
) -> BoundingBox:
x1, y1, x2, y2 = coordinates
return BoundingBox(x=x1, y=y1, width=x2 - x1, height=y2 - y1)
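# Illustrative example (hypothetical values): layoutparser blocks expose
# (x1, y1, x2, y2) coordinates which are converted to x/y/width/height here.
#
#     get_bounding_box_for_layout_parser_coordinates((10.0, 20.0, 110.0, 50.0))
#     # -> BoundingBox(x=10.0, y=20.0, width=100.0, height=30.0)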
def is_bounding_box_overlapping_with_any_bounding_boxes(
bounding_box: BoundingBox,
other_bounding_boxes: Sequence[BoundingBox],
max_overlap_ratio: float = 0.1
) -> bool:
bounding_box_area = bounding_box.area
for other_bounding_box in other_bounding_boxes:
intersection_bounding_box = bounding_box.intersection(
other_bounding_box
)
if not intersection_bounding_box:
continue
if intersection_bounding_box.area / bounding_box_area >= max_overlap_ratio:
return True
return False
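# Illustrative example (hypothetical values): overlap is measured as the
# intersection area relative to the candidate bounding box.
#
#     candidate = BoundingBox(x=0, y=0, width=10, height=10)    # area 100
#     previous = [BoundingBox(x=8, y=0, width=10, height=10)]   # intersection area 20
#     is_bounding_box_overlapping_with_any_bounding_boxes(candidate, previous)
#     # -> True (20 / 100 = 0.2 >= default max_overlap_ratio of 0.1)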
class LayoutParserComputerVisionModelResult(ComputerVisionModelResult):
def __init__(
self,
layout: Layout,
score_threshold: float,
avoid_overlapping: bool,
max_overlap_ratio: float = 0.1
):
super().__init__()
self.layout = layout
self.score_threshold = score_threshold
self.avoid_overlapping = avoid_overlapping
self.max_overlap_ratio = max_overlap_ratio
LOGGER.debug('layout: %r', layout)
def get_instances_by_type_names(
self,
type_names: Sequence[str]
) -> Sequence[ComputerVisionModelInstance]:
instances = [
SimpleComputerVisionModelInstance(
bounding_box=get_bounding_box_for_layout_parser_coordinates(block.coordinates),
type_name=block.type
)
for block in self.layout
if (
block.type in type_names
and block.score >= self.score_threshold
)
]
instances = [
instance
for instance in instances
if instance.get_bounding_box()
]
if self.avoid_overlapping:
_instances = instances
instances = []
prev_bounding_boxes: List[BoundingBox] = []
for instance in _instances:
bounding_box = instance.get_bounding_box()
if is_bounding_box_overlapping_with_any_bounding_boxes(
bounding_box,
prev_bounding_boxes,
max_overlap_ratio=self.max_overlap_ratio
):
LOGGER.debug(
'bounding box overlapping with prev: %r ~ %r',
bounding_box, prev_bounding_boxes
)
continue
instances.append(instance)
prev_bounding_boxes.append(bounding_box)
return instances
class LayoutParserComputerVisionModel(ComputerVisionModel):
def __init__(
self,
config: dict,
model_path: str = DEFAULT_MODEL_PATH,
):
super().__init__()
self.score_threshold = float(config.get('score_threshold', DEFAULT_SCORE_THRESHOLD))
self.avoid_overlapping = bool(config.get('avoid_overlapping', True))
self.model_path = model_path
self._lazy_model = LazyLoaded[BaseLayoutModel](self._load_model)
def _load_model(self) -> BaseLayoutModel:
model = load_model(self.model_path)
LOGGER.info('loaded layout model: %r', self.model_path)
return model
@property
def layout_model(self) -> BaseLayoutModel:
return self._lazy_model.get()
def preload(self):
self._lazy_model.get()
def predict_single(self, image: PIL.Image.Image) -> ComputerVisionModelResult:
return LayoutParserComputerVisionModelResult(
self.layout_model.detect(image),
score_threshold=self.score_threshold,
avoid_overlapping=self.avoid_overlapping
) | /sciencebeam_parser-0.1.8.tar.gz/sciencebeam_parser-0.1.8/sciencebeam_parser/cv_models/layout_parser_cv_model.py | 0.854354 | 0.19787 | layout_parser_cv_model.py | pypi |
from abc import ABC, abstractmethod
from dataclasses import dataclass
import logging
from typing import Iterable, List, Mapping, NamedTuple, Optional, Sequence, Tuple, TypeVar, Union
from lxml import etree
from lxml.builder import ElementMaker
from sciencebeam_parser.utils.xml_writer import XmlTreeWriter
from sciencebeam_parser.utils.labels import get_split_prefix_label
from sciencebeam_parser.utils.tokenizer import get_tokenized_tokens
from sciencebeam_parser.document.tei.common import TEI_E, TEI_NS_PREFIX, tei_xpath
from sciencebeam_parser.document.layout_document import (
LayoutLine,
LayoutLineMeta,
LayoutToken
)
from sciencebeam_parser.models.data import (
NEW_DOCUMENT_MARKER,
LabeledLayoutModelData,
LabeledLayoutToken,
LayoutModelData,
NewDocumentMarker
)
LOGGER = logging.getLogger(__name__)
NO_NS_TEI_E = ElementMaker()
OTHER_LABELS = {'<other>', 'O'}
class ExtractInstruction:
pass
class NewLineExtractInstruction(ExtractInstruction):
pass
@dataclass
class ResetExtractInstruction(ExtractInstruction):
reset_element_path: List[str]
def get_model_data_label(model_data: LayoutModelData) -> Optional[str]:
if isinstance(model_data, LabeledLayoutModelData):
return model_data.label
return None
def is_same_layout_line(
layout_line_1: Optional[LayoutLine],
layout_line_2: Optional[LayoutLine]
) -> bool:
assert layout_line_1 is not None
assert layout_line_2 is not None
return id(layout_line_1) == id(layout_line_2)
def is_same_model_data_layout_line(
model_data_1: LayoutModelData,
model_data_2: LayoutModelData
) -> bool:
return is_same_layout_line(model_data_1.layout_line, model_data_2.layout_line)
def iter_group_model_data_by_line(
model_data_iterable: Iterable[LayoutModelData]
) -> Iterable[Sequence[LayoutModelData]]:
line_model_data_list: List[LayoutModelData] = []
for model_data in model_data_iterable:
if not line_model_data_list:
line_model_data_list.append(model_data)
continue
previous_model_data = line_model_data_list[-1]
if is_same_model_data_layout_line(
model_data,
previous_model_data
):
LOGGER.debug('same line: %r - %r', model_data, previous_model_data)
line_model_data_list.append(model_data)
continue
yield line_model_data_list
line_model_data_list = [model_data]
if line_model_data_list:
yield line_model_data_list
def iter_model_data_with_new_line_instruction(
model_data_iterable: Iterable[LayoutModelData]
) -> Iterable[Union[LayoutModelData, ExtractInstruction]]:
line_model_data_list: List[LayoutModelData] = []
for model_data in model_data_iterable:
if not line_model_data_list:
line_model_data_list.append(model_data)
continue
previous_model_data = line_model_data_list[-1]
if is_same_model_data_layout_line(
model_data,
previous_model_data
):
LOGGER.debug('same line: %r - %r', model_data, previous_model_data)
line_model_data_list.append(model_data)
continue
yield from line_model_data_list
yield NewLineExtractInstruction()
line_model_data_list = [model_data]
if line_model_data_list:
yield from line_model_data_list
yield NewLineExtractInstruction()
def get_default_note_type_for_label(label: str) -> str:
return label.strip('<>')
def is_parent_path_of(
parent_path: Sequence[str],
child_path: Sequence[str]
) -> bool:
if len(parent_path) >= len(child_path):
return False
return tuple(child_path[:len(parent_path)]) == tuple(parent_path)
def is_same_or_parent_path_of(
parent_path: Sequence[str],
child_path: Sequence[str]
) -> bool:
return (
tuple(parent_path) == tuple(child_path)
or is_parent_path_of(parent_path, child_path)
)
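# Illustrative examples (hypothetical element paths):
#
#     is_parent_path_of(['text', 'front'], ['text', 'front', 'docTitle'])    # -> True
#     is_parent_path_of(['text', 'front'], ['text', 'front'])                # -> False
#     is_same_or_parent_path_of(['text', 'front'], ['text', 'front'])        # -> True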
class TeiTrainingDataGenerator(ABC):
@abstractmethod
def get_training_tei_xml_for_multiple_model_data_iterables(
self,
model_data_iterables: Iterable[Iterable[LayoutModelData]]
) -> etree.ElementBase:
pass
@abstractmethod
def get_training_tei_xml_for_model_data_iterable(
self,
model_data_iterable: Iterable[LayoutModelData]
) -> etree.ElementBase:
pass
@abstractmethod
def get_default_tei_filename_suffix(self) -> Optional[str]:
pass
def get_default_data_filename_suffix(self) -> Optional[str]:
return None
def get_default_tei_sub_directory(self) -> Optional[str]:
pass
def get_default_data_sub_directory(self) -> Optional[str]:
pass
class AbstractTeiTrainingDataGenerator(TeiTrainingDataGenerator):
def __init__(
self,
root_training_xml_element_path: Sequence[str],
training_xml_element_path_by_label: Mapping[str, Sequence[str]],
root_tag: str = 'tei',
use_tei_namespace: bool = True,
element_maker: Optional[ElementMaker] = None,
reset_training_xml_element_path_by_label: Optional[Mapping[str, Sequence[str]]] = None,
default_tei_filename_suffix: Optional[str] = None,
default_data_filename_suffix: Optional[str] = None,
default_tei_sub_directory: Optional[str] = None,
default_data_sub_directory: Optional[str] = None
):
self.root_training_xml_element_path = root_training_xml_element_path
self.root_parent_training_xml_element_path = root_training_xml_element_path[:-1]
self.training_xml_element_path_by_label = training_xml_element_path_by_label
self.reset_training_xml_element_path_by_label = (
reset_training_xml_element_path_by_label or {}
)
self._training_xml_element_paths = {
tuple(element_path)
for label, element_path in training_xml_element_path_by_label.items()
if (
label not in OTHER_LABELS
and tuple(element_path) != tuple(root_training_xml_element_path)
)
}
self.other_element_path = training_xml_element_path_by_label.get('<other>')
if element_maker is None:
element_maker = TEI_E if use_tei_namespace else NO_NS_TEI_E
self.element_maker = element_maker
self.root_tag = root_tag
self.default_tei_filename_suffix = default_tei_filename_suffix
self.default_data_filename_suffix = default_data_filename_suffix
self.default_tei_sub_directory = default_tei_sub_directory
self.default_data_sub_directory = default_data_sub_directory
def get_default_tei_filename_suffix(self) -> Optional[str]:
return self.default_tei_filename_suffix
def get_default_data_filename_suffix(self) -> Optional[str]:
return self.default_data_filename_suffix
def get_default_tei_sub_directory(self) -> Optional[str]:
return self.default_tei_sub_directory
def get_default_data_sub_directory(self) -> Optional[str]:
return self.default_data_sub_directory
def get_training_xml_path_for_label(
self,
label: Optional[str],
current_path: Sequence[str]
) -> Sequence[str]:
if not label or label in OTHER_LABELS:
if label and self.other_element_path is not None:
return self.other_element_path
if tuple(current_path) in self._training_xml_element_paths:
LOGGER.debug(
'found current path in element paths, returning parent: %r', current_path
)
return current_path[:-1]
LOGGER.debug(
'not found current path in element paths, returning current: %r', current_path
)
return current_path
training_xml_path = self.training_xml_element_path_by_label.get(label or '')
if not training_xml_path:
note_type = get_default_note_type_for_label(label)
LOGGER.info('label not mapped, creating note: %r', label)
training_xml_path = (
list(self.root_training_xml_element_path) + [f'note[@type="{note_type}"]']
)
return training_xml_path
def get_reset_training_xml_path_for_label(
self,
label: Optional[str],
prefix: Optional[str]
) -> Optional[Sequence[str]]:
if prefix != 'B' or not label:
return None
return self.reset_training_xml_element_path_by_label.get(label)
def write_xml_for_model_data_with_instructions_iterable(
self,
xml_writer: XmlTreeWriter,
model_data_or_instruction_iterable: Iterable[Union[LayoutModelData, ExtractInstruction]]
):
default_path = xml_writer.current_path
LOGGER.debug('default_path: %r', default_path)
pending_whitespace = ''
prev_label: str = ''
pending_reset_path: Optional[List[str]] = None
for model_data_or_instruction in model_data_or_instruction_iterable:
if isinstance(model_data_or_instruction, LayoutModelData):
model_data = model_data_or_instruction
layout_token = model_data.layout_token
assert layout_token is not None
prefixed_label = get_model_data_label(model_data)
prefix, label = get_split_prefix_label(prefixed_label or '')
xml_element_path = self.get_training_xml_path_for_label(
label,
current_path=xml_writer.current_path
)
reset_path = self.get_reset_training_xml_path_for_label(
label=label,
prefix=prefix
)
if pending_reset_path is not None:
reset_path = pending_reset_path
pending_reset_path = None
LOGGER.debug(
'label: %r (%r: %r; reset_path=%r)',
label, prefix, xml_element_path, reset_path
)
if reset_path is not None:
xml_writer.require_path(reset_path)
elif (
prev_label not in OTHER_LABELS
and pending_whitespace
and not is_same_or_parent_path_of(xml_writer.current_path, xml_element_path)
):
LOGGER.debug(
'closing element before adding whitespace, %r -> %r',
xml_writer.current_path, xml_element_path
)
xml_writer.require_path(xml_writer.current_path[:-1])
elif prefix == 'B' and label not in OTHER_LABELS:
xml_writer.require_path(xml_element_path[:-1])
xml_writer.require_path_or_below(xml_element_path)
xml_writer.append_text(pending_whitespace)
pending_whitespace = ''
xml_writer.require_path(xml_element_path)
xml_writer.append_text(layout_token.text)
pending_whitespace = layout_token.whitespace
prev_label = label
elif isinstance(model_data_or_instruction, ResetExtractInstruction):
pending_reset_path = model_data_or_instruction.reset_element_path
elif isinstance(model_data_or_instruction, NewLineExtractInstruction):
xml_writer.append(self.element_maker('lb'))
pending_whitespace = '\n'
xml_writer.require_path(default_path)
xml_writer.append_text(pending_whitespace)
def iter_model_data_or_instruction_for_model_data_iterable(
self,
model_data_iterable: Iterable[LayoutModelData]
) -> Iterable[Union[LayoutModelData, ExtractInstruction]]:
return iter_model_data_with_new_line_instruction(
model_data_iterable
)
def write_xml_for_model_data_iterable(
self,
xml_writer: XmlTreeWriter,
model_data_iterable: Iterable[LayoutModelData]
):
self.write_xml_for_model_data_with_instructions_iterable(
xml_writer,
self.iter_model_data_or_instruction_for_model_data_iterable(
model_data_iterable
)
)
def _get_xml_writer(self) -> XmlTreeWriter:
return XmlTreeWriter(
self.element_maker(self.root_tag),
element_maker=self.element_maker
)
def get_post_processed_xml_root(self, xml_root: etree.ElementBase):
return xml_root
def get_training_tei_xml_for_multiple_model_data_iterables(
self,
model_data_iterables: Iterable[Iterable[LayoutModelData]]
) -> etree.ElementBase:
xml_writer = self._get_xml_writer()
xml_writer.require_path(self.root_parent_training_xml_element_path)
for model_data_iterable in model_data_iterables:
xml_writer.require_path(self.root_parent_training_xml_element_path)
xml_writer.require_path(self.root_training_xml_element_path)
self.write_xml_for_model_data_iterable(
xml_writer,
model_data_iterable=model_data_iterable
)
return self.get_post_processed_xml_root(xml_writer.root)
def get_training_tei_xml_for_model_data_iterable(
self,
model_data_iterable: Iterable[LayoutModelData]
) -> etree.ElementBase:
return self.get_training_tei_xml_for_multiple_model_data_iterables(
[model_data_iterable]
)
TEI_LB = 'lb'
LINE_BREAK_TAGS = {
TEI_LB,
TEI_NS_PREFIX + TEI_LB
}
def _get_tag_expression_for_element(element: etree.ElementBase) -> str:
if not element.attrib:
return element.tag
if len(element.attrib) > 1:
raise ValueError('only supporting up to one attribute')
key, value = list(element.attrib.items())[0]
return '{tag}[@{key}="{value}"]'.format(tag=element.tag, key=key, value=value)
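# Illustrative example: an element with a single attribute is rendered as an
# XPath-like tag expression (shown here with the namespace-less element maker):
#
#     _get_tag_expression_for_element(NO_NS_TEI_E('note', {'type': 'other'}))
#     # -> 'note[@type="other"]'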
class TeiTrainingElementPath(NamedTuple):
element_list: Sequence[etree.ElementBase] = tuple([])
def get_path(self) -> Sequence[str]:
return [
_get_tag_expression_for_element(element)
for element in self.element_list
]
def append(self, element: etree.ElementBase) -> 'TeiTrainingElementPath':
return TeiTrainingElementPath(
list(self.element_list) + [element]
)
EMPTY_TEI_TRAINING_ELEMENT_PATH = TeiTrainingElementPath()
class TeiTrainingText(NamedTuple):
text: str
path: TeiTrainingElementPath
is_start: bool
class TeiTrainingLine(NamedTuple):
text_list: Sequence[TeiTrainingText]
def is_line_break_element(element: etree.ElementBase) -> bool:
return element.tag in LINE_BREAK_TAGS
def _iter_flat_tei_training_text_from_element(
parent_element: etree.ElementBase,
current_path: TeiTrainingElementPath = EMPTY_TEI_TRAINING_ELEMENT_PATH
) -> Iterable[Union[TeiTrainingText, ExtractInstruction]]:
LOGGER.debug('current_path: %s', current_path)
is_start = True
if parent_element.text:
yield TeiTrainingText(
text=parent_element.text,
path=current_path,
is_start=is_start
)
is_start = False
for child_element in parent_element:
if is_line_break_element(child_element):
yield NewLineExtractInstruction()
else:
child_path = current_path.append(child_element)
yield from _iter_flat_tei_training_text_from_element(
child_element,
child_path
)
if child_element.tail:
yield TeiTrainingText(
text=child_element.tail,
path=current_path,
is_start=is_start
)
is_start = False
def _iter_tei_training_lines_from_element(
parent_element: etree.ElementBase,
current_path: TeiTrainingElementPath = EMPTY_TEI_TRAINING_ELEMENT_PATH
) -> Iterable[TeiTrainingLine]:
line_text_list = []
for item in _iter_flat_tei_training_text_from_element(
parent_element,
current_path
):
if isinstance(item, TeiTrainingText):
line_text_list.append(item)
elif isinstance(item, NewLineExtractInstruction):
yield TeiTrainingLine(line_text_list)
line_text_list = []
else:
raise RuntimeError('unrecognised item: %r' % item)
if line_text_list:
yield TeiTrainingLine(line_text_list)
T = TypeVar('T')
def iter_group_doc_items_with_new_doc_marker(
flat_item_iterable: Iterable[Union[T, NewDocumentMarker]]
) -> Iterable[List[T]]:
doc_items: List[T] = []
for item in flat_item_iterable:
if isinstance(item, NewDocumentMarker):
yield doc_items
doc_items = []
continue
doc_items.append(item)
def iter_tag_result_for_flat_tag_result(
flat_tag_result_iterable: Iterable[Union[Tuple[str, str], NewDocumentMarker]]
) -> Iterable[List[Tuple[str, str]]]:
doc_tag_result: List[Tuple[str, str]] = []
for token_tag_result in flat_tag_result_iterable:
if isinstance(token_tag_result, NewDocumentMarker):
yield doc_tag_result
doc_tag_result = []
continue
doc_tag_result.append(token_tag_result)
def get_tag_result_for_flat_tag_result(
flat_tag_result_iterable: Iterable[Union[Tuple[str, str], NewDocumentMarker]]
) -> List[List[Tuple[str, str]]]:
return list(iter_tag_result_for_flat_tag_result(flat_tag_result_iterable))
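# Illustrative example (hypothetical labels): a flat stream of (token, label)
# tuples is split into one list per document at each NEW_DOCUMENT_MARKER.
#
#     get_tag_result_for_flat_tag_result([
#         ('Title', 'B-<title>'), ('text', 'I-<title>'), NEW_DOCUMENT_MARKER,
#         ('Abstract', 'B-<abstract>'), NEW_DOCUMENT_MARKER
#     ])
#     # -> [[('Title', 'B-<title>'), ('text', 'I-<title>')],
#     #     [('Abstract', 'B-<abstract>')]]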
class TrainingTeiParser(ABC):
@abstractmethod
def parse_training_tei_to_tag_result(
self,
tei_root: etree.ElementBase
) -> List[List[Tuple[str, str]]]:
pass
@abstractmethod
def parse_training_tei_to_labeled_layout_tokens_list(
self,
tei_root: etree.ElementBase
) -> Sequence[Sequence[LabeledLayoutToken]]:
pass
def get_element_path_with_prefix(
element_path: Sequence[str],
prefix: str
) -> Sequence[str]:
return [
prefix + item
for item in element_path
]
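# Illustrative example:
#
#     get_element_path_with_prefix(['text', 'front', 'docTitle'], 'tei:')
#     # -> ['tei:text', 'tei:front', 'tei:docTitle']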
class AbstractTrainingTeiParser(TrainingTeiParser):
def __init__(
self,
root_training_xml_element_path: Sequence[str],
training_xml_element_path_by_label: Mapping[str, Sequence[str]],
use_tei_namespace: bool,
line_as_token: bool = False,
) -> None:
tag_namespace_prefix = TEI_NS_PREFIX if use_tei_namespace else ''
if use_tei_namespace:
root_training_xml_element_path = get_element_path_with_prefix(
root_training_xml_element_path,
'tei:'
)
self.label_by_relative_element_path_map = {
tuple(
get_element_path_with_prefix(
element_path[len(root_training_xml_element_path):],
tag_namespace_prefix
)
): label
for label, element_path in training_xml_element_path_by_label.items()
}
for element_path in list(self.label_by_relative_element_path_map.keys()):
if len(element_path) < 2:
continue
parent_element_path = element_path[:-1]
if parent_element_path not in self.label_by_relative_element_path_map:
self.label_by_relative_element_path_map[parent_element_path] = 'O'
self.root_training_xml_xpath = './' + '/'.join(root_training_xml_element_path)
self.line_as_token = line_as_token
def _get_label_for_element_path(
self,
tei_training_element_path: TeiTrainingElementPath,
text: str
) -> str:
element_path = tei_training_element_path.get_path()
label = self.label_by_relative_element_path_map.get(tuple(element_path))
if not label:
raise RuntimeError(
'label not found for %r (available: %r; for text: %r)' % (
element_path,
self.label_by_relative_element_path_map.keys(),
text
)
)
return label
def iter_parse_training_tei_to_flat_labeled_layout_tokens(
self,
tei_root: etree.ElementBase
) -> Iterable[Union[LabeledLayoutToken, NewDocumentMarker]]:
for text_node in tei_xpath(tei_root, self.root_training_xml_xpath):
tei_training_lines = list(
_iter_tei_training_lines_from_element(
text_node, EMPTY_TEI_TRAINING_ELEMENT_PATH
)
)
LOGGER.debug('tei_training_lines: %r', tei_training_lines)
prefix = ''
prev_label = ''
for line_index, line in enumerate(tei_training_lines):
line_meta = LayoutLineMeta(line_id=1 + line_index)
for text in line.text_list:
if text.text.isspace():
continue
token_count = 0
if text.path.element_list:
label = self._get_label_for_element_path(text.path, text=text.text)
if prev_label != label:
prefix = 'B-' if text.is_start else 'I-'
else:
label = 'O'
prefix = ''
if label in OTHER_LABELS:
prefix = ''
prev_label = label
for token_text in get_tokenized_tokens(text.text):
yield LabeledLayoutToken(
label=prefix + label,
layout_token=LayoutToken(
text=token_text,
line_meta=line_meta
)
)
token_count += 1
if prefix:
prefix = 'I-'
if self.line_as_token:
break
if token_count and self.line_as_token:
# we are only outputting the first token of each line
break
yield NEW_DOCUMENT_MARKER
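
    # Illustrative sketch (hypothetical labels): tokens under a mapped element
    # path receive the usual B-/I- prefixes, while text outside any mapped path
    # is labelled 'O' without a prefix. For an author line tagged as
    # forename / forename / surname the yielded labels would be, e.g.:
    #     John  -> B-<forename>
    #     M     -> I-<forename>
    #     Smith -> B-<surname>
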
def iter_parse_training_tei_to_flat_tag_result(
self,
tei_root: etree.ElementBase
) -> Iterable[Union[Tuple[str, str], NewDocumentMarker]]:
for item in self.iter_parse_training_tei_to_flat_labeled_layout_tokens(
tei_root
):
if isinstance(item, NewDocumentMarker):
yield item
continue
assert isinstance(item, LabeledLayoutToken)
yield item.layout_token.text, item.label
def parse_training_tei_to_tag_result(
self,
tei_root: etree.ElementBase
) -> List[List[Tuple[str, str]]]:
return list(iter_group_doc_items_with_new_doc_marker(
self.iter_parse_training_tei_to_flat_tag_result(
tei_root
)
))
def parse_training_tei_to_labeled_layout_tokens_list(
self,
tei_root: etree.ElementBase
) -> Sequence[Sequence[LabeledLayoutToken]]:
return list(iter_group_doc_items_with_new_doc_marker(
self.iter_parse_training_tei_to_flat_labeled_layout_tokens(
tei_root
)
        ))


# source file: sciencebeam_parser/models/training_data.py

import os
import logging
import threading
from typing import Iterable, Optional, List, Tuple
import numpy as np
from sciencebeam_trainer_delft.sequence_labelling.engines.wapiti import WapitiWrapper
from sciencebeam_trainer_delft.utils.io import copy_file
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.sequence_labelling.engines.wapiti_adapters import (
    WapitiModelAdapter
)
from sciencebeam_parser.app.context import AppContext
from sciencebeam_parser.models.model_impl import ModelImpl
from sciencebeam_parser.utils.download import download_if_url_from_alternatives
from sciencebeam_parser.utils.lazy import LazyLoaded
LOGGER = logging.getLogger(__name__)
class WapitiServiceModelAdapter(WapitiModelAdapter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._lock = threading.Lock()
self._wapiti_timeout = 20.0
self._wapiti_timeout_counter = 0
self._wapiti_trial_count = 10
@staticmethod
def load_from(
            model_path: str,
            download_manager: DownloadManager,
            wapiti_binary_path: Optional[str] = None) -> 'WapitiModelAdapter':
# overriding method to return WapitiServiceModelAdapter
model_file_path = os.path.join(model_path, 'model.wapiti.gz')
model_file_paths = [model_file_path, os.path.splitext(model_file_path)[0]]
LOGGER.debug('checking for existing local model files: %r', model_file_paths)
local_model_file_path = download_if_url_from_alternatives(
download_manager=download_manager,
alternative_file_url_or_path_list=model_file_paths
)
LOGGER.debug('local_model_file_path: %s', local_model_file_path)
if local_model_file_path.endswith('.gz'):
local_uncompressed_file_path = os.path.splitext(local_model_file_path)[0]
copy_file(local_model_file_path, local_uncompressed_file_path, overwrite=False)
local_model_file_path = local_uncompressed_file_path
return WapitiServiceModelAdapter(
WapitiWrapper(
wapiti_binary_path=wapiti_binary_path
),
model_file_path=local_model_file_path,
model_path=model_path
)
def stop(self):
wapiti_model = self._wapiti_model
if wapiti_model is None:
return
self._wapiti_model = None
LOGGER.info('stopping wapiti process: %s', wapiti_model.process.pid)
wapiti_model.process.kill()
def on_wapiti_timeout(self):
self._wapiti_timeout_counter += 1
LOGGER.info(
'wapiti timeout (%s, counter=%d)',
self._wapiti_timeout, self._wapiti_timeout_counter
)
self.stop()
def _get_tag_results_with_timeout(
self,
x: np.ndarray,
features: np.ndarray,
        output_format: Optional[str] = None
) -> List[List[Tuple[str, str]]]:
prev_wapiti_timeout_counter = self._wapiti_timeout_counter
        timer = threading.Timer(self._wapiti_timeout, self.on_wapiti_timeout)
        timer.start()
        try:
            # ensure the timer is cancelled even if tagging raises, so that a
            # late timeout cannot kill a subsequently restarted wapiti process
            result = list(self.iter_tag_using_model(x, features, output_format))
        finally:
            timer.cancel()
        if self._wapiti_timeout_counter != prev_wapiti_timeout_counter:
            raise TimeoutError('wapiti timeout received during processing')
        return result
def _get_tag_results_with_timeout_and_retry(
self,
x: np.ndarray,
features: np.ndarray,
        output_format: Optional[str] = None
) -> List[List[Tuple[str, str]]]:
attempt = 0
while True:
try:
return self._get_tag_results_with_timeout(x, features, output_format)
except Exception as exc: # pylint: disable=broad-except
attempt += 1
LOGGER.warning(
'received error processing data: %r, attempt=%d/%d, texts=%r',
exc, attempt, self._wapiti_trial_count, list(x), exc_info=True
)
if attempt >= self._wapiti_trial_count:
LOGGER.warning('final attempt, re-raising exception')
raise
def iter_tag(
self,
x: np.ndarray,
features: np.ndarray,
        output_format: Optional[str] = None
) -> Iterable[List[Tuple[str, str]]]:
# by default, WapitiModelAdapter will run the binary for each call
# using "iter_tag_using_model" will result in a wapiti process
# that we communicate with via stdin / stdout
with self._lock:
yield from self._get_tag_results_with_timeout_and_retry(x, features, output_format)
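
# Illustrative usage sketch (the model directory and binary path below are
# assumptions for illustration, not part of this module): the adapter is
# loaded from a model directory and then keeps a single wapiti process around,
# communicating with it via stdin / stdout:
#
#     adapter = WapitiServiceModelAdapter.load_from(
#         '/path/to/wapiti-model-dir',
#         download_manager=download_manager,
#         wapiti_binary_path='/usr/local/bin/wapiti'
#     )
#     tag_results = list(adapter.iter_tag(x, features))
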
class WapitiModelImpl(ModelImpl):
def __init__(self, model_url: str, app_context: AppContext):
self.model_url = model_url
self.app_context = app_context
self._lazy_model = LazyLoaded[WapitiModelAdapter](self._load_model)
def __repr__(self) -> str:
return '%s(%r, loaded=%r)' % (
type(self).__name__, self.model_url, self._lazy_model.is_loaded
)
    def _load_model(self) -> WapitiModelAdapter:
model = WapitiServiceModelAdapter.load_from(
self.model_url,
wapiti_binary_path=self.app_context.lazy_wapiti_binary_wrapper.get_binary_path(),
download_manager=self.app_context.download_manager
)
LOGGER.info('loaded wapiti model: %r', self.model_url)
return model
@property
    def model(self) -> WapitiModelAdapter:
return self._lazy_model.get()
def preload(self):
self._lazy_model.get()
def predict_labels(
self,
texts: List[List[str]],
features: List[List[List[str]]],
output_format: Optional[str] = None
) -> List[List[Tuple[str, str]]]:
model = self.model
result = model.tag(texts, features=features, output_format=output_format)
token_count = sum(len(text) for text in texts)
LOGGER.info(
'predicted labels using wapiti model (document count: %d, token count: %d)',
len(texts), token_count
)
        return result


# source file: sciencebeam_parser/models/wapiti_model_impl.py

from typing import Iterable
from sciencebeam_parser.models.data import (
ContextAwareLayoutTokenFeatures,
ContextAwareLayoutTokenModelDataGenerator,
LayoutModelData
)
class CitationDataGenerator(ContextAwareLayoutTokenModelDataGenerator):
def iter_model_data_for_context_layout_token_features(
self,
token_features: ContextAwareLayoutTokenFeatures
) -> Iterable[LayoutModelData]:
yield token_features.get_layout_model_data([
token_features.token_text,
token_features.get_lower_token_text(),
token_features.get_prefix(1),
token_features.get_prefix(2),
token_features.get_prefix(3),
token_features.get_prefix(4),
token_features.get_suffix(1),
token_features.get_suffix(2),
token_features.get_suffix(3),
token_features.get_suffix(4),
token_features.get_line_status_with_lineend_for_single_token(),
token_features.get_capitalisation_status_using_allcap(),
token_features.get_digit_status_using_containsdigits(),
token_features.get_str_is_single_char(),
token_features.get_dummy_str_is_proper_name(),
token_features.get_dummy_str_is_common_name(),
token_features.get_str_is_first_name(),
token_features.get_str_is_last_name(),
token_features.get_dummy_str_is_location_name(),
token_features.get_dummy_str_is_year(),
token_features.get_dummy_str_is_month(),
token_features.get_dummy_str_is_http(),
token_features.get_dummy_str_is_known_collaboration(),
token_features.get_dummy_str_is_known_journal_title(),
token_features.get_dummy_str_is_known_conference_title(),
token_features.get_dummy_str_is_known_publisher(),
token_features.get_dummy_str_is_known_identifier(),
token_features.get_punctuation_type_feature(),
token_features.get_str_sentence_token_relative_position(),
token_features.get_dummy_label()
        ])


# source file: sciencebeam_parser/models/citation/data.py

import logging
import re
from typing import Iterable, Mapping, Optional, Set, Tuple, Type, Union
from sciencebeam_parser.utils.misc import iter_ids
from sciencebeam_parser.document.semantic_document import (
SemanticContentFactoryProtocol,
SemanticContentWrapper,
SemanticDate,
SemanticExternalIdentifier,
SemanticExternalIdentifierTypes,
SemanticExternalUrl,
SemanticInvalidReference,
SemanticIssue,
SemanticJournal,
SemanticLocation,
SemanticPageRange,
SemanticPublisher,
SemanticRawAuthors,
SemanticRawEditors,
SemanticRawReference,
SemanticRawReferenceText,
SemanticReference,
SemanticTitle,
SemanticVolume
)
from sciencebeam_parser.document.layout_document import LayoutBlock
from sciencebeam_parser.models.extract import SimpleModelSemanticExtractor
LOGGER = logging.getLogger(__name__)
# https://en.wikipedia.org/wiki/Digital_Object_Identifier
# https://www.doi.org/doi_handbook/2_Numbering.html
DOI_PATTERN = r'\b(10\.\d{4,}(?:\.\d{1,})*/.+)'
# copied and adapted from:
# https://github.com/kermitt2/grobid/blob/0.6.2/grobid-core/src/main/java/org/grobid/core/utilities/TextUtilities.java#L66
PMID_PATTERN = r"(?:(?:PMID)|(?:Pub(?:\s)?Med(?:\s)?(?:ID)?))(?:\s)?(?:\:)?(?:\s)*(\d{1,8})"
PMCID_PATTERN = r"(?:PMC)(\d{1,})"
# copied and adapted from:
# https://github.com/kermitt2/grobid/blob/0.6.2/grobid-core/src/main/java/org/grobid/core/utilities/TextUtilities.java#L62-L63
ARXIV_PATTERN = (
r"(?:arXiv\s?(?:\.org)?\s?\:\s?(\d{4}\s?\.\s?\d{4,5}(?:v\d+)?))"
r"|(?:arXiv\s?(?:\.org)?\s?\:\s?([ a-zA-Z\-\.]*\s?/\s?\d{7}(?:v\d+)?))"
)
# https://en.wikipedia.org/wiki/Publisher_Item_Identifier
PII_PATTERN = r'\b([S,B]\W*(?:[0-9xX]\W*){15,}[0-9xX])'
SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG: Mapping[str, SemanticContentFactoryProtocol] = {
'<author>': SemanticRawAuthors,
'<editor>': SemanticRawEditors,
'<title>': SemanticTitle,
'<journal>': SemanticJournal,
'<volume>': SemanticVolume,
'<issue>': SemanticIssue,
'<publisher>': SemanticPublisher,
'<location>': SemanticLocation
}
VALID_REFERENCE_TYPES: Set[Type[SemanticContentWrapper]] = {
SemanticTitle,
SemanticJournal,
SemanticRawAuthors,
SemanticRawEditors,
SemanticExternalIdentifier,
SemanticExternalUrl
}
def parse_page_range(layout_block: LayoutBlock) -> SemanticPageRange:
page_range_text = layout_block.text
page_parts = page_range_text.split('-')
if len(page_parts) == 2:
from_page = page_parts[0].strip()
to_page = page_parts[1].strip()
if to_page and len(to_page) < len(from_page):
to_page = from_page[:-(len(to_page))] + to_page
return SemanticPageRange(
layout_block=layout_block,
from_page=from_page,
to_page=to_page
)
return SemanticPageRange(layout_block=layout_block)
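
# Illustrative sketch (hypothetical input): for a layout block whose text is
# "350-61", parse_page_range expands the abbreviated end page using the start
# page and returns a SemanticPageRange with from_page='350' and to_page='361';
# text that is not a single hyphen-separated pair is kept as an unparsed range.
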
def parse_web(layout_block: LayoutBlock) -> Union[SemanticExternalUrl, SemanticExternalIdentifier]:
value = re.sub(r'\s', '', layout_block.text)
m = re.search(DOI_PATTERN, value)
if m:
return SemanticExternalIdentifier(
layout_block=layout_block,
value=m.group(1),
external_identifier_type=SemanticExternalIdentifierTypes.DOI
)
return SemanticExternalUrl(
layout_block=layout_block,
value=value
)
def get_detected_external_identifier_type_and_value_for_text(
text: str
) -> Tuple[Optional[str], str]:
value = re.sub(r'\s', '', text)
m = re.search(DOI_PATTERN, value)
if m:
value = m.group(1)
return SemanticExternalIdentifierTypes.DOI, value
m = re.search(PMCID_PATTERN, value)
if m:
value = 'PMC' + m.group(1)
return SemanticExternalIdentifierTypes.PMCID, value
m = re.search(ARXIV_PATTERN, value)
if m:
value = m.group(1) or m.group(2)
return SemanticExternalIdentifierTypes.ARXIV, value
m = re.match(PMID_PATTERN, value)
if m:
value = m.group(1)
return SemanticExternalIdentifierTypes.PMID, value
m = re.search(PII_PATTERN, value)
if m:
value = m.group(1)
return SemanticExternalIdentifierTypes.PII, value
return None, value
def get_detected_external_identifier_type_for_text(text: str) -> Optional[str]:
external_identifier_type, _ = get_detected_external_identifier_type_and_value_for_text(
text
)
return external_identifier_type
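
# Illustrative sketch (hypothetical identifier strings): expected behaviour of
# the detection helpers above, following the regular expressions defined in
# this module:
#
#     get_detected_external_identifier_type_and_value_for_text(
#         'doi:10.1371/journal.pone.0123456'
#     )
#     # -> (SemanticExternalIdentifierTypes.DOI, '10.1371/journal.pone.0123456')
#
#     get_detected_external_identifier_type_and_value_for_text('PMC1234567')
#     # -> (SemanticExternalIdentifierTypes.PMCID, 'PMC1234567')
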
def parse_pubnum(layout_block: LayoutBlock) -> SemanticExternalIdentifier:
external_identifier_type, value = get_detected_external_identifier_type_and_value_for_text(
layout_block.text
)
return SemanticExternalIdentifier(
layout_block=layout_block,
value=value,
external_identifier_type=external_identifier_type
)
def parse_date(layout_block: LayoutBlock) -> SemanticDate:
value = re.sub(r'\s', '', layout_block.text)
year: Optional[int] = None
m = re.search(r'(\d{4})', value)
if m:
year = int(m.group(1))
return SemanticDate(
layout_block=layout_block,
year=year
)
def is_reference_valid(ref: SemanticReference) -> bool:
for semantic_content in ref:
if type(semantic_content) in VALID_REFERENCE_TYPES:
return True
return False
def get_invalid_reference(ref: SemanticReference) -> SemanticInvalidReference:
return SemanticInvalidReference(
mixed_content=[
semantic_content
for semantic_content in ref
if not isinstance(semantic_content, SemanticRawReferenceText)
]
)
class CitationSemanticExtractor(SimpleModelSemanticExtractor):
def __init__(self):
super().__init__(semantic_content_class_by_tag=SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG)
def get_semantic_content_for_entity_name( # pylint: disable=too-many-return-statements
self,
name: str,
layout_block: LayoutBlock
) -> SemanticContentWrapper:
if name == '<pages>':
return parse_page_range(layout_block)
if name == '<web>':
return parse_web(layout_block)
if name == '<pubnum>':
return parse_pubnum(layout_block)
if name == '<date>':
return parse_date(layout_block)
return super().get_semantic_content_for_entity_name(name, layout_block)
def iter_semantic_content_for_entity_blocks( # pylint: disable=arguments-differ
self,
entity_tokens: Iterable[Tuple[str, LayoutBlock]],
semantic_raw_reference: Optional[SemanticRawReference] = None,
**kwargs
) -> Iterable[SemanticContentWrapper]:
entity_tokens = list(entity_tokens)
LOGGER.debug('entity_tokens: %s', entity_tokens)
ids_iterator = iter(iter_ids('b'))
ref: Optional[SemanticReference] = None
for name, layout_block in entity_tokens:
if not ref:
ref = SemanticReference()
if semantic_raw_reference:
ref.content_id = semantic_raw_reference.content_id
for semantic_content in semantic_raw_reference:
ref.add_content(semantic_content)
if not ref.content_id:
ref.content_id = next(ids_iterator, '?')
semantic_content = self.get_semantic_content_for_entity_name(
name, layout_block=layout_block
)
ref.add_content(semantic_content)
if ref and not is_reference_valid(ref):
yield get_invalid_reference(ref)
elif ref:
            yield ref


# source file: sciencebeam_parser/models/citation/extract.py

import logging
import re
from typing import Iterable, List, Mapping, Optional, Tuple, Type, Union, cast
from sciencebeam_parser.document.semantic_document import (
SemanticAuthor,
SemanticContentFactoryProtocol,
SemanticContentWrapper,
SemanticMarker,
SemanticMiddleName,
SemanticMixedContentWrapper,
SemanticNamePart,
SemanticNameSuffix,
SemanticNameTitle,
SemanticNote,
SemanticGivenName,
SemanticSurname,
T_SemanticName
)
from sciencebeam_parser.document.layout_document import LayoutBlock, LayoutDocument, LayoutToken
from sciencebeam_parser.models.extract import SimpleModelSemanticExtractor
LOGGER = logging.getLogger(__name__)
SPLIT_ON_SECOND_ENTITY_NAME = {'<title>', '<forename>', '<surname>'}
SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG: Mapping[str, SemanticContentFactoryProtocol] = {
'<title>': SemanticNameTitle,
'<forename>': SemanticGivenName,
'<middlename>': SemanticMiddleName,
'<surname>': SemanticSurname,
'<suffix>': SemanticNameSuffix
}
def tokenize_individual_characters(text: str) -> List[str]:
return list(text)
def convert_two_letter_uppercase_given_name_to_given_middle_name(
name: T_SemanticName
):
given_names = list(name.iter_by_type(SemanticGivenName))
middle_names = list(name.iter_by_type(SemanticMiddleName))
if middle_names:
LOGGER.debug('already has a middle name: %r', middle_names)
return
if len(given_names) != 1:
LOGGER.debug('no or too many given names: %r', given_names)
return
given_name_text = given_names[0].get_text()
if len(given_name_text) != 2 or not given_name_text.isupper():
LOGGER.debug('not two uppercase characters: %r', given_name_text)
return
layout_document = LayoutDocument.for_blocks(list(given_names[0].iter_blocks()))
retokenized_layout_document = layout_document.retokenize(
tokenize_fn=tokenize_individual_characters
)
LOGGER.debug('retokenized_layout_document: %r', retokenized_layout_document)
split_name_parts = [
(
SemanticGivenName(layout_block=LayoutBlock.for_tokens([token])) if index == 0
else SemanticMiddleName(layout_block=LayoutBlock.for_tokens([token]))
)
for index, token in enumerate(retokenized_layout_document.iter_all_tokens())
]
LOGGER.debug('split_name_parts: %r', split_name_parts)
name.flat_map_inplace_by_type(
SemanticGivenName,
lambda _: split_name_parts
)
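
# Illustrative sketch (hypothetical name): a single given name consisting of
# exactly two uppercase letters, e.g. "JA", is re-tokenized into individual
# characters and becomes a SemanticGivenName "J" followed by a
# SemanticMiddleName "A"; names that already have a middle name, or whose
# given name is not two uppercase characters, are left unchanged.
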
def convert_name_parts_to_title_case(name: T_SemanticName):
for semantic_content in name:
if not isinstance(semantic_content, SemanticNamePart):
continue
semantic_content.value = semantic_content.get_text().title()
# based on:
# https://github.com/kermitt2/grobid/blob/0.6.2/grobid-core/src/main/java/org/grobid/core/data/Person.java#L375-L391
# and:
# https://github.com/kermitt2/grobid/blob/0.6.2/grobid-core/src/main/java/org/grobid/core/data/Person.java#L756-L775
def normalize_name_parts(name: T_SemanticName):
if not list(name.iter_by_type(SemanticSurname)):
return SemanticNote(
layout_block=LayoutBlock.merge_blocks(name.iter_blocks()),
note_type='invalid_author_name'
)
convert_two_letter_uppercase_given_name_to_given_middle_name(name)
convert_name_parts_to_title_case(name)
return name
def iter_semantic_markers_for_layout_block(
layout_block: LayoutBlock
) -> Iterable[Union[SemanticMarker, SemanticContentWrapper]]:
for text in re.split(r'(\D)', layout_block.text):
if not text:
continue
local_block = LayoutBlock.for_tokens([
LayoutToken(text, whitespace='')
])
if text == ',' or text.isspace():
yield SemanticNote(
layout_block=local_block,
note_type='marker_delimiter'
)
continue
yield SemanticMarker(layout_block=local_block)
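
# Illustrative sketch (hypothetical marker text): for a layout block whose text
# is "1,2", iter_semantic_markers_for_layout_block yields a SemanticMarker for
# "1", a SemanticNote with note_type 'marker_delimiter' for the comma, and a
# SemanticMarker for "2".
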
def append_semantic_markers_for_layout_block(
parent_semantic_content: SemanticMixedContentWrapper,
layout_block: LayoutBlock
) -> None:
semantic_markers = list(iter_semantic_markers_for_layout_block(layout_block))
for semantic_marker in semantic_markers:
parent_semantic_content.add_content(semantic_marker)
class NameSemanticExtractor(SimpleModelSemanticExtractor):
def __init__(self):
super().__init__(semantic_content_class_by_tag=SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG)
def iter_semantic_content_for_entity_blocks( # type: ignore # pylint: disable=arguments-differ
self,
entity_tokens: Iterable[Tuple[str, LayoutBlock]],
name_type: Optional[Type[T_SemanticName]] = None,
**kwargs
) -> Iterable[T_SemanticName]:
_name_type: Type[T_SemanticName] = cast(
Type[T_SemanticName],
name_type if name_type is not None else SemanticAuthor
)
entity_tokens = list(entity_tokens)
LOGGER.debug('entity_tokens: %s', entity_tokens)
semantic_name: Optional[T_SemanticName] = None
seen_entity_tokens: List[Tuple[str, LayoutBlock]] = []
seen_name_labels: List[str] = []
has_tail_marker: bool = False
for name, layout_block in entity_tokens:
seen_entity_tokens.append((name, layout_block,))
if name == '<marker>':
if not semantic_name:
LOGGER.debug('new semantic_name with marker in the beginning')
semantic_name = _name_type()
append_semantic_markers_for_layout_block(semantic_name, layout_block)
continue
if len(seen_entity_tokens) >= 2 and seen_name_labels and not has_tail_marker:
previous_layout_block = seen_entity_tokens[-2][1]
if previous_layout_block.text.strip().endswith(','):
LOGGER.debug(
'new semantic_name marker after comma, seen_name_labels=%s',
seen_name_labels
)
yield normalize_name_parts(semantic_name)
seen_name_labels = []
semantic_name = _name_type()
append_semantic_markers_for_layout_block(semantic_name, layout_block)
continue
append_semantic_markers_for_layout_block(semantic_name, layout_block)
has_tail_marker = True
continue
            if semantic_name and name in SPLIT_ON_SECOND_ENTITY_NAME and name in seen_name_labels:
LOGGER.debug(
'starting new semantic_name after having seen name part again, name=%r',
name
)
yield normalize_name_parts(semantic_name)
seen_name_labels = []
has_tail_marker = False
semantic_name = None
semantic_content = self.get_semantic_content_for_entity_name(
name, layout_block
)
if not isinstance(semantic_content, SemanticNote):
if has_tail_marker and semantic_name:
LOGGER.debug('starting new semantic_name after tail markers, name=%r', name)
yield normalize_name_parts(semantic_name)
seen_name_labels = []
has_tail_marker = False
semantic_name = None
seen_name_labels.append(name)
if not semantic_name:
semantic_name = _name_type()
semantic_name.add_content(semantic_content)
if semantic_name:
            yield normalize_name_parts(semantic_name)


# source file: sciencebeam_parser/models/name/extract.py

import logging
from typing import Iterable, Set, Union
from sciencebeam_parser.document.semantic_document import SemanticAuthor
from sciencebeam_parser.models.data import LayoutModelData
from sciencebeam_parser.models.model import (
LabeledLayoutToken,
iter_entity_layout_blocks_for_labeled_layout_tokens
)
from sciencebeam_parser.models.name.extract import NameSemanticExtractor
from sciencebeam_parser.models.training_data import (
AbstractTeiTrainingDataGenerator,
AbstractTrainingTeiParser,
ExtractInstruction,
ResetExtractInstruction,
get_model_data_label
)
LOGGER = logging.getLogger(__name__)
# based on:
# https://github.com/kermitt2/grobid/blob/0.7.0/grobid-core/src/main/java/org/grobid/core/engines/AuthorParser.java
ROOT_TRAINING_XML_ELEMENT_PATH = [
'teiHeader', 'fileDesc', 'sourceDesc', 'biblStruct', 'analytic', 'author', 'persName'
]
TRAINING_XML_ELEMENT_PATH_BY_LABEL = {
'<marker>': ROOT_TRAINING_XML_ELEMENT_PATH + ['marker'],
'<title>': ROOT_TRAINING_XML_ELEMENT_PATH + ['roleName'],
'<forename>': ROOT_TRAINING_XML_ELEMENT_PATH + ['forename'],
'<middlename>': ROOT_TRAINING_XML_ELEMENT_PATH + ['middlename'],
'<surname>': ROOT_TRAINING_XML_ELEMENT_PATH + ['surname'],
'<suffix>': ROOT_TRAINING_XML_ELEMENT_PATH + ['suffix']
}
def iter_model_data_with_reset_instruction_iterable(
model_data_or_instruction_iterable: Iterable[Union[LayoutModelData, ExtractInstruction]]
) -> Iterable[Union[LayoutModelData, ExtractInstruction]]:
# using extractor to re-use logic to split author names
# here we will split on the first token of the extracted semantic content
extractor = NameSemanticExtractor()
model_data_or_instruction_list = list(
model_data_or_instruction_iterable
)
entity_tokens = iter_entity_layout_blocks_for_labeled_layout_tokens([
LabeledLayoutToken(
label=get_model_data_label(model_data) or '',
layout_token=model_data.layout_token
)
for model_data in model_data_or_instruction_list
if (
isinstance(model_data, LayoutModelData)
and model_data.layout_token is not None
)
])
LOGGER.debug('entity_tokens: %r', entity_tokens)
reset_token_ids: Set[int] = set()
for index, semantic_content in enumerate(extractor.iter_semantic_content_for_entity_blocks(
entity_tokens=entity_tokens,
name_type=SemanticAuthor
)):
if index == 0:
continue
for semantic_token in semantic_content.iter_tokens():
reset_token_ids.add(id(semantic_token))
break
for model_data_or_instruction in model_data_or_instruction_list:
if isinstance(model_data_or_instruction, LayoutModelData):
model_data = model_data_or_instruction
if id(model_data.layout_token) in reset_token_ids:
yield ResetExtractInstruction(
ROOT_TRAINING_XML_ELEMENT_PATH[:-1]
)
yield model_data_or_instruction
class NameTeiTrainingDataGenerator(AbstractTeiTrainingDataGenerator):
DEFAULT_TEI_FILENAME_SUFFIX = '.authors.tei.xml'
def __init__(self):
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH,
training_xml_element_path_by_label=TRAINING_XML_ELEMENT_PATH_BY_LABEL,
use_tei_namespace=True,
root_tag='TEI',
default_tei_filename_suffix=(
NameTeiTrainingDataGenerator.DEFAULT_TEI_FILENAME_SUFFIX
),
default_data_filename_suffix=None
)
def iter_model_data_or_instruction_for_model_data_iterable(
self,
model_data_iterable: Iterable[LayoutModelData]
) -> Iterable[Union[LayoutModelData, ExtractInstruction]]:
parent_model_data_or_instruction_iterable = (
super().iter_model_data_or_instruction_for_model_data_iterable(
model_data_iterable
)
)
return iter_model_data_with_reset_instruction_iterable(
parent_model_data_or_instruction_iterable
)
class NameTrainingTeiParser(AbstractTrainingTeiParser):
def __init__(self) -> None:
super().__init__(
root_training_xml_element_path=ROOT_TRAINING_XML_ELEMENT_PATH[:-1],
training_xml_element_path_by_label=(
TRAINING_XML_ELEMENT_PATH_BY_LABEL
),
use_tei_namespace=True
        )


# source file: sciencebeam_parser/models/name/training_data.py

from typing import Iterable
from sciencebeam_parser.models.data import (
ContextAwareLayoutTokenFeatures,
ContextAwareLayoutTokenModelDataGenerator,
LayoutModelData
)
class ReferenceSegmenterDataGenerator(ContextAwareLayoutTokenModelDataGenerator):
def iter_model_data_for_context_layout_token_features(
self,
token_features: ContextAwareLayoutTokenFeatures
) -> Iterable[LayoutModelData]:
yield token_features.get_layout_model_data([
token_features.token_text,
token_features.get_lower_token_text(),
token_features.get_prefix(1),
token_features.get_prefix(2),
token_features.get_prefix(3),
token_features.get_prefix(4),
token_features.get_suffix(1),
token_features.get_suffix(2),
token_features.get_suffix(3),
token_features.get_suffix(4),
token_features.get_line_status_with_lineend_for_single_token(),
token_features.get_alignment_status(),
token_features.get_capitalisation_status_using_allcap(),
token_features.get_digit_status_using_containsdigits(),
token_features.get_str_is_single_char(),
token_features.get_dummy_str_is_proper_name(),
token_features.get_dummy_str_is_common_name(),
token_features.get_str_is_first_name(),
token_features.get_dummy_str_is_location_name(),
token_features.get_dummy_str_is_year(),
token_features.get_dummy_str_is_month(),
token_features.get_dummy_str_is_http(),
token_features.get_line_punctuation_profile(),
token_features.get_str_line_token_relative_position(),
token_features.get_str_line_relative_length(),
token_features.get_block_status_with_blockend_for_single_token(),
token_features.get_truncated_line_punctuation_profile_length_feature(),
token_features.get_dummy_label()
        ])


# source file: sciencebeam_parser/models/reference_segmenter/data.py

import logging
from typing import Iterable, Optional, Tuple
from sciencebeam_parser.utils.misc import iter_ids
from sciencebeam_parser.document.semantic_document import (
SemanticContentWrapper,
SemanticHeading,
SemanticLabel,
SemanticNote,
SemanticRawReference,
SemanticRawReferenceText
)
from sciencebeam_parser.document.layout_document import LayoutBlock
from sciencebeam_parser.models.extract import ModelSemanticExtractor
LOGGER = logging.getLogger(__name__)
def is_looks_like_reference(layout_block: LayoutBlock) -> bool:
# a quick and dirty check whether this remotely looks like a reference
return len(list(layout_block.iter_all_tokens())) > 3
class ReferenceSegmenterSemanticExtractor(ModelSemanticExtractor):
def iter_semantic_content_for_entity_blocks(
self,
entity_tokens: Iterable[Tuple[str, LayoutBlock]],
**kwargs
) -> Iterable[SemanticContentWrapper]:
entity_tokens = list(entity_tokens)
LOGGER.debug('entity_tokens: %s', entity_tokens)
ids_iterator = iter(iter_ids('b'))
ref: Optional[SemanticRawReference] = None
is_first_ref = True
for name, layout_block in entity_tokens:
if name == '<label>':
if not ref:
ref = SemanticRawReference(content_id=next(ids_iterator, '?'))
ref.add_content(SemanticLabel(layout_block=layout_block))
continue
if name == '<reference>':
if not ref and is_first_ref and not is_looks_like_reference(layout_block):
yield SemanticHeading(layout_block=layout_block)
is_first_ref = False
continue
if not ref:
ref = SemanticRawReference(content_id=next(ids_iterator, '?'))
ref.add_content(SemanticRawReferenceText(layout_block=layout_block))
yield ref
ref = None
is_first_ref = False
continue
yield SemanticNote(layout_block=layout_block, note_type=name)
if ref:
            yield ref


# source file: sciencebeam_parser/models/reference_segmenter/extract.py

import logging
import re
from typing import Iterable, Mapping, Optional, Tuple
from sciencebeam_parser.document.semantic_document import (
SemanticContentFactoryProtocol,
SemanticContentWrapper,
SemanticFigureCitation,
SemanticHeading,
SemanticLabel,
SemanticNote,
SemanticParagraph,
SemanticRawEquation,
SemanticRawEquationContent,
SemanticRawFigure,
SemanticRawTable,
SemanticReferenceCitation,
SemanticSection,
SemanticSectionTypes,
SemanticTableCitation,
SemanticTitle
)
from sciencebeam_parser.document.layout_document import LayoutBlock, LayoutTokensText
from sciencebeam_parser.models.extract import SimpleModelSemanticExtractor
LOGGER = logging.getLogger(__name__)
SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG: Mapping[str, SemanticContentFactoryProtocol] = {
'<figure>': SemanticRawFigure,
'<table>': SemanticRawTable
}
PARAGRAPH_SEMANTIC_CONTENT_CLASS_BY_TAG: Mapping[str, SemanticContentFactoryProtocol] = {
'<figure_marker>': SemanticFigureCitation,
'<table_marker>': SemanticTableCitation,
'<citation_marker>': SemanticReferenceCitation
}
HEADER_LABEL_REGEX = r'(\d+\.?(?:\d+\.?)*)\s*(\D.*)'
def get_section_label_and_title_from_layout_block(
layout_block: LayoutBlock
) -> Tuple[Optional[LayoutBlock], LayoutBlock]:
if not layout_block:
return None, layout_block
layout_tokens_text = LayoutTokensText(layout_block)
text = str(layout_tokens_text)
m = re.match(HEADER_LABEL_REGEX, text, re.IGNORECASE)
if not m:
return None, layout_block
label_end = m.end(1)
title_start = m.start(2)
LOGGER.debug('label_end: %d, title_start: %d (text: %r)', label_end, title_start, text)
section_label_layout_block = LayoutBlock.for_tokens(list(
layout_tokens_text.iter_layout_tokens_between(0, label_end)
))
section_title_layout_block = LayoutBlock.for_tokens(list(
layout_tokens_text.iter_layout_tokens_between(title_start, len(text))
))
return section_label_layout_block, section_title_layout_block
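
# Illustrative sketch (hypothetical heading text): HEADER_LABEL_REGEX splits a
# numbered section heading such as "2.1 Materials and Methods" into a label
# block for "2.1" and a title block for "Materials and Methods"; headings
# without a leading section number are returned with a None label.
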
class FullTextSemanticExtractor(SimpleModelSemanticExtractor):
def __init__(self):
super().__init__(semantic_content_class_by_tag=SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG)
def add_paragraph_content(
self,
paragraph: SemanticParagraph,
name: str,
layout_block: LayoutBlock
):
semantic_content_class = PARAGRAPH_SEMANTIC_CONTENT_CLASS_BY_TAG.get(name)
if semantic_content_class:
paragraph.add_content(semantic_content_class(layout_block=layout_block))
return
paragraph.add_block_content(layout_block)
def get_semantic_heading(self, layout_block: LayoutBlock):
section_label_layout_block, section_title_layout_block = (
get_section_label_and_title_from_layout_block(layout_block)
)
if section_label_layout_block:
return SemanticHeading([
SemanticLabel(layout_block=section_label_layout_block),
SemanticTitle(layout_block=section_title_layout_block)
])
return SemanticHeading([
SemanticTitle(layout_block=section_title_layout_block)
])
def get_raw_equation_child_semantic_content(
self,
name: str,
layout_block: LayoutBlock
):
if name == '<equation_label>':
return SemanticLabel(layout_block=layout_block)
if name == '<equation>':
return SemanticRawEquationContent(layout_block=layout_block)
return self.get_semantic_content_for_entity_name(
name, layout_block=layout_block
)
def iter_semantic_content_for_entity_blocks( # noqa pylint: disable=arguments-differ, too-many-branches
self,
entity_tokens: Iterable[Tuple[str, LayoutBlock]],
section_type: str = SemanticSectionTypes.OTHER,
**kwargs
) -> Iterable[SemanticContentWrapper]:
entity_tokens = list(entity_tokens)
LOGGER.debug('entity_tokens: %s', entity_tokens)
section: Optional[SemanticSection] = None
paragraph: Optional[SemanticParagraph] = None
raw_equation: Optional[SemanticRawEquation] = None
_previous_tag: Optional[str] = None
for name, layout_block in entity_tokens:
if LOGGER.isEnabledFor(logging.DEBUG):
LOGGER.debug('entity_block: %r, %r', name, layout_block.text)
previous_tag = _previous_tag
_previous_tag = name
if name in {'O'}:
LOGGER.debug('ignoring content (%r): %r', name, layout_block)
note_type = 'fulltext:other' if name == 'O' else name
if section:
section.add_note(layout_block, note_type=note_type)
else:
yield SemanticNote(
layout_block=layout_block,
note_type=note_type
)
continue
if name == '<section>':
paragraph = None
raw_equation = None
if section:
yield section
section = SemanticSection(section_type=section_type)
section.add_content(self.get_semantic_heading(layout_block))
continue
if not section:
section = SemanticSection(section_type=section_type)
if name in SIMPLE_SEMANTIC_CONTENT_CLASS_BY_TAG:
section.add_content(self.get_semantic_content_for_entity_name(
name, layout_block=layout_block
))
continue
# treat everything else as paragraph content
if (
not paragraph
or (
name == '<paragraph>'
and previous_tag == '<paragraph>'
)
):
paragraph = section.add_new_paragraph()
if name in {'<equation>', '<equation_label>'}:
semantic_content = self.get_raw_equation_child_semantic_content(
name, layout_block=layout_block
)
if (
isinstance(semantic_content, SemanticRawEquationContent)
and raw_equation
and raw_equation.has_type(SemanticRawEquationContent)
):
LOGGER.debug('already has equation content, start new one')
raw_equation = None
if not raw_equation:
raw_equation = SemanticRawEquation()
paragraph.add_content(raw_equation)
raw_equation.add_content(semantic_content)
continue
raw_equation = None
self.add_paragraph_content(
paragraph, name, layout_block
)
if section:
            yield section


# source file: sciencebeam_parser/models/fulltext/extract.py

import logging
from typing import Iterable, Tuple
from sciencebeam_parser.document.layout_document import (
LayoutBlock
)
from sciencebeam_parser.document.semantic_document import (
SemanticSection,
SemanticSectionTypes
)
from sciencebeam_parser.models.fulltext.training_data import (
FullTextTeiTrainingDataGenerator,
FullTextTrainingTeiParser
)
from sciencebeam_parser.models.model import Model
from sciencebeam_parser.models.data import (
DocumentFeaturesContext
)
from sciencebeam_parser.models.fulltext.data import FullTextDataGenerator
from sciencebeam_parser.models.fulltext.extract import FullTextSemanticExtractor
LOGGER = logging.getLogger(__name__)
class FullTextModel(Model):
def get_data_generator(
self,
document_features_context: DocumentFeaturesContext
) -> FullTextDataGenerator:
return FullTextDataGenerator(
document_features_context=document_features_context
)
def get_semantic_extractor(self) -> FullTextSemanticExtractor:
return FullTextSemanticExtractor()
def get_tei_training_data_generator(self) -> FullTextTeiTrainingDataGenerator:
return FullTextTeiTrainingDataGenerator()
def get_training_tei_parser(self) -> FullTextTrainingTeiParser:
return FullTextTrainingTeiParser()
def update_section_with_entity_blocks(
self,
parent_section: SemanticSection,
entity_tokens: Iterable[Tuple[str, LayoutBlock]],
section_type: str = SemanticSectionTypes.OTHER
):
semantic_extractor = self.get_semantic_extractor()
for semantic_content in semantic_extractor.iter_semantic_content_for_entity_blocks(
entity_tokens=entity_tokens,
section_type=section_type
):
parent_section.add_content(semantic_content)
def get_section_for_entity_blocks(
self,
entity_tokens: Iterable[Tuple[str, LayoutBlock]]
) -> SemanticSection:
parent_section = SemanticSection()
self.update_section_with_entity_blocks(parent_section, entity_tokens)
        return parent_section


# source file: sciencebeam_parser/models/fulltext/model.py