| code (string, lengths 114–1.05M) | path (string, lengths 3–312) | quality_prob (float64, 0.5–0.99) | learning_prob (float64, 0.2–1) | filename (string, lengths 3–168) | kind (stringclasses, 1 value) |
---|---|---|---|---|---|
import requests
import json
import time
import datetime
import os
def write_json(file, data):
with open(file,'w') as f:
json.dump(data, f, indent=4)
"""
Checks the response from the server and raises errors accordingly.
"""
def check_response(response):
if response.status_code == 401:
raise ValueError("Old or incorrect token used, ensure you've got a working one with get_updated_token()")
if response.status_code == 404:
raise ValueError("Incorrect parameters passed to the API")
if response.status_code == 500:
raise ValueError("Unknown error with request")
"""
The app key is the first step in authentication; it is used to generate the token.
"""
def get_app_key(name, password, school, write_file_path='app_key.json'):
    # Note that the password is sent to the server in plaintext.
app_key_response = requests.post(f"https://sms.schoolsoft.se/{school}/rest/app/login",
data={'identification':name,
'verification':password,
'logintype':'4',
'usertype':'1',
})
check_response(app_key_response)
app_key_json = app_key_response.json()
if write_file_path:
write_json(write_file_path, app_key_json)
return app_key_json
"""
Gets the token used for authentication from the app_key.
Note that the token has an expiry date.
This function shouldn't be used directly.
Use get_updated_token() to prevent spamming the servers for new tokens.
"""
def get_token(school, app_key_json={}, app_key_path=None, write_file_path='token.json'):
key = None
if 'appKey' in app_key_json and not app_key_path:
key = app_key_json['appKey']
elif app_key_path:
        with open(app_key_path) as f:
            key = json.load(f).get('appKey')
    # If no key was obtained from app_key_path or app_key_json, raise an error.
if not key:
raise RuntimeError('No valid value for app_key. An app key is needed to generate the token')
token_response = requests.get(f'https://sms.schoolsoft.se/{school}/rest/app/token', headers={
"appversion": "2.3.2",
"appos": "android",
"appkey": key,
"deviceid":""
})
check_response(token_response)
token_json = token_response.json()
if write_file_path:
write_json(write_file_path, token_json)
return token_json
"""
Gets the lessons based on token and schoolname.
School is found in the url like this:
"https://sms13.schoolsoft.se/ school /jsp/student/right_student_startpage.jsp"
"""
def get_lessons(token, school, org_id, write_file_path='lessons.json'):
lesson_response = requests.get(f'https://sms.schoolsoft.se/{school}/api/lessons/student/{org_id}', headers= {
"appversion": "2.3.2",
"appos": "android",
"token": token})
check_response(lesson_response)
lesson_json = lesson_response.json()
if write_file_path:
write_json(write_file_path, lesson_json)
return lesson_json
"""
Gets the calendar for the student based on unix timestamps (1597246367)
The API uses millisecond-based timestamps, but the function takes second-based ones and converts them.
By default with no parameters it will use the current time as start and a month from that as end.
"""
def get_calendar(token, school, org_id, unix_time_start=None, unix_time_end=None, write_file_path='calendar.json'):
unix_time_start = time.time()*1000 if not unix_time_start else unix_time_start*1000
unix_time_end = (time.time() + 2592000)*1000 if not unix_time_end else unix_time_end*1000
    # The API rejects timestamps containing decimals, so round them.
unix_time_start = round(unix_time_start)
unix_time_end = round(unix_time_end)
calendar_response = requests.get(f'https://sms.schoolsoft.se/{school}/api/notices/student/{org_id}/{unix_time_start}/{unix_time_end}/calendar,schoolcalendar,privatecalendar',
headers={
"appversion": "2.3.2",
"appos": "android",
"token": token})
check_response(calendar_response)
calendar_json = calendar_response.json()
if write_file_path:
write_json(write_file_path, calendar_json)
return calendar_json
"""
Gets the lunch :)
"""
def get_lunch(token, school, org_id, write_file_path='lunch.json'):
lunch_response = requests.get(f'https://sms.schoolsoft.se/{school}/api/lunchmenus/student/{org_id}',
headers={
"appversion": "2.3.2",
"appos": "android",
"token": token})
check_response(lunch_response)
lunch_json = lunch_response.json()
if write_file_path:
write_json(write_file_path, lunch_json)
return lunch_json
"""
Basically get_token(), but looks at the previous token's expiry date and determines whether a new token
should be issued or the old one reused. This function should be used when making applications.
"""
def get_updated_token(school, app_key_json={}, app_key_path=None, token_json={}, token_path=None, write_file_path='token.json'):
if 'expiryDate' not in token_json and token_path:
if os.path.isfile(token_path):
with open(token_path) as f:
token_json = json.load(f)
if not token_json:
        # If no token was passed to the function, generate a new one.
token_json = get_token(school, app_key_json, app_key_path, write_file_path)
return token_json
# Cuts off milliseconds.
expiry_date = token_json['expiryDate'][:-4]
# Assumes the date is formatted like "2020-08-12 17:48:22".
unix_time = time.mktime(datetime.datetime.strptime(expiry_date, "%Y-%m-%d %H:%M:%S").timetuple())
# Extra 5 minutes for good measure
# The token seems to last 3 hours.
if time.time() + 5*60 > unix_time:
token_json = get_token(school, app_key_json, app_key_path, write_file_path)
else:
write_json(write_file_path, token_json)
return token_json
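# Illustrative usage sketch (comments only, not part of the library): the
# functions above are meant to be composed roughly like this. The school name
# and credentials are placeholders, and the JSON field names ('token' and
# 'orgId') are assumptions about the server's responses, so check the saved
# JSON files for the real keys.
#
#   app_key = get_app_key("myname", "mypassword", "myschool")
#   token = get_updated_token("myschool", app_key_json=app_key)
#   lessons = get_lessons(token["token"], "myschool", app_key["orgId"])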
"""
Gives the same info as get_app_key(), but doesn't generate an app key.
Should be used when you want to get user info and already have a token.
"""
def get_user_info(token, school, write_file_path='user.json'):
user_response = requests.get(f'https://sms.schoolsoft.se/{school}/api/user/get',
headers={
"appversion": "2.3.2",
"appos": "android",
"token": token})
check_response(user_response)
user_json = user_response.json()
if write_file_path:
write_json(write_file_path, user_json)
    return user_json
| /schoolsoft_api-1.0.3-py3-none-any.whl/schoolsoft_api/schoolsoft_api.py | 0.484624 | 0.205356 | schoolsoft_api.py | pypi |
===========
schoolutils
===========
schoolutils provides a simple, efficient way to track and manage
student data. It includes:
* a database for storing information about students, courses,
assignments, and grades
* a command-line interface for interacting with the database
* tools for calculating grades
* tools for importing and exporting student data in useful formats
* reports on basic grade statistics
Other planned features include:
* tools for reporting more complex grade statistics
* tools for receiving student assignments via email, and returning
graded assignments and comments via email
Installation
============
Installing locally vs. system-wide
----------------------------------
If you are your computer's administrator, you probably want to
install schoolutils system-wide. In that case, you need to run the
``pip`` or ``python`` installation commands below with administrator
privileges. On Mac OS X and GNU/Linux, you can generally do that by
prefixing ``sudo`` to these commands (e.g., ``sudo pip install
schoolutils``).
If you do not have administrative access to the computer where you want
to install schoolutils, or you simply don't want to install it
system-wide, there are a couple of options for installing it locally.
The first is to install schoolutils in a Python `virtual environment
<https://pypi.python.org/pypi/virtualenv>`_ that you control. To do
this, create and activate a virtual environment, then run the ``pip``
command below. The second is to install schoolutils to a directory in
your control which is on the system Python interpreter's path. You
can do that by passing the ``--user`` option to the ``python`` command
below (``python setup.py install --user``).
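For example, the virtual environment route could look like this (the
environment path is arbitrary; any directory you control will do)::

  $ virtualenv ~/environments/schoolutils
  $ source ~/environments/schoolutils/bin/activate
  $ pip install schoolutils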
Note that if you don't install schoolutils system-wide, you may need
to adjust your shell's $PATH environment variable to make the
``grade`` command available. A virtual environment makes this easy,
so that is the recommended method for installing locally.
Installation procedures
-----------------------
The easiest way to install schoolutils is via `pip
<http://www.pip-installer.org/en/latest/installing.html>`_::
$ pip install schoolutils
You can also `download
<http://pypi.python.org/pypi/schoolutils#downloads>`_ the package from
PyPI, unpack it, and run::
$ python setup.py install
from the package directory.
Finally, you can get the development version with ``git``. The project
is hosted on both `Bitbucket <https://bitbucket.org/wyleyr/schoolutils>`_
and `Github <https://github.com/wyleyr/schoolutils>`_. You can clone it
using one of the following commands::
$ git clone https://bitbucket.org/wyleyr/schoolutils.git
$ git clone git://github.com/wyleyr/schoolutils.git
Then run the ``setup.py`` script from the repository root, as above.
schoolutils has no dependencies (besides the Python standard library),
so the installation should go smoothly; if you have any problems, please
`report a bug <https://bitbucket.org/wyleyr/schoolutils/issues>`_.
Configuration
=============
It isn't necessary to configure schoolutils, but it will be faster to
use if you do. The command-line interface expects to find configuration
files in the ``.schoolutils`` directory of your home directory. You
should create three Python modules there: ``config.py``,
``calculators.py``, and ``validators.py``. Sample configuration files
are included in the ``examples`` directory of the source package::
$ mkdir ~/.schoolutils
$ cp path/to/schoolutils_source/examples/*.py ~/.schoolutils
The comments in the sample files explain the values you should provide
there. The most important one in ``config.py`` is ``gradedb_file``,
which should contain the path to your grade database file. If you
don't provide this value, you will have to type it in every time you
start the grading program.
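For illustration, a minimal ``config.py`` might contain little more than that
single value. ``gradedb_file`` is the only setting named in this README, so
copy any further options from the sample file rather than from this sketch::

  # ~/.schoolutils/config.py
  gradedb_file = "/home/yourname/grading/grades.db"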
First run
=========
Once you've installed the package, you can run the grading program as
follows::
$ grade
This will start the grading program's interactive user interface with
the configuration you specified in your ``config.py`` module.
From there, you can:
1) Add a course
2) Add or import students into the course
3) Add assignments
4) Start entering grades
After that
==========
A few concepts
--------------
The grading program has a few important concepts you should be aware
of:
Currently selected course and assignment
The grading program has a notion of the 'current course' and
'current assignment'. Most of the actions you take in the grading
program depend on your having previously selected a course or
assignment. For example, when you add or import students, the
grading program will add them as members of the current course.
When you enter grades, you will be entering grades for the current
assignment. You can specify the current course and assignment in
your ``config.py`` module, or select them interactively.
Entered vs. calculated grades
'Entered' grades are grades you have entered into the database
through the interactive interface. These are the sort of grades you
produce by hand: for example, letter grades for a batch of papers
that you've graded.
'Calculated' grades are grades you use the grading program to
calculate. Grades are calculated by a Python function that you must
provide, in your ``calculators.py`` module (see below). These will
also be saved in the database, when you run the grade calculation
command.
You can use the grading program without ever calculating grades, but
it will (hopefully!) save you some work if you do.
Grade calculation function
A grade calculation function is a function you define in your
``calculators.py`` module. This function should calculate the
calculated grades for a single student on the basis of entered
grades. You should define one grade calculation function per
course.
Grade calculation functions use a special naming convention so the
grading program knows which function to use when calculating
grades. The name should be::
calculate_grade_<course number>_<semester><year>
For example, if you are teaching a course numbered '12A' in the fall
semester of 2013, you'd write a grade calculation function named::
calculate_grade_12A_fall2013
Each grade calculation function will receive a set of database rows
as input, representing a single student's grades in the current
course. The function should return a dictionary or list of
dictionaries representing grades calculated for that student. For
more information, see the example ``calculators.py`` module.
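To make the convention concrete, here is a hedged sketch of such a function
for the 12A example above. The structure of the row objects and of the
returned dictionary is defined by the sample ``calculators.py`` module, so the
attribute and key names used here are illustrative assumptions only::

  def calculate_grade_12A_fall2013(rows):
      # rows: the entered grades for one student in this course
      scores = [float(row.value) for row in rows if row.value is not None]
      average = sum(scores) / len(scores) if scores else 0.0
      # Return one calculated grade as a dictionary (keys assumed)
      return {"name": "course average", "value": average}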
Validator function
A validator function is a function you define in your
``validators.py`` module. It prepares data that you type into the
user interface to be saved to the database. This function should
accept a string and either return an appropriate value or raise a
Python ``ValueError``. If a validator raises a ``ValueError``, the
user interface asks you to re-enter the value until you type one
that validates. For example, the ``letter_grade`` validator ensures
that any string passed to it is a letter grade, so that you can't
save a letter grade of 'W' by mistake.
schoolutils provides sensible defaults for all validator functions,
so defining your own is not strictly necessary. But you can reduce
data-entry errors by providing custom validator functions, which
will override the defaults. See the sample ``validators.py``
module for more information and a list of the validators for which
you can provide custom definitions.
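As a sketch, a custom ``letter_grade`` validator could look like the
following; the set of accepted grades is an assumption here, so adjust it to
your own grading scheme::

  def letter_grade(s):
      """Return s as a letter grade, or raise ValueError."""
      s = s.strip().upper()
      allowed = {"A+", "A", "A-", "B+", "B", "B-",
                 "C+", "C", "C-", "D+", "D", "D-", "F"}
      if s not in allowed:
          raise ValueError("%s is not a letter grade" % s)
      return s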
Command-line options
--------------------
To see command-line options available for the grading program, use::
$ grade --help
Warning
-------
schoolutils is alpha-quality software. It is offered in the hope you
find it useful, but (like all software) it has bugs, so please take
sensible precautions to protect your data. In particular, you should
**backup your grade database file(s)** regularly! This is easy, because
SQLite stores your whole grade database as a single flat file, so just
do it!
As with all Free software, schoolutils has no warranty. Please see
the warranty notice in the license file or the individual source files
for more information.
| /schoolutils-0.1.7.zip/schoolutils-0.1.7/README.rst | 0.744935 | 0.777617 | README.rst | pypi |
import hashlib
import hmac
from six import text_type
import six.moves.urllib.request, six.moves.urllib.parse, six.moves.urllib.error
from xblock.core import XBlock
from xblock.fields import Scope, String
from xblock.fragment import Fragment
from .schoolyourself import SchoolYourselfXBlock
class SchoolYourselfReviewXBlock(SchoolYourselfXBlock):
"""
This block renders a launcher button for a School Yourself review,
which is rendered in an iframe. The block transmits the anonymous
user ID and has a handler that receives information from School
Yourself regarding the user's progress and mastery through the
topic being shown.
"""
has_children = False
has_score = True
weight = 1.0
def get_display_name(self, module_title):
return "Review: %s" % module_title
def student_view(self, context=None):
"""
The primary view of the SchoolYourselfReviewXBlock, shown to students
when viewing courses.
"""
# Construct the URL we're going to stuff into the iframe once
# it gets launched:
partner_url_params = self.get_partner_url_params(self.shared_key)
iframe_url_params = dict(partner_url_params)
iframe_url_params["module"] = self.module_id
mastery_url_params = dict(partner_url_params)
mastery_url_params["tags"] = self.module_id
# Set up the screenshot URL:
screenshot_url = "%s/page/screenshot/%s" % (self.base_url,
self.module_id)
mastery_url = "%s/progress/mastery?%s" % (
self.base_url, six.moves.urllib.parse.urlencode(mastery_url_params))
context = {
"iframe_url": "%s/review/embed?%s" % (
self.base_url, six.moves.urllib.parse.urlencode(iframe_url_params)),
"title": self.module_title,
"icon_url": self.runtime.local_resource_url(self,
"public/review_icon.png"),
"mastery_url": mastery_url
}
# Now actually render the fragment, which is just a button with
# some JS code that handles the click event on that button.
fragment = Fragment(self.render_template("review_student_view.html",
context))
# Load the common JS/CSS libraries:
fragment.add_css_url(
self.runtime.local_resource_url(self, "public/sylib.css"))
fragment.add_javascript_url(
self.runtime.local_resource_url(self, "public/sylib.js"))
# And finally the embedded HTML/JS code:
fragment.add_javascript(self.resource_string(
"static/js/review_student_view.js"))
fragment.add_css(self.resource_string(
"static/css/student_view.css"))
fragment.add_css_url("//fonts.googleapis.com/css?family=Open+Sans:700,400,300")
fragment.initialize_js("SchoolYourselfReviewStudentView")
return fragment
@XBlock.json_handler
def handle_grade(self, data, suffix=""):
"""This is the handler that gets called when we receive grades.
We will verify the message to make sure that it is signed and
that the signature is valid. If everything is good, then we'll
publish a "grade" event for this module.
The actual work is done in handle_grade_json(), and this method
just calls that. This method is just here so that it can be wrapped
by XBlock.json_handler, but the unit test covers the code in
handle_grade_json() to avoid having to wrap everything around a
Request/Response object.
"""
return self.handle_grade_json(data)
def handle_grade_json(self, data):
if not isinstance(data, dict):
return "bad_request"
mastery = data.get("mastery", None)
user_id = data.get("user_id", None)
signature = data.get("signature", None)
if not mastery or not user_id or not signature:
return "forbidden"
# Check that the module ID we care about is actually in the data
# that was sent.
mastery_level = mastery.get(self.module_id, None)
if mastery_level is None:
return "bad_request"
try:
# The mastery level being passed in should be a number, otherwise
# things later on in this method will choke.
mastery_level = float(mastery_level)
except ValueError:
return "bad_request"
# Verify the signature.
sk = self.shared_key
if isinstance(self.shared_key, str):
sk = self.shared_key.encode('utf-8')
verifier = hmac.new(sk, user_id.encode('utf-8'), digestmod='MD5')
for key in sorted(mastery):
verifier.update(key.encode('utf-8'))
# Every entry should be a number.
try:
mastery[key] = float(mastery[key])
except ValueError:
return "bad_request"
verifier.update(b"%.2f" % mastery[key])
# If the signature is invalid, do nothing.
if signature != verifier.hexdigest():
return "invalid_signature"
# If we got here, then everything checks out and we can submit
# a grade for this module.
scaled_mastery_level = min(mastery_level / 0.7, 1.0)
self.runtime.publish(self, "grade",
{ "value": scaled_mastery_level,
"max_value": 1.0 })
return scaled_mastery_level
@staticmethod
def workbench_scenarios():
"""A canned scenario for display in the workbench."""
return [
("SchoolYourselfReviewXBlock",
"""\
<vertical_demo>
<schoolyourself_review
base_url="https://schoolyourself.org"
module_id="algebra/multiplication"
module_title="Multiplication, Multiplication, 'Multiplication'"
shared_key="edx_test"
partner_id="edx_test"
/>
</vertical_demo>
"""),
    ]
| /schoolyourself_xblock-0.2-py3-none-any.whl/schoolyourself/schoolyourself_review.py | 0.58948 | 0.21348 | schoolyourself_review.py | pypi |
import six.moves.urllib.request, six.moves.urllib.parse, six.moves.urllib.error
from xblock.core import XBlock
from xblock.fragment import Fragment
from .schoolyourself import SchoolYourselfXBlock
class SchoolYourselfLessonXBlock(SchoolYourselfXBlock):
"""
This block renders a launcher button for a School Yourself lesson,
which is rendered in an iframe. The block transmits the anonymous
user ID.
"""
has_children = False
has_score = False
def get_display_name(self, module_title):
return "Lesson: %s" % module_title
def student_view(self, context=None):
"""
The primary view of the SchoolYourselfLessonXBlock, shown to students
when viewing courses.
"""
# Construct the URL we're going to stuff into the iframe once
# it gets launched:
url_params = self.get_partner_url_params(self.shared_key)
url_params["id"] = self.module_id
# Set up the screenshot URL:
screenshot_url = "%s/page/screenshot/%s" % (self.base_url,
self.module_id)
context = {
"iframe_url": "%s/page/embed?%s" % (self.base_url,
six.moves.urllib.parse.urlencode(url_params)),
"screenshot_url": screenshot_url,
"title": self.module_title,
"description": self.module_description
}
# Now actually render the fragment, which is just a button with
# some JS code that handles the click event on that button.
fragment = Fragment(self.render_template("lesson_student_view.html",
context))
# Load the common JS/CSS libraries:
fragment.add_css_url(
self.runtime.local_resource_url(self, "public/sylib.css"))
fragment.add_javascript_url(
self.runtime.local_resource_url(self, "public/sylib.js"))
fragment.add_css_url("//fonts.googleapis.com/css?family=Open+Sans:700,400,300")
# And finally the embedded HTML/JS code:
fragment.add_javascript(self.resource_string(
"static/js/lesson_student_view.js"))
fragment.add_css(self.resource_string(
"static/css/student_view.css"))
fragment.initialize_js("SchoolYourselfLessonStudentView")
return fragment
@staticmethod
def workbench_scenarios():
"""A canned scenario for display in the workbench."""
return [
("SchoolYourselfLessonXBlock",
"""\
<vertical_demo>
<schoolyourself_lesson
base_url="https://schoolyourself.org"
module_id="algebra/multiplication"
module_title="Multiplication"
module_description="Multiplying positive numbers, in any order"
shared_key="edx_test"
partner_id="edx_test"
/>
</vertical_demo>
"""),
    ]
| /schoolyourself_xblock-0.2-py3-none-any.whl/schoolyourself/schoolyourself_lesson.py | 0.678114 | 0.202778 | schoolyourself_lesson.py | pypi |
import hashlib
class CrawledResource:
"""A resource crawled by the crawler.
    This is an adapter between the crawler and the API.
    The id is computed from the originating url and the id the resource has within that url.
"""
def __init__(self, resource, origin_urls:list, id_in_origin=""):
"""Create a new crawled resource with the source urls."""
if not origin_urls:
raise ValueError("Expected the resource to have an origin.")
self._resource = resource
self._origin_urls = origin_urls
self._id_in_origin = id_in_origin
@property
def crawled_resource(self):
"""The unchanged crawled resource."""
return self._resource
@property
def origin_urls(self):
"""The urls where this resource is from."""
return self._origin_urls
@property
def provider(self):
"""The provider which identifies this client."""
return {"description": "This crawler crawls urls and adds them to the database.",
"name": __name__.split(".", 1)[0],
"url": "https://github.com/schul-cloud/schul_cloud_url_crawler",
"url_trace": self.origin_urls}
@property
def resource(self):
"""The resource as seen by the crawler."""
resource = self._resource.copy()
resource.setdefault("providers", [])
resource["providers"] = resource["providers"] + [self.provider]
return resource
def get_api_resource_post(self, id_prefix=""):
"""The jsonapi format for the resource."""
return {"data":{"attributes":self.resource, "id": id_prefix+self.id, "type":"resource"}}
@property
def id(self):
"""Return the id of this resource."""
return self.origin_id + self._id_in_origin
@property
def origin_id(self):
"""The id of the origin of this resource."""
return hashlib.sha256(self.origin_url.encode()).hexdigest()
@property
def origin_url(self):
"""Return the url this resource is from."""
return self._origin_urls[0]
@property
def id_in_origin(self):
"""The id the resource has in its originating url."""
return self._id_in_origin
def __repr__(self):
"""A string representation of this resource."""
        return "<{} {}>".format(self.__class__.__name__, self.id)
| /schul_cloud_url_crawler-1.0.17.tar.gz/schul_cloud_url_crawler-1.0.17/schul_cloud_url_crawler/crawled_resource.py | 0.728459 | 0.229978 | crawled_resource.py | pypi |
import click
import schul_cloud_url_crawler.resource_client as resource_client
from schul_cloud_resources_api_v1 import ApiClient, ResourceApi
from schul_cloud_resources_api_v1.rest import ApiException
import schul_cloud_resources_api_v1.auth as auth
from urllib3.exceptions import MaxRetryError
import traceback
import sys
UnReachable = (MaxRetryError,)
error_messages = {
3: "The resources server could be reached but basic authentication failed.",
4: "The resources server could be reached but API-key authentication failed.",
5: "The resources server could be reached but it requires authentication with --basic or --apikey.",
6: "The resource server could not be reached.",
7: "You can only provide one authentication mechanism with --basic and --apikey.",
8: "Basic authentication requires a username and a password divided by \":\". Example: --basic=user:password",
}
def error(return_code):
"""Raise an error and explain it."""
ty, err, tb = sys.exc_info()
if err:
traceback.print_exception(ty, err, tb)
click.echo("Error {}: {}".format(return_code, error_messages[return_code]))
exit(return_code)
def authenticate(basic, apikey):
"""Authenticate with the parameters.
    Return the return code in case of authentication failure.
"""
if basic is not None and apikey is not None:
error(7)
if basic is not None:
username_and_password = basic.split(":", 1)
if len(username_and_password) == 1:
error(8)
username, password = username_and_password
auth.basic(username, password)
return 3
elif apikey is not None:
auth.api_key(apikey)
return 4
auth.none()
return 5
@click.command()
@click.argument("api", type=str, required=True)
@click.argument("urls", type=str, nargs=-1, required=False)
@click.option("--basic", nargs=1, type=str, metavar="username:password",
help="Username an password for authentication at the API.")
@click.option("--apikey", nargs=1, type=str, metavar="api-key",
help="The api-key for authentication at the API.")
@click.option("--delete-all", is_flag=True, default=False,
help="Delete all resources stored on the server. "
"This only affects resources you are authenticated for.")
@click.option("--delete-not-mentioned", is_flag=True, default=False,
help="Delete all resources from urls which are not mentioned in the "
"arguments. Enable this if this is your only crawler.")
@click.option("--id", default="url-crawler", nargs=1,
type=str, metavar="crawler-id",
help="The crawler-id is the id of the crawler. "
"By changing the id, you can connect multiple crawlers which do not"
" interact. "
"The crawler only adds and deletes resources with the crawler-id. "
"--delete-all is the only option that affects crawlers with other ids.")
def main(api, urls=[], basic=None, apikey=None, delete_not_mentioned=False,
         delete_all=False, id="url-crawler"):
    """Fetch resources from the given URLs and post them to the API."""
urls = list(urls)
print(api, urls, basic, apikey, delete_not_mentioned, delete_all, id)
api_client = ApiClient(api)
resource_api = ResourceApi(api_client)
client = resource_client.ResourceClient(resource_api, id)
auth_error = authenticate(basic, apikey)
try:
if delete_all:
client.delete_resources()
if delete_not_mentioned:
client.delete_resources_not_from(urls)
client.update(urls)
except ApiException as err:
if err.status == 401:
click.echo(err.body)
error(auth_error)
raise
except UnReachable:
        error(6)
| /schul_cloud_url_crawler-1.0.17.tar.gz/schul_cloud_url_crawler-1.0.17/schul_cloud_url_crawler/cli.py | 0.450601 | 0.153422 | cli.py | pypi |
class ArgumentError(Exception):
def __init__(self, message=""):
self.message = message
super().__init__(self.message)
class ArgumentTypeError(ArgumentError):
def __init__(self, func_name, argument_name, allowed_types, actual_type, arg):
if type(argument_name) == str and len(argument_name) > 0:
argname = "{} to be of type".format(argument_name)
else:
argname = "type"
type_str = ""
if type(allowed_types) == list:
allowed_type_names = [t.__name__ for t in allowed_types]
if len(allowed_type_names) == 1:
type_str += allowed_type_names[0]
elif len(allowed_type_names) == 2:
type_str += "{} or {}".format(*allowed_type_names)
else:
type_str = ", ".join([str(t) for t in allowed_type_names[:-1]])
type_str += ", or {}".format(allowed_type_names[-1])
elif type(allowed_types) == type:
type_str = allowed_types.__name__
else:
type_str = str(allowed_types)
self.message = "{} expected {} {}, got {} of type {}".format(
func_name, argname, type_str, arg, actual_type.__name__)
super().__init__(self.message)
class ArgumentTypeListError(ArgumentError):
def __init__(self, func_name, valid_fmts, actual_fmt, actual_vals=None):
arg_plural = "argument"
if len(actual_fmt) > 1:
arg_plural += "s"
s = "Invalid types for {} with {} {}, expected".format(func_name, len(actual_fmt), arg_plural)
if len(valid_fmts) >= 1:
if len(valid_fmts) > 1:
s += " one of"
s += " \n"
s += "".join(["\t{}({})\n".format(func_name, ", ".join([t.__name__ for t in fmt])) for fmt in valid_fmts])
else:
s += "{}()\n".format(func_name)
s += "received {}(".format(func_name)
if actual_vals is not None and len(actual_vals) == len(actual_fmt):
s += ", ".join(["{}: {}".format(arg, t.__name__) for arg, t in zip(actual_vals, actual_fmt)])
else:
s += ", ".join([t.__name__ for t in actual_fmt])
s += ")"
self.message = s
super().__init__(self.message)
class ArgumentNumError(ArgumentError):
def __init__(self, func_name, allowed_nums, actual_num):
num_str = ""
if type(allowed_nums) == list:
if len(allowed_nums) == 1:
num_str += str(allowed_nums[0])
elif len(allowed_nums) == 2:
num_str += "{} or {}".format(*allowed_nums)
else:
num_str = ", ".join([str(n) for n in allowed_nums[:-1]])
num_str += ", or {}".format(allowed_nums[-1])
else:
num_str = str(allowed_nums)
self.message = "{} expected {} arguments, got {}".format(
func_name,
num_str,
actual_num
)
super().__init__(self.message)
class ArgumentConditionError(ArgumentError):
def __init__(self, func_name, arg_name, expected_condition, actual_value):
if type(arg_name) == str and len(arg_name) > 0:
argname = "{}".format(arg_name)
else:
argname = "argument"
self.message = "{} expected {} to match \"{}\", got {}".format(
func_name,
argname,
expected_condition,
actual_value
)
super().__init__(self.message)
    pass
| /schulich_ignite-0.1.3-py3-none-any.whl/spark/util/Errors.py | 0.506836 | 0.187504 | Errors.py | pypi |
HTMLColors = [
"aliceblue",
"antiquewhite",
"aqua",
"aquamarine",
"azure",
"beige",
"bisque",
"black",
"blanchedalmond",
"blue",
"blueviolet",
"brown",
"burlywood",
"cadetblue",
"chartreuse",
"chocolate",
"coral",
"cornflowerblue",
"cornsilk",
"crimson",
"cyan",
"darkblue",
"darkcyan",
"darkgoldenrod",
"darkgray",
"darkgreen",
"darkkhaki",
"darkmagenta",
"darkolivegreen",
"darkorange",
"darkorchid",
"darkred",
"darksalmon",
"darkseagreen",
"darkslateblue",
"darkslategray",
"darkturquoise",
"darkviolet",
"deeppink",
"deepskyblue",
"dimgray",
"dodgerblue",
"firebrick",
"floralwhite",
"forestgreen",
"fuchsia",
"gainsboro",
"ghostwhite",
"gold",
"goldenrod",
"gray",
"green",
"greenyellow",
"honeydew",
"hotpink",
"indianred",
"indigo",
"ivory",
"khaki",
"lavender",
"lavenderblush",
"lemonchiffon",
"lightblue",
"lightcoral",
"lightcyan",
"lightgoldenrodyellow",
"lightgray",
"lightgreen",
"lightpink",
"lightsalmon",
"lightseagreen",
"lightskyblue",
"lightslategray",
"lightsteelblue",
"lightyellow",
"lime",
"limegreen",
"linen",
"magenta",
"maroon",
"mediumaquamarine",
"mediumblue",
"mediumpurple",
"mediumseagreen",
"mediumslateblue",
"mediumspringgreen",
"mediumturquoise",
"mediumvioletred",
"midnightblue",
"mintcream",
"mistyrose",
"moccasin",
"navajowhite",
"navy",
"oldlace",
"olive",
"olivedrab",
"orange",
"orangered",
"orchid",
"palegoldenrod",
"palegreen",
"paleturquoise",
"palevioletred",
"papayawhip",
"peachpuff",
"peru",
"pink",
"plum",
"powderblue",
"purple",
"red",
"rosybrown",
"royalblue",
"saddlebrown",
"salmon",
"sandybrown",
"seagreen",
"seashell",
"sienna",
"silver",
"skyblue",
"slateblue",
"slategray",
"snow",
"springgreen",
"steelblue",
"tan",
"teal",
"thistle",
"tomato",
"turquoise",
"violet",
"wheat",
"white",
"whitesmoke",
"yellow",
"yellowgreen"
]
| /schulich_ignite-0.1.3-py3-none-any.whl/spark/util/HTMLColors.py | 0.585457 | 0.377426 | HTMLColors.py | pypi |
from math import sin, cos
from ..decorators import validate_args, ignite_global
from numbers import Real
@validate_args([Real, Real, Real, Real],
[Real, Real, Real, Real, Real],
[Real, Real, Real, Real, Real, Real],
[Real, Real, Real, Real, Real, Real, str])
@ignite_global
def helper_fill_arc(self, *args):
x, y, r, scale_x, scale_y, start, stop, mode = self.arc_args(*args)
if scale_x == 0 or scale_y == 0:
return
self.canvas.translate(x, y)
self.canvas.scale(scale_x, scale_y)
if mode == "open" or mode == "chord":
self.canvas.fill_arc(0, 0, r, start, stop)
elif mode == "default" or mode == "pie":
self.canvas.begin_path()
start_x = r*cos(start)
start_y = r*sin(start)
self.canvas.move_to(start_x, start_y)
self.canvas.arc(0, 0, r, start, stop)
self.canvas.line_to(0, 0)
self.canvas.close_path()
self.canvas.fill()
self.canvas.scale(1/scale_x, 1/scale_y)
self.canvas.translate(-x, -y)
@validate_args([Real, Real, Real, Real],
[Real, Real, Real, Real, Real],
[Real, Real, Real, Real, Real, Real],
[Real, Real, Real, Real, Real, Real, str])
@ignite_global
def helper_stroke_arc(self, *args):
x, y, r, scale_x, scale_y, start, stop, mode = self.arc_args(*args)
if scale_x == 0 or scale_y == 0:
return
start_x = r*cos(start)
start_y = r*sin(start)
self.canvas.translate(x, y)
self.canvas.scale(scale_x, scale_y)
self.canvas.begin_path()
self.canvas.move_to(start_x, start_y)
self.canvas.arc(0, 0, r, start, stop)
if mode == "open" or mode == "default":
self.canvas.move_to(start_x, start_y)
elif mode == "pie":
self.canvas.line_to(0, 0)
elif mode == "chord":
pass
self.canvas.close_path()
self.canvas.stroke()
self.canvas.scale(1/scale_x, 1/scale_y)
self.canvas.translate(-x, -y)
@validate_args([Real, Real, Real, Real],
[Real, Real, Real, Real, Real],
[Real, Real, Real, Real, Real, Real],
[Real, Real, Real, Real, Real, Real, str])
@ignite_global
def helper_arc(self, *args):
self.fill_arc(*args)
    self.stroke_arc(*args)
| /schulich_ignite-0.1.3-py3-none-any.whl/spark/util/helper_functions/arc_functions.py | 0.647241 | 0.39097 | arc_functions.py | pypi |
from ..decorators import *
from ..HTMLColors import HTMLColors
import re
from ..Errors import *
from numbers import Real
from math import pi
from math import sqrt
import random
@validate_args([str, str])
def helper_parse_color_string(self, func_name, s):
rws = re.compile(r'\s')
no_ws = rws.sub('', s).lower()
# Check allowed color strings
if no_ws in HTMLColors:
return no_ws
elif no_ws in self.color_strings:
return self.color_strings[s]
# Check other HTML-permissible formats
else:
for regex in self.regexes:
if regex.fullmatch(no_ws) is not None:
return no_ws
# Not in any permitted format
raise ArgumentConditionError(func_name, "", "Valid HTML format or color names", s)
@validate_args([str], [Real], [Real, Real, Real], [Real, Real, Real, Real])
def helper_parse_color(self, *args, func_name="parse_color"):
def clip(x, lb, ub):
return min(max(x, lb), ub)
argc = len(args)
if argc == 1:
if isinstance(args[0], Real):
n = int(clip(args[0], 0, 255))
return f"rgb({n}, {n}, {n})"
elif isinstance(args[0], str):
return self.parse_color_string(func_name, args[0])
raise ArgumentConditionError(func_name, "", "Valid HTML format or color names", args[0])
elif argc == 3 or argc == 4:
color_args = [int(clip(arg, 0, 255)) for arg in args[:3]]
if argc == 3:
return "rgb({}, {}, {})".format(*color_args)
else:
# Clip alpha between 0 and 1
alpha_arg = clip(args[3], 0, 1.0)
return "rgba({}, {}, {}, {})".format(*color_args, alpha_arg)
else:
raise ArgumentNumError(func_name, [1, 3, 4], argc)
@validate_args([str], [Real], [Real, Real, Real], [Real, Real, Real, Real])
@ignite_global
def helper_color(self, *args):
return self.parse_color(*args)
@validate_args([Real, Real, Real, Real],
[Real, Real, Real, Real, Real],
[Real, Real, Real, Real, Real, Real],
[Real, Real, Real, Real, Real, Real, str])
def helper_arc_args(self, *args):
argc = len(args)
x, y, w, h = args[:4]
w, h = abs(w), abs(h)
defaults = [0, 2*pi, "default"]
start, stop, mode = [*args[4:argc], *defaults[argc-4:]]
while start < 0:
start += 2*pi
while start > 2*pi:
start -= 2*pi
while stop < 0:
stop += 2*pi
while stop > 2*pi:
        stop -= 2*pi
d = max(w, h)/2
if d == 0:
return x, y, d, 0, 0, start, stop, mode
else:
w_ratio = w / d
h_ratio = h / d
return x, y, d/2, w_ratio, h_ratio, start, stop, mode
@validate_args([])
@ignite_global
def helper_random(self, *args):
return random.random()
@validate_args([int])
@ignite_global
def helper_randint(self, *args):
return random.randint(0, args[0])
@validate_args([Real, Real, Real, Real])
@ignite_global
def helper_bounding_box(self, *args):
x, y, w, h = args
left = min(x, x + w)
abs_width = abs(w)
top = min(y, y + h)
abs_height = abs(h)
return (left, top, abs_width, abs_height)
@validate_args([list, list], [list, list, bool], [tuple, tuple], [tuple, tuple, bool])
@ignite_global
def helper_collided(self, *args):
x1, y1, width1, height1 = args[0]
x2, y2, width2, height2 = args[1]
sizes = {'bounding_box1 width': width1, 'bounding_box1 height': height1, 'bounding_box2 width': width2, 'bounding_box2 height': height2}
for size_name, size_val in sizes.items():
if size_val < 0:
raise ArgumentError("collided expected {} to be greater or equal to 0, got {}".format(size_name, size_val))
overlap_on_equal = len(args) == 3 and args[2]
return self.axis_overlapped(x1, width1, x2, width2, overlap_on_equal) and self.axis_overlapped(y1, height1, y2, height2, overlap_on_equal)
@validate_args([Real, Real, Real, Real], [Real, Real, Real, Real, bool])
@ignite_global
def helper_axis_overlapped(self, *args):
point1, length1, point2, length2 = args[:4]
if len(args) == 5 and args[4]:
return point1 + length1 >= point2 and point2 + length2 >= point1
else:
return point1 + length1 > point2 and point2 + length2 > point1
@validate_args([Real, Real, Real, Real])
@ignite_global
def helper_dist(self, *args):
x1, y1, x2, y2 = args[:4]
    return sqrt((y2 - y1)**2 + (x2 - x1)**2)
| /schulich_ignite-0.1.3-py3-none-any.whl/spark/util/helper_functions/misc_functions.py | 0.577614 | 0.356055 | misc_functions.py | pypi |
from __future__ import annotations
from typing import TYPE_CHECKING, Dict
if TYPE_CHECKING:
from ...core import Core
from functools import reduce
from operator import and_
from ..decorators import *
_phys_to_typed = {
"Backquote": ('`', '~'),
"Digit1": ('1', '!'),
"Digit2": ('2', '@'),
"Digit3": ('3', '#'),
"Digit4": ('4', '$'),
"Digit5": ('5', '%'),
"Digit6": ('6', '^'),
"Digit7": ('7', '&'),
"Digit8": ('8', '*'),
"Digit9": ('9', '('),
"Digit0": ('0', ')'),
"Minus": ('-', '_'),
"Equal": ('=', '+'),
"BracketLeft": ('[', '{'),
"BracketRight": (']', '}'),
"Backslash": ('\\', '|'),
"Semicolon": (';', ':'),
"Quote": ('\'', '"'),
"Comma": (',', '<'),
"Period": ('.', '>'),
"Slash": ('/', '?')
}
def helper_handle_kb_event(self: Core, event: Dict[str, str]):
self.refresh_last_activity()
key_pressed = self._methods.get("key_pressed", None)
key_released = self._methods.get("key_released", None)
key_repeated = self._methods.get("key_repeated", None)
self.key = event['key']
if event['type'] == "keydown":
if not event['repeat']:
self._keys_held[self.key] = True
if key_pressed:
key_pressed()
else:
if key_repeated:
key_repeated()
else:
if event['code'].isalpha() and len(event['code']) == 1:
self._keys_held[event['code'].lower()] = False
self._keys_held[event['code'].upper()] = False
elif event['code'] in _phys_to_typed:
for k in _phys_to_typed[event['code']]:
self._keys_held[k] = False
else:
self._keys_held[self.key] = False
if key_released:
key_released()
@ignite_global
def helper_keys_held(self: Core, *keys, pattern=None):
if pattern is None:
pattern = [True]*len(keys)
match = [self._keys_held.get(key, False) == want for key, want in zip(keys, pattern)]
return reduce(and_, match)
@validate_args([str], [str, bool])
@ignite_global
def helper_key_held(self: Core, *args):
    # args is a tuple, so convert it to a list before appending the default value.
    args = list(args)
    if len(args) == 1:
        args.append(True)
    return self.keys_held(args[0], pattern=[args[1]])
| /schulich_ignite-0.1.3-py3-none-any.whl/spark/util/helper_functions/keyboard_functions.py | 0.655557 | 0.314392 | keyboard_functions.py | pypi |
import itertools
from gettext import gettext as _
from typing import (
Collection, Container, List, Mapping, Tuple, Sequence
)
from schulze_condorcet.util import as_vote_string, as_vote_tuples
from schulze_condorcet.strength import winning_votes
from schulze_condorcet.types import (
Candidate, DetailedResultLevel, LinkStrength, PairwisePreference, SchulzeResult,
StrengthCallback, VoteString
)
def _schulze_winners(d: Mapping[Tuple[Candidate, Candidate], int],
candidates: Sequence[Candidate]) -> List[Candidate]:
"""This is the abstract part of the Schulze method doing the actual work.
The candidates are the vertices of a graph and the metric (in form
of ``d``) describes the strength of the links between the
candidates, that is edge weights.
We determine the strongest path from each vertex to each other
vertex. This gives a transitive relation, which enables us thus to
determine winners as maximal elements.
"""
# First determine the strongest paths
# This is a variant of the Floyd–Warshall algorithm to determine the
# widest path.
p = {(x, y): d[(x, y)] for x in candidates for y in candidates}
for i in candidates:
for j in candidates:
if i == j:
continue
for k in candidates:
if k in {i, j}:
continue
p[(j, k)] = max(p[(j, k)], min(p[(j, i)], p[(i, k)]))
# Second determine winners
winners = []
for i in candidates:
if all(p[(i, j)] >= p[(j, i)] for j in candidates):
winners.append(i)
return winners
def _check_consistency(votes: Collection[VoteString], candidates: Sequence[Candidate]) -> None:
"""Check that the given vote strings are consistent with the provided candidates.
This means, each vote string contains exactly the given candidates, separated by
'>' and '=', and each candidate occurs in each vote string exactly once.
"""
if any(">" in candidate or "=" in candidate for candidate in candidates):
raise ValueError(_("A candidate contains a forbidden character."))
candidates_set = set(candidates)
for vote in as_vote_tuples(votes):
vote_candidates = [c for c in itertools.chain.from_iterable(vote)]
vote_candidates_set = set(vote_candidates)
if candidates_set != vote_candidates_set:
if candidates_set < vote_candidates_set:
raise ValueError(_("Superfluous candidate in vote string."))
else:
raise ValueError(_("Missing candidate in vote string."))
if not len(vote_candidates) == len(vote_candidates_set):
raise ValueError(_("Every candidate must occur exactly once in each vote."))
def _subindex(alist: Collection[Container[str]], element: str) -> int:
"""The element is in the list at which position in the big list.
:returns: ``ret`` such that ``element in alist[ret]``
"""
for index, sublist in enumerate(alist):
if element in sublist:
return index
raise ValueError(_("Not in list."))
def _pairwise_preference(
votes: Collection[VoteString],
candidates: Sequence[Candidate],
) -> PairwisePreference:
"""Calculate the pairwise preference of all candidates from all given votes."""
counts = {(x, y): 0 for x in candidates for y in candidates}
for vote in as_vote_tuples(votes):
for x in candidates:
for y in candidates:
if _subindex(vote, x) < _subindex(vote, y):
counts[(x, y)] += 1
return counts
def _schulze_evaluate_routine(
votes: Collection[VoteString],
candidates: Sequence[Candidate],
strength: StrengthCallback
) -> Tuple[PairwisePreference, SchulzeResult]:
"""The routine to determine the result of the schulze-condorcet method.
This is outsourced into this helper function to avoid duplicate code or duplicate
calculations inside the schulze_evaluate and schulze_evaluate_detailed functions.
"""
# First we count the number of votes preferring x to y
counts = _pairwise_preference(votes, candidates)
# Second we calculate a numeric link strength abstracting the problem into the realm
# of graphs with one vertex per candidate
d: LinkStrength = {(x, y): strength(support=counts[(x, y)],
opposition=counts[(y, x)],
totalvotes=len(votes))
for x in candidates for y in candidates}
# Third we execute the Schulze method by iteratively determining winners
result: SchulzeResult = []
while True:
done = {x for level in result for x in level}
# avoid sets to preserve ordering
remaining = tuple(c for c in candidates if c not in done)
if not remaining:
break
winners = _schulze_winners(d, remaining)
result.append(winners)
return counts, result
def schulze_evaluate(
votes: Collection[VoteString],
candidates: Sequence[Candidate],
strength: StrengthCallback = winning_votes
) -> VoteString:
"""Use the Schulze method to cumulate preference lists (votes) into one list (vote).
The Schulze method is described here: http://www.9mail.de/m-schulze/schulze1.pdf.
Also the Wikipedia article is pretty nice.
One thing to mention is, that we do not do any tie breaking.
For a nice set of examples see the test suite.
Note that the candidates should already be sorted meaningful. The return of this
function is stable under arbitrary sorting of the candidates, but only identical
if the candidates are passed in the same order. This roots in the fact that the
result ``1=2>0`` and ``2=1>0`` carry the same meaning but are not identical.
Therefore, we determine the order of candidates equal to each other in the final
result by the order of those in the explicitly passed in candidates.
The return of this function is identical under arbitrary sorting of the votes passed
in. Moreover, the order of equal candidates in the passed in votes does not matter.
:param votes: The vote strings on which base we want to determine the overall
preference. One vote has the form ``3>0>1=2>4``, where the names between the
relation signs are exactly those passed in with the ``candidates`` parameter.
:param candidates: We require that the candidates be explicitly passed. This allows
for more flexibility (like returning a useful result for zero votes).
:param strength: A function which will be used as the metric on the graph of all
candidates. See `strength.py` for more detailed information.
:returns: A vote string, reflecting the overall preference.
"""
# Validate votes and candidate input to be consistent
_check_consistency(votes, candidates)
_, result = _schulze_evaluate_routine(votes, candidates, strength)
# Construct a vote string reflecting the overall preference
return as_vote_string(result)
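# Illustrative sketch (comments only, not part of the library API): Candidate
# and VoteString are assumed to be thin wrappers around str (see types.py), so
# with three candidates and three votes a call looks roughly like this.
#
#   candidates = [Candidate("0"), Candidate("1"), Candidate("2")]
#   votes = [VoteString("0>1=2"), VoteString("1>0>2"), VoteString("0=1>2")]
#   schulze_evaluate(votes, candidates)
#
# Under the default winning_votes strength this should yield "0=1>2", since
# candidates 0 and 1 each beat 2 pairwise while being tied against each other.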
def schulze_evaluate_detailed(
votes: Collection[VoteString],
candidates: Sequence[Candidate],
strength: StrengthCallback = winning_votes
) -> List[DetailedResultLevel]:
"""Construct a more detailed representation of the result by adding some stats.
This works equally to the schulze_evaluate function but constructs a more detailed
result, including how much of a difference there was between the individual levels
of preference in the overall result.
"""
# Validate votes and candidate input to be consistent
_check_consistency(votes, candidates)
counts, result = _schulze_evaluate_routine(votes, candidates, strength)
# Construct the DetailedResult. This contains a list of dicts, one for each
# level of preference, containing the preferred and rejected candidates and the
# numbers of support and opposition (the pairwise preference) between all
# pairwise combinations of preferred and rejected candidates.
detailed: List[DetailedResultLevel] = list()
for preferred_candidates, rejected_candidates in zip(result, result[1:]):
level: DetailedResultLevel = {
# TODO maybe use simply tuples instead of lists here?
'preferred': list(preferred_candidates),
'rejected': list(rejected_candidates),
'support': {
(preferred, rejected): counts[preferred, rejected]
for preferred in preferred_candidates
for rejected in rejected_candidates},
'opposition': {
(preferred, rejected): counts[rejected, preferred]
for preferred in preferred_candidates
for rejected in rejected_candidates}
}
detailed.append(level)
return detailed
def pairwise_preference(
votes: Collection[VoteString],
candidates: Sequence[Candidate],
) -> PairwisePreference:
"""Calculate the pairwise preference of all candidates from all given votes.
While this does not yet reveal the overall preference, it can give some more
insights in the sentiments of the voters regarding two candidates compared to each
other.
"""
# Validate votes and candidate input to be consistent
_check_consistency(votes, candidates)
    return _pairwise_preference(votes, candidates)
| /schulze-condorcet-2.0.0.tar.gz/schulze-condorcet-2.0.0/schulze_condorcet/schulze_condorcet.py | 0.907072 | 0.453625 | schulze_condorcet.py | pypi |
# schupy -- A python package for modeling and analyzing Schumann resonances
schupy is an open-source python package aimed at modeling and analyzing Schumann resonances (SRs), the global electromagnetic resonances of the Earth-ionosphere cavity resonator in the lowest part of the extremely low frequency band (<100 Hz).
## Usage
#### `forward_tdte` function
The `forward_tdte` function of schupy uses the analytical solution of the 2-D telegraph equation (TDTE) obtained for uniform cavity and is able to determine SRs generated by an arbitrary number of sources located in given positions and returns the theoretical power spectral density of the field components for an arbitrarily located observing station. The sources can be either pointsources or extended ones with a specified size.
The function takes the following arguments:
| Name | Type | Description | Unit | Default value |
| ------------- |:-------------:| ------------- | ------------ | ---------- |
| `s_lat` | LIST | Geographical latitude(s) of the source(s) | deg |
| `s_lon` | LIST | Geographical longitude(s) of the source(s) | deg |
| `s_int` | LIST | Intensiti(es) of the source(s) | C^2 km^2 s^-1 |
| `m_lat` | FLOAT | Geographical latitude of the observing station | deg |
| `m_lon` | FLOAT | Geographical longitude of the observing station | deg |
| `freq` | LIST | Field components are calculated on these frequencies | Hz |
| `radius` | FLOAT | Radius of the extended sources (0 in the case of pointsource(s)) | Mm | 0 |
| `n` | INT | Maximal order of Legendre-polynomials to sum | | 500 |
| `mapshow` | BOOL | Sets whether to show a map of the sources and the station or not | | False |
| `mapsave` | BOOL | Sets whether to save the map of the sources and the station or not | | False |
| `mapfilename` | STR | Name of the file to save the map into | | schupy_map.png |
| `plotshow` | BOOL | Sets whether to plot the power spectral densities or not | | False |
| Name | Type | Description | Default value | Possible values |
| ------------- |:-------------:| ------------- | ------------ | ---------- |
| `h` | STRING | Method of calculating complex ionospheric heights | mushtak | mushtak, kulak |
| `ret` | STRING | Returned field components | all | all, Er, Btheta, Bphi |
By setting the `radius` value to any number greater than zero, the user can model extended sources with randomly distributed pointsources inside a circle having the given radius, whose intensities sum up to the given `s_int`.
By specifying `h` the user can choose the preferred method of calculating complex ionospheric heights. The two methods are described in:
*V. C. Mushtak and E. R. Williams (2002): Elf propagation parameters for uniform models of the earth-ionosphere waveguide, Journal of Atmospheric and Solar-Terrestrial Physics, 64.*
and in:
*A. Kulak and J. Mlynarczyk (2013): Elf propagation parameters for the ground-ionosphere waveguide with finite ground conductivity, IEEE Transactions on Antennas and Propagation, 61.*.
schupy can visualize the specified sources and observing station on a world map. The station is shown as a rectangle while the sources are indicated by circles whose sizes are proportional to their intensities. Visualization relies on the `cartopy` package: https://scitools.org.uk/cartopy/docs/latest/
The function plots and returns the following quantities at the location of the given observing station:
- `Er`: the vertical component of the electric field
- `Btheta`: the N-S component of the magnetic field measured by E-W oriented magnetic coils
- `Bphi`: the E-W component of the magnetic field measured by N-S oriented magnetic coils
An example of how to run the function:
~~~~
import schupy as sp
import numpy as np
source_latitudes = [10.0, 0.0, 0.0]
source_longitudes = [10.0, -80.0, 110.0]
source_intensities = [1e5, 8e4, 7e4]
frequencies = np.arange(5, 30, 0.1)
obs_latitude = 47.6
obs_longitude = 16.7
sp.forward_tdte(source_latitudes, source_longitudes, source_intensities, obs_latitude, obs_longitude, frequencies, h='mushtak', ret='Bphi', radius = 0, mapshow = False, mapsave = False, plotshow = True)
~~~~
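To model the same sources as extended ones rather than pointsources, only the `radius` argument needs to change; the 2 Mm value below is purely illustrative:
~~~~
sp.forward_tdte(source_latitudes, source_longitudes, source_intensities,
                obs_latitude, obs_longitude, frequencies,
                radius=2.0, plotshow=True)
~~~~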
## Acknowledgement
The schupy package is developed by G. Dalya, T. Bozoki, K. Kapas, J. Takatsy, E. Pracser and G. Satori. Please send your questions and comments to `[email protected]`. If you use the Schupy package for your research, please cite our paper:
@article{BOZOKI2019105144,
title = "Modeling Schumann resonances with schupy",
journal = "Journal of Atmospheric and Solar-Terrestrial Physics",
volume = "196",
pages = "105144",
year = "2019",
issn = "1364-6826",
doi = "https://doi.org/10.1016/j.jastp.2019.105144",
url = "http://www.sciencedirect.com/science/article/pii/S1364682619304134",
author = "Tamás Bozoki and Erno Pracser and Gabriella Satori and Gergely Dalya and Kornel Kapas and Janos Takatsy",
keywords = "Schumann resonances, Earth-ionosphere cavity, Numerical model, Python package",
abstract = "Schupy is an open-source python package aimed at modeling and analyzing Schumann resonances (SRs), the global electromagnetic resonances of the Earth-ionosphere cavity resonator in the lowest part of the extremely low frequency band (<100 Hz). Its very-first function forward_tdte applies the solution of the 2-D telegraph equation introduced recently by Prácser et al. (2019) for a uniform cavity and is able to determine theoretical SR spectra for arbitrary source-observer configurations. It can be applied for both modeling extraordinarily large SR-transients or “background” SRs excited by incoherently superimposed lightning strokes within an extended source region. Three short studies are presented which might be important for SR related research. With the forward_tdte function our aim is to provide a medium complexity numerical background for the interpretation of SR observations. We would like to encourage the community to join our project in developing open-source analyzing capacities for SR research as part of the schupy package."
}
| /schupy-1.0.12.tar.gz/schupy-1.0.12/README.md | 0.929136 | 0.955444 | README.md | pypi |
import json
import urllib.parse
from . import urls
from .account_information import Position, Account
from .authentication import SessionManager
class Schwab(SessionManager):
def __init__(self, **kwargs):
"""
The Schwab class. Used to interact with schwab.
"""
self.headless = kwargs.get("headless", True)
self.browserType = kwargs.get("browserType", "firefox")
super(Schwab, self).__init__()
def get_account_info(self):
"""
Returns a dictionary of Account objects where the key is the account number
"""
account_info = dict()
r = self.session.get(urls.positions_data())
response = json.loads(r.text)
for account in response['Accounts']:
positions = list()
for security_group in account["SecurityGroupings"]:
for position in security_group["Positions"]:
positions.append(
Position(
position["DefaultSymbol"],
position["Description"],
int(position["Quantity"]),
float(position["Cost"]),
float(position["MarketValue"])
)._as_dict()
)
account_info[int(account["AccountId"])] = Account(
account["AccountId"],
positions,
account["Totals"]["MarketValue"],
account["Totals"]["CashInvestments"],
account["Totals"]["AccountValue"],
account["Totals"]["Cost"],
)._as_dict()
return account_info
def trade(self, ticker, side, qty, account_id, dry_run=True):
"""
ticker (Str) - The symbol you want to trade,
side (str) - Either 'Buy' or 'Sell',
qty (int) - The amount of shares to buy/sell,
account_id (int) - The account ID to place the trade on. If the ID is XXXX-XXXX,
we're looking for just XXXXXXXX.
Returns messages (list of strings), is_success (boolean)
"""
if side == "Buy":
buySellCode = 1
elif side == "Sell":
buySellCode = 2
else:
raise Exception("side must be either Buy or Sell")
data = {
"IsMinQty":False,
"CustomerId":str(account_id),
"BuySellCode":buySellCode,
"Quantity":str(qty),
"IsReinvestDividends":False,
"SecurityId":ticker,
"TimeInForce":"1", # Day Only
"OrderType":1, # Market Order
"CblMethod":"FIFO",
"CblDefault":"FIFO",
"CostBasis":"FIFO",
}
r = self.session.post(urls.order_verification(), data)
if r.status_code != 200:
return [r.text], False
response = json.loads(r.text)
messages = list()
for message in response["Messages"]:
messages.append(message["Message"])
if dry_run:
return messages, True
data = {
"AccountId": str(account_id),
"ActionType": side,
"ActionTypeText": side,
"BuyAction": side == "Buy",
"CostBasis": "FIFO",
"CostBasisMethod": "FIFO",
"IsMarketHours": True,
"ItemIssueId": int(response['IssueId']),
"NetAmount": response['NetAmount'],
"OrderId": int(response["Id"]),
"OrderType": "Market",
"Principal": response['QuoteAmount'],
"Quantity": str(qty),
"ShortDescription": urllib.parse.quote_plus(response['IssueShortDescription']),
"Symbol": response["IssueSymbol"],
"Timing": "Day Only"
}
r = self.session.post(urls.order_confirmation(), data)
if r.status_code != 200:
messages.append(r.text)
return messages, False
response = json.loads(r.text)
if response["ReturnCode"] == 0:
return messages, True
        return messages, False
| /schwab_api-0.2.3.tar.gz/schwab_api-0.2.3/schwab_api/schwab.py | 0.575349 | 0.228931 | schwab.py | pypi |
import logging
import sys
__all__ = ['contextfile_logger', 'ForwardingLogger']
class ForwardingLogger(logging.Logger):
"""
This logger forwards messages above a certain level (by default: all messages)
to a configured parent logger. Optionally it can prepend the configured
"forward_prefix" to all *forwarded* log messages.
"forward_suffix" works like "forward_prefix" but appends some string.
    Python's default logging module cannot handle this because
a) a logger's log level is only applied for messages emitted directly on
that logger (not for propagated log messages), see
https://mg.pov.lt/blog/logging-levels.html
    b) adding a log prefix only for certain loggers can only be done by
duplicating handler configuration. Python's handlers are quite basic
so if the duplicated handlers access a shared resource (e.g. a log file)
Python will open it twice (which causes data loss if mode='w' is
used).
c) and last but not least we often need to configure the specific logging
handlers dynamically (e.g. log to a context-dependent file) which is
not doable via Python's fileConfig either - so we can go fully dynamic
here...
"""
def __init__(self, *args, **kwargs):
self._forward_to = kwargs.pop('forward_to')
self._forward_prefix = kwargs.pop('forward_prefix', None)
self._forward_suffix = kwargs.pop('forward_suffix', None)
self._forward_minlevel = kwargs.pop('forward_minlevel', logging.NOTSET)
if (not args) and ('name' not in kwargs):
name = self.__class__.__name__
args = (name, )
super(ForwardingLogger, self).__init__(*args, **kwargs)
def callHandlers(self, record):
nr_handlers = self._call_handlers(record)
if self._forward_to is None:
self._emit_last_resort_message(record, nr_handlers)
# "logging.NOTSET" (default) is defined as 0 so that works here just fine
if (record.levelno >= self._forward_minlevel) and (self._forward_to is not None):
msg = record.msg
if self._forward_prefix:
msg = self._forward_prefix + msg
if self._forward_suffix:
msg += self._forward_suffix
record_kwargs = {
'exc_info': record.exc_info,
}
if hasattr(record, 'stack_info'):
# Python 3
record_kwargs['stack_info'] = record.stack_info
self._forward_to.log(record.levelno, msg, *record.args, **record_kwargs)
def _call_handlers(self, record):
# ,--- mostly copied from logging.Logger.callHandlers -----------------
logger = self
nr_found = 0
while logger:
for handler in logger.handlers:
nr_found = nr_found + 1
if record.levelno >= handler.level:
handler.handle(record)
if logger.propagate:
logger = logger.parent
else:
break
return nr_found
# `--- end copy -------------------------------------------------------
def _emit_last_resort_message(self, record, nr_handlers):
# ,--- mostly copied from logging.Logger.callHandlers -----------------
if nr_handlers > 0:
return
if logging.lastResort:
if record.levelno >= logging.lastResort.level:
logging.lastResort.handle(record)
elif logging.raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = True
# `--- end copy -------------------------------------------------------
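# Usage sketch (illustrative): forward WARNING and above from a job-specific logger
# to an application-wide logger, prefixing the forwarded messages. The logger names
# below are placeholders, not part of this module.
#
#   import logging
#   app_log = logging.getLogger('app')
#   job_log = ForwardingLogger('job-42',
#                              forward_to=app_log,
#                              forward_prefix='[job-42] ',
#                              forward_minlevel=logging.WARNING)
#   job_log.addHandler(logging.StreamHandler())
#   job_log.warning('something happened')  # handled locally and forwarded to app_log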
def contextfile_logger(logger_name, log_path=None, handler=None, **kwargs):
"""
Return a ForwardingLogger which logs to the given logfile.
This is a generic example how to use the ForwardingLogger and can be used
to create log files which are placed near the data they are referring to.
"""
log = ForwardingLogger(logger_name,
forward_to=kwargs.pop('forward_to', None),
forward_prefix=kwargs.pop('forward_prefix', None),
forward_minlevel=kwargs.pop('forward_minlevel', logging.NOTSET),
**kwargs
)
if handler is None:
# The logging module does not keep a reference to this FileHandler anywhere
# as we are instantiating it directly (not by name or fileconfig).
# That means Python's garbage collection will work just fine and the
# underlying log file will be closed when our batch-specific
# ForwardingLogger goes out of scope.
handler = logging.FileHandler(log_path, delay=True)
handler.setFormatter(logging.Formatter(
fmt='%(asctime)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
))
log.addHandler(handler)
    return log
| /schwarzlog-0.6.2.tar.gz/schwarzlog-0.6.2/schwarz/log_utils/forwarding_logger.py | 0.562177 | 0.196402 | forwarding_logger.py | pypi |
import json
from typing import Tuple
import pandas as pd
import requests
BRANCH_URL = "https://bank.gov.ua/NBU_BankInfo/get_data_branch?json"
PARENT_URL = "https://bank.gov.ua/NBU_BankInfo/get_data_branch_glbank?json"
def split_names(s) -> Tuple[str, str]:
"""This will split the `NAME_E` line from the API into a name and a short name"""
name, short_name = [name.strip() for name in s[:-1].split(" (скорочена назва - ")]
return name, short_name
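# Illustrative example (the exact NBU name formatting shown here is an assumption):
#   split_names('JOINT STOCK COMPANY "FOOBANK" (скорочена назва - JSC "FOOBANK")')
#   returns ('JOINT STOCK COMPANY "FOOBANK"', 'JSC "FOOBANK"')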
def get_data(filter_insolvent: bool = True) -> pd.DataFrame:
# Get raw dataframes for parent banks and branches
with requests.get(PARENT_URL) as r:
parents = pd.read_json(r.text)
with requests.get(BRANCH_URL) as r:
branches = pd.read_json(r.text)
# Filter out insolvent branches and branches of insolvent banks
if filter_insolvent:
branches = branches.loc[
(branches["N_STAN"] == "Нормальний") & (branches["NSTAN_GOL"] == "Нормальний")
]
# Note that the National Bank of Ukraine provides English names for banking
# institutions, but not for branches. Therefore we enrich the `branches`
# dataframe with the English name for the parent bank
# Add empty column to `branches` for full and short English name for head bank
branches["NGOL_E"] = ""
branches["NGOL_E_SHORT"] = ""
for idx, row in branches.iterrows():
# Get parent bank identifier
glmfo = row["GLMFO"]
        # Get the full and short English names of the parent bank from the parents dataframe
parent_names = parents.loc[parents["GLMFO"] == glmfo]["NAME_E"].iloc[0]
parent_full_name, parent_short_name = split_names(parent_names)
branches.loc[idx, "NGOL_E"] = parent_full_name # type: ignore
branches.loc[idx, "NGOL_E_SHORT"] = parent_short_name # type: ignore
return branches
def process():
branches = get_data()
registry = []
for idx, row in branches.iterrows():
registry.append(
{
"country_code": "UA",
"primary": row["TYP"] == 0,
"bic": "",
"bank_code": str(row["MFO"]),
"name": row["FULLNAME"],
"short_name": row["NGOL_E_SHORT"],
}
)
print(f"Fetched {len(registry)} bank records")
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_ua.json", "w+") as fp:
        json.dump(process(), fp, indent=2, ensure_ascii=False)
| /schwifty-2023.6.0.tar.gz/schwifty-2023.6.0/scripts/get_bank_registry_ua.py | 0.673729 | 0.286263 | get_bank_registry_ua.py | pypi |
import json
import re
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
COUNTRY_CODE_PATTERN = r"[A-Z]{2}"
EMPTY_RANGE = (0, 0)
URL = "https://www.swift.com/standards/data-standards/iban"
def get_raw():
soup = BeautifulSoup(requests.get(URL).content, "html.parser")
link = soup.find("a", attrs={"data-tracking-title": "IBAN Registry (TXT)"})
return requests.get(urljoin(URL, link["href"])).content.decode(encoding="latin1")
def parse_int(raw):
return int(re.search(r"\d+", raw).group())
def parse_range(raw):
pattern = r".*?(?P<from>\d+)\s*-\s*(?P<to>\d+)"
match = re.search(pattern, raw)
if not match:
return EMPTY_RANGE
return (int(match["from"]) - 1, int(match["to"]))
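# Worked example (illustrative): the registry lists 1-based, inclusive positions,
# which are converted to 0-based, end-exclusive bounds suitable for slicing:
#   parse_range("1-4") -> (0, 4)
#   parse_range("5-8") -> (4, 8)
#   parse_range("")    -> EMPTY_RANGE, i.e. (0, 0)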
def parse(raw):
columns = {}
for line in raw.split("\r\n"):
header, *rows = line.split("\t")
if header == "IBAN prefix country code (ISO 3166)":
columns["country"] = [re.search(COUNTRY_CODE_PATTERN, item).group() for item in rows]
elif header == "Country code includes other countries/territories":
columns["other_countries"] = [re.findall(COUNTRY_CODE_PATTERN, item) for item in rows]
elif header == "BBAN structure":
columns["bban_spec"] = rows
elif header == "BBAN length":
columns["bban_length"] = [parse_int(item) for item in rows]
elif header == "Bank identifier position within the BBAN":
columns["bank_code_position"] = [parse_range(item) for item in rows]
elif header == "Branch identifier position within the BBAN":
columns["branch_code_position"] = [parse_range(item) for item in rows]
elif header == "IBAN structure":
columns["iban_spec"] = rows
elif header == "IBAN length":
columns["iban_length"] = [parse_int(item) for item in rows]
return [dict(zip(columns.keys(), row)) for row in zip(*columns.values())]
def process(records):
registry = {}
for record in records:
country_codes = [record["country"]]
country_codes.extend(record["other_countries"])
for code in country_codes:
registry[code] = {
"bban_spec": record["bban_spec"],
"iban_spec": record["iban_spec"],
"bban_length": record["bban_length"],
"iban_length": record["iban_length"],
"positions": process_positions(record),
}
return registry
def process_positions(record):
bank_code = record["bank_code_position"]
branch_code = record["branch_code_position"]
if branch_code == EMPTY_RANGE:
branch_code = (bank_code[1], bank_code[1])
return {
"account_code": (max(bank_code[1], branch_code[1]), record["bban_length"]),
"bank_code": bank_code,
"branch_code": branch_code,
}
if __name__ == "__main__":
with open("schwifty/iban_registry/generated.json", "w+") as fp:
        json.dump(process(parse(get_raw())), fp, indent=2)
| /schwifty-2023.6.0.tar.gz/schwifty-2023.6.0/scripts/get_iban_registry.py | 0.447219 | 0.293664 | get_iban_registry.py | pypi |
from math import sqrt
# Pandas imports
from pandas import DataFrame
# Numpy imports
from numpy import mean, std, median, amin, amax, percentile
# Scipy imports
from scipy.stats import skew, kurtosis, sem
from .base import Analysis, std_output
from .exc import NoDataError, MinimumSizeError
from ..data import Vector, Categorical, is_dict, is_group, is_categorical, is_vector, is_tuple
class VectorStatistics(Analysis):
"""Reports basic summary stats for a provided vector."""
_min_size = 1
_name = 'Statistics'
_n = 'n'
_mean = 'Mean'
_std = 'Std Dev'
_ste = 'Std Error'
_range = 'Range'
_skew = 'Skewness'
_kurt = 'Kurtosis'
_iqr = 'IQR'
_q1 = '25%'
_q2 = '50%'
_q3 = '75%'
_min = 'Minimum'
_max = "Maximum"
def __init__(self, data, sample=True, display=True):
self._sample = sample
d = Vector(data)
if d.is_empty():
raise NoDataError("Cannot perform the test because there is no data")
if len(d) <= self._min_size:
raise MinimumSizeError("length of data is less than the minimum size {}".format(self._min_size))
super(VectorStatistics, self).__init__(d, display=display)
self.logic()
def run(self):
dof = 1 if self._sample else 0
vmin = amin(self._data.data)
vmax = amax(self._data.data)
vrange = vmax - vmin
q1 = percentile(self._data.data, 25)
q3 = percentile(self._data.data, 75)
iqr = q3 - q1
self._results = {self._n: len(self._data.data),
self._mean: mean(self._data.data),
self._std: std(self._data.data, ddof=dof),
self._ste: sem(self._data.data, 0, dof),
self._q2: median(self._data.data),
self._min: vmin,
self._max: vmax,
self._range: vrange,
self._skew: skew(self._data.data),
self._kurt: kurtosis(self._data.data),
self._q1: q1,
self._q3: q3,
self._iqr: iqr,
}
@property
def count(self):
return self._results[self._n]
@property
def mean(self):
return self._results[self._mean]
@property
def std_dev(self):
return self._results[self._std]
@property
def std_err(self):
return self._results[self._ste]
@property
def median(self):
return self._results[self._q2]
@property
def minimum(self):
return self._results[self._min]
@property
def maximum(self):
return self._results[self._max]
@property
def range(self):
return self._results[self._range]
@property
def skewness(self):
return self._results[self._skew]
@property
def kurtosis(self):
return self._results[self._kurt]
@property
def q1(self):
return self._results[self._q1]
@property
def q3(self):
return self._results[self._q3]
@property
def iqr(self):
return self._results[self._iqr]
def __str__(self):
order = [self._n,
self._mean,
self._std,
self._ste,
self._skew,
self._kurt,
self._max,
self._q3,
self._q2,
self._q1,
self._min,
self._iqr,
self._range,
]
return std_output(self._name, results=self._results, order=order)
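# Usage sketch (illustrative; the input data is made up):
#   import numpy as np
#   stats = VectorStatistics(np.random.randn(100), sample=True, display=False)
#   print(stats.mean, stats.std_dev, stats.iqr)
#   print(stats)  # formatted summary table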
class GroupStatistics(Analysis):
"""Reports basic summary stats for a group of vectors."""
_min_size = 1
_name = 'Group Statistics'
_group = 'Group'
_n = 'n'
_mean = 'Mean'
_std = 'Std Dev'
_max = 'Max'
_q2 = 'Median'
_min = 'Min'
_total = 'Total'
_pooled = 'Pooled Std Dev'
_gmean = 'Grand Mean'
_gmedian = 'Grand Median'
_num_of_groups = 'Number of Groups'
def __init__(self, *args, **kwargs):
groups = kwargs.get('groups', None)
display = kwargs.get('display', False)
if is_dict(args[0]):
_data, = args
        elif is_group(args):
_data = dict(zip(groups, args)) if groups else dict(zip(list(range(1, len(args) + 1)), args))
else:
_data = None
data = Vector()
for g, d in _data.items():
if len(d) == 0:
raise NoDataError("Cannot perform test because there is no data")
if len(d) <= self._min_size:
raise MinimumSizeError("length of data is less than the minimum size {}".format(self._min_size))
data.append(Vector(d, groups=[g for _ in range(0, len(d))]))
if data.is_empty():
raise NoDataError("Cannot perform test because there is no data")
self.k = None
self.total = None
self.pooled = None
self.gmean = None
self.gmedian = None
super(GroupStatistics, self).__init__(data, display=display)
self.logic()
def logic(self):
if not self._data:
pass
self._results = []
self.run()
if self._display:
print(self)
def run(self):
out = []
for group, vector in self._data.groups.items():
row_result = {self._group: str(group),
self._n: len(vector),
self._mean: mean(vector),
self._std: std(vector, ddof=1),
self._max: amax(vector),
self._q2: median(vector),
self._min: amin(vector),
}
out.append(row_result)
summ = DataFrame(out).sort_values(self._group)
self.total = len(self._data.data)
self.k = len(summ)
if self.k > 1:
self.pooled = sqrt(((summ[self._n] - 1) * summ[self._std] ** 2).sum() / (summ[self._n].sum() - self.k))
self.gmean = summ[self._mean].mean()
self.gmedian = median(summ[self._q2])
self._results = ({
self._num_of_groups: self.k,
self._total: self.total,
self._pooled: self.pooled,
self._gmean: self.gmean,
self._gmedian: self.gmedian,
}, summ)
else:
self._results = summ
def __str__(self):
order = (
self._num_of_groups,
self._total,
self._gmean,
self._pooled,
self._gmedian,
)
group_order = (
self._n,
self._mean,
self._std,
self._min,
self._q2,
self._max,
self._group,
)
if is_tuple(self._results):
out = '{}\n{}'.format(
std_output('Overall Statistics', self._results[0], order=order),
std_output(self._name, self._results[1].to_dict(orient='records'), order=group_order),
)
else:
out = std_output(self._name, self._results.to_dict(orient='records'), order=group_order)
return out
@property
def grand_mean(self):
return self.gmean
@property
def grand_median(self):
return self.gmedian
@property
def pooled_std(self):
return self.pooled
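# Usage sketch (illustrative; the group data is made up):
#   a = [1.1, 2.3, 3.4, 4.5]
#   b = [2.0, 3.1, 4.2, 5.3]
#   grp_stats = GroupStatistics(a, b, groups=['A', 'B'], display=False)
#   print(grp_stats.grand_mean, grp_stats.pooled_std)
#   print(grp_stats)  # per-group table plus overall statistics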
class GroupStatisticsStacked(Analysis):
_min_size = 1
_name = 'Group Statistics'
_agg_name = 'Overall Statistics'
_group = 'Group'
_n = 'n'
_mean = 'Mean'
_std = 'Std Dev'
_max = 'Max'
_q2 = 'Median'
_min = 'Min'
_total = 'Total'
_pooled = 'Pooled Std Dev'
_gmean = 'Grand Mean'
_gmedian = 'Grand Median'
_num_of_groups = 'Number of Groups'
def __init__(self, values, groups=None, **kwargs):
display = kwargs['display'] if 'display' in kwargs else True
if groups is None:
if is_vector(values):
data = values
else:
                raise AttributeError('values argument must be a Vector object when groups is not provided.')
else:
data = Vector(values, groups=groups)
if data.is_empty():
raise NoDataError("Cannot perform test because there is no data")
self.pooled = None
self.gmean = None
self.gmedian = None
self.total = None
self.k = None
super(GroupStatisticsStacked, self).__init__(data, display=display)
self.logic()
def logic(self):
if not self._data:
pass
self._results = []
self.run()
if self._display:
print(self)
def run(self):
out = []
for group, vector in self._data.groups.items():
if len(vector) <= self._min_size:
raise MinimumSizeError("length of data is less than the minimum size {}".format(self._min_size))
row_result = {self._group: group,
self._n: len(vector),
self._mean: mean(vector),
self._std: std(vector, ddof=1),
self._max: amax(vector),
self._q2: median(vector),
self._min: amin(vector),
}
out.append(row_result)
summ = DataFrame(out).sort_values(self._group)
self.total = len(self._data.data)
self.k = len(summ)
if self.k > 1:
self.pooled = sqrt(((summ[self._n] - 1) * summ[self._std] ** 2).sum() / (summ[self._n].sum() - self.k))
self.gmean = summ[self._mean].mean()
self.gmedian = median(summ[self._q2])
self._results = ({
self._num_of_groups: self.k,
self._total: self.total,
self._pooled: self.pooled,
self._gmean: self.gmean,
self._gmedian: self.gmedian,
}, summ)
else:
self._results = summ
def __str__(self):
order = (
self._num_of_groups,
self._total,
self._gmean,
self._pooled,
self._gmedian,
)
group_order = (
self._n,
self._mean,
self._std,
self._min,
self._q2,
self._max,
self._group,
)
if is_tuple(self._results):
out = '{}\n{}'.format(
std_output(self._agg_name, self._results[0], order=order),
std_output(self._name, self._results[1].to_dict(orient='records'), order=group_order),
)
else:
out = std_output(self._name, self._results.to_dict(orient='records'), order=group_order)
return out
@property
def grand_mean(self):
return self.gmean
@property
def grand_median(self):
return self.gmedian
@property
def pooled_std(self):
return self.pooled
class CategoricalStatistics(Analysis):
"""Reports basic summary stats for Categorical data."""
_min_size = 1
_name = 'Statistics'
_agg_name = 'Overall Statistics'
_rank = 'Rank'
_cat = 'Category'
_freq = 'Frequency'
_perc = 'Percent'
_total = 'Total'
_num_of_grps = 'Number of Groups'
def __init__(self, data, **kwargs):
order = kwargs['order'] if 'order' in kwargs else None
dropna = kwargs['dropna'] if 'dropna' in kwargs else False
display = kwargs['display'] if 'display' in kwargs else True
self.ordered = True if order is not None else False
d = data if is_categorical(data) else Categorical(data, order=order, dropna=dropna)
if d.is_empty():
raise NoDataError("Cannot perform the test because there is no data")
super(CategoricalStatistics, self).__init__(d, display=display)
self.logic()
def run(self):
col = dict(categories=self._cat,
counts=self._freq,
percents=self._perc,
ranks=self._rank)
self.data.summary.rename(columns=col, inplace=True)
if self.data.num_of_groups > 1:
self._results = ({
self._total: self.data.total,
self._num_of_grps: self.data.num_of_groups,
}, self.data.summary.to_dict(orient='records'))
else:
self._results = self.data.summary.to_dict(orient='records')
def __str__(self):
order = (
self._total,
self._num_of_grps,
)
grp_order = (
self._rank,
self._freq,
self._perc,
self._cat,
)
if is_tuple(self._results):
out = '{}\n{}'.format(
std_output(self._agg_name, self._results[0], order=order),
std_output(self._name, self._results[1], order=grp_order),
)
else:
out = std_output(self._name, self._results, order=grp_order)
        return out
| /sci_analysis-2.2.1rc0.tar.gz/sci_analysis-2.2.1rc0/sci_analysis/analysis/stats.py | 0.906564 | 0.328341 | stats.py | pypi |
from numpy import float_, int_
class Analysis(object):
"""Generic analysis root class.
Members:
_data - the data used for analysis.
_display - flag for whether to display the analysis output.
_results - A dict of the results of the test.
Methods:
logic - This method needs to run the analysis, set the results member, and display the output at bare minimum.
run - This method should return the results of the specific analysis.
output - This method shouldn't return a value and only produce a side-effect.
"""
_name = "Analysis"
def __init__(self, data, display=True):
"""Initialize the data and results members.
Override this method to initialize additional members or perform
checks on data.
"""
self._data = data
self._display = display
self._results = {}
@property
def name(self):
"""The name of the test class"""
return self._name
@property
def data(self):
"""The data used for analysis"""
return self._data
@property
def results(self):
"""A dict of the results returned by the run method"""
return self._results
def logic(self):
"""This method needs to run the analysis, set the results member, and
display the output at bare minimum.
Override this method to modify the execution sequence of the analysis.
"""
if self._data is None:
return
self.run()
if self._display:
print(self)
def run(self):
"""This method should perform the specific analysis and set the results dict.
Override this method to perform a specific analysis or calculation.
"""
raise NotImplementedError
def __str__(self):
return std_output(self._name, self._results, tuple(self._results.keys()))
def std_output(name, results, order, precision=4, spacing=14):
"""
Parameters
----------
name : str
The name of the analysis report.
results : dict or list
The input dict or list to print.
order : list or tuple
The list of keys in results to display and the order to display them in.
precision : int
The number of decimal places to show for float values.
spacing : int
The max number of characters for each printed column.
Returns
-------
output_string : str
The report to be printed to stdout.
"""
def format_header(col_names):
line = ""
for n in col_names:
line += '{:{}s}'.format(n, spacing)
return line
def format_row(_row, _order):
line = ""
for column in _order:
value = _row[column]
t = type(value)
if t in [float, float_]:
line += '{:< {}.{}f}'.format(value, spacing, precision)
            elif t in [int, int_]:
line += '{:< {}d}'.format(value, spacing)
else:
line += '{:<{}s}'.format(str(value), spacing)
return line
def format_items(label, value):
if type(value) in {float, float_}:
line = '{:{}s}'.format(label, max_length) + ' = ' + '{:< .{}f}'.format(value, precision)
elif type(value) in {int, int_}:
line = '{:{}s}'.format(label, max_length) + ' = ' + '{:< d}'.format(value)
else:
line = '{:{}s}'.format(label, max_length) + ' = ' + str(value)
return line
table = list()
header = ''
if isinstance(results, list):
header = format_header(order)
for row in results:
table.append(format_row(row, order))
elif isinstance(results, dict):
max_length = max([len(label) for label in results.keys()])
for key in order:
table.append(format_items(key, results[key]))
out = [
'',
'',
name,
'-' * len(name),
''
]
if len(header) > 0:
out.extend([
header,
'-' * len(header)
])
out.append('\n'.join(table))
    return '\n'.join(out)
| /sci_analysis-2.2.1rc0.tar.gz/sci_analysis-2.2.1rc0/sci_analysis/analysis/base.py | 0.899621 | 0.594845 | base.py | pypi |
from scipy.stats import linregress, pearsonr, spearmanr
from pandas import DataFrame
from ..data import Vector, is_vector
from .base import Analysis, std_output
from .exc import NoDataError, MinimumSizeError
from .hypo_tests import NormTest
class Comparison(Analysis):
"""Perform a test on two independent vectors of equal length."""
_min_size = 3
_name = "Comparison"
_h0 = "H0: "
_ha = "HA: "
_default_alpha = 0.05
def __init__(self, xdata, ydata=None, alpha=None, display=True):
self._alpha = alpha or self._default_alpha
if ydata is None:
if is_vector(xdata):
v = xdata
else:
raise AttributeError('ydata argument cannot be None.')
else:
v = Vector(xdata, other=ydata)
if v.data.empty or v.other.empty:
raise NoDataError("Cannot perform test because there is no data")
if len(v.data) <= self._min_size or len(v.other) <= self._min_size:
raise MinimumSizeError("length of data is less than the minimum size {}".format(self._min_size))
super(Comparison, self).__init__(v, display=display)
self.logic()
@property
def xdata(self):
"""The predictor vector for comparison tests"""
return self.data.data
@property
def ydata(self):
"""The response vector for comparison tests"""
return self.data.other
@property
def predictor(self):
"""The predictor vector for comparison tests"""
return self.data.data
@property
def response(self):
"""The response vector for comparison tests"""
return self.data.other
@property
def statistic(self):
"""The test statistic returned by the function called in the run method"""
# TODO: Need to catch the case where self._results is an empty dictionary.
return self._results['statistic']
@property
def p_value(self):
"""The p-value returned by the function called in the run method"""
return self._results['p value']
def __str__(self):
out = list()
order = list()
res = list(self._results.keys())
if 'p value' in res:
order.append('p value')
res.remove('p value')
order.extend(res)
out.append(std_output(self.name, self._results, reversed(order)))
out.append('')
out.append(self._h0 if self.p_value > self._alpha else self._ha)
out.append('')
return '\n'.join(out)
def run(self):
raise NotImplementedError
class LinearRegression(Comparison):
"""Performs a linear regression between two vectors."""
_name = "Linear Regression"
_n = 'n'
_slope = 'Slope'
_intercept = 'Intercept'
_r_value = 'r'
_r_squared = 'r^2'
_std_err = 'Std Err'
_p_value = 'p value'
def __init__(self, xdata, ydata=None, alpha=None, display=True):
super(LinearRegression, self).__init__(xdata, ydata, alpha=alpha, display=display)
def run(self):
slope, intercept, r, p_value, std_err = linregress(self.xdata, self.ydata)
count = len(self.xdata)
self._results.update({
self._n: count,
self._slope: slope,
self._intercept: intercept,
self._r_value: r,
self._r_squared: r ** 2,
self._std_err: std_err,
self._p_value: p_value
})
@property
def slope(self):
return self._results[self._slope]
@property
def intercept(self):
return self._results[self._intercept]
@property
def r_squared(self):
return self._results[self._r_squared]
@property
def r_value(self):
return self._results[self._r_value]
@property
def statistic(self):
return self._results[self._r_squared]
@property
def std_err(self):
return self._results[self._std_err]
def __str__(self):
"""If the result is greater than the significance, print the null hypothesis, otherwise,
the alternate hypothesis"""
out = list()
order = [
self._n,
self._slope,
self._intercept,
self._r_value,
self._r_squared,
self._std_err,
self._p_value
]
out.append(std_output(self._name, self._results, order=order))
out.append('')
return '\n'.join(out)
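# Usage sketch (illustrative; the data is made up):
#   x = [1, 2, 3, 4, 5, 6]
#   y = [2.1, 3.9, 6.2, 8.0, 9.8, 12.1]
#   lr = LinearRegression(x, y, alpha=0.05, display=False)
#   print(lr.slope, lr.intercept, lr.r_squared, lr.p_value)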
class Correlation(Comparison):
"""Performs a pearson or spearman correlation between two vectors."""
_names = {'pearson': 'Pearson Correlation Coefficient', 'spearman': 'Spearman Correlation Coefficient'}
_h0 = "H0: There is no significant relationship between predictor and response"
_ha = "HA: There is a significant relationship between predictor and response"
_r_value = 'r value'
_p_value = 'p value'
_alpha_name = 'alpha'
def __init__(self, xdata, ydata=None, alpha=None, display=True):
self._test = None
super(Correlation, self).__init__(xdata, ydata, alpha=alpha, display=display)
def run(self):
if NormTest(self.xdata, self.ydata, display=False, alpha=self._alpha).p_value > self._alpha:
r_value, p_value = pearsonr(self.xdata, self.ydata)
r = "pearson"
else:
r_value, p_value = spearmanr(self.xdata, self.ydata)
r = "spearman"
self._name = self._names[r]
self._test = r
self._results.update({
self._r_value: r_value,
self._p_value: p_value,
self._alpha_name: self._alpha
})
@property
def r_value(self):
"""The correlation coefficient returned by the the determined test type"""
return self._results[self._r_value]
@property
def statistic(self):
return self._results[self._r_value]
@property
def test_type(self):
"""The test that was used to determine the correlation coefficient"""
return self._test
def __str__(self):
out = list()
out.append(std_output(self.name, self._results, [self._alpha_name, self._r_value, self._p_value]))
out.append('')
out.append(self._h0 if self.p_value > self._alpha else self._ha)
out.append('')
return '\n'.join(out)
class GroupComparison(Analysis):
_min_size = 1
_name = 'Group Comparison'
_default_alpha = 0.05
def __init__(self, xdata, ydata=None, groups=None, alpha=None, display=True):
if ydata is None:
if is_vector(xdata):
vector = xdata
else:
raise AttributeError("ydata argument cannot be None.")
else:
vector = Vector(xdata, other=ydata, groups=groups)
if vector.is_empty():
raise NoDataError("Cannot perform test because there is no data")
super(GroupComparison, self).__init__(vector, display=display)
self._alpha = alpha or self._default_alpha
self.logic()
def run(self):
raise NotImplementedError
class GroupCorrelation(GroupComparison):
_names = {
'pearson': 'Pearson Correlation Coefficient',
'spearman': 'Spearman Correlation Coefficient',
}
_min_size = 2
_r_value = 'r value'
_p_value = 'p value'
_group_name = 'Group'
_n = 'n'
def __init__(self, xdata, ydata=None, groups=None, alpha=None, display=True):
self._test = None
super(GroupCorrelation, self).__init__(xdata, ydata=ydata, groups=groups, alpha=alpha, display=display)
def run(self):
out = []
        # Remove any groups whose length is less than or equal to the minimum size from the analysis.
small_grps = [grp for grp, seq in self.data.groups.items() if len(seq) <= self._min_size]
self.data.drop_groups(small_grps)
if NormTest(*self.data.flatten(), display=False, alpha=self._alpha).p_value > self._alpha:
r = "pearson"
func = pearsonr
else:
r = 'spearman'
func = spearmanr
self._name = self._names[r]
self._test = r
for grp, pairs in self.data.paired_groups.items():
r_value, p_value = func(*pairs)
row_results = ({self._r_value: r_value,
self._p_value: p_value,
self._group_name: str(grp),
self._n: str(len(pairs[0]))})
out.append(row_results)
self._results = DataFrame(out).sort_values(self._group_name).to_dict(orient='records')
def __str__(self):
order = (
self._n,
self._r_value,
self._p_value,
self._group_name
)
return std_output(self._name, self._results, order=order)
@property
def counts(self):
return tuple(s[self._n] for s in self._results)
@property
def r_value(self):
return tuple(s[self._r_value] for s in self._results)
@property
def statistic(self):
return tuple(s[self._r_value] for s in self._results)
@property
def p_value(self):
return tuple(s[self._p_value] for s in self._results)
class GroupLinearRegression(GroupComparison):
_name = "Linear Regression"
_n = 'n'
_slope = 'Slope'
_intercept = 'Intercept'
_r_value = 'r'
_r_squared = 'r^2'
_std_err = 'Std Err'
_p_value = 'p value'
_group_name = 'Group'
def run(self):
out = []
        # Remove any groups whose length is less than or equal to the minimum size from the analysis.
small_grps = [grp for grp, seq in self.data.groups.items() if len(seq) <= self._min_size]
self.data.drop_groups(small_grps)
for grp, pairs in self.data.paired_groups.items():
slope, intercept, r, p_value, std_err = linregress(*pairs)
count = len(pairs[0])
out.append({
self._n: str(count),
self._slope: slope,
self._intercept: intercept,
self._r_value: r,
self._r_squared: r ** 2,
self._std_err: std_err,
self._p_value: p_value,
self._group_name: str(grp)
})
if not out:
raise NoDataError
self._results = DataFrame(out).sort_values(self._group_name).to_dict(orient='records')
def __str__(self):
order = (
self._n,
self._slope,
self._intercept,
self._r_squared,
self._std_err,
self._p_value,
self._group_name
)
return std_output(self._name, self._results, order=order)
@property
def counts(self):
return tuple(s[self._n] for s in self._results)
@property
def r_value(self):
return tuple(s[self._r_value] for s in self._results)
@property
def statistic(self):
return tuple(s[self._r_squared] for s in self._results)
@property
def p_value(self):
return tuple(s[self._p_value] for s in self._results)
@property
def slope(self):
return tuple(s[self._slope] for s in self._results)
@property
def intercept(self):
return tuple(s[self._intercept] for s in self._results)
@property
def r_squared(self):
return tuple(s[self._r_squared] for s in self._results)
@property
def std_err(self):
        return tuple(s[self._std_err] for s in self._results)
| /sci_analysis-2.2.1rc0.tar.gz/sci_analysis-2.2.1rc0/sci_analysis/analysis/comparison.py | 0.80213 | 0.474509 | comparison.py | pypi |
from .hypo_tests import NormTest, KSTest, TwoSampleKSTest, MannWhitney, TTest, Anova, Kruskal, EqualVariance
from .comparison import LinearRegression, Correlation, GroupCorrelation, GroupLinearRegression
from .stats import VectorStatistics, GroupStatistics, GroupStatisticsStacked, CategoricalStatistics
def determine_analysis_type(data, other=None, groups=None, labels=None):
"""Attempts to determine the type of data and returns the corresponding sci_analysis Data object.
Parameters
----------
data : array-like
The sequence of unknown data type.
other : array-like or None
A second sequence of unknown data type.
groups : array-like or None
The group names to include if data is determined to be a Vector.
labels : array-like or None
The sequence of data point labels.
Returns
-------
data : sci_analysis.data.Data
A subclass of sci_analysis Data that corresponds to the analysis type to perform.
"""
from numpy import (
float16, float32, float64,
int8, int16, int32, int64
)
from pandas import Series
from ..data import is_iterable, is_vector, is_categorical, Vector, Categorical
from .exc import NoDataError
numeric_types = [float16, float32, float64, int8, int16, int32, int64]
if not is_iterable(data):
raise ValueError('data cannot be a scalar value.')
elif len(data) == 0:
raise NoDataError
elif is_vector(data):
return data
elif is_categorical(data):
return data
else:
if not hasattr(data, 'dtype'):
data = Series(data)
if other is not None:
if not hasattr(other, 'dtype'):
other = Series(other)
if data.dtype in numeric_types:
if other is not None and other.dtype in numeric_types:
if groups is not None:
return Vector(data, other=other, groups=groups, labels=labels)
else:
return Vector(data, other=other, labels=labels)
else:
if groups is not None:
return Vector(data, groups=groups, labels=labels)
else:
return Vector(data, labels=labels)
else:
return Categorical(data)
def analyse(xdata, ydata=None, groups=None, labels=None, **kwargs):
"""
Alias for analyze.
Parameters
----------
xdata : array-like
The primary set of data.
ydata : array-like
The response or secondary set of data.
groups : array-like
The group names used for location testing or Bivariate analysis.
labels : array-like or None
The sequence of data point labels.
alpha : float
The sensitivity to use for hypothesis tests.
Returns
-------
    tested : list(str) or None
        The list of analyses that were performed when the debug keyword argument is given, otherwise None.
Notes
-----
xdata : array-like(num), ydata : None --- Distribution
xdata : array-like(str), ydata : None --- Frequencies
xdata : array-like(num), ydata : array-like(num) --- Bivariate
xdata : array-like(num), ydata : array-like(num), groups : array-like --- Group Bivariate
xdata : list(array-like(num)), ydata : None --- Location Test(unstacked)
xdata : list(array-like(num)), ydata : None, groups : array-like --- Location Test(unstacked)
xdata : dict(array-like(num)), ydata : None --- Location Test(unstacked)
xdata : array-like(num), ydata : None, groups : array-like --- Location Test(stacked)
"""
return analyze(xdata, ydata=ydata, groups=groups, labels=labels, **kwargs)
def analyze(xdata, ydata=None, groups=None, labels=None, alpha=0.05, **kwargs):
"""
Automatically performs a statistical analysis based on the input arguments.
Parameters
----------
xdata : array-like
The primary set of data.
ydata : array-like
The response or secondary set of data.
groups : array-like
The group names used for location testing or Bivariate analysis.
labels : array-like or None
The sequence of data point labels.
alpha : float
The sensitivity to use for hypothesis tests.
Returns
-------
    tested : list(str) or None
        The list of analyses that were performed when the debug keyword argument is given, otherwise None.
Notes
-----
xdata : array-like(num), ydata : None --- Distribution
xdata : array-like(str), ydata : None --- Frequencies
xdata : array-like(num), ydata : array-like(num) --- Bivariate
xdata : array-like(num), ydata : array-like(num), groups : array-like --- Group Bivariate
xdata : list(array-like(num)), ydata : None --- Location Test(unstacked)
xdata : list(array-like(num)), ydata : None, groups : array-like --- Location Test(unstacked)
xdata : dict(array-like(num)), ydata : None --- Location Test(unstacked)
xdata : array-like(num), ydata : None, groups : array-like --- Location Test(stacked)
"""
from ..graphs import GraphHisto, GraphScatter, GraphBoxplot, GraphFrequency, GraphGroupScatter
from ..data import (is_dict, is_iterable, is_group, is_dict_group, is_vector)
from .exc import NoDataError
debug = True if 'debug' in kwargs else False
tested = list()
if xdata is None:
raise ValueError("xdata was not provided.")
if not is_iterable(xdata):
raise TypeError("xdata is not an array.")
if len(xdata) == 0:
raise NoDataError("No data was passed to analyze")
# Compare Group Means and Variance
if is_group(xdata) or is_dict_group(xdata):
tested.append('Oneway')
if is_dict(xdata):
if groups is not None:
GraphBoxplot(xdata, groups=groups, **kwargs)
else:
GraphBoxplot(xdata, **kwargs)
groups = list(xdata.keys())
xdata = list(xdata.values())
else:
if groups is not None:
GraphBoxplot(*xdata, groups=groups, **kwargs)
else:
GraphBoxplot(*xdata, **kwargs)
out_stats = GroupStatistics(*xdata, groups=groups, display=False)
# Show the box plot and stats
print(out_stats)
if len(xdata) == 2:
norm = NormTest(*xdata, alpha=alpha, display=False)
if norm.p_value > alpha:
TTest(xdata[0], xdata[1], alpha=alpha)
tested.append('TTest')
elif len(xdata[0]) > 20 and len(xdata[1]) > 20:
MannWhitney(xdata[0], xdata[1], alpha=alpha)
tested.append('MannWhitney')
else:
TwoSampleKSTest(xdata[0], xdata[1], alpha=alpha)
tested.append('TwoSampleKSTest')
else:
e = EqualVariance(*xdata, alpha=alpha)
# If normally distributed and variances are equal, perform one-way ANOVA
# Otherwise, perform a non-parametric Kruskal-Wallis test
if e.test_type == 'Bartlett' and e.p_value > alpha:
Anova(*xdata, alpha=alpha)
tested.append('Anova')
else:
Kruskal(*xdata, alpha=alpha)
tested.append('Kruskal')
return tested if debug else None
if ydata is not None:
_data = determine_analysis_type(xdata, other=ydata, groups=groups, labels=labels)
else:
_data = determine_analysis_type(xdata, groups=groups, labels=labels)
if is_vector(_data) and not _data.other.empty:
# Correlation and Linear Regression
if len(_data.groups) > 1:
tested.append('Group Bivariate')
# Show the scatter plot, correlation and regression stats
GraphGroupScatter(_data, **kwargs)
GroupLinearRegression(_data, alpha=alpha)
GroupCorrelation(_data, alpha=alpha)
return tested if debug else None
else:
tested.append('Bivariate')
# Show the scatter plot, correlation and regression stats
GraphScatter(_data, **kwargs)
LinearRegression(_data, alpha=alpha)
Correlation(_data, alpha=alpha)
return tested if debug else None
elif is_vector(_data) and len(_data.groups) > 1:
# Compare Stacked Group Means and Variance
tested.append('Stacked Oneway')
# Show the box plot and stats
out_stats = GroupStatisticsStacked(_data, display=False)
GraphBoxplot(_data, gmean=out_stats.gmean, gmedian=out_stats.gmedian, **kwargs)
print(out_stats)
group_data = tuple(_data.groups.values())
if len(group_data) == 2:
norm = NormTest(*group_data, alpha=alpha, display=False)
if norm.p_value > alpha:
TTest(*group_data)
tested.append('TTest')
elif len(group_data[0]) > 20 and len(group_data[1]) > 20:
MannWhitney(*group_data)
tested.append('MannWhitney')
else:
TwoSampleKSTest(*group_data)
tested.append('TwoSampleKSTest')
else:
e = EqualVariance(*group_data, alpha=alpha)
if e.test_type == 'Bartlett' and e.p_value > alpha:
Anova(*group_data, alpha=alpha)
tested.append('Anova')
else:
Kruskal(*group_data, alpha=alpha)
tested.append('Kruskal')
return tested if debug else None
else:
# Histogram and Basic Stats or Categories and Frequencies
if is_vector(_data):
tested.append('Distribution')
# Show the histogram and stats
out_stats = VectorStatistics(_data, sample=kwargs.get('sample', False), display=False)
if 'distribution' in kwargs:
distro = kwargs['distribution']
distro_class = getattr(
__import__(
'scipy.stats',
globals(),
locals(),
[distro],
0,
),
distro,
)
parms = distro_class.fit(xdata)
fit = KSTest(xdata, distribution=distro, parms=parms, alpha=alpha, display=False)
tested.append('KSTest')
else:
fit = NormTest(xdata, alpha=alpha, display=False)
tested.append('NormTest')
GraphHisto(_data, mean=out_stats.mean, std_dev=out_stats.std_dev, **kwargs)
print(out_stats)
print(fit)
return tested if debug else None
else:
tested.append('Frequencies')
# Show the histogram and stats
GraphFrequency(_data, **kwargs)
CategoricalStatistics(xdata, **kwargs)
            return tested if debug else None
| /sci_analysis-2.2.1rc0.tar.gz/sci_analysis-2.2.1rc0/sci_analysis/analysis/__init__.py | 0.898785 | 0.709227 | __init__.py | pypi |
import warnings
import six
from math import sqrt, fabs
# matplotlib imports
from matplotlib.pyplot import (
show, subplot, yticks, xlabel, ylabel, figure, setp, savefig, close, xticks, subplots_adjust
)
from matplotlib.gridspec import GridSpec
from matplotlib.patches import Circle
# Numpy imports
from numpy import (
polyfit, polyval, sort, arange, array, linspace, mgrid, vstack, std, sum, mean, median
)
# Scipy imports
from scipy.stats import probplot, gaussian_kde, t
# local imports
from .base import Graph
from ..data import Vector, is_dict, is_group, is_vector
from ..analysis.exc import NoDataError
def future(message):
warnings.warn(message, FutureWarning, stacklevel=2)
class VectorGraph(Graph):
def __init__(self, sequence, **kwargs):
"""Converts the data argument to a Vector object and sets it to the Graph
object's vector member. Sets the xname and yname arguments as the axis
labels. The default values are "x" and "y".
"""
if is_vector(sequence):
super(VectorGraph, self).__init__(sequence, **kwargs)
else:
super(VectorGraph, self).__init__(Vector(sequence), **kwargs)
if len(self._data.groups.keys()) == 0:
raise NoDataError("Cannot draw graph because there is no data.")
self.draw()
def draw(self):
"""
Prepares and displays the graph based on the set class members.
"""
raise NotImplementedError
class GraphHisto(VectorGraph):
"""Draws a histogram.
New class members are bins, color and box_plot. The bins member is the number
of histogram bins to draw. The color member is the color of the histogram area.
The box_plot member is a boolean flag for whether to draw the corresponding
box plot.
"""
_xsize = 5
_ysize = 4
def __init__(self, data, **kwargs):
"""GraphHisto constructor.
:param data: The data to be graphed.
        :param bins: The number of histogram bins to draw. Defaults to 20.
        :param name: The optional x-axis label.
        :param distribution: The theoretical distribution to fit. Defaults to 'norm'.
        :param boxplot: Toggle the display of the optional boxplot.
        :param cdf: Toggle the display of the optional cumulative density function plot.
        :param fit: Toggle the display of the best fit line for the specified distribution.
        :param mean: The mean to be displayed on the graph title.
        :param std_dev: The standard deviation to be displayed on the graph title.
        :param sample: Sets x-bar and s if true, else mu and sigma for displaying on the graph title.
        :param title: The title of the graph.
        :param save_to: Save the graph to the specified path.
:return: pass
"""
self._bins = kwargs.get('bins', 20)
self._distribution = kwargs.get('distribution', 'norm')
self._box_plot = kwargs.get('boxplot', True)
self._cdf = kwargs.get('cdf', False)
self._fit = kwargs.get('fit', False)
self._mean = kwargs.get('mean')
self._std = kwargs.get('std_dev')
self._sample = kwargs.get('sample', False)
self._title = kwargs.get('title', 'Distribution')
self._save_to = kwargs.get('save_to')
yname = kwargs.get('yname', 'Probability')
name = kwargs.get('name') or kwargs.get('xname') or 'Data'
super(GraphHisto, self).__init__(data, xname=name, yname=yname)
def fit_distro(self):
"""
Calculate the fit points for a specified distribution.
Returns
-------
fit_parms : tuple
First value - The x-axis points
Second value - The pdf y-axis points
Third value - The cdf y-axis points
"""
distro_class = getattr(
__import__(
'scipy.stats',
globals(),
locals(),
[self._distribution],
0,
),
self._distribution
)
parms = distro_class.fit(self._data.data)
distro = linspace(distro_class.ppf(0.001, *parms), distro_class.ppf(0.999, *parms), 100)
distro_pdf = distro_class.pdf(distro, *parms)
distro_cdf = distro_class.cdf(distro, *parms)
return distro, distro_pdf, distro_cdf
def calc_cdf(self):
"""
        Calculate the cdf points.
Returns
-------
coordinates : tuple
First value - The cdf x-axis points
Second value - The cdf y-axis points
"""
x_sorted_vector = sort(self._data.data)
if len(x_sorted_vector) == 0:
return 0, 0
y_sorted_vector = arange(len(x_sorted_vector) + 1) / float(len(x_sorted_vector))
x_cdf = array([x_sorted_vector, x_sorted_vector]).T.flatten()
y_cdf = array([y_sorted_vector[:(len(y_sorted_vector)-1)], y_sorted_vector[1:]]).T.flatten()
return x_cdf, y_cdf
def draw(self):
"""
Draws the histogram based on the set parameters.
Returns
-------
pass
"""
# Setup the grid variables
histo_span = 3
box_plot_span = 1
cdf_span = 3
h_ratios = [histo_span]
p = []
if self._box_plot:
self._ysize += 0.5
self._nrows += 1
h_ratios.insert(0, box_plot_span)
if self._cdf:
self._ysize += 2
self._nrows += 1
h_ratios.insert(0, cdf_span)
# Create the figure and grid spec
f = figure(figsize=(self._xsize, self._ysize))
gs = GridSpec(self._nrows, self._ncols, height_ratios=h_ratios, hspace=0)
# Set the title
title = self._title
if self._mean and self._std:
if self._sample:
title = r"{}{}$\bar x = {:.4f}, s = {:.4f}$".format(title, "\n", self._mean, self._std)
else:
title = r"{}{}$\mu = {:.4f}$, $\sigma = {:.4f}$".format(title, "\n", self._mean, self._std)
f.suptitle(title, fontsize=14)
# Adjust the bin size if it's greater than the vector size
if len(self._data.data) < self._bins:
self._bins = len(self._data.data)
# Fit the distribution
if self._fit:
distro, distro_pdf, distro_cdf = self.fit_distro()
else:
distro, distro_pdf, distro_cdf = None, None, None
# Draw the cdf
if self._cdf:
x_cdf, y_cdf = self.calc_cdf()
ax_cdf = subplot(gs[0])
ax_cdf.plot(x_cdf, y_cdf, 'k-')
ax_cdf.xaxis.grid(True, linestyle='-', which='major', color='grey', alpha=0.75)
ax_cdf.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=0.75)
p.append(ax_cdf.get_xticklabels())
if self._fit:
ax_cdf.plot(distro, distro_cdf, 'r--', linewidth=2)
yticks(arange(11) * 0.1)
ylabel("Cumulative Probability")
else:
ax_cdf = None
# Draw the box plot
if self._box_plot:
if self._cdf:
ax_box = subplot(gs[len(h_ratios) - 2], sharex=ax_cdf)
else:
ax_box = subplot(gs[len(h_ratios) - 2])
bp = ax_box.boxplot(self._data.data, vert=False, showmeans=True)
setp(bp['boxes'], color='k')
setp(bp['whiskers'], color='k')
vp = ax_box.violinplot(self._data.data, vert=False, showextrema=False, showmedians=False, showmeans=False)
setp(vp['bodies'], facecolors=self.get_color(0))
ax_box.xaxis.grid(True, linestyle='-', which='major', color='grey', alpha=0.75)
yticks([])
p.append(ax_box.get_xticklabels())
ax_hist = subplot(gs[len(h_ratios) - 1], sharex=ax_box)
else:
ax_hist = subplot(gs[len(h_ratios) - 1])
# Draw the histogram
        # First try to use the density arg which replaced normed (which is now deprecated) in matplotlib 2.2.2
try:
ax_hist.hist(self._data.data, self._bins, density=True, color=self.get_color(0), zorder=0)
except TypeError:
ax_hist.hist(self._data.data, self._bins, normed=True, color=self.get_color(0), zorder=0)
ax_hist.xaxis.grid(True, linestyle='-', which='major', color='grey', alpha=0.75)
ax_hist.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=0.75)
if self._fit:
ax_hist.plot(distro, distro_pdf, 'r--', linewidth=2)
if len(p) > 0:
setp(p, visible=False)
# set the labels and display the figure
ylabel(self._yname)
xlabel(self._xname)
if self._save_to:
savefig(self._save_to)
close(f)
else:
show()
pass
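# Usage sketch (illustrative; the data is made up):
#   import numpy as np
#   GraphHisto(np.random.randn(500), bins=30, cdf=True, fit=True, title='Distribution')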
class GraphScatter(VectorGraph):
"""Draws an x-by-y scatter plot.
    Unique class members include fit and points. The fit member is a boolean flag for
    whether to draw the linear best fit line, and the points member is a boolean flag
    for whether to draw the scatter points. The underlying data is stored as a Vector
    whose data and other members hold the x and y values, respectively.
"""
_nrows = 1
_ncols = 1
_xsize = 6
_ysize = 5
def __init__(self, xdata, ydata=None, **kwargs):
"""GraphScatter constructor.
:param xdata: The x-axis data.
:param ydata: The y-axis data.
:param fit: Display the optional line fit.
:param points: Display the scatter points.
:param contours: Display the density contours
:param boxplot_borders: Display the boxplot borders
:param highlight: an array-like with points to highlight based on labels
:param labels: a vector object with the graph labels
:param title: The title of the graph.
:param save_to: Save the graph to the specified path.
:return: pass
"""
self._fit = kwargs.get('fit', True)
self._points = kwargs.get('points', True)
self._labels = kwargs.get('labels', None)
self._highlight = kwargs.get('highlight', None)
self._contours = kwargs.get('contours', False)
self._contour_props = (31, 1.1)
self._boxplot_borders = kwargs.get('boxplot_borders', False)
self._title = kwargs['title'] if 'title' in kwargs else 'Bivariate'
self._save_to = kwargs.get('save_to', None)
yname = kwargs.get('yname', 'y Data')
xname = kwargs.get('xname', 'x Data')
if ydata is None:
if is_vector(xdata):
super(GraphScatter, self).__init__(xdata, xname=xname, yname=yname)
else:
raise AttributeError('ydata argument cannot be None.')
else:
super(GraphScatter, self).__init__(
Vector(xdata, other=ydata, labels=self._labels),
xname=xname,
yname=yname,
)
def calc_contours(self):
"""
Calculates the density contours.
Returns
-------
contour_parms : tuple
First value - x-axis points
Second value - y-axis points
Third value - z-axis points
Fourth value - The contour levels
"""
xmin = self._data.data.min()
xmax = self._data.data.max()
ymin = self._data.other.min()
ymax = self._data.other.max()
values = vstack([self._data.data, self._data.other])
kernel = gaussian_kde(values)
_x, _y = mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = vstack([_x.ravel(), _y.ravel()])
_z = kernel.evaluate(positions).T.reshape(_x.shape)
return _x, _y, _z, arange(_z.min(), _z.max(), (_z.max() - _z.min()) / self._contour_props[0])
def calc_fit(self):
"""
Calculates the best fit line using sum of squares.
Returns
-------
fit_coordinates : list
A list of the min and max fit points.
"""
x = self._data.data
y = self._data.other
p = polyfit(x, y, 1)
fit = polyval(p, x)
if p[0] > 0:
return (x.min(), x.max()), (fit.min(), fit.max())
else:
return (x.min(), x.max()), (fit.max(), fit.min())
def draw(self):
"""
Draws the scatter plot based on the set parameters.
Returns
-------
pass
"""
# Setup the grid variables
x = self._data.data
y = self._data.other
h_ratio = [1, 1]
w_ratio = [1, 1]
# Setup the figure and gridspec
if self._boxplot_borders:
self._nrows, self._ncols = 2, 2
self._xsize = self._xsize + 0.5
self._ysize = self._ysize + 0.5
h_ratio, w_ratio = (1.5, 5.5), (5.5, 1.5)
main_plot = 2
else:
main_plot = 0
# Setup the figure
f = figure(figsize=(self._xsize, self._ysize))
f.suptitle(self._title, fontsize=14)
if self._boxplot_borders:
gs = GridSpec(self._nrows, self._ncols, height_ratios=h_ratio, width_ratios=w_ratio, hspace=0, wspace=0)
else:
gs = GridSpec(self._nrows, self._ncols)
ax1 = None
ax3 = None
# Draw the boxplot borders
if self._boxplot_borders:
ax1 = subplot(gs[0])
ax3 = subplot(gs[3])
bpx = ax1.boxplot(x, vert=False, showmeans=True)
bpy = ax3.boxplot(y, vert=True, showmeans=True)
setp(bpx['boxes'], color='k')
setp(bpx['whiskers'], color='k')
setp(bpy['boxes'], color='k')
setp(bpy['whiskers'], color='k')
vpx = ax1.violinplot(x, vert=False, showmedians=False, showmeans=False, showextrema=False)
vpy = ax3.violinplot(y, vert=True, showmedians=False, showmeans=False, showextrema=False)
setp(vpx['bodies'], facecolors=self.get_color(0))
setp(vpy['bodies'], facecolors=self.get_color(0))
ax1.xaxis.grid(True, linestyle='-', which='major', color='grey', alpha=0.75)
ax3.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=0.75)
setp(
[
ax1.get_xticklabels(), ax1.get_yticklabels(), ax3.get_xticklabels(), ax3.get_yticklabels()
], visible=False
)
# Draw the main graph
ax2 = subplot(gs[main_plot], sharex=ax1, sharey=ax3)
# Draw the points
if self._points:
# A 2-D array needs to be passed to prevent matplotlib from applying the default cmap if the size < 4.
color = (self.get_color(0),)
alpha_trans = 0.7
if self._highlight is not None:
# Find index of the labels which are in the highlight list
labelmask = self._data.labels.isin(self._highlight)
# Get x and y position of those labels
x_labels = x.loc[labelmask]
y_labels = y.loc[labelmask]
x_nolabels = x.loc[~labelmask]
y_nolabels = y.loc[~labelmask]
ax2.scatter(x_labels, y_labels, c=color, marker='o', linewidths=0, alpha=alpha_trans, zorder=1)
ax2.scatter(x_nolabels, y_nolabels, c=color, marker='o', linewidths=0, alpha=.2, zorder=1)
for k in self._data.labels[labelmask].index:
ax2.annotate(self._data.labels[k], xy=(x[k], y[k]), alpha=1, color=color[0])
else:
ax2.scatter(x, y, c=color, marker='o', linewidths=0, alpha=alpha_trans, zorder=1)
# Draw the contours
if self._contours:
x_prime, y_prime, z, levels = self.calc_contours()
ax2.contour(x_prime, y_prime, z, levels, linewidths=self._contour_props[1], nchunk=16,
extend='both', zorder=2)
# Draw the fit line
if self._fit:
fit_x, fit_y = self.calc_fit()
ax2.plot(fit_x, fit_y, 'r--', linewidth=2, zorder=3)
# Draw the grid lines and labels
ax2.xaxis.grid(True, linestyle='-', which='major', color='grey', alpha=0.75)
ax2.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=0.75)
xlabel(self._xname)
ylabel(self._yname)
# Save the figure to disk or display
if self._save_to:
savefig(self._save_to)
close(f)
else:
show()
pass
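# Usage sketch (illustrative; the data is made up):
#   import numpy as np
#   x = np.random.randn(200)
#   y = 2.0 * x + np.random.randn(200)
#   GraphScatter(x, y, fit=True, boxplot_borders=True, title='Bivariate')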
class GraphGroupScatter(VectorGraph):
"""Draws an x-by-y scatter plot with more than a single group.
    Unique class members include fit and points. The fit member is a boolean flag for
    whether to draw the linear best fit line for each group, and the points member is a
    boolean flag for whether to draw the scatter points. The underlying data is stored
    as a Vector whose data, other and groups members hold the x values, y values and
    group names, respectively.
"""
_nrows = 1
_ncols = 1
_xsize = 6
_ysize = 5
def __init__(self, xdata, ydata=None, groups=None, **kwargs):
"""GraphScatter constructor.
:param xdata: The x-axis data.
:param ydata: The y-axis data.
:param _fit: Display the optional line fit.
:param _highlight: Give list of groups to highlight in scatter.
:param _points: Display the scatter points.
:param _contours: Display the density contours
:param _boxplot_borders: Display the boxplot borders
:param _labels: a vector object with the graph labels
:param _title: The title of the graph.
:param _save_to: Save the graph to the specified path.
:return: pass
"""
self._fit = kwargs['fit'] if 'fit' in kwargs else True
self._points = kwargs['points'] if 'points' in kwargs else True
self._labels = kwargs['labels'] if 'labels' in kwargs else None
self._highlight = kwargs['highlight'] if 'highlight' in kwargs else None
self._boxplot_borders = kwargs['boxplot_borders'] if 'boxplot_borders' in kwargs else True
self._title = kwargs['title'] if 'title' in kwargs else 'Group Bivariate'
self._save_to = kwargs['save_to'] if 'save_to' in kwargs else None
yname = kwargs['yname'] if 'yname' in kwargs else 'y Data'
xname = kwargs['xname'] if 'xname' in kwargs else 'x Data'
if ydata is None:
if is_vector(xdata):
super(GraphGroupScatter, self).__init__(xdata, xname=xname, yname=yname)
else:
raise AttributeError('ydata argument cannot be None.')
else:
super(GraphGroupScatter, self).__init__(Vector(
xdata,
other=ydata,
groups=groups,
labels=self._labels
), xname=xname, yname=yname)
@staticmethod
def calc_fit(x, y):
"""
Calculates the best fit line using sum of squares.
Returns
-------
fit_coordinates : list
A list of the min and max fit points.
"""
p = polyfit(x, y, 1)
fit = polyval(p, x)
if p[0] > 0:
return (x.min(), x.max()), (fit.min(), fit.max())
else:
return (x.min(), x.max()), (fit.max(), fit.min())
def draw(self):
"""
Draws the scatter plot based on the set parameters.
Returns
-------
pass
"""
# Setup the grid variables
x = self._data.data
y = self._data.other
groups = sorted(self._data.groups.keys())
h_ratio = [1, 1]
w_ratio = [1, 1]
# Setup the figure and gridspec
if self._boxplot_borders:
self._nrows, self._ncols = 2, 2
self._xsize = self._xsize + 0.5
self._ysize = self._ysize + 0.5
h_ratio, w_ratio = (1.5, 5.5), (5.5, 1.5)
main_plot = 2
else:
main_plot = 0
# Setup the figure
f = figure(figsize=(self._xsize, self._ysize))
f.suptitle(self._title, fontsize=14)
if self._boxplot_borders:
gs = GridSpec(self._nrows, self._ncols, height_ratios=h_ratio, width_ratios=w_ratio, hspace=0, wspace=0)
else:
gs = GridSpec(self._nrows, self._ncols)
ax1 = None
ax3 = None
# Draw the boxplot borders
if self._boxplot_borders:
ax1 = subplot(gs[0])
ax3 = subplot(gs[3])
bpx = ax1.boxplot(x, vert=False, showmeans=True)
bpy = ax3.boxplot(y, vert=True, showmeans=True)
setp(bpx['boxes'], color='k')
setp(bpx['whiskers'], color='k')
setp(bpy['boxes'], color='k')
setp(bpy['whiskers'], color='k')
vpx = ax1.violinplot(x, vert=False, showmedians=False, showmeans=False, showextrema=False)
vpy = ax3.violinplot(y, vert=True, showmedians=False, showmeans=False, showextrema=False)
setp(vpx['bodies'], facecolors=self.get_color(0))
setp(vpy['bodies'], facecolors=self.get_color(0))
ax1.xaxis.grid(True, linestyle='-', which='major', color='grey', alpha=0.75)
ax3.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=0.75)
setp([ax1.get_xticklabels(), ax1.get_yticklabels(), ax3.get_xticklabels(), ax3.get_yticklabels()],
visible=False)
# Draw the main graph
ax2 = subplot(gs[main_plot], sharex=ax1, sharey=ax3)
for grp, (grp_x, grp_y) in self._data.paired_groups.items():
i = groups.index(grp)
alpha_trans = 0.65
if self._highlight is not None:
try:
if grp not in self._highlight:
alpha_trans = 0.2
except TypeError:
pass
if isinstance(grp, six.string_types) and len(grp) > 20:
grp = grp[0:21] + '...'
# Draw the points
if self._points:
# A 2-D array needs to be passed to prevent matplotlib from applying the default cmap if the size < 4.
color = (self.get_color(i),)
scatter_kwargs = dict(
c=color,
marker='o',
linewidths=0,
zorder=1,
)
# Draw the point labels
if self._data.has_labels and self._highlight is not None:
# If a group is in highlights and labels are also given
if grp in self._highlight:
scatter_kwargs.update(
dict(
alpha=alpha_trans,
label=grp
)
)
ax2.scatter(grp_x, grp_y, **scatter_kwargs)
# Highlight the specified labels
else:
labelmask = self._data.group_labels[grp].isin(self._highlight)
# Get x and y position of those labels
x_labels = grp_x.loc[labelmask]
y_labels = grp_y.loc[labelmask]
x_nolabels = grp_x.loc[~labelmask]
y_nolabels = grp_y.loc[~labelmask]
scatter_kwargs.update(
dict(
alpha=0.65,
label=grp if any(labelmask) else None,
)
)
ax2.scatter(x_labels, y_labels, **scatter_kwargs)
scatter_kwargs.update(
dict(
alpha=0.2,
label=None if any(labelmask) else grp,
)
)
ax2.scatter(x_nolabels, y_nolabels, **scatter_kwargs)
# Add the annotations
for k in self._data.group_labels[grp][labelmask].index:
clr = color[0]
ax2.annotate(self._data.group_labels[grp][k], xy=(grp_x[k], grp_y[k]), alpha=1, color=clr)
else:
scatter_kwargs.update(
dict(
alpha=alpha_trans,
label=grp,
)
)
ax2.scatter(grp_x, grp_y, **scatter_kwargs)
# Draw the fit line
if self._fit:
fit_x, fit_y = self.calc_fit(grp_x, grp_y)
if self._points:
ax2.plot(fit_x, fit_y, linestyle='--', color=self.get_color(i), linewidth=2, zorder=2)
else:
ax2.plot(fit_x, fit_y, linestyle='--', color=self.get_color(i), linewidth=2, zorder=2, label=grp)
# Draw the legend
if (self._fit or self._points) and len(groups) > 1:
ax2.legend(loc='best')
# Draw the grid lines and labels
ax2.xaxis.grid(True, linestyle='-', which='major', color='grey', alpha=0.75)
ax2.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=0.75)
xlabel(self._xname)
ylabel(self._yname)
# Save the figure to disk or display
if self._save_to:
savefig(self._save_to)
close(f)
else:
show()
pass
class GraphBoxplot(VectorGraph):
"""Draws box plots of the provided data as well as an optional probability plot.
Unique class members are groups, nqp and prob. The groups member is a list of
labels for each boxplot. If groups is an empty list, sequentially ascending
numbers are used for each boxplot. The nqp member is a flag that turns the
probability plot on or off. The prob member is a list of tuples that contains
the data used to graph the probability plot. It is also worth noting that the
vector member for the GraphBoxplot is a list of lists that contain the data
for each boxplot.
"""
_nrows = 1
_ncols = 1
_xsize = 5.75
_ysize = 5
_default_alpha = 0.05
def __init__(self, *args, **kwargs):
"""GraphBoxplot constructor. NOTE: If vectors is a dict, the boxplots are
graphed in random order instead of the provided order.
:param groups: An optional list of boxplot labels. The order should match the order in vectors.
:param nqp: Display the optional probability plot.
:param _title: The title of the graph.
:param _save_to: Save the graph to the specified path.
:return: pass
"""
name = kwargs['name'] if 'name' in kwargs else 'Values'
categories = kwargs['categories'] if 'categories' in kwargs else 'Categories'
xname = kwargs['xname'] if 'xname' in kwargs else categories
yname = kwargs['yname'] if 'yname' in kwargs else name
self._title = kwargs['title'] if 'title' in kwargs else 'Oneway'
self._nqp = kwargs['nqp'] if 'nqp' in kwargs else True
self._save_to = kwargs['save_to'] if 'save_to' in kwargs else None
self._gmean = kwargs['gmean'] if 'gmean' in kwargs else True
self._gmedian = kwargs['gmedian'] if 'gmedian' in kwargs else True
self._circles = kwargs['circles'] if 'circles' in kwargs else True
self._alpha = kwargs['alpha'] if 'alpha' in kwargs else self._default_alpha
if 'title' in kwargs:
self._title = kwargs['title']
elif self._nqp:
self._title = 'Oneway and Normal Quantile Plot'
else:
self._title = 'Oneway'
if is_vector(args[0]):
data = args[0]
elif is_dict(args[0]):
data = Vector()
for g, d in args[0].items():
data.append(Vector(d, groups=[g] * len(d)))
else:
if is_group(args) and len(args) > 1:
future('Graphing boxplots by passing multiple arguments will be removed in a future version. '
'Instead, pass unstacked arguments as a dictionary.')
data = Vector()
if 'groups' in kwargs:
if len(kwargs['groups']) != len(args):
                        raise AttributeError('The length of passed groups does not match the number of passed data arguments.')
for g, d in zip(kwargs['groups'], args):
data.append(Vector(d, groups=[g] * len(d)))
else:
for d in args:
data.append(Vector(d))
else:
if 'groups' in kwargs:
if len(kwargs['groups']) != len(args[0]):
                    raise AttributeError('The length of passed groups does not match the length of the passed data.')
data = Vector(args[0], groups=kwargs['groups'])
else:
data = Vector(args[0])
super(GraphBoxplot, self).__init__(data, xname=xname, yname=yname, save_to=self._save_to)
@staticmethod
def grand_mean(data):
return mean([mean(sample) for sample in data])
@staticmethod
def grand_median(data):
return median([median(sample) for sample in data])
def tukey_circles(self, data):
num = []
den = []
crit = []
radii = []
xbar = []
for sample in data:
df = len(sample) - 1
num.append(std(sample, ddof=1) ** 2 * df)
den.append(df)
crit.append(t.ppf(1 - self._alpha, df))
mse = sum(num) / sum(den)
for i, sample in enumerate(data):
radii.append(fabs(crit[i]) * sqrt(mse / len(sample)))
xbar.append(mean(sample))
return tuple(zip(xbar, radii))
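    # Worked illustration (assumed example data, not from the original source): for two
    # samples with sample standard deviations 2.0 and 3.0 and n = 5 each, the pooled
    # MSE is (2.0**2 * 4 + 3.0**2 * 4) / (4 + 4) = 6.5, and each circle radius is
    # |t.ppf(1 - alpha, 4)| * sqrt(6.5 / 5), centered vertically on that sample's mean.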
def draw(self):
"""Draws the boxplots based on the set parameters."""
# Setup the grid variables
w_ratio = [1]
if self._circles:
w_ratio = [4, 1]
self._ncols += 1
if self._nqp:
w_ratio.append(4 if self._circles else 1)
self._ncols += 1
groups, data = zip(*[
(g, v['ind'].reset_index(drop=True)) for g, v in self._data.values.groupby('grp') if not v.empty]
)
# Create the quantile plot arrays
prob = [probplot(v) for v in data]
# Create the figure and gridspec
if self._nqp and len(prob) > 0:
self._xsize *= 2
f = figure(figsize=(self._xsize, self._ysize))
f.suptitle(self._title, fontsize=14)
gs = GridSpec(self._nrows, self._ncols, width_ratios=w_ratio, wspace=0)
# Draw the boxplots
ax1 = subplot(gs[0])
bp = ax1.boxplot(data, showmeans=True, labels=groups)
setp(bp['boxes'], color='k')
setp(bp['whiskers'], color='k')
vp = ax1.violinplot(data, showextrema=False, showmedians=False, showmeans=False)
for i in range(len(groups)):
setp(vp['bodies'][i], facecolors=self.get_color(i))
ax1.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=0.75)
if self._gmean:
ax1.axhline(float(self.grand_mean(data)), c='k', linestyle='--', alpha=0.4)
if self._gmedian:
ax1.axhline(float(self.grand_median(data)), c='k', linestyle=':', alpha=0.4)
        if any(len(str(g)) > 9 for g in groups) or len(groups) > 5:
xticks(rotation=60)
subplots_adjust(bottom=0.2)
ylabel(self._yname)
xlabel(self._xname)
# Draw the Tukey-Kramer circles
if self._circles:
ax2 = subplot(gs[1], sharey=ax1)
for i, (center, radius) in enumerate(self.tukey_circles(data)):
c = Circle((0.5, center), radius=radius, facecolor='none', edgecolor=self.get_color(i))
ax2.add_patch(c)
# matplotlib 2.2.2 requires adjustable='datalim' to display properly.
ax2.set_aspect('equal', adjustable='datalim')
setp(ax2.get_xticklabels(), visible=False)
setp(ax2.get_yticklabels(), visible=False)
ax2.set_xticks([])
# Draw the normal quantile plot
if self._nqp and len(prob) > 0:
ax3 = subplot(gs[2], sharey=ax1) if self._circles else subplot(gs[1], sharey=ax1)
for i, g in enumerate(prob):
osm = g[0][0]
osr = g[0][1]
slope = g[1][0]
intercept = g[1][1]
ax3.plot(osm, osr, marker='^', color=self.get_color(i), label=groups[i])
ax3.plot(osm, slope * osm + intercept, linestyle='--', linewidth=2, color=self.get_color(i))
ax3.xaxis.grid(True, linestyle='-', which='major', color='grey', alpha=0.75)
ax3.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=0.75)
ax3.legend(loc='best')
xlabel("Quantiles")
setp(ax3.get_yticklabels(), visible=False)
# Save the figure to disk or display
if self._save_to:
savefig(self._save_to)
close(f)
else:
show()
pass | /sci_analysis-2.2.1rc0.tar.gz/sci_analysis-2.2.1rc0/sci_analysis/graphs/vector.py | 0.815673 | 0.537163 | vector.py | pypi |
_colors = (
(0.0, 0.3, 0.7), # blue
(1.0, 0.1, 0.1), # red
(0.0, 0.7, 0.3), # green
(1.0, 0.5, 0.0), # orange
(0.1, 1.0, 1.0), # cyan
(1.0, 1.0, 0.0), # yellow
(1.0, 0.0, 1.0), # magenta
(0.5, 0.0, 1.0), # purple
(0.5, 1.0, 0.0), # light green
(0.0, 0.0, 0.0) # black
)
_color_names = (
'blue',
'red',
'green',
'orange',
'cyan',
'yellow',
'magenta',
'purple',
'light green',
'black'
)
class Graph(object):
"""The super class all other sci_analysis graphing classes descend from.
Classes that descend from Graph should implement the draw method at bare minimum.
Graph members are _nrows, _ncols, _xsize, _ysize, _data, _xname and _yname. The _nrows
member is the number of graphs that will span vertically. The _ncols member is
the number of graphs that will span horizontally. The _xsize member is the horizontal
size of the graph area. The _ysize member is the vertical size of the graph area.
    The _data member is the data to be plotted. The _xname member is the x-axis label.
The _yname member is the y-axis label.
Parameters
----------
_nrows : int, static
The number of graphs that will span vertically.
_ncols : int, static
The number of graphs that will span horizontally.
_xsize : int, static
The horizontal size of the graph area.
_ysize : int, static
The vertical size of the graph area.
_min_size : int, static
The minimum required length of the data to be graphed.
_xname : str
The x-axis label.
_yname : str
The y-axis label.
_data : Data or list(d1, d2, ..., dn)
The data to graph.
Returns
-------
pass
"""
_nrows = 1
_ncols = 1
_xsize = 5
_ysize = 5
_min_size = 1
def __init__(self, data, **kwargs):
self._xname = kwargs['xname'] if 'xname' in kwargs else 'x'
self._yname = kwargs['yname'] if 'yname' in kwargs else 'y'
self._data = data
def get_color_by_name(self, color='black'):
"""Return a color array based on the string color passed.
Parameters
----------
color : str
A string color name.
Returns
-------
color : tuple
A color tuple that corresponds to the passed color string.
"""
return self.get_color(_color_names.index(color))
@staticmethod
def get_color(num):
"""Return a color based on the given num argument.
Parameters
----------
num : int
            A non-negative numeric value that maps to a corresponding color.
Returns
-------
color : tuple
A color tuple calculated from the num argument.
"""
desired_color = []
floor = int(num) // len(_colors)
remainder = int(num) % len(_colors)
selected = _colors[remainder]
if floor > 0:
for value in selected:
desired_color.append(value / (2.0 * floor) + 0.4)
return tuple(desired_color)
else:
return selected
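    # Illustrative usage (not part of the original source): with the 10-entry _colors
    # palette above, Graph.get_color(1) returns the red tuple (1.0, 0.1, 0.1), while
    # Graph.get_color(10) wraps around and dims blue to
    # (0.0 / 2 + 0.4, 0.3 / 2 + 0.4, 0.7 / 2 + 0.4) == (0.4, 0.55, 0.75).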
def draw(self):
"""
Prepares and displays the graph based on the set class members.
"""
raise NotImplementedError | /sci_analysis-2.2.1rc0.tar.gz/sci_analysis-2.2.1rc0/sci_analysis/graphs/base.py | 0.840619 | 0.585575 | base.py | pypi |
import math
# matplotlib imports
from matplotlib.pyplot import show, xticks, savefig, close, subplots, subplots_adjust
# local imports
from .base import Graph
from ..data import Categorical, is_group, is_categorical
from ..analysis.exc import MinimumSizeError, NoDataError
class CategoricalGraph(Graph):
def __init__(self, *args, **kwargs):
order = kwargs['order'] if 'order' in kwargs else None
dropna = kwargs['dropna'] if 'dropna' in kwargs else False
seq_name = kwargs['name'] if 'name' in kwargs else None
data = list()
for d in args:
new = d if is_categorical(d) else Categorical(d, name=seq_name, order=order, dropna=dropna)
if new.is_empty():
raise NoDataError('Cannot draw graph because there is no data.')
if len(new) <= self._min_size:
raise MinimumSizeError('Length of data is less than the minimum size {}.'.format(self._min_size))
data.append(new)
if not is_group(data):
raise NoDataError('Cannot draw graph because there is no data.')
if len(data) == 1:
data = data[0]
super(CategoricalGraph, self).__init__(data, **kwargs)
self.draw()
def draw(self):
"""
Prepares and displays the graph based on the set class members.
"""
raise NotImplementedError
class GraphFrequency(CategoricalGraph):
_xsize = 8.5
_ysize = 5.5
def __init__(self, data, **kwargs):
self._percent = kwargs['percent'] if 'percent' in kwargs else False
self._vertical = kwargs['vertical'] if 'vertical' in kwargs else True
self._grid = kwargs['grid'] if 'grid' in kwargs else False
self._labels = kwargs['labels'] if 'labels' in kwargs else True
self._title = kwargs['title'] if 'title' in kwargs else 'Frequencies'
self._save_to = kwargs['save_to'] if 'save_to' in kwargs else None
order = kwargs['order'] if 'order' in kwargs else None
dropna = kwargs['dropna'] if 'dropna' in kwargs else False
yname = 'Percent' if self._percent else 'Frequency'
name = 'Categories'
if 'name' in kwargs:
name = kwargs['name']
elif 'xname' in kwargs:
name = kwargs['xname']
super(GraphFrequency, self).__init__(data, xname=name, yname=yname, order=order, dropna=dropna)
def add_numeric_labels(self, bars, axis):
if self._vertical:
if len(bars) < 3:
size = 'xx-large'
elif len(bars) < 9:
size = 'x-large'
elif len(bars) < 21:
size = 'large'
elif len(bars) < 31:
size = 'medium'
else:
size = 'small'
for bar in bars:
x_pos = bar.get_width()
y_pos = bar.get_y() + bar.get_height() / 2.
x_off = x_pos + 0.05
adjust = .885 if self._percent else .95
if not self._percent and x_pos != 0:
adjust = adjust - math.floor(math.log10(x_pos)) * .035
label = '{:.1f}'.format(x_pos) if self._percent else '{}'.format(x_pos)
col = 'k'
if x_pos != 0 and (x_off / axis.get_xlim()[1]) > .965 - math.floor(math.log10(x_pos)) * .02:
x_off = x_pos * adjust
col = 'w'
axis.annotate(label,
xy=(x_pos, y_pos),
xytext=(x_off, y_pos),
va='center',
color=col,
size=size)
else:
if len(bars) < 21:
size = 'medium'
elif len(bars) < 31:
size = 'small'
else:
size = 'x-small'
for bar in bars:
y_pos = bar.get_height()
x_pos = bar.get_x() + bar.get_width() / 2.
y_off = y_pos + 0.05
label = '{:.1f}'.format(y_pos) if self._percent else '{}'.format(y_pos)
col = 'k'
if (y_off / axis.get_ylim()[1]) > 0.95:
y_off = y_pos * .95
col = 'w'
axis.annotate(label,
xy=(x_pos, y_pos),
xytext=(x_pos, y_off),
ha='center',
size=size,
color=col)
def draw(self):
freq = self._data.percents if self._percent else self._data.counts
categories = self._data.categories.tolist()
nbars = tuple(range(1, len(freq) + 1))
grid_props = dict(linestyle='-', which='major', color='grey', alpha=0.75)
bar_props = dict(color=self.get_color(0), zorder=3)
# Create the figure and axes
if self._vertical:
f, ax = subplots(figsize=(self._ysize, self._xsize))
else:
f, ax = subplots(figsize=(self._xsize, self._ysize))
# Set the title
f.suptitle(self._title, fontsize=14)
# Create the graph, grid and labels
if self._grid:
ax.xaxis.grid(True, **grid_props) if self._vertical else ax.yaxis.grid(True, **grid_props)
categories = ['{}...'.format(cat[:18]) if len(str(cat)) > 20 else cat for cat in categories]
max_len = max([len(str(cat)) for cat in categories])
offset = max_len / 5 * 0.09
if self._vertical:
bars = ax.barh(nbars, freq.tolist(), **bar_props)
ax.set_xlabel(self._yname)
ax.set_yticks(nbars)
ax.set_yticklabels(categories)
subplots_adjust(left=offset)
ax.invert_yaxis()
else:
bars = ax.bar(nbars, freq.tolist(), **bar_props)
ax.set_ylabel(self._yname)
ax.set_xticks(nbars)
angle = 90 if len(nbars) > 15 else 60
xticks(rotation=angle)
ax.set_xticklabels(categories)
subplots_adjust(bottom=offset)
if self._labels:
self.add_numeric_labels(bars, ax)
# Save the figure to disk or display
if self._save_to:
savefig(self._save_to)
close(f)
else:
show()
pass | /sci_analysis-2.2.1rc0.tar.gz/sci_analysis-2.2.1rc0/sci_analysis/graphs/categorical.py | 0.532182 | 0.290578 | categorical.py | pypi |
import pandas as pd
import numpy as np
# Import from local
from .data import Data, is_data
from .data_operations import flatten, is_iterable
class EmptyVectorError(Exception):
"""
Exception raised when the length of a Vector object is 0.
"""
pass
class UnequalVectorLengthError(Exception):
"""
Exception raised when the length of two Vector objects are not equal, i.e., len(Vector1) != len(Vector2)
"""
pass
def is_numeric(obj):
"""
Test if the passed array_like argument is a sci_analysis Numeric object.
Parameters
----------
obj : object
The input object.
Returns
-------
test result : bool
        The test result of whether obj is a sci_analysis Numeric object or not.
"""
return isinstance(obj, Numeric)
def is_vector(obj):
"""
Test if the passed array_like argument is a sci_analysis Vector object.
Parameters
----------
obj : object
The input object.
Returns
-------
test result : bool
        The test result of whether obj is a sci_analysis Vector object or not.
"""
return isinstance(obj, Vector)
class Numeric(Data):
"""An abstract class that all Data classes that represent numeric data should inherit from."""
_ind = 'ind'
_dep = 'dep'
_grp = 'grp'
_lbl = 'lbl'
_col_names = (_ind, _dep, _grp, _lbl)
def __init__(self, sequence=None, other=None, groups=None, labels=None, name=None):
"""Takes an array-like object and converts it to a pandas Series with any non-numeric values converted to NaN.
Parameters
----------
sequence : int | list | set | tuple | np.array | pd.Series
The input object
other : list | set | tuple | np.array | pd.Series, optional
The secondary input object
groups : list | set | tuple | np.array | pd.Series, optional
The sequence of group names for sub-arrays
labels : list | set | tuple | np.array | pd.Series, optional
The sequence of data point labels
name : str, optional
The name of the Numeric object
"""
self._auto_groups = True if groups is None else False
self._values = pd.DataFrame([], columns=self._col_names)
if sequence is None:
super(Numeric, self).__init__(v=self._values, n=name)
self._type = None
self._values.loc[:, self._grp] = self._values[self._grp].astype('category')
elif is_data(sequence):
super(Numeric, self).__init__(v=sequence.values, n=name)
self._type = sequence.data_type
self._auto_groups = sequence.auto_groups
elif isinstance(sequence, pd.DataFrame):
raise ValueError('sequence cannot be a pandas DataFrame object. Use a Series instead.')
else:
sequence = pd.to_numeric(self.data_prep(sequence), errors='coerce')
other = pd.to_numeric(self.data_prep(other), errors='coerce') if other is not None else np.nan
groups = self.data_prep(groups) if groups is not None else 1
# TODO: This try block needs some work
try:
self._values[self._ind] = sequence
self._values[self._dep] = other
self._values[self._grp] = groups
self._values.loc[:, self._grp] = self._values[self._grp].astype('category')
if labels is not None:
self._values[self._lbl] = labels
except ValueError:
raise UnequalVectorLengthError('length of data does not match length of other.')
if any(self._values[self._dep].notnull()):
self._values = self.drop_nan_intersect()
else:
self._values = self.drop_nan()
self._type = self._values[self._ind].dtype
self._name = name
@staticmethod
def data_prep(seq):
"""
        Converts the passed sequence to conform to the Data object standards.
Parameters
----------
seq : array-like
The input array to be prepared.
Returns
-------
data : np.array
The enclosed data represented as a numpy array.
"""
if hasattr(seq, 'shape'):
if len(seq.shape) > 1:
return flatten(seq)
else:
return seq
else:
return flatten(seq)
def drop_nan(self):
"""
        Removes rows containing NaN in the independent column and returns the resulting pandas DataFrame. The length
        of the returned object is the original object length minus the number of rows removed.
        Returns
        -------
        arr : pandas.DataFrame
            A copy of the Numeric object's internal DataFrame with all NaN rows removed.
"""
return self._values.dropna(how='any', subset=[self._ind])
def drop_nan_intersect(self):
"""
Removes the value from the internal Vector object and seq at i where i is nan in the internal Vector object or
seq.
Returns
-------
arr : pandas.DataFrame
A copy of the Numeric object's internal DataFrame with all nan values removed.
"""
return self._values.dropna(how='any', subset=[self._ind, self._dep])
def drop_groups(self, grps):
"""Drop the specified group name from the Numeric object.
Parameters
----------
grps : str|int|list[str]|list[int]
The name of the group to remove.
Returns
-------
arr : pandas.DataFrame
A copy of the Numeric object's internal DataFrame with all records belonging to the specified group removed.
"""
if not is_iterable(grps):
grps = [grps]
dropped = self._values.query("{} not in {}".format(self._grp, grps)).copy()
dropped[self._grp] = dropped[self._grp].cat.remove_categories(grps)
self._values = dropped
return dropped
@property
def data_type(self):
return self._type
@property
def data(self):
return self._values[self._ind]
@property
def other(self):
return pd.Series([]) if all(self._values[self._dep].isnull()) else self._values[self._dep]
@property
def groups(self):
groups = self._values.groupby(self._grp)
return {grp: seq[self._ind].rename(grp) for grp, seq in groups if not seq.empty}
@property
def labels(self):
return self._values[self._lbl].fillna('None')
@property
def paired_groups(self):
groups = self._values.groupby(self._grp)
return {grp: (df[self._ind], df[self._dep]) for grp, df in groups if not df.empty}
@property
def group_labels(self):
groups = self._values.groupby(self._grp)
return {grp: df[self._lbl] for grp, df in groups if not df.empty}
@property
def values(self):
return self._values
@property
def auto_groups(self):
return self._auto_groups
@property
def has_labels(self):
return any(pd.notna(self._values[self._lbl]))
class Vector(Numeric):
"""
The sci_analysis representation of continuous, numeric data.
"""
def __init__(self, sequence=None, other=None, groups=None, labels=None, name=None):
"""
Takes an array-like object and converts it to a pandas Series of
dtype float64, with any non-numeric values converted to NaN.
Parameters
----------
sequence : array-like or int or float or None
The input object
other : array-like
The secondary input object
groups : array-like
The sequence of group names for sub-arrays
labels : list | set | tuple | np.array | pd.Series, optional
The sequence of data point labels
name : str, optional
The name of the Vector object
"""
super(Vector, self).__init__(sequence=sequence, other=other, groups=groups, labels=labels, name=name)
if not self._values.empty:
self._values[self._ind] = self._values[self._ind].astype('float')
self._values[self._dep] = self._values[self._dep].astype('float')
def is_empty(self):
"""
Overrides the super class's method to also check for length of zero.
Returns
-------
test_result : bool
The result of whether the length of the Vector object is 0 or not.
Examples
--------
>>> Vector([1, 2, 3, 4, 5]).is_empty()
False
>>> Vector([]).is_empty()
True
"""
return self._values.empty
def append(self, other):
"""
Append the values of another vector to self.
Parameters
----------
other : Vector
The Vector object to be appended to self.
Returns
-------
vector : Vector
The original Vector object with new values.
Examples
--------
>>> Vector([1, 2, 3]).append(Vector([4, 5, 6])).data
pandas.Series([1., 2., 3., 4., 5., 6.])
"""
if not is_vector(other):
raise ValueError("Vector object cannot be added to a non-vector object.")
if other.data.empty:
return self
if self.auto_groups and other.auto_groups and len(self._values) > 0:
new_cat = max(self._values[self._grp].cat.categories) + 1
other.values['grp'] = new_cat
self._values = pd.concat([self._values, other.values], copy=False)
self._values.reset_index(inplace=True, drop=True)
self._values.loc[:, self._grp] = self._values[self._grp].astype('category')
return self
def flatten(self):
"""
Disassociates independent and dependent data into individual groups.
Returns
-------
data : tuple(Series)
A tuple of pandas Series.
"""
if not self.other.empty:
return (tuple(data[self._ind] for grp, data in self.values.groupby(self._grp)) +
tuple(data[self._dep] for grp, data in self.values.groupby(self._grp)))
else:
return tuple(data[self._ind] for grp, data in self.values.groupby(self._grp)) | /sci_analysis-2.2.1rc0.tar.gz/sci_analysis-2.2.1rc0/sci_analysis/data/numeric.py | 0.818592 | 0.724889 | numeric.py | pypi |
def is_data(obj):
"""
Test if the passed array_like argument is a sci_analysis Data object.
Parameters
----------
obj : object
The input object.
Returns
-------
test result : bool
        The test result of whether obj is a sci_analysis Data object or not.
"""
return isinstance(obj, Data)
class Data(object):
"""
The super class used by all objects representing data for analysis
in sci_analysis. All analysis classes should expect the data provided through
arguments to be a descendant of this class.
Data members are data_type, data and name. data_type is used for identifying
the container class. The data member stores the data provided through an
argument. The name member is an optional name for the Data object.
"""
def __init__(self, v=None, n=None):
"""
Sets the data and name members.
Parameters
----------
v : array_like
The input object
n : str
The name of the Data object
"""
self._values = v
self._name = n
def is_empty(self):
"""
Tests if this Data object's data member equals 'None' and returns the result.
Returns
-------
test result : bool
The result of whether self._values is set or not
"""
return self._values is None
@property
def data(self):
return self._values
@property
def name(self):
return self._name
def __repr__(self):
"""
Prints the Data object using the same representation as its data member.
Returns
-------
output : str
The string representation of the encapsulated data.
"""
return self._values.__repr__()
def __len__(self):
"""Returns the length of the data member. If data is not defined, 0 is returned. If the data member is a scalar
value, 1 is returned.
Returns
-------
length : int
The length of the encapsulated data.
"""
if self._values is not None:
try:
return len(self._values)
except TypeError:
return 1
else:
return 0
def __getitem__(self, item):
"""
Gets the value of the data member at index item and returns it.
Parameters
----------
item : int
An index of the encapsulating data.
Returns
-------
value : object
The value of the encapsulated data at the specified index, otherwise None if no such index exists.
"""
try:
return self._values[item]
except (IndexError, AttributeError):
return None
def __contains__(self, item):
"""
Tests whether the encapsulated data contains the specified index or not.
Parameters
----------
item : int
An index of the encapsulating data.
Returns
-------
test result : bool
The test result of whether item is a valid index of the encapsulating data or not.
"""
try:
return item in self._values
except AttributeError:
return None
def __iter__(self):
"""
Give this Data object the iterative behavior of its encapsulated data.
Returns
-------
        itr : iterator
An iterator based on the encapsulated sequence.
"""
return self._values.__iter__() | /sci_analysis-2.2.1rc0.tar.gz/sci_analysis-2.2.1rc0/sci_analysis/data/data.py | 0.927961 | 0.827689 | data.py | pypi |
import six
import numpy as np
import pandas as pd
def to_float(seq):
"""
    Takes an argument seq, tries to convert each value to a float and returns the result. If a value cannot be
converted to a float, it is replaced by 'nan'.
Parameters
----------
seq : array-like
The input object.
Returns
-------
subseq : array_like
seq with values converted to a float or "nan".
>>> to_float(['1', '2', '3', 'four', '5'])
[1.0, 2.0, 3.0, nan, 5.0]
"""
float_list = list()
for i in range(len(seq)):
try:
float_list.append(float(seq[i]))
except ValueError:
float_list.append(float("nan"))
except TypeError:
float_list.append(to_float(seq[i]))
return float_list
def flatten(seq):
"""
Recursively reduces the dimension of seq to one.
Parameters
----------
seq : array-like
The input object.
Returns
-------
subseq : array_like
A flattened copy of the input object.
Flatten a two-dimensional list into a one-dimensional list
>>> flatten([[1, 2, 3], [4, 5, 6]])
array([1, 2, 3, 4, 5, 6])
Flatten a three-dimensional list into a one-dimensional list
>>> flatten([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
>>> flatten(([1, 2, 3], [4, 5, 6]))
array([1, 2, 3, 4, 5, 6])
>>> flatten(list(zip([1, 2, 3], [4, 5, 6])))
array([1, 4, 2, 5, 3, 6])
>>> flatten([(1, 2), (3, 4), (5, 6), (7, 8)])
array([1, 2, 3, 4, 5, 6, 7, 8])
"""
return np.array(seq).flatten()
def is_tuple(obj):
"""
Checks if a given sequence is a tuple.
Parameters
----------
obj : object
        The input object.
Returns
-------
test result : bool
        The test result of whether obj is a tuple or not.
>>> is_tuple(('a', 'b'))
True
>>> is_tuple(['a', 'b'])
False
>>> is_tuple(4)
False
"""
    return isinstance(obj, tuple)
def is_iterable(obj):
"""
Checks if a given variable is iterable, but not a string.
Parameters
----------
obj : Any
The input argument.
Returns
-------
test result : bool
The test result of whether variable is iterable or not.
>>> is_iterable([1, 2, 3])
True
>>> is_iterable((1, 2, 3))
True
>>> is_iterable({'one': 1, 'two': 2, 'three': 3})
True
    String arguments return False.
>>> is_iterable('foobar')
False
Scalars return False.
>>> is_iterable(42)
False
"""
if isinstance(obj, six.string_types):
return False
try:
obj.__iter__()
return True
except (AttributeError, TypeError):
return False
def is_array(obj):
"""
Checks if a given sequence is a numpy Array object.
Parameters
----------
obj : object
The input argument.
Returns
-------
test result : bool
        The test result of whether obj is a numpy Array or not.
>>> import numpy as np
>>> is_array([1, 2, 3, 4, 5])
False
>>> is_array(np.array([1, 2, 3, 4, 5]))
True
"""
return hasattr(obj, 'dtype')
def is_series(obj):
"""
Checks if a given sequence is a Pandas Series object.
Parameters
----------
obj : object
The input argument.
Returns
-------
bool
>>> is_series([1, 2, 3])
False
>>> is_series(pd.Series([1, 2, 3]))
True
"""
return isinstance(obj, pd.Series)
def is_dict(obj):
"""
Checks if a given sequence is a dictionary.
Parameters
----------
obj : object
The input argument.
Returns
-------
test result : bool
        The test result of whether obj is a dictionary or not.
>>> is_dict([1, 2, 3])
False
>>> is_dict((1, 2, 3))
False
>>> is_dict({'one': 1, 'two': 2, 'three': 3})
True
>>> is_dict('foobar')
False
"""
return isinstance(obj, dict)
def is_group(seq):
"""
Checks if a given variable is a list of iterable objects.
Parameters
----------
seq : array_like
The input argument.
Returns
-------
test result : bool
The test result of whether seq is a list of array_like values or not.
>>> is_group([[1, 2, 3], [4, 5, 6]])
True
>>> is_group({'one': 1, 'two': 2, 'three': 3})
False
>>> is_group(([1, 2, 3], [4, 5, 6]))
True
>>> is_group([1, 2, 3, 4, 5, 6])
False
>>> is_group({'foo': [1, 2, 3], 'bar': [4, 5, 6]})
False
"""
try:
if any(is_iterable(x) for x in seq):
return True
else:
return False
except TypeError:
return False
def is_dict_group(seq):
"""
Checks if a given variable is a dictionary of iterable objects.
Parameters
----------
seq : array-like
The input argument.
Returns
-------
test result : bool
The test result of whether seq is a dictionary of array_like values or not.
>>> is_dict_group([[1, 2, 3], [4, 5, 6]])
False
>>> is_dict_group(([1, 2, 3], [4, 5, 6]))
False
>>> is_dict_group([1, 2, 3, 4, 5, 6])
False
>>> is_dict_group({'foo': [1, 2, 3], 'bar': [4, 5, 6]})
True
"""
try:
if is_group(list(seq.values())):
return True
else:
return False
except (AttributeError, TypeError):
return False
def is_number(obj):
"""
Checks if the given object is a number.
Parameters
----------
obj : Any
The input argument.
Returns
-------
test result : bool
The test result of whether obj can be converted to a number or not.
>>> is_number(3)
True
>>> is_number(1.34)
True
>>> is_number('3')
True
>>> is_number(np.array(3))
True
>>> is_number('a')
False
>>> is_number([1, 2, 3])
False
>>> is_number(None)
False
"""
try:
float(obj)
return True
except (ValueError, TypeError):
return False | /sci_analysis-2.2.1rc0.tar.gz/sci_analysis-2.2.1rc0/sci_analysis/data/data_operations.py | 0.886439 | 0.695222 | data_operations.py | pypi |
from warnings import warn
# Import packages
import pandas as pd
# Import from local
from .data import Data, is_data
from .data_operations import flatten, is_iterable
class NumberOfCategoriesWarning(Warning):
warn_categories = 50
def __str__(self):
return "The number of categories is greater than {} which might make analysis difficult. " \
"If this isn't a mistake, consider subsetting the data first".format(self.warn_categories)
def is_categorical(obj):
"""
Test if the passed array_like argument is a sci_analysis Categorical object.
Parameters
----------
obj : object
The input object.
Returns
-------
test result : bool
The test result of whether obj is a sci_analysis Categorical object.
"""
return isinstance(obj, Categorical)
class Categorical(Data):
"""
The sci_analysis representation of categorical, quantitative or textual data.
"""
def __init__(self, sequence=None, name=None, order=None, dropna=False):
"""Takes an array-like object and converts it to a pandas Categorical object.
Parameters
----------
sequence : array-like or Data or Categorical
The input object.
name : str, optional
The name of the Categorical object.
order : array-like
The order that categories in sequence should appear.
dropna : bool
            Remove all occurrences of numpy NaN.
"""
if sequence is None:
self._values = pd.Series([])
self._order = order
self._name = name
self._summary = pd.DataFrame([], columns=['counts', 'ranks', 'percents', 'categories'])
elif is_data(sequence):
new_name = sequence.name or name
super(Categorical, self).__init__(v=sequence.data, n=new_name)
self._order = sequence.order
self._values = sequence.data
self._name = sequence.name
self._summary = sequence.summary
else:
self._name = name
self._values = pd.Series(sequence)
try:
self._values.astype('category')
except TypeError:
self._values = pd.Series(flatten(sequence))
except ValueError:
self._values = pd.Series([])
# Try to preserve the original dtype of the categories.
try:
if not any(self._values % 1):
self._values = self._values.astype(int)
except TypeError:
pass
self._values = self._values.astype('category')
if order is not None:
if not is_iterable(order):
order = [order]
self._values = self._values.cat.set_categories(order).cat.reorder_categories(order, ordered=True)
if dropna:
self._values = self._values.dropna()
            # If the input supports arithmetic (i.e. is numeric), derive the order from the
            # resulting pandas categories; otherwise fall back to the explicitly passed order.
            try:
                sequence += 1
                self._order = None if self._values.empty else self._values.cat.categories
            except TypeError:
                self._order = order
counts = self._values.value_counts(sort=False, dropna=False, ascending=False)
self._summary = pd.DataFrame({
'counts': counts,
'ranks': counts.rank(method='dense', na_option='bottom', ascending=False).astype('int'),
'percents': (counts / counts.sum() * 100) if not all(counts == 0) else 0.0
})
self._summary['categories'] = self._summary.index.to_series()
if order is not None:
self._summary.sort_index(level=self._order, inplace=True, axis=0, na_position='last')
else:
self._summary.sort_values('ranks', inplace=True)
if not self._summary.empty and len(self.categories) > NumberOfCategoriesWarning.warn_categories:
warn(NumberOfCategoriesWarning())
def is_empty(self):
"""
Overrides the super class's method to also check for length of zero.
Returns
-------
test_result : bool
The result of whether the length of the Vector object is 0 or not.
"""
return self._values.empty
@property
def summary(self):
return self._summary
@property
def counts(self):
return self._summary.counts
@property
def percents(self):
return self._summary.percents
@property
def order(self):
return self._order
@property
def ranks(self):
return self._summary.ranks
@property
def categories(self):
return self._summary.categories
@property
def total(self):
return len(self._values)
@property
def num_of_groups(self):
return len(self._summary) | /sci_analysis-2.2.1rc0.tar.gz/sci_analysis-2.2.1rc0/sci_analysis/data/categorical.py | 0.883995 | 0.547646 | categorical.py | pypi |
class DefaultPreferences(type):
"""The type for Default Preferences that cannot be modified"""
def __setattr__(cls, key, value):
if key == "defaults":
raise AttributeError("Cannot override defaults")
else:
return type.__setattr__(cls, key, value)
def __delattr__(cls, item):
if item == "defaults":
raise AttributeError("Cannot delete defaults")
else:
return type.__delattr__(cls, item)
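# Illustrative intent (not part of the original source): with this metaclass applied,
# `SomeSubclass.defaults = (...)` raises AttributeError("Cannot override defaults").
# Note that the `__metaclass__` attribute used below is the Python 2 mechanism; under
# Python 3 a class would need `class Preferences(object, metaclass=DefaultPreferences)`
# for these hooks to take effect.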
class Preferences(object):
"""The base Preferences class"""
__metaclass__ = DefaultPreferences
def list(self):
print(self.__dict__)
return self.__dict__
def defaults(self):
return tuple(self.__dict__.values())
class GraphPreferences(object):
"""Handles graphing preferences."""
class Plot(object):
boxplot = True
histogram = True
cdf = False
oneway = True
probplot = True
scatter = True
tukey = False
histogram_borders = False
boxplot_borders = False
defaults = (boxplot, histogram, cdf, oneway, probplot, scatter, tukey, histogram_borders, boxplot_borders)
distribution = {'counts': False,
'violin': False,
'boxplot': True,
'fit': False,
'fit_style': 'r--',
'fit_width': '2',
'cdf_style': 'k-',
'distribution': 'norm',
'bins': 20,
'color': 'green'
}
bivariate = {'points': True,
'point_style': 'k.',
'contours': False,
'contour_width': 1.25,
'fit': True,
'fit_style': 'r-',
'fit_width': 1,
'boxplot': True,
'violin': True,
'bins': 20,
'color': 'green'
}
oneway = {'boxplot': True,
'violin': False,
'point_style': '^',
'line_style': '-'
} | /sci_analysis-2.2.1rc0.tar.gz/sci_analysis-2.2.1rc0/sci_analysis/preferences/preferences.py | 0.727395 | 0.156427 | preferences.py | pypi |
import pandas as pd
import os
from sci_annot_eval.common.bounding_box import AbsoluteBoundingBox, RelativeBoundingBox
from . parsers.parserInterface import Parser
from sci_annot_eval import evaluation
def build_id_file_dict(path: str):
result = {}
for file in os.listdir(path):
no_extension = file.split('.')[0]
result[no_extension] = os.path.join(path, file)
return result
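# Illustrative example (hypothetical paths): a folder containing "doc1-0.json" and
# "doc1-1.json" would map to
# {"doc1-0": "<path>/doc1-0.json", "doc1-1": "<path>/doc1-1.json"},
# i.e. page ids keyed to the annotation file that describes them.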
def build_3D_dict(input: dict):
"""
Converts the nested dictionary into an input for pandas' multiIndex.
See https://stackoverflow.com/questions/24988131/nested-dictionary-to-multiindex-dataframe-where-dictionary-keys-are-column-label
"""
return {
(outerKey, innerKey): values
for outerKey, innerDict in input.items()
for innerKey, values in innerDict.items()
}
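# Sketch of the transformation (assumed example values, not from the original source):
# an input like {"doc1-0": {"Figure": (1, 0, 0), "Table": (0, 1, 2)}} becomes
# {("doc1-0", "Figure"): (1, 0, 0), ("doc1-0", "Table"): (0, 1, 2)},
# which pandas can consume directly as a two-level (MultiIndex) mapping.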
def benchmark(
render_summary_parquet_path: str,
gtruth_parser: Parser,
pred_parser: Parser,
gtruth_dir: str,
pred_dir: str,
output_parquet_path: str,
IOU_threshold: float = 0.8
):
result_dict = {}
gtruth_file_dict = build_id_file_dict(gtruth_dir)
pred_file_dict = build_id_file_dict(pred_dir)
render_summ = pd.read_parquet(render_summary_parquet_path)
for row in render_summ.itertuples():
id = row.Index
ground_truth = []
if id in gtruth_file_dict.keys():
ground_truth = gtruth_parser.parse_file_relative(gtruth_file_dict[id])
predictions = []
if id in pred_file_dict.keys():
predictions = pred_parser.parse_file_relative(pred_file_dict[id])
result_dict[id] = evaluation.evaluate(predictions, ground_truth, IOU_threshold)
"""
Produces a DF in this shape:
class class2 ... class_1 ...
metric metric_1 metric_2 ... metric_1 metric_2 metric_3 ...
id
id_1 -1 -2 1 2 2
id_2 -3 -4 3 4 0
"""
result_df = pd.DataFrame.from_dict(result_dict, orient='index').stack()
result_df = pd.DataFrame(result_df.values.tolist(), index=result_df.index, columns=['TP', 'FP', 'FN'])\
.unstack()\
.swaplevel(axis=1)\
.sort_index(axis=1, level=0)\
.rename_axis(index='id', columns=['class', 'metric'])
result_df.to_parquet(output_parquet_path) | /sci_annot_eval-0.0.9-py3-none-any.whl/sci_annot_eval/benchmarking.py | 0.557845 | 0.216094 | benchmarking.py | pypi |
import argparse
from sci_annot_eval.common.bounding_box import AbsoluteBoundingBox, RelativeBoundingBox
from sci_annot_eval.exporters.sci_annot_exporter import SciAnnotExporter
from . helpers import rasterize_pdfs, pdffigures2_page_splitter, deepfigures_prediction
import coloredlogs
import logging
from enum import Enum
from . benchmarking import benchmark
from . parsers import sci_annot_parser, pdffigures2_parser, parserInterface
from sci_annot_eval.common.prediction_field_mapper import Pdffigures2FieldMapper, DeepfiguresFieldMapper
import os
import pandas as pd
# TODO: Type hint values
class RegisteredParsers(Enum):
SCI_ANNOT = sci_annot_parser.SciAnnotParser()
PDF_FIGURES_2 = pdffigures2_parser.PdfFigures2Parser(Pdffigures2FieldMapper)
DEEPFIGURES = pdffigures2_parser.PdfFigures2Parser(DeepfiguresFieldMapper)
def run_benchmark(
render_summary_parquet_path: str,
gtruth_parser_name: str,
pred_parser_name: str,
gtruth_dir: str,
pred_dir: str,
output_parquet_path: str,
IOU_threshold: float = 0.8,
**kwargs
):
gtruth_parser = RegisteredParsers.__getitem__(gtruth_parser_name)
pred_parser = RegisteredParsers.__getitem__(pred_parser_name)
benchmark(
render_summary_parquet_path,
gtruth_parser.value,
pred_parser.value,
gtruth_dir,
pred_dir,
output_parquet_path,
IOU_threshold
)
def run_deepfigures_prediction(
deepfigures_root: str,
input_folder: str,
output_folder: str,
run_summary_csv_path: str,
**kwargs
):
deepfigures_prediction.run_deepfigures_prediction_for_folder(
deepfigures_root,
input_folder,
output_folder,
run_summary_csv_path
)
def run_transpile(
input_dir: str,
input_parser_name: str,
render_summary_parquet_path: str,
output_dir: str,
**kwargs
):
input_parser = RegisteredParsers.__getitem__(input_parser_name).value
exporter = SciAnnotExporter()
if not os.path.exists(output_dir):
logging.debug('Output directory does not exist. Creating it...')
os.mkdir(output_dir)
files = os.listdir(input_dir)
render_summ = pd.read_parquet(render_summary_parquet_path)
for i, file in enumerate(files):
id = file[:-5]
logging.debug(f'Transpiling file {i+1}/{len(files)} with id {id}')
summary_row = render_summ[render_summ.index == id]
relative_input = input_parser.parse_file_relative(os.path.join(input_dir, file))
exporter.export_to_file(
relative_input,
int(summary_row.width.values[0]),
int(summary_row.height.values[0]),
os.path.join(output_dir, file)
)
logging.info(f'Transpiled {len(files)} files')
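# Illustrative invocation (assumes the package is importable as a module; the exact
# console-script name is not shown in this file):
#   python -m sci_annot_eval.cli_entrypoint transpile \
#       -i predictions/ -I PDF_FIGURES_2 \
#       -r render_summary.parquet -o sci_annot_predictions/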
# TODO: Make args consistent
def main():
parser = argparse.ArgumentParser(description='Command line tool for managing the sci_annot evaluator and its helper functions', argument_default=argparse.SUPPRESS)
parser.add_argument('--verbose', '-v', dest='verbose', help='Enable verbose logging (info, debug)', action='count', default=0)
subparsers = parser.add_subparsers()
parser_rasterize = subparsers.add_parser(
'rasterize',
description='Rasterize all pdfs in input folder and additionally produce a summary parquet file called render_summary.parquet in the output folder.',
argument_default=argparse.SUPPRESS
)
parser_rasterize.add_argument('-i', dest='input-dir', metavar='input_folder', help='Input folder containing PDFs.', required=True)
parser_rasterize.add_argument('-o' ,dest='output-dir', metavar='output_folder', help='Output folder to save page rasters.', required=True)
parser_rasterize.add_argument('--dpi', metavar='DPI', help='DPI to render at (default is 150).', type=int)
parser_rasterize.add_argument('-f', dest='format', metavar='format', help='Output format for images (default is png).')
parser_rasterize.add_argument('-t', dest='nr-threads', metavar='threads', help='Number of threads to use when rasterizing (default is 8).')
parser_rasterize.set_defaults(func=rasterize_pdfs.rasterize)
parser_pdffig2 = subparsers.add_parser(
'split-pdffigures2',
description='Take original pdffigures2 output and split it into validator-friendly per-page files.',
argument_default=argparse.SUPPRESS
)
parser_pdffig2.add_argument('-i', dest='input-dir', metavar='input_folder', help='Input folder containing the original predictions.', required=True)
parser_pdffig2.add_argument('-o' ,dest='output-dir', metavar='output_folder', help='Output folder to save per-page predictions.', required=True)
parser_pdffig2.add_argument('-p' ,dest='run-prefix', metavar='prefix', help='Prediction prefix specified with -d when running pdffigures2', required=True)
parser_pdffig2.add_argument('-s' ,dest='render_summary_path', metavar='path', help='Path to render summary parquet file', required=True)
parser_pdffig2.set_defaults(func=pdffigures2_page_splitter.split_pages)
parser_benchmark = subparsers.add_parser(
'benchmark',
description='Evaluate predictions against a ground truth and produce TP, FP, and FN metrics for each page',
argument_default=argparse.SUPPRESS
)
parser_benchmark.add_argument('-g', '--ground-truth-dir', dest='gtruth_dir', metavar='DIR', help='Directory containing files with ground truth annotations. Each should be named like PDF_ID-PAGENR.EXTENSION.', required=True)
parser_benchmark.add_argument('-p', '--predictions-dir', dest='pred_dir', metavar='DIR', help='Directory containing files with prediction annotations. Each should be named like: PDF_ID-PAGENR.EXTENSION.', required=True)
parser_benchmark.add_argument('-G', '--ground-truth-parser', dest='gtruth_parser_name', help='Parser to use for each file in the ground truth directory.', choices=RegisteredParsers.__members__, required=True)
parser_benchmark.add_argument('-P', '--predictions-parser', dest='pred_parser_name', help='Parser to use for each file in the parser directory.', choices=RegisteredParsers.__members__, required=True)
parser_benchmark.add_argument('-r', '--render-summary', dest='render_summary_parquet_path', metavar='PATH', help='Path to render_summary.parquet. This table contains all of the pages to test on.', required=True)
parser_benchmark.add_argument('-o', '--output-path', dest='output_parquet_path', metavar='PATH', help='Tells the tool where to create a parquet file which contains the benchmark output', required=True)
parser_benchmark.add_argument('-t', '--IOU-threshold', dest='IOU_threshold', metavar='THRESHOLD', help='Area under curve threshold over which annotations count as valid (default is 0.8)', type=float)
parser_benchmark.set_defaults(func= run_benchmark)
parser_deepfigures_predict = subparsers.add_parser(
'deepfigures-predict',
description='Use deepfigures to detect elements from each pdf in the input folder',
argument_default=argparse.SUPPRESS
)
parser_deepfigures_predict.add_argument('deepfigures_root', metavar='DIR', help='Folder containing manage.py and all other requirements for deepfigures-open')
parser_deepfigures_predict.add_argument('input_folder', metavar='DIR', help='Folder containing input PDFs')
parser_deepfigures_predict.add_argument('output_folder', metavar='DIR', help='Folder in which predictions should be saved')
parser_deepfigures_predict.add_argument('run_summary_csv_path', metavar='FILE', help='Path to save run information')
parser_deepfigures_predict.set_defaults(func=run_deepfigures_prediction)
parser_transpile = subparsers.add_parser(
'transpile',
description='Take a folder of predictions in one format and output them in another',
argument_default=argparse.SUPPRESS
)
parser_transpile.add_argument('-i', '--input-dir', dest='input_dir', metavar='DIR', help='Directory containing files with prediction annotations. Each should be named like: PDF_ID-PAGENR.EXTENSION.', required=True)
parser_transpile.add_argument('-I', '--input-parser', dest='input_parser_name', help='Parser to use for each file in the input directory.', choices=RegisteredParsers.__members__, required=True)
parser_transpile.add_argument('-o', '--output-dir', dest='output_dir', metavar='PATH', help='Where to create the transpiled files', required=True)
parser_transpile.add_argument('-r', '--render-summary', dest='render_summary_parquet_path', metavar='PATH', help='Path to render_summary.parquet. This is required in order to create right absolute coordinates', required=True)
parser_transpile.set_defaults(func=run_transpile)
args = parser.parse_args()
logging_config = {"fmt":'%(asctime)s %(levelname)s: %(message)s', "level": logging.WARNING}
if(args.verbose == 1):
logging_config['level'] = logging.INFO
elif(args.verbose == 2):
logging_config['level'] = logging.DEBUG
coloredlogs.install(**logging_config)
logging.debug('DEBUG LOGGING ENABLED')
logging.info('INFO LOGGING ENABLED')
if hasattr(args, 'func'):
args.func(**vars(args))
if __name__ == '__main__':
main() | /sci_annot_eval-0.0.9-py3-none-any.whl/sci_annot_eval/cli_entrypoint.py | 0.446977 | 0.207235 | cli_entrypoint.py | pypi |
import cv2 as cv
import numpy as np
from ..common.bounding_box import AbsoluteBoundingBox, RelativeBoundingBox
def delete_multiple_elements(list_object, indices):
indices = sorted(indices, reverse=True)
for idx in indices:
list_object.pop(idx)
def make_absolute(
bbox_list: list[RelativeBoundingBox],
canvas_width: int,
canvas_height: int
) -> list[AbsoluteBoundingBox]:
result_dict: dict[RelativeBoundingBox, AbsoluteBoundingBox] = {}
for box in bbox_list:
if type(box) is not RelativeBoundingBox:
raise TypeError(f'Annotation {box} is not of type RelativeBoundingBox!')
abs_box = AbsoluteBoundingBox(
box.type,
box.x*canvas_width,
box.y*canvas_height,
box.height*canvas_height,
box.width*canvas_width,
box.parent
)
result_dict[box] = abs_box
# Replace old parent references with new ones
for id, annotation in result_dict.items():
if annotation.parent:
annotation.parent = result_dict[annotation.parent]
return list(result_dict.values())
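# Illustrative conversion (assumed values, not from the original source): a
# RelativeBoundingBox with x=0.1, y=0.2, width=0.5, height=0.25 on a 1000x800 page
# becomes an AbsoluteBoundingBox with x=100, y=160, width=500, height=200;
# make_relative below performs the inverse mapping.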
def make_relative(
bbox_list: list[AbsoluteBoundingBox],
canvas_width: int,
canvas_height: int
) -> list[RelativeBoundingBox]:
result_dict: dict[AbsoluteBoundingBox, RelativeBoundingBox] = {}
for box in bbox_list:
if type(box) is not AbsoluteBoundingBox:
raise TypeError(f'Annotation {box} is not of type AbsoluteBoundingBox!')
abs_box = RelativeBoundingBox(
box.type,
box.x/float(canvas_width),
box.y/float(canvas_height),
box.height/float(canvas_height),
box.width/float(canvas_width),
box.parent
)
result_dict[box] = abs_box
# Replace old parent references with new ones
for id, annotation in result_dict.items():
if annotation.parent:
annotation.parent = result_dict[annotation.parent]
return list(result_dict.values())
# TODO: Add float32 support!
def crop_to_content(
img: np.ndarray,
orig_coords: AbsoluteBoundingBox,
threshold: int= 248
) -> tuple[float, float, float, float]:
ox = int(orig_coords.x)
oy = int(orig_coords.y)
ow = int(orig_coords.width)
oh = int(orig_coords.height)
selected_slice = img[oy:oy+oh+1, ox:ox+ow+1]
is_color = len(img.shape) == 3 and img.shape[2] == 3
if is_color:
gray = cv.cvtColor(selected_slice, cv.COLOR_BGR2GRAY)
else:
gray = selected_slice
gray = 255 * (gray < threshold).astype(np.uint8)
coords = cv.findNonZero(gray) # Find all non-zero points (text)
x, y, w, h = cv.boundingRect(coords) # Find minimum spanning bounding box
return (ox+x, oy+y, w, h)
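# Illustrative sketch (hypothetical input): given a white page raster where a figure
# region only contains dark pixels between x=120..300 and y=50..200, crop_to_content
# called with a looser box around it returns roughly (120, 50, 181, 151) -- the
# tightest rectangle around pixels darker than the threshold, in page coordinates.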
def crop_all_to_content(
image: bytes,
orig_annots: list[AbsoluteBoundingBox],
threshold: int= 248
) -> list[AbsoluteBoundingBox]:
"""Takes a page as a bytes object and crops the whitespace out of the provided annotations.
Args:
        image (bytes): The rendered page as an encoded image buffer (e.g. PNG bytes).
        orig_annots (list[AbsoluteBoundingBox]): The absolute bounding boxes to tighten around their content.
        threshold (int, optional): Grayscale value above which pixels count as background. Defaults to 248.
    Returns:
        list[AbsoluteBoundingBox]: The cropped boxes, with parent references remapped to the new objects.
"""
image_as_np = np.frombuffer(image, dtype=np.uint8)
img = cv.imdecode(image_as_np, cv.IMREAD_COLOR)
result_dict = {}
for annot in orig_annots:
x, y, w, h = crop_to_content(img, annot, threshold)
cropped = AbsoluteBoundingBox(
annot.type,
x,
y,
h,
w,
annot.parent
)
result_dict[annot] = cropped
# Replace old parent references with new ones
for id, annotation in result_dict.items():
if annotation.parent:
annotation.parent = result_dict[annotation.parent]
return list(result_dict.values()) | /sci_annot_eval-0.0.9-py3-none-any.whl/sci_annot_eval/helpers/helpers.py | 0.639624 | 0.354629 | helpers.py | pypi |
from sci_annot_eval.common.sci_annot_annotation import Annotation, SciAnnotOutput
from ..common.bounding_box import AbsoluteBoundingBox, RelativeBoundingBox
from . exporterInterface import Exporter
import json
from typing import TypedDict, Any
class SciAnnotExporter(Exporter):
def export_to_dict(self, input: list[RelativeBoundingBox], canvas_width: int, canvas_height: int, **kwargs) -> SciAnnotOutput:
result: SciAnnotOutput = {
'canvasHeight': canvas_height,
'canvasWidth': canvas_width,
'annotations': []
}
source = kwargs['source'] if 'source' in kwargs.keys() else 'Unknown'
for annotation in input:
if type(annotation) is not RelativeBoundingBox:
raise TypeError(f'Annotation {annotation} is not of type RelativeBoundingBox!')
absolute_x = annotation.x * canvas_width
absolute_y = annotation.y * canvas_height
absolute_height = annotation.height * canvas_height
absolute_width = annotation.width * canvas_width
generated_anno: Annotation = {
"type": "Annotation",
"body": [
{
"type": "TextualBody",
"purpose": "img-cap-enum",
"value": f"{annotation.type}"
}
],
"target": {
"source": source,
"selector": {
"type": "FragmentSelector",
"conformsTo": "http://www.w3.org/TR/media-frags/",
"value": f"xywh=pixel:{absolute_x},{absolute_y},{absolute_width},{absolute_height}"
}
},
"@context": "http://www.w3.org/ns/anno.jsonld",
"id": f"#{hash(annotation)}"
}
if(annotation.parent):
generated_anno['body'].append({
"type": "TextualBody",
"purpose": "parent",
"value": f"#{hash(annotation.parent)}"
})
result['annotations'].append(generated_anno)
return result
def export_to_str(self, input: list[RelativeBoundingBox], canvas_width: int, canvas_height: int, **kwargs) -> str:
res = self.export_to_dict(input, canvas_width, canvas_height, **kwargs)
return json.dumps(res, indent=4)
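    # Illustrative usage (assumed values; field order follows the
    # (type, x, y, height, width, parent) constructor used elsewhere in this package):
    #   exporter = SciAnnotExporter()
    #   json_str = exporter.export_to_str(
    #       [RelativeBoundingBox('Figure', 0.1, 0.2, 0.3, 0.4, None)],
    #       canvas_width=1000, canvas_height=800, source='page-0.png'
    #   )
    # yields one W3C-style annotation whose FragmentSelector value is
    # "xywh=pixel:100.0,160.0,400.0,240.0" (width before height).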
def export_to_file(
self,
input: list[RelativeBoundingBox],
canvas_width: int,
canvas_height: int,
file_location: str,
**kwargs
):
res = self.export_to_str(input, canvas_width, canvas_height, **kwargs)
with open(file_location, 'w') as f:
f.write(res) | /sci_annot_eval-0.0.9-py3-none-any.whl/sci_annot_eval/exporters/sci_annot_exporter.py | 0.767777 | 0.270817 | sci_annot_exporter.py | pypi |
from . parserInterface import Parser
from sci_annot_eval.common.bounding_box import AbsoluteBoundingBox, BoundingBox, RelativeBoundingBox, TargetType
from sci_annot_eval.common.prediction_field_mapper import PredictionFieldMapper
from .. helpers import helpers
import json
from typing import Any, Type
class PdfFigures2Parser(Parser):
"""This parser works for both Pdffigures2 and Deepfigures
"""
def __init__(self, field_mapper: Type[PredictionFieldMapper]):
self.field_mapper = field_mapper
def extract_x12y12(self, boundaries: dict[str, float]) -> tuple[float, float, float, float]:
x = boundaries['x1']
y = boundaries['y1']
x2 = boundaries['x2']
y2 = boundaries['y2']
w = x2 - x
h = y2 - y
return x, y, w, h
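    # Illustrative conversion (assumed values): a boundary dict like
    # {"x1": 10.0, "y1": 20.0, "x2": 110.0, "y2": 70.0} is returned as
    # (10.0, 20.0, 100.0, 50.0), i.e. the x1/y1 corner plus width and height.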
def parse_dict_absolute(self, input: dict[str, Any]) -> list[AbsoluteBoundingBox]:
result: list[AbsoluteBoundingBox] = []
figures = input['figures']
for figure in figures:
fig_x, fig_y, fig_w, fig_h = self.extract_x12y12(figure[self.field_mapper.region_boundary])
fig_type = figure[self.field_mapper.figure_type]
fig_bbox = AbsoluteBoundingBox(fig_type, fig_x, fig_y, fig_h, fig_w, None)
result.append(fig_bbox)
if(self.field_mapper.caption_boundary in figure.keys()):
cap_x, cap_y, cap_w, cap_h = self.extract_x12y12(figure[self.field_mapper.caption_boundary])
result.append(AbsoluteBoundingBox(
TargetType.CAPTION.value, cap_x, cap_y, cap_h, cap_w, fig_bbox
))
regionless_captions = []
if 'regionless-captions' in input.keys():
regionless_captions = input['regionless-captions']
for r_caption in regionless_captions:
r_cap_x, r_cap_y, r_cap_w, r_cap_h = self.extract_x12y12(r_caption['boundary'])
result.append(AbsoluteBoundingBox(
TargetType.CAPTION.value, r_cap_x, r_cap_y, r_cap_h, r_cap_w, None
))
return result
def parse_dict_relative(self, input: dict[str, Any]) -> list[RelativeBoundingBox]:
return helpers.make_relative(self.parse_dict_absolute(input), int(input['width']), int(input['height']))
def parse_text_absolute(self, input: str) -> list[AbsoluteBoundingBox]:
return self.parse_dict_absolute(json.loads(input))
def parse_text_relative(self, input: str) -> list[RelativeBoundingBox]:
return self.parse_dict_relative(json.loads(input))
def parse_file_absolute(self, path: str) -> list[AbsoluteBoundingBox]:
with open(path, 'r') as fd:
return self.parse_dict_absolute(json.load(fd))
def parse_file_relative(self, path: str) -> list[RelativeBoundingBox]:
with open(path, 'r') as fd:
return self.parse_dict_relative(json.load(fd)) | /sci_annot_eval-0.0.9-py3-none-any.whl/sci_annot_eval/parsers/pdffigures2_parser.py | 0.714927 | 0.430447 | pdffigures2_parser.py | pypi |
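# --- Editor-added usage sketch for PdfFigures2Parser above; not part of the package source.
# PredictionFieldMapper subclasses are assumed to expose region_boundary, caption_boundary and
# figure_type attributes naming the keys used in the pdffigures2/deepfigures JSON output; the
# package likely ships concrete mappers, so the class below is only an illustrative stand-in.
from sci_annot_eval.common.prediction_field_mapper import PredictionFieldMapper
from sci_annot_eval.parsers.pdffigures2_parser import PdfFigures2Parser

class DemoFieldMapper(PredictionFieldMapper):
    region_boundary = 'regionBoundary'    # assumed JSON key names
    caption_boundary = 'captionBoundary'
    figure_type = 'figType'

parser = PdfFigures2Parser(DemoFieldMapper)
boxes = parser.parse_file_absolute('prediction.json')  # -> list of AbsoluteBoundingBox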
from . parserInterface import Parser
from .. common.bounding_box import AbsoluteBoundingBox, BoundingBox, RelativeBoundingBox, TargetType
from ..common.sci_annot_annotation import Annotation, SciAnnotOutput
from .. helpers import helpers
import re
import json
from typing import Any, Optional
from typing import Mapping
class SciAnnotParser(Parser):
location_regex= re.compile(r'\d+(?:\.\d+)?')
child_types = [TargetType.CAPTION]
def get_annotation_type(self, annot: Annotation)-> TargetType:
for block in annot['body']:
if block['purpose'] == 'img-cap-enum':
return TargetType(block['value'])
raise ValueError(f'Annotation has no type: {annot}')
def get_annotation_parent_id(self, annot: Annotation)-> Optional[str] :
for block in annot['body']:
if block['purpose'] == 'parent':
return block['value']
return None
def parse_location_string(self, annot: Annotation)-> tuple[float, float, float, float]:
loc = annot['target']['selector']['value']
parsed_loc = self.location_regex.findall(loc)
if (len(parsed_loc) != 4):
raise ValueError(f'Location string couldn\'t be parsed: {loc}')
# Python's typing is not so clever yet...
return (float(parsed_loc[0]), float(parsed_loc[1]), float(parsed_loc[2]), float(parsed_loc[3]))
def parse_dict_absolute(self, input: Mapping) -> list[AbsoluteBoundingBox]:
result: dict[str, AbsoluteBoundingBox] = {}
for annotation in input['annotations']:
id = annotation['id']
ann_type = self.get_annotation_type(annotation)
x, y, width, height = self.parse_location_string(annotation)
parent_id = None
if ann_type in self.child_types:
parent_id = self.get_annotation_parent_id(annotation)
result[id] = AbsoluteBoundingBox(
ann_type.value,
x,
y,
height,
width,
parent_id,
)
for id, annotation in result.items():
if annotation.parent:
annotation.parent = result[annotation.parent]
res_list = list(result.values())
return res_list
def parse_dict_relative(self, input: Mapping[str, Any]) -> list[RelativeBoundingBox]:
canvas_height = int(input['canvasHeight'])
canvas_width = int(input['canvasWidth'])
return helpers.make_relative(self.parse_dict_absolute(input), canvas_width, canvas_height)
def parse_text_absolute(self, input: str) -> list[AbsoluteBoundingBox]:
return self.parse_dict_absolute(json.loads(input))
def parse_text_relative(self, input: str) -> list[RelativeBoundingBox]:
return self.parse_dict_relative(json.loads(input))
def parse_file_absolute(self, path: str) -> list[AbsoluteBoundingBox]:
with open(path, 'r') as fd:
return self.parse_dict_absolute(json.load(fd))
def parse_file_relative(self, path: str) -> list[RelativeBoundingBox]:
with open(path, 'r') as fd:
return self.parse_dict_relative(json.load(fd)) | /sci_annot_eval-0.0.9-py3-none-any.whl/sci_annot_eval/parsers/sci_annot_parser.py | 0.81721 | 0.308359 | sci_annot_parser.py | pypi |
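# --- Editor-added usage sketch for SciAnnotParser above; not part of the package source.
# Reads a sci-annot JSON file (canvasWidth/canvasHeight plus W3C-style annotations, as written
# by SciAnnotExporter) back into relative bounding boxes.
from sci_annot_eval.parsers.sci_annot_parser import SciAnnotParser

parser = SciAnnotParser()
for box in parser.parse_file_relative('annotations.json'):
    print(box.type, box.x, box.y, box.width, box.height, box.parent)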
import datetime
from sci_api_req import config
from ..api_provider import ApiProvider
class DONKIProvider(ApiProvider):
"""
The Space Weather Database Of Notifications, Knowledge, Information (DONKI) is
a comprehensive on-line tool for space weather forecasters, scientists, and the
    general space science community. DONKI chronicles the daily interpretations of
    space weather observations, analysis, models, forecasts, and notifications
    provided by the Space Weather Research Center (SWRC), and provides comprehensive
    knowledge-base search functionality to support anomaly resolution and space
    science research, intelligent linkages, relationships and cause-and-effects
    between space weather activities, and comprehensive webservice API access to the
    information stored in DONKI.
    For more information see: https://api.nasa.gov/api.html#DONKI. Requires a NASA API key.
"""
def __init__(self):
        super().__init__()
self._api_url = "https://api.nasa.gov/DONKI/"
@property
def api_key(self) -> str:
return config.get_api_keys('NASA')
def coronal_mass_ejection(self, start_date=datetime.date.today() - datetime.timedelta(30),
end_date=datetime.date.today()):
@self._get_request(
'CME',
startDate=start_date,
endDate=end_date)
def inner(response):
return response
return inner
def coronal_mass_ejection_analysis(
self, start_date=datetime.date.today() - datetime.timedelta(30),
end_date=datetime.date.today(), most_accurate_only=True,
complete_entry_only=True, speed=0, halfAngle=0, catalog="ALL",
keyword="NONE") -> dict:
@self._get_request(
'CMEAnalysis',
startDate=start_date,
endDate=end_date,
mostAccurateOnly=most_accurate_only,
completeEntryOnly=complete_entry_only,
speed=speed, halfAngle=halfAngle,
catalog=catalog,
keyword=keyword)
def inner(response):
return response
return inner
def geomagnetic_storm(self, start_date=datetime.date.today() - datetime.timedelta(30),
end_date=datetime.date.today()):
@self._get_request('GST', startDate=start_date, endDate=end_date)
def inner(response):
return response
return inner
def interplanetary_shock(self, start_date=datetime.date.today() - datetime.timedelta(30),
end_date=datetime.date.today(), location="ALL", catalog="ALL"):
@self._get_request('IPS', startDate=start_date, endDate=end_date, location=location,
catalog=catalog)
def inner(response):
return response
return inner
def solar_flare(self, start_date=datetime.date.today() - datetime.timedelta(30),
end_date=datetime.date.today()):
@self._get_request('FLR', startDate=start_date, endDate=end_date)
def inner(response):
return response
return inner
def solar_energetic_particle(self, start_date=datetime.date.today() - datetime.timedelta(30),
end_date=datetime.date.today()):
@self._get_request('SEP', startDate=start_date, endDate=end_date)
def inner(response):
return response
return inner
def magnetopause_crossing(self, start_date=datetime.date.today() - datetime.timedelta(30),
end_date=datetime.date.today()):
@self._get_request('MPC', startDate=start_date, endDate=end_date)
def inner(response):
return response
return inner
def radiation_belt_enhancment(self, start_date=datetime.date.today() - datetime.timedelta(30),
end_date=datetime.date.today()):
@self._get_request('RBE', startDate=start_date, endDate=end_date)
def inner(response):
return response
return inner
def hight_speed_stream(self, start_date=datetime.date.today() - datetime.timedelta(30),
end_date=datetime.date.today()):
@self._get_request('HSS', startDate=start_date, endDate=end_date)
def inner(response):
return response
return inner
def wsa_enlil_simulation(self, start_date=datetime.date.today() - datetime.timedelta(30),
end_date=datetime.date.today()):
@self._get_request('EnlilSimulations', startDate=start_date, endDate=end_date)
def inner(response):
return response
return inner
def notifications(self, start_date: datetime.date, end_date: datetime.date, type="all"):
@self._get_request('notifications', startDate=start_date, endDate=end_date, type=type)
def inner(response):
return response
return inner | /sci_api_req-0.1.1-py3-none-any.whl/sci_api_req/providers/NASA/donki_provider.py | 0.659405 | 0.338651 | donki_provider.py | pypi |
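# --- Editor-added usage sketch for DONKIProvider above; not part of the package source.
# A NASA API key must first be registered with sci_api_req.config; only the getter
# (config.get_api_keys('NASA')) is visible in this file, so the registration call is omitted.
import datetime
from sci_api_req.providers.NASA.donki_provider import DONKIProvider

donki = DONKIProvider()
storms = donki.geomagnetic_storm(start_date=datetime.date(2023, 1, 1),
                                 end_date=datetime.date(2023, 1, 31))
flares = donki.solar_flare()  # defaults to the last 30 days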
from ..api_provider import ApiProvider
from sci_api_req import config
import datetime
class NeoWsProvider(ApiProvider):
"""
You can use NeoWs(Near Earth Object Web Service) to search for Asteroids based on
their closest approach date to Earth, lookup a specific Asteroid with its NASA JPL
small body id, as well as browse the overall data-set. Requires Nasa Key. For more
information check that https://api.nasa.gov/api.html#NeoWS
"""
def __init__(self):
        super().__init__()
self._api_url = "https://api.nasa.gov/neo/rest/v1/"
@property
def api_key(self) -> str:
return config.get_api_keys('NASA')
"""Retrieve a list of Asteroids based on their closest approach date to Earth."""
def feed(self, start_date: datetime.date, end_date: datetime.date, detailed=True) -> dict:
@self._get_request('feed',
start_date=start_date,
end_date=end_date,
detailed=detailed)
def inner(response):
return response
return inner
"""Find Near Earth Objects for today"""
    def feed_today(self, detailed=True) -> dict:
@self._get_request('feed/today', detailed=detailed)
def inner(response):
return response
return inner
"""Lookup a specific Asteroid based on its NASA JPL small body (SPK-ID) ID"""
def lookup(self, id) -> dict:
@self._get_request('neo/{}'.format(id))
def inner(response):
return response
return inner
"""Browse the overall Asteroid data-set"""
def browse(self) -> dict:
@self._get_request('neo/browse')
def inner(response):
return response
return inner
"""Retrieve Sentry (Impact Risk) Near Earth Objects"""
def sentry(self, is_active=True, page=0, size=50) -> dict:
@self._get_request('neo/sentry', is_active=str(is_active), page=str(page), size=str(size))
def inner(response):
return response
return inner
"""Retrieve Sentry (Impact Risk) Near Earth Objectby ID"""
def sentry_by_id(self, id) -> dict:
@self._get_request('neo/sentry/{}'.format(id))
def inner(response):
return response
return inner
"""Get the Near Earth Object data set totals"""
def stats(self) -> dict:
@self._get_request('stats')
def inner(response):
return response
return inner | /sci_api_req-0.1.1-py3-none-any.whl/sci_api_req/providers/NASA/neows_provider.py | 0.846578 | 0.35855 | neows_provider.py | pypi |
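# --- Editor-added usage sketch for NeoWsProvider above; not part of the package source.
# As with DONKIProvider, a NASA API key is assumed to have been registered with sci_api_req.config.
from sci_api_req.providers.NASA.neows_provider import NeoWsProvider

neo = NeoWsProvider()
asteroid = neo.lookup(3542519)           # lookup by NASA JPL small-body (SPK) ID
todays_objects = neo.feed_today(detailed=False)
totals = neo.stats()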
import logging
from pathlib import Path
import subprocess
import warnings
from typing import Dict, List, Optional, Tuple, Union
from fab.util import string_checksum
logger = logging.getLogger(__name__)
class Compiler(object):
"""
A command-line compiler whose flags we wish to manage.
"""
def __init__(self, exe, compile_flag, module_folder_flag):
self.exe = exe
self.compile_flag = compile_flag
self.module_folder_flag = module_folder_flag
# We should probably extend this for fPIC, two-stage and optimisation levels.
COMPILERS: Dict[str, Compiler] = {
'gfortran': Compiler(exe='gfortran', compile_flag='-c', module_folder_flag='-J'),
'ifort': Compiler(exe='ifort', compile_flag='-c', module_folder_flag='-module'),
}
# todo: We're not sure we actually want to do modify incoming flags. Discuss...
# todo: this is compiler specific, rename - and do we want similar functions for other steps?
def remove_managed_flags(compiler, flags_in):
"""
Remove flags which Fab manages.
Fab prefers to specify a few compiler flags itself.
For example, Fab wants to place module files in the `build_output` folder.
The flag to do this differs with compiler.
We don't want duplicate, possibly conflicting flags in our tool invocation so this function is used
to remove any flags which Fab wants to manage.
If the compiler is not known to Fab, we rely on the user to specify these flags in their config.
.. note::
This approach is due for discussion. It might not be desirable to modify user flags at all.
"""
def remove_flag(flags: List[str], flag: str, len):
while flag in flags:
warnings.warn(f'removing managed flag {flag} for compiler {compiler}')
flag_index = flags.index(flag)
for _ in range(len):
flags.pop(flag_index)
known_compiler = COMPILERS.get(compiler)
if not known_compiler:
logger.warning('Unable to remove managed flags for unknown compiler. User config must specify managed flags.')
return flags_in
flags_out = [*flags_in]
remove_flag(flags_out, known_compiler.compile_flag, 1)
remove_flag(flags_out, known_compiler.module_folder_flag, 2)
return flags_out
def flags_checksum(flags: List[str]):
"""
Return a checksum of the flags.
"""
return string_checksum(str(flags))
def run_command(command: List[str], env=None, cwd: Optional[Union[Path, str]] = None, capture_output=True):
"""
Run a CLI command.
:param command:
List of strings to be sent to :func:`subprocess.run` as the command.
:param env:
Optional env for the command. By default it will use the current session's environment.
:param capture_output:
If True, capture and return stdout. If False, the command will print its output directly to the console.
"""
command = list(map(str, command))
logger.debug(f'run_command: {" ".join(command)}')
res = subprocess.run(command, capture_output=capture_output, env=env, cwd=cwd)
if res.returncode != 0:
msg = f'Command failed with return code {res.returncode}:\n{command}'
if res.stdout:
msg += f'\n{res.stdout.decode()}'
if res.stderr:
msg += f'\n{res.stderr.decode()}'
raise RuntimeError(msg)
if capture_output:
return res.stdout.decode()
def get_tool(tool_str: Optional[str] = None) -> Tuple[str, List[str]]:
"""
Get the compiler, preprocessor, etc, from the given string.
Separate the tool and flags for the sort of value we see in environment variables, e.g. `gfortran -c`.
Returns the tool and a list of flags.
    :param tool_str:
        The string containing the tool and optional flags, e.g. the value of an environment variable.
"""
tool_str = tool_str or ''
tool_split = tool_str.split()
if not tool_split:
raise ValueError(f"Tool not specified in '{tool_str}'. Cannot continue.")
return tool_split[0], tool_split[1:]
# todo: add more compilers and test with more versions of compilers
def get_compiler_version(compiler: str) -> str:
"""
Try to get the version of the given compiler.
Expects a version in a certain part of the --version output,
which must adhere to the n.n.n format, with at least 2 parts.
Returns a version string, e.g '6.10.1', or empty string.
:param compiler:
The command line tool for which we want a version.
"""
try:
res = run_command([compiler, '--version'])
except FileNotFoundError:
raise ValueError(f'Compiler not found: {compiler}')
except RuntimeError as err:
logger.warning(f"Error asking for version of compiler '{compiler}': {err}")
return ''
# Pull the version string from the command output.
# All the versions of gfortran and ifort we've tried follow the same pattern, it's after a ")".
try:
version = res.split(')')[1].split()[0]
except IndexError:
logger.warning(f"Unexpected version response from compiler '{compiler}': {res}")
return ''
# expect major.minor[.patch, ...]
# validate - this may be overkill
split = version.split('.')
if len(split) < 2:
logger.warning(f"unhandled compiler version format for compiler '{compiler}' is not <n.n[.n, ...]>: {version}")
return ''
# todo: do we care if the parts are integers? Not all will be, but perhaps major and minor?
logger.info(f'Found compiler version for {compiler} = {version}')
return version | /sci_fab-1.0-py3-none-any.whl/fab/tools.py | 0.582254 | 0.257199 | tools.py | pypi |
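# --- Editor-added usage sketch for the helpers above; not part of the fab source.
from fab.tools import get_compiler_version, get_tool, remove_managed_flags, run_command

fc, fc_flags = get_tool('gfortran -c -O2')     # -> ('gfortran', ['-c', '-O2'])
fc_flags = remove_managed_flags(fc, fc_flags)  # drops '-c' (and '-J <dir>') for known compilers
version = get_compiler_version(fc)             # e.g. '12.2.0', or '' if it cannot be parsed
print(run_command(['echo', fc, version]))      # run_command returns captured stdout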
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Iterable, Union, Dict, List
from fab.constants import BUILD_TREES
from fab.dep_tree import filter_source_tree, AnalysedDependent
from fab.util import suffix_filter
class ArtefactsGetter(ABC):
"""
Abstract base class for artefact getters.
"""
@abstractmethod
def __call__(self, artefact_store):
"""
:param artefact_store:
The artefact store from which to retrieve.
"""
pass
class CollectionGetter(ArtefactsGetter):
"""
A simple artefact getter which returns one :term:`Artefact Collection` from the artefact_store.
Example::
`CollectionGetter('preprocessed_fortran')`
"""
def __init__(self, collection_name):
"""
:param collection_name:
The name of the artefact collection to retrieve.
"""
self.collection_name = collection_name
def __call__(self, artefact_store):
super().__call__(artefact_store)
return artefact_store.get(self.collection_name, [])
class CollectionConcat(ArtefactsGetter):
"""
Returns a concatenated list from multiple :term:`Artefact Collections <Artefact Collection>`
(each expected to be an iterable).
An :class:`~fab.artefacts.ArtefactsGetter` can be provided instead of a collection_name.
Example::
# The default source code getter for the Analyse step might look like this.
DEFAULT_SOURCE_GETTER = CollectionConcat([
'preprocessed_c',
'preprocessed_fortran',
SuffixFilter('all_source', '.f90'),
])
"""
def __init__(self, collections: Iterable[Union[str, ArtefactsGetter]]):
"""
:param collections:
An iterable containing collection names (strings) or other ArtefactsGetters.
"""
self.collections = collections
# todo: ensure the labelled values are iterables
def __call__(self, artefact_store: Dict):
super().__call__(artefact_store)
# todo: this should be a set, in case a file appears in multiple collections
result = []
for collection in self.collections:
if isinstance(collection, str):
result.extend(artefact_store.get(collection, []))
elif isinstance(collection, ArtefactsGetter):
result.extend(collection(artefact_store))
return result
class SuffixFilter(ArtefactsGetter):
"""
Returns the file paths in a :term:`Artefact Collection` (expected to be an iterable),
filtered by suffix.
Example::
# The default source getter for the FortranPreProcessor step.
DEFAULT_SOURCE = SuffixFilter('all_source', '.F90')
"""
def __init__(self, collection_name: str, suffix: Union[str, List[str]]):
"""
:param collection_name:
The name of the artefact collection.
:param suffix:
A suffix string including the dot, or iterable of.
"""
self.collection_name = collection_name
self.suffixes = [suffix] if isinstance(suffix, str) else suffix
def __call__(self, artefact_store):
super().__call__(artefact_store)
# todo: returning an empty list is probably "dishonest" if the collection doesn't exist - return None instead?
fpaths: Iterable[Path] = artefact_store.get(self.collection_name, [])
return suffix_filter(fpaths, self.suffixes)
class FilterBuildTrees(ArtefactsGetter):
"""
Filter build trees by suffix.
Returns one list of files to compile per build tree, of the form Dict[name, List[AnalysedDependent]]
Example::
# The default source getter for the CompileFortran step.
DEFAULT_SOURCE_GETTER = FilterBuildTrees(suffix='.f90')
"""
def __init__(self, suffix: Union[str, List[str]], collection_name: str = BUILD_TREES):
"""
:param suffix:
A suffix string, or iterable of, including the preceding dot.
:param collection_name:
The name of the artefact collection where we find the source trees.
Defaults to the value in :py:const:`fab.constants.BUILD_TREES`.
"""
self.collection_name = collection_name
self.suffixes = [suffix] if isinstance(suffix, str) else suffix
def __call__(self, artefact_store):
super().__call__(artefact_store)
build_trees = artefact_store[self.collection_name]
build_lists: Dict[str, List[AnalysedDependent]] = {}
for root, tree in build_trees.items():
build_lists[root] = filter_source_tree(source_tree=tree, suffixes=self.suffixes)
return build_lists | /sci_fab-1.0-py3-none-any.whl/fab/artefacts.py | 0.793706 | 0.346403 | artefacts.py | pypi |
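# --- Editor-added usage sketch for the ArtefactsGetter helpers above; not part of the fab source.
# The artefact store is the plain dict that fab steps read from and write to.
from pathlib import Path
from fab.artefacts import CollectionConcat, SuffixFilter

artefact_store = {
    'all_source': [Path('a.F90'), Path('b.f90'), Path('c.c')],
    'preprocessed_fortran': [Path('a.f90')],
}
getter = CollectionConcat(['preprocessed_fortran', SuffixFilter('all_source', '.c')])
print(getter(artefact_store))  # expected: the paths for a.f90 and c.c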
from argparse import ArgumentParser
from pathlib import Path
from typing import Dict, Optional
from fab.steps.analyse import analyse
from fab.steps.c_pragma_injector import c_pragma_injector
from fab.steps.compile_c import compile_c
from fab.steps.link import link_exe
from fab.steps.root_inc_files import root_inc_files
import fab
from fab.artefacts import CollectionGetter
from fab.build_config import BuildConfig
from fab.constants import PRAGMAD_C
from fab.steps.compile_fortran import compile_fortran, get_fortran_compiler
from fab.steps.find_source_files import find_source_files
from fab.steps.grab.folder import grab_folder
from fab.steps.preprocess import preprocess_c, preprocess_fortran
def _generic_build_config(folder: Path, kwargs: Optional[Dict] = None) -> BuildConfig:
folder = folder.resolve()
kwargs = kwargs or {}
# Within the fab workspace, we'll create a project workspace.
# Ideally we'd just use folder.name, but to avoid clashes, we'll use the full absolute path.
label = '/'.join(folder.parts[1:])
linker, linker_flags = calc_linker_flags()
with BuildConfig(project_label=label, **kwargs) as config:
        grab_folder(config, folder)
        find_source_files(config)
        root_inc_files(config)  # JULES helper, get rid of this eventually
        preprocess_fortran(config)
        c_pragma_injector(config)
        preprocess_c(config, source=CollectionGetter(PRAGMAD_C))
        analyse(config, find_programs=True)
        compile_fortran(config)
        compile_c(config)
        link_exe(config, linker=linker, flags=linker_flags)
return config
def calc_linker_flags():
fc, _ = get_fortran_compiler()
# linker and flags depend on compiler
linkers = {
'gfortran': ('gcc', ['-lgfortran']),
# todo: test this and get it running
# 'ifort': (..., [...])
}
try:
linker, linker_flags = linkers[fc]
except KeyError:
raise NotImplementedError(f"Fab's zero configuration mode does not yet work with compiler '{fc}'")
return linker, linker_flags
def cli_fab():
"""
Running Fab from the command line will attempt to build the project in the current or given folder.
"""
# todo: use common_arg_parser()?
arg_parser = ArgumentParser()
arg_parser.add_argument('folder', nargs='?', default='.', type=Path)
arg_parser.add_argument('--version', action='version', version=f'%(prog)s {fab.__version__}')
args = arg_parser.parse_args()
_generic_build_config(args.folder) | /sci_fab-1.0-py3-none-any.whl/fab/cli.py | 0.540196 | 0.239905 | cli.py | pypi |
import logging
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Union, Tuple
from fparser.common.readfortran import FortranFileReader # type: ignore
from fparser.two.parser import ParserFactory # type: ignore
from fparser.two.utils import FortranSyntaxError # type: ignore
from fab import FabException
from fab.dep_tree import AnalysedDependent
from fab.parse import EmptySourceFile
from fab.util import log_or_dot, file_checksum
logger = logging.getLogger(__name__)
def iter_content(obj):
"""
Return a generator which yields every node in the tree.
"""
yield obj
if hasattr(obj, "content"):
for child in _iter_content(obj.content):
yield child
def _iter_content(content):
for obj in content:
yield obj
if hasattr(obj, "content"):
for child in _iter_content(obj.content):
yield child
def _has_ancestor_type(obj, obj_type):
# Recursively check if an object has an ancestor of the given type.
if not obj.parent:
return False
if type(obj.parent) == obj_type:
return True
return _has_ancestor_type(obj.parent, obj_type)
def _typed_child(parent, child_type, must_exist=False):
# Look for a child of a certain type.
# Returns the child or None.
# Raises ValueError if more than one child of the given type is found.
children = list(filter(lambda child: type(child) == child_type, parent.children))
if len(children) > 1:
raise ValueError(f"too many children found of type {child_type}")
if children:
return children[0]
if must_exist:
raise FabException(f'Could not find child of type {child_type} in {parent}')
return None
class FortranAnalyserBase(ABC):
"""
Base class for Fortran parse-tree analysers, e.g FortranAnalyser and X90Analyser.
"""
_intrinsic_modules = ['iso_fortran_env', 'iso_c_binding']
def __init__(self, result_class, std=None):
"""
:param result_class:
The type (class) of the analysis result. Defined by the subclass.
:param std:
The Fortran standard.
"""
self.result_class = result_class
self.f2008_parser = ParserFactory().create(std=std or "f2008")
# todo: this, and perhaps other runtime variables like it, might be better set at construction
# if we construct these objects at runtime instead...
# runtime, for child processes to read
self._config = None
def run(self, fpath: Path) \
-> Union[Tuple[AnalysedDependent, Path], Tuple[EmptySourceFile, None], Tuple[Exception, None]]:
"""
Parse the source file and record what we're interested in (subclass specific).
Reloads previous analysis results if available.
Returns the analysis data and the result file where it was stored/loaded.
"""
# calculate the prebuild filename
file_hash = file_checksum(fpath).file_hash
analysis_fpath = self._get_analysis_fpath(fpath, file_hash)
# do we already have analysis results for this file?
if analysis_fpath.exists():
log_or_dot(logger, f"found analysis prebuild for {fpath}")
# Load the result file into whatever result class we use.
loaded_result = self.result_class.load(analysis_fpath)
if loaded_result:
# This result might have been created by another user; their prebuild folder copied to ours.
# If so, the fpath in the result will *not* point to the file we eventually want to compile,
# it will point to the user's original file, somewhere else. So replace it with our own path.
loaded_result.fpath = fpath
return loaded_result, analysis_fpath
log_or_dot(logger, f"analysing {fpath}")
# parse the file, get a node tree
node_tree = self._parse_file(fpath=fpath)
if isinstance(node_tree, Exception):
return Exception(f"error parsing file '{fpath}':\n{node_tree}"), None
if node_tree.content[0] is None:
logger.debug(f" empty tree found when parsing {fpath}")
# todo: If we don't save the empty result we'll keep analysing it every time!
return EmptySourceFile(fpath), None
# find things in the node tree
analysed_file = self.walk_nodes(fpath=fpath, file_hash=file_hash, node_tree=node_tree)
analysis_fpath = self._get_analysis_fpath(fpath, file_hash)
analysed_file.save(analysis_fpath)
return analysed_file, analysis_fpath
def _get_analysis_fpath(self, fpath, file_hash) -> Path:
return Path(self._config.prebuild_folder / f'{fpath.stem}.{file_hash}.an')
def _parse_file(self, fpath):
"""Get a node tree from a fortran file."""
reader = FortranFileReader(str(fpath), ignore_comments=False)
reader.exit_on_error = False # don't call sys.exit, it messes up the multi-processing
try:
tree = self.f2008_parser(reader)
return tree
except FortranSyntaxError as err:
# we can't return the FortranSyntaxError, it breaks multiprocessing!
logger.error(f"\nfparser raised a syntax error in {fpath}\n{err}")
return Exception(f"syntax error in {fpath}\n{err}")
except Exception as err:
logger.error(f"\nunhandled error '{type(err)}' in {fpath}\n{err}")
return Exception(f"unhandled error '{type(err)}' in {fpath}\n{err}")
@abstractmethod
def walk_nodes(self, fpath, file_hash, node_tree) -> AnalysedDependent:
"""
Examine the nodes in the parse tree, recording things we're interested in.
Return type depends on our subclass, and will be a subclass of AnalysedDependent.
"""
raise NotImplementedError | /sci_fab-1.0-py3-none-any.whl/fab/parse/fortran_common.py | 0.708011 | 0.220804 | fortran_common.py | pypi |
import json
import logging
from abc import ABC
from pathlib import Path
from typing import Union, Optional, Dict, Any, Set
from fab.util import file_checksum
logger = logging.getLogger(__name__)
class ParseException(Exception):
pass
class AnalysedFile(ABC):
"""
Analysis results for a single file. Abstract base class.
"""
def __init__(self, fpath: Union[str, Path], file_hash: Optional[int] = None):
"""
:param fpath:
The path of the file which was analysed.
:param file_hash:
The checksum of the file which was analysed.
If omitted, Fab will evaluate lazily.
If not provided, the `self.file_hash` property is lazily evaluated in case the file does not yet exist.
"""
self.fpath = Path(fpath)
self._file_hash = file_hash
@property
def file_hash(self):
if self._file_hash is None:
if not self.fpath.exists():
raise ValueError(f"analysed file '{self.fpath}' does not exist")
self._file_hash: int = file_checksum(self.fpath).file_hash
return self._file_hash
def __eq__(self, other):
# todo: better to use self.field_names() instead of vars(self) in order to evaluate any lazy attributes?
return vars(self) == vars(other)
# persistence
def to_dict(self) -> Dict[str, Any]:
"""
Create a dict representing the object.
The dict may be written to json, so can't contain sets.
Lists are sorted for reproducibility in testing.
"""
return {
"fpath": str(self.fpath),
"file_hash": self.file_hash
}
@classmethod
def from_dict(cls, d):
raise NotImplementedError
def save(self, fpath: Union[str, Path]):
# subclasses don't need to override this method
d = self.to_dict()
d["cls"] = self.__class__.__name__
        with open(fpath, 'wt') as outfile:
            json.dump(d, outfile, indent=4)
@classmethod
def load(cls, fpath: Union[str, Path]):
# subclasses don't need to override this method
        with open(fpath) as infile:
            d = json.load(infile)
found_class = d["cls"]
if found_class != cls.__name__:
raise ValueError(f"Expected class name '{cls.__name__}', found '{found_class}'")
return cls.from_dict(d)
# human readability
@classmethod
def field_names(cls):
"""
Defines the order in which we want fields to appear in str or repr strings.
Calling this helps to ensure any lazy attributes are evaluated before use,
e.g when constructing a string representation of the instance, or generating a hash value.
"""
return ['fpath', 'file_hash']
def __str__(self):
# We use self.field_names() instead of vars(self) in order to evaluate any lazy attributes.
values = [getattr(self, field_name) for field_name in self.field_names()]
return f'{self.__class__.__name__} ' + ' '.join(map(str, values))
def __repr__(self):
params = ', '.join([f'{f}={repr(getattr(self, f))}' for f in self.field_names()])
return f'{self.__class__.__name__}({params})'
# We need to be hashable before we can go into a set, which is useful for our subclasses.
# Note, the numerical result will change with each Python invocation.
def __hash__(self):
# Build up a list of things to hash, from our attributes.
# We use self.field_names() rather than vars(self) because we want to evaluate any lazy attributes.
# We turn dicts and sets into sorted tuples for hashing.
# todo: There's a good reason dicts and sets aren't supposed to be hashable.
# Please see https://github.com/metomi/fab/issues/229
things = set()
for field_name in self.field_names():
thing = getattr(self, field_name)
if isinstance(thing, Dict):
things.add(tuple(sorted(thing.items())))
elif isinstance(thing, Set):
things.add(tuple(sorted(thing)))
else:
things.add(thing)
return hash(tuple(things))
# todo: There's a design weakness relating to this class:
# we don't save empty results, which means we'll keep reanalysing them.
# We should save empty files and allow the loading to detect this, as it already reads the class name.
class EmptySourceFile(AnalysedFile):
"""
An analysis result for a file which resulted in an empty parse tree.
"""
def __init__(self, fpath: Union[str, Path]):
"""
:param fpath:
The path of the file which was analysed.
"""
super().__init__(fpath=fpath)
@classmethod
def from_dict(cls, d):
# todo: load & save should be implemented here and used by the calling code, to save reanalysis.
raise NotImplementedError | /sci_fab-1.0-py3-none-any.whl/fab/parse/__init__.py | 0.696578 | 0.257552 | __init__.py | pypi |
import logging
from typing import Optional, Iterable
from fab.steps import step
from fab.util import file_walk
logger = logging.getLogger(__name__)
class _PathFilter(object):
# Simple pattern matching using string containment check.
# Deems an incoming path as included or excluded.
def __init__(self, *filter_strings: str, include: bool):
"""
:param filter_strings:
One or more strings to be used as pattern matches.
:param include:
Set to True or False to include or exclude matching paths.
"""
self.filter_strings: Iterable[str] = filter_strings
self.include = include
def check(self, path):
if any(str(i) in str(path) for i in self.filter_strings):
return self.include
return None
class Include(_PathFilter):
"""
A path filter which includes matching paths, this convenience class improves config readability.
"""
def __init__(self, *filter_strings):
"""
:param filter_strings:
One or more strings to be used as pattern matches.
"""
super().__init__(*filter_strings, include=True)
def __str__(self):
return f'Include({", ".join(self.filter_strings)})'
class Exclude(_PathFilter):
"""
A path filter which excludes matching paths, this convenience class improves config readability.
"""
def __init__(self, *filter_strings):
"""
:param filter_strings:
One or more strings to be used as pattern matches.
"""
super().__init__(*filter_strings, include=False)
def __str__(self):
return f'Exclude({", ".join(self.filter_strings)})'
@step
def find_source_files(config, source_root=None, output_collection="all_source",
path_filters: Optional[Iterable[_PathFilter]] = None):
"""
Find the files in the source folder, with filtering.
Files can be included or excluded with simple pattern matching.
Every file is included by default, unless the filters say otherwise.
Path filters are expected to be provided by the user in an *ordered* collection.
The two convenience subclasses, :class:`~fab.steps.walk_source.Include` and :class:`~fab.steps.walk_source.Exclude`,
improve readability.
Order matters. For example::
path_filters = [
Exclude('my_folder'),
Include('my_folder/my_file.F90'),
]
In the above example, swapping the order would stop the file being included in the build.
A path matches a filter string simply if it *contains* it,
so the path *my_folder/my_file.F90* would match filters "my_folder", "my_file" and "er/my".
:param config:
The :class:`fab.build_config.BuildConfig` object where we can read settings
such as the project workspace folder or the multiprocessing flag.
:param source_root:
Optional path to source folder, with a sensible default.
:param output_collection:
Name of artefact collection to create, with a sensible default.
:param path_filters:
Iterable of Include and/or Exclude objects, to be processed in order.
:param name:
Human friendly name for logger output, with sensible default.
"""
path_filters = path_filters or []
"""
Recursively get all files in the given folder, with filtering.
:param artefact_store:
Contains artefacts created by previous Steps, and where we add our new artefacts.
This is where the given :class:`~fab.artefacts.ArtefactsGetter` finds the artefacts to process.
:param config:
The :class:`fab.build_config.BuildConfig` object where we can read settings
such as the project workspace folder or the multiprocessing flag.
"""
source_root = source_root or config.source_root
# file filtering
filtered_fpaths = []
# todo: we shouldn't need to ignore the prebuild folder here, it's not underneath the source root.
for fpath in file_walk(source_root, ignore_folders=[config.prebuild_folder]):
wanted = True
for path_filter in path_filters:
# did this filter have anything to say about this file?
res = path_filter.check(fpath)
if res is not None:
wanted = res
if wanted:
filtered_fpaths.append(fpath)
else:
logger.debug(f"excluding {fpath}")
if not filtered_fpaths:
raise RuntimeError("no source files found after filtering")
config._artefact_store[output_collection] = filtered_fpaths | /sci_fab-1.0-py3-none-any.whl/fab/steps/find_source_files.py | 0.812049 | 0.40486 | find_source_files.py | pypi |
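# --- Editor-added usage sketch for find_source_files above; not part of the fab source.
# Demonstrates the ordering rule from the docstring: the later Include wins over the earlier
# Exclude. The source path is hypothetical.
from fab.build_config import BuildConfig
from fab.steps.find_source_files import Exclude, Include, find_source_files
from fab.steps.grab.folder import grab_folder

with BuildConfig(project_label='filter-demo') as config:
    grab_folder(config, '/path/to/source')
    find_source_files(config, path_filters=[
        Exclude('my_folder'),
        Include('my_folder/my_file.F90'),
    ])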
import logging
from string import Template
from typing import Optional
from fab.build_config import BuildConfig
from fab.constants import OBJECT_FILES, OBJECT_ARCHIVES
from fab.steps import step
from fab.util import log_or_dot
from fab.tools import run_command
from fab.artefacts import ArtefactsGetter, CollectionGetter
logger = logging.getLogger(__name__)
DEFAULT_SOURCE_GETTER = CollectionGetter(OBJECT_FILES)
# todo: two diagrams showing the flow of artefacts in the exe and library use cases
# show how the library has a single build target with None as the name.
# todo: all this documentation for such a simple step - should we split it up somehow?
@step
def archive_objects(config: BuildConfig, source: Optional[ArtefactsGetter] = None, archiver='ar',
output_fpath=None, output_collection=OBJECT_ARCHIVES):
"""
Create an object archive for every build target, from their object files.
An object archive is a set of object (*.o*) files bundled into a single file, typically with a *.a* extension.
Expects one or more build targets from its artefact getter, of the form Dict[name, object_files].
By default, it finds the build targets and their object files in the artefact collection named by
:py:const:`fab.constants.COMPILED_FILES`.
This step has three use cases:
* The **object archive** is the end goal of the build.
* The object archive is a convenience step before linking a **shared object**.
* One or more object archives as convenience steps before linking **executables**.
The benefit of creating an object archive before linking is simply to reduce the size
of the linker command, which might otherwise include thousands of .o files, making any error output
difficult to read. You don't have to use this step before linking.
The linker step has a default artefact getter which will work with or without this preceding step.
**Creating a Static or Shared Library:**
When building a library there is expected to be a single build target with a `None` name.
This typically happens when configuring the :class:`~fab.steps.analyser.Analyser` step *without* a root symbol.
We can assume the list of object files is the entire project source, compiled.
In this case you must specify an *output_fpath*.
**Creating Executables:**
When creating executables, there is expected to be one or more build targets, each with a name.
This typically happens when configuring the :class:`~fab.steps.analyser.Analyser` step *with* a root symbol(s).
We can assume each list of object files is sufficient to build each *<root_symbol>.exe*.
In this case you cannot specify an *output_fpath* path because they are automatically created from the
target name.
:param config:
The :class:`fab.build_config.BuildConfig` object where we can read settings
such as the project workspace folder or the multiprocessing flag.
:param source:
An :class:`~fab.artefacts.ArtefactsGetter` which give us our lists of objects to archive.
The artefacts are expected to be of the form `Dict[root_symbol_name, list_of_object_files]`.
:param archiver:
The archiver executable. Defaults to 'ar'.
:param output_fpath:
The file path of the archive file to create.
This string can include templating, where "$output" is replaced with the output folder.
* Must be specified when building a library file (no build target name).
* Must not be specified when building linker input (one or more build target names).
:param output_collection:
The name of the artefact collection to create. Defaults to the name in
:const:`fab.constants.OBJECT_ARCHIVES`.
"""
# todo: the output path should not be an abs fpath, it should be relative to the proj folder
source_getter = source or DEFAULT_SOURCE_GETTER
    output_fpath = str(output_fpath) if output_fpath else None
target_objects = source_getter(config._artefact_store)
assert target_objects.keys()
if output_fpath and list(target_objects.keys()) != [None]:
raise ValueError("You must not specify an output path (library) when there are root symbols (exes)")
if not output_fpath and list(target_objects.keys()) == [None]:
raise ValueError("You must specify an output path when building a library.")
output_archives = config._artefact_store.setdefault(output_collection, {})
for root, objects in target_objects.items():
if root:
# we're building an object archive for an exe
output_fpath = str(config.build_output / f'{root}.a')
else:
# we're building a single object archive with a given filename
assert len(target_objects) == 1, "unexpected root of None with multiple build targets"
output_fpath = Template(str(output_fpath)).substitute(
output=config.build_output)
command = [archiver]
command.extend(['cr', output_fpath])
command.extend(map(str, sorted(objects)))
log_or_dot(logger, 'CreateObjectArchive running command: ' + ' '.join(command))
try:
run_command(command)
except Exception as err:
raise Exception(f"error creating object archive:\n{err}")
output_archives[root] = [output_fpath] | /sci_fab-1.0-py3-none-any.whl/fab/steps/archive_objects.py | 0.572723 | 0.47457 | archive_objects.py | pypi |
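# --- Editor-added usage sketch for archive_objects above; not part of the fab source.
# Library use case: a single unnamed build target, so output_fpath must be given and
# "$output" expands to the build_output folder. The earlier grab/analyse/compile steps are
# elided, so this is a shape sketch rather than a complete build script.
from fab.build_config import BuildConfig
from fab.steps.archive_objects import archive_objects

with BuildConfig(project_label='my-lib') as config:
    # ... grab, find_source_files, preprocess, analyse and compile steps go here ...
    archive_objects(config, output_fpath='$output/libmy.a')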
import logging
import os
from string import Template
from typing import Optional
from fab.constants import OBJECT_FILES, OBJECT_ARCHIVES, EXECUTABLES
from fab.steps import step
from fab.util import log_or_dot
from fab.tools import run_command
from fab.artefacts import ArtefactsGetter, CollectionGetter
logger = logging.getLogger(__name__)
class DefaultLinkerSource(ArtefactsGetter):
"""
A source getter specifically for linking.
Looks for the default output from archiving objects, falls back to default compiler output.
This allows a link step to work with or without a preceding object archive step.
"""
def __call__(self, artefact_store):
return CollectionGetter(OBJECT_ARCHIVES)(artefact_store) \
or CollectionGetter(OBJECT_FILES)(artefact_store)
def call_linker(linker, flags, filename, objects):
assert isinstance(linker, str)
command = linker.split()
command.extend(['-o', filename])
# todo: we need to be able to specify flags which appear before the object files
command.extend(map(str, sorted(objects)))
    # note: must this come after the list of object files?
command.extend(os.getenv('LDFLAGS', '').split())
command.extend(flags)
log_or_dot(logger, 'Link running command: ' + ' '.join(command))
try:
run_command(command)
except Exception as err:
raise Exception(f"error linking:\n{err}")
@step
def link_exe(config, linker: Optional[str] = None, flags=None, source: Optional[ArtefactsGetter] = None):
"""
Link object files into an executable for every build target.
Expects one or more build targets from its artefact getter, of the form Dict[name, object_files].
The default artefact getter, :py:const:`~fab.steps.link_exe.DefaultLinkerSource`, looks for any output
from an :class:`~fab.steps.archive_objects.ArchiveObjects` step, and falls back to using output from
compiler steps.
:param config:
The :class:`fab.build_config.BuildConfig` object where we can read settings
such as the project workspace folder or the multiprocessing flag.
:param linker:
E.g 'gcc' or 'ld'.
:param flags:
A list of flags to pass to the linker.
:param source:
An optional :class:`~fab.artefacts.ArtefactsGetter`.
Typically not required, as there is a sensible default.
"""
linker = linker or os.getenv('LD', 'ld')
logger.info(f'linker is {linker}')
flags = flags or []
source_getter = source or DefaultLinkerSource()
target_objects = source_getter(config._artefact_store)
for root, objects in target_objects.items():
exe_path = config.project_workspace / f'{root}.exe'
call_linker(linker=linker, flags=flags, filename=str(exe_path), objects=objects)
config._artefact_store.setdefault(EXECUTABLES, []).append(exe_path)
# todo: the bit about Dict[None, object_files] seems too obscure - try to rethink this.
@step
def link_shared_object(config, output_fpath: str, linker: Optional[str] = None, flags=None,
source: Optional[ArtefactsGetter] = None):
"""
Produce a shared object (*.so*) file from the given build target.
Expects a *single build target* from its artefact getter, of the form Dict[None, object_files].
We can assume the list of object files is the entire project source, compiled.
Params are as for :class:`~fab.steps.link_exe.LinkerBase`, with the addition of:
:param config:
The :class:`fab.build_config.BuildConfig` object where we can read settings
such as the project workspace folder or the multiprocessing flag.
:param output_fpath:
File path of the shared object to create.
:param linker:
E.g 'gcc' or 'ld'.
:param flags:
A list of flags to pass to the linker.
:param source:
An optional :class:`~fab.artefacts.ArtefactsGetter`.
Typically not required, as there is a sensible default.
"""
linker = linker or os.getenv('LD', 'ld')
logger.info(f'linker is {linker}')
flags = flags or []
source_getter = source or DefaultLinkerSource()
ensure_flags = ['-fPIC', '-shared']
for f in ensure_flags:
if f not in flags:
flags.append(f)
# We expect a single build target containing the whole codebase, with no name (as it's not a root symbol).
target_objects = source_getter(config._artefact_store)
assert list(target_objects.keys()) == [None]
objects = target_objects[None]
call_linker(
linker=linker, flags=flags,
filename=Template(output_fpath).substitute(output=config.build_output),
objects=objects) | /sci_fab-1.0-py3-none-any.whl/fab/steps/link.py | 0.623606 | 0.227308 | link.py | pypi |
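# --- Editor-added usage sketch for the link steps above; not part of the fab source.
# One executable is produced per root symbol found during analysis; for a library you would
# call link_shared_object(config, output_fpath='$output/libmy.so') instead.
from fab.build_config import BuildConfig
from fab.steps.link import link_exe

with BuildConfig(project_label='my-exe') as config:
    # ... earlier steps populate the OBJECT_FILES / OBJECT_ARCHIVES collections ...
    link_exe(config, linker='gcc', flags=['-lgfortran'])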
import logging
import os
import shutil
from dataclasses import dataclass
from pathlib import Path
from typing import Collection, List, Optional, Tuple
from fab.build_config import BuildConfig, FlagsConfig
from fab.constants import PRAGMAD_C
from fab.metrics import send_metric
from fab.util import log_or_dot_finish, input_to_output_fpath, log_or_dot, suffix_filter, Timer, by_type
from fab.tools import get_tool, run_command
from fab.steps import check_for_errors, run_mp, step
from fab.artefacts import ArtefactsGetter, SuffixFilter, CollectionGetter
logger = logging.getLogger(__name__)
@dataclass
class MpCommonArgs(object):
"""Common args for calling process_artefact() using multiprocessing."""
config: BuildConfig
output_suffix: str
preprocessor: str
flags: FlagsConfig
name: str
def pre_processor(config: BuildConfig, preprocessor: str,
files: Collection[Path], output_collection, output_suffix,
common_flags: Optional[List[str]] = None,
path_flags: Optional[List] = None,
name="preprocess"):
"""
Preprocess Fortran or C files.
Uses multiprocessing, unless disabled in the config.
:param config:
The :class:`fab.build_config.BuildConfig` object where we can read settings
such as the project workspace folder or the multiprocessing flag.
:param preprocessor:
The preprocessor executable.
:param files:
The files to preprocess.
:param output_collection:
The name of the output artefact collection.
:param output_suffix:
Suffix for output files.
:param common_flags:
Used to construct a :class:`~fab.config.FlagsConfig` object.
:param path_flags:
Used to construct a :class:`~fab.build_config.FlagsConfig` object.
:param name:
Human friendly name for logger output, with sensible default.
"""
common_flags = common_flags or []
flags = FlagsConfig(common_flags=common_flags, path_flags=path_flags)
logger.info(f'preprocessor is {preprocessor}')
logger.info(f'preprocessing {len(files)} files')
# common args for the child process
mp_common_args = MpCommonArgs(
config=config,
output_suffix=output_suffix,
preprocessor=preprocessor,
flags=flags,
name=name,
)
# bundle files with common args
mp_args = [(file, mp_common_args) for file in files]
results = run_mp(config, items=mp_args, func=process_artefact)
check_for_errors(results, caller_label=name)
log_or_dot_finish(logger)
config._artefact_store[output_collection] = list(by_type(results, Path))
def process_artefact(arg: Tuple[Path, MpCommonArgs]):
"""
Expects an input file in the source folder.
Writes the output file to the output folder, with a lower case extension.
"""
fpath, args = arg
with Timer() as timer:
# output_fpath = input_to_output_fpath(config=self._config, input_path=fpath).with_suffix(self.output_suffix)
output_fpath = input_to_output_fpath(config=args.config, input_path=fpath).with_suffix(args.output_suffix)
# already preprocessed?
        # todo: remove reuse_artefacts everywhere!
if args.config.reuse_artefacts and output_fpath.exists():
log_or_dot(logger, f'Preprocessor skipping: {fpath}')
else:
output_fpath.parent.mkdir(parents=True, exist_ok=True)
command = [args.preprocessor]
command.extend(args.flags.flags_for_path(path=fpath, config=args.config))
command.append(str(fpath))
command.append(str(output_fpath))
log_or_dot(logger, 'PreProcessor running command: ' + ' '.join(command))
try:
run_command(command)
except Exception as err:
raise Exception(f"error preprocessing {fpath}:\n{err}")
send_metric(args.name, str(fpath), {'time_taken': timer.taken, 'start': timer.start})
return output_fpath
def get_fortran_preprocessor():
"""
Identify the fortran preprocessor and any flags from the environment.
Initially looks for the `FPP` environment variable, then tries to call the `fpp` and `cpp` command line tools.
Returns the executable and flags.
The returned flags will always include `-P` to suppress line numbers.
This fparser ticket requests line number handling https://github.com/stfc/fparser/issues/390 .
"""
fpp: Optional[str] = None
fpp_flags: Optional[List[str]] = None
try:
fpp, fpp_flags = get_tool(os.getenv('FPP'))
logger.info(f"The environment defined FPP as '{fpp}'")
except ValueError:
pass
if not fpp:
try:
run_command(['which', 'fpp'])
fpp, fpp_flags = 'fpp', ['-P']
logger.info('detected fpp')
except RuntimeError:
# fpp not available
pass
if not fpp:
try:
run_command(['which', 'cpp'])
fpp, fpp_flags = 'cpp', ['-traditional-cpp', '-P']
logger.info('detected cpp')
except RuntimeError:
# fpp not available
pass
if not fpp:
raise RuntimeError('no fortran preprocessor specified or discovered')
assert fpp_flags is not None
if '-P' not in fpp_flags:
fpp_flags.append('-P')
return fpp, fpp_flags
# todo: rename preprocess_fortran
@step
def preprocess_fortran(config: BuildConfig, source: Optional[ArtefactsGetter] = None, **kwargs):
"""
Wrapper to pre_processor for Fortran files.
Ensures all preprocessed files are in the build output.
This means *copying* already preprocessed files from source to build output.
Params as per :func:`~fab.steps.preprocess._pre_processor`.
The preprocessor is taken from the `FPP` environment, or falls back to `fpp -P`.
If source is not provided, it defaults to `SuffixFilter('all_source', '.F90')`.
"""
source_getter = source or SuffixFilter('all_source', ['.F90', '.f90'])
source_files = source_getter(config._artefact_store)
F90s = suffix_filter(source_files, '.F90')
f90s = suffix_filter(source_files, '.f90')
# get the tool from FPP
fpp, fpp_flags = get_fortran_preprocessor()
# make sure any flags from FPP are included in any common flags specified by the config
try:
common_flags = kwargs.pop('common_flags')
except KeyError:
common_flags = []
for fpp_flag in fpp_flags:
if fpp_flag not in common_flags:
common_flags.append(fpp_flag)
# preprocess big F90s
pre_processor(
config,
preprocessor=fpp,
common_flags=common_flags,
files=F90s,
output_collection='preprocessed_fortran', output_suffix='.f90',
name='preprocess fortran',
**kwargs,
)
# todo: parallel copy?
# copy little f90s from source to output folder
logger.info(f'Fortran preprocessor copying {len(f90s)} files to build_output')
for f90 in f90s:
output_path = input_to_output_fpath(config, input_path=f90)
if output_path != f90:
if not output_path.parent.exists():
output_path.parent.mkdir(parents=True)
log_or_dot(logger, f'copying {f90}')
shutil.copyfile(str(f90), str(output_path))
class DefaultCPreprocessorSource(ArtefactsGetter):
"""
A source getter specifically for c preprocessing.
Looks for the default output from pragma injection, falls back to default source finder.
This allows the step to work with or without a preceding pragma step.
"""
def __call__(self, artefact_store):
return CollectionGetter(PRAGMAD_C)(artefact_store) \
or SuffixFilter('all_source', '.c')(artefact_store)
# todo: rename preprocess_c
@step
def preprocess_c(config: BuildConfig, source=None, **kwargs):
"""
Wrapper to pre_processor for C files.
Params as per :func:`~fab.steps.preprocess._pre_processor`.
The preprocessor is taken from the `CPP` environment, or falls back to `cpp`.
If source is not provided, it defaults to :class:`~fab.steps.preprocess.DefaultCPreprocessorSource`.
"""
source_getter = source or DefaultCPreprocessorSource()
source_files = source_getter(config._artefact_store)
pre_processor(
config,
preprocessor=os.getenv('CPP', 'cpp'),
files=source_files,
output_collection='preprocessed_c', output_suffix='.c',
name='preprocess c',
**kwargs,
) | /sci_fab-1.0-py3-none-any.whl/fab/steps/preprocess.py | 0.727104 | 0.153549 | preprocess.py | pypi |
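# --- Editor-added usage sketch for the preprocessing steps above; not part of the fab source.
# The Fortran preprocessor is discovered from $FPP, falling back to fpp or cpp, and -P is
# always appended; the C preprocessor comes from $CPP. The source path is hypothetical.
from fab.build_config import BuildConfig
from fab.steps.find_source_files import find_source_files
from fab.steps.grab.folder import grab_folder
from fab.steps.preprocess import preprocess_c, preprocess_fortran

with BuildConfig(project_label='preprocess-demo') as config:
    grab_folder(config, '/path/to/source')
    find_source_files(config)
    preprocess_fortran(config, common_flags=['-DUSE_MPI=0'])
    preprocess_c(config)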
import re
from pathlib import Path
from typing import Pattern, Optional, Match
from fab import FabException
from fab.constants import PRAGMAD_C
from fab.steps import run_mp, step
from fab.artefacts import ArtefactsGetter, SuffixFilter
DEFAULT_SOURCE_GETTER = SuffixFilter('all_source', '.c')
# todo: test
@step
def c_pragma_injector(config, source: Optional[ArtefactsGetter] = None, output_name=None):
"""
A build step to inject custom pragmas to mark blocks of user and system include statements.
By default, reads .c files from the *all_source* artefact and creates the *pragmad_c* artefact.
This step does not write to the build output folder, it creates the pragmad c in the same folder as the c file.
This is because a subsequent preprocessing step needs to look in the source folder for header files,
including in paths relative to the c file.
:param config:
The :class:`fab.build_config.BuildConfig` object where we can read settings
such as the project workspace folder or the multiprocessing flag.
:param source:
An :class:`~fab.artefacts.ArtefactsGetter` which give us our c files to process.
:param output_name:
The name of the artefact collection to create in the artefact store, with a sensible default
"""
source_getter = source or DEFAULT_SOURCE_GETTER
output_name = output_name or PRAGMAD_C
files = source_getter(config._artefact_store)
results = run_mp(config, items=files, func=_process_artefact)
config._artefact_store[output_name] = list(results)
def _process_artefact(fpath: Path):
prag_output_fpath = fpath.with_suffix('.prag')
    with prag_output_fpath.open('w') as prag_file:
        prag_file.writelines(inject_pragmas(fpath))
return prag_output_fpath
def inject_pragmas(fpath):
"""
Reads a C source file but when encountering an #include
preprocessor directive injects a special Fab-specific
#pragma which can be picked up later by the Analyser
after the preprocessing
"""
_include_re: str = r'^\s*#include\s+(\S+)'
_include_pattern: Pattern = re.compile(_include_re)
for line in open(fpath, 'rt', encoding='utf-8'):
include_match: Optional[Match] = _include_pattern.match(line)
if include_match:
# For valid C the first character of the matched
# part of the group will indicate whether this is
# a system library include or a user include
include: str = include_match.group(1)
if include.startswith('<'):
yield '#pragma FAB SysIncludeStart\n'
yield line
yield '#pragma FAB SysIncludeEnd\n'
elif include.startswith(('"', "'")):
yield '#pragma FAB UsrIncludeStart\n'
yield line
yield '#pragma FAB UsrIncludeEnd\n'
else:
msg = 'Found badly formatted #include'
raise FabException(msg)
else:
yield line | /sci_fab-1.0-py3-none-any.whl/fab/steps/c_pragma_injector.py | 0.468791 | 0.173831 | c_pragma_injector.py | pypi |
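# --- Editor-added illustration of inject_pragmas above; not part of the fab source.
import tempfile
from fab.steps.c_pragma_injector import inject_pragmas

with tempfile.NamedTemporaryFile('w', suffix='.c', delete=False) as tmp:
    tmp.write('#include <stdio.h>\n#include "my_mod.h"\nint main(void) { return 0; }\n')

for line in inject_pragmas(tmp.name):
    print(line, end='')
# The <stdio.h> include comes out bracketed by "#pragma FAB SysIncludeStart/End" and the
# "my_mod.h" include by "#pragma FAB UsrIncludeStart/End"; other lines pass through unchanged.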
import multiprocessing
from fab.metrics import send_metric
from fab.util import by_type, TimerLogger
from functools import wraps
def step(func):
"""Function decorator for steps."""
@wraps(func)
def wrapper(*args, **kwargs):
name = func.__name__
# call the function
with TimerLogger(name) as step:
func(*args, **kwargs)
send_metric('steps', name, step.taken)
return wrapper
def run_mp(config, items, func, no_multiprocessing: bool = False):
"""
Called from Step.run() to process multiple items in parallel.
For example, a compile step would, in its run() method, find a list of source files in the artefact store.
It could then pass those paths to this method, along with a function to compile a *single* file.
The whole set of results are returned in a list-like, with undefined order.
:param items:
An iterable of items to process in parallel.
:param func:
A function to process a single item. Must accept a single argument.
:param no_multiprocessing:
Overrides the config's multiprocessing flag, disabling multiprocessing for this call.
"""
if config.multiprocessing and not no_multiprocessing:
with multiprocessing.Pool(config.n_procs) as p:
results = p.map(func, items)
else:
results = [func(f) for f in items]
return results
def run_mp_imap(config, items, func, result_handler):
"""
Like run_mp, but uses imap instead of map so that we can process each result as it happens.
This is useful for a slow operation where we want to save our progress as we go
instead of waiting for everything to finish, allowing us to pick up where we left off if the program is halted.
:param items:
An iterable of items to process in parallel.
:param func:
A function to process a single item. Must accept a single argument.
:param result_handler:
A function to handle a single result. Must accept a single argument.
"""
if config.multiprocessing:
with multiprocessing.Pool(config.n_procs) as p:
analysis_results = p.imap_unordered(func, items)
result_handler(analysis_results)
else:
analysis_results = (func(a) for a in items) # generator
result_handler(analysis_results)
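# Sketch of the run_mp_imap calling convention: the handler receives a lazy
# iterable and can persist each result as it arrives. The fake config and toy
# handler are illustrative only.
def _demo_run_mp_imap():
    from types import SimpleNamespace
    fake_config = SimpleNamespace(multiprocessing=False, n_procs=2)
    def handler(results):
        for r in results:
            print('got result', r)  # a real handler might checkpoint to disk here
    run_mp_imap(fake_config, items=[1, 2, 3], func=lambda x: x + 10, result_handler=handler)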
def check_for_errors(results, caller_label=None):
"""
    Check an iterable of results for exceptions and, if any are found, raise a single RuntimeError summarising them.
This is a helper function for steps which use multiprocessing,
getting multiple results back from :meth:`~fab.steps.Step.run_mp` all in one go.
:param results:
An iterable of results.
:param caller_label:
Optional human-friendly name of the caller for logging.
"""
caller_label = f'during {caller_label}' if caller_label else ''
exceptions = list(by_type(results, Exception))
if exceptions:
formatted_errors = "\n\n".join(map(str, exceptions))
raise RuntimeError(
f"{formatted_errors}\n\n{len(exceptions)} error(s) found {caller_label}"
        )
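# Small illustration of how step code typically pairs run_mp results with
# check_for_errors: workers return either a result or the exception they caught,
# and check_for_errors raises once with everything collected. The values below
# are made up.
def _demo_check_for_errors():
    results = ['ok.o', ValueError('bad source file'), 'other.o']
    try:
        check_for_errors(results, caller_label='demo compile')
    except RuntimeError as err:
        print(err)  # the message ends with "1 error(s) found during demo compile"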
import warnings
from pathlib import Path
from typing import Union
from fab.steps import step
from fab.tools import run_command
def current_commit(folder=None):
folder = folder or '.'
output = run_command(['git', 'log', '--oneline', '-n', '1'], cwd=folder)
commit = output.split()[0]
return commit
def tool_available() -> bool:
"""Is the command line git tool available?"""
try:
run_command(['git', 'help'])
except FileNotFoundError:
return False
return True
def is_working_copy(dst: Union[str, Path]) -> bool:
    """Is the given path a working copy?"""
try:
run_command(['git', 'status'], cwd=dst)
except RuntimeError:
return False
return True
def fetch(src, revision, dst):
# todo: allow shallow fetch with --depth 1
command = ['git', 'fetch', src]
if revision:
command.append(revision)
run_command(command, cwd=str(dst))
# todo: allow cli args, e.g to set the depth
@step
def git_checkout(config, src: str, dst_label: str = '', revision=None):
"""
    Check out or update a Git repo.
"""
_dst = config.source_root / dst_label
# create folder?
if not _dst.exists():
_dst.mkdir(parents=True)
run_command(['git', 'init', '.'], cwd=_dst)
elif not is_working_copy(_dst): # type: ignore
raise ValueError(f"destination exists but is not a working copy: '{_dst}'")
fetch(src, revision, _dst)
run_command(['git', 'checkout', 'FETCH_HEAD'], cwd=_dst)
try:
_dst.relative_to(config.project_workspace)
run_command(['git', 'clean', '-f'], cwd=_dst)
except ValueError:
warnings.warn(f'not safe to clean git source in {_dst}')
@step
def git_merge(config, src: str, dst_label: str = '', revision=None):
"""
Merge a git repo into a local working copy.
"""
_dst = config.source_root / dst_label
if not _dst or not is_working_copy(_dst):
raise ValueError(f"destination is not a working copy: '{_dst}'")
fetch(src=src, revision=revision, dst=_dst)
try:
run_command(['git', 'merge', 'FETCH_HEAD'], cwd=_dst)
except RuntimeError as err:
run_command(['git', 'merge', '--abort'], cwd=_dst)
        raise RuntimeError(f"Error merging {revision}. Merge aborted.\n{err}")
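# Illustrative sketch only: exercises the small helpers above against the
# current directory. Whether a commit is printed depends on git being installed
# and '.' being inside a repository.
def _demo_git_helpers():
    if not tool_available():
        print('git is not installed')
        return
    if is_working_copy('.'):
        print('HEAD is at', current_commit('.'))
    else:
        print('not inside a git working copy')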
import mimetypes
import os
import shutil
from typing import Optional
try:
import rarfile
except ImportError: # pragma: no cover
rarfile = None
class RARExtractionNotSupported(Exception):
pass
def _rar_extract(filename, extract_dir):
if rarfile is None:
        raise RARExtractionNotSupported('RAR file extraction is not supported; '
                                        'please install the \'rarfile\' package with pip.')
with rarfile.RarFile(filename) as rf:
rf.extractall(path=extract_dir)
try:
import py7zr
except ImportError: # pragma: no cover
py7zr = None
class SevenZipExtractionNotSupported(Exception):
pass
def _7z_extract(filename, extract_dir):
if py7zr is None:
        raise SevenZipExtractionNotSupported('7z file extraction is not supported; '
                                             'please install the \'py7zr\' package with pip.')
with py7zr.SevenZipFile(filename) as rf:
rf.extractall(path=extract_dir)
shutil.register_unpack_format('rar', ['.rar'], _rar_extract, [], 'WinRAR file')
shutil.register_unpack_format('7z', ['.7z'], _7z_extract, [], '7z file')
def unpack_archive(filename, dstpath, fmt: Optional[str] = None):
"""
Overview:
        Extract from all kinds of archive files, including ``.zip``, ``.tar``, ``.tar.gz``, ``.tar.xz``, ``.tar.bz2``, \
        ``.rar`` (requires the ``rarfile`` package) and ``.7z`` (requires the ``py7zr`` package).
    :param filename: Filename of the archive file.
    :param dstpath: Destination directory for the extracted files.
    :param fmt: Format of the file; the default ``None`` means the format is auto-detected from ``filename``.
:return: Destination path of this extraction.
.. note::
Password is not supported at present.
"""
shutil.unpack_archive(filename, dstpath, fmt)
return dstpath
def get_archive_type(filename: str, content_type: Optional[str] = None) -> Optional[str]:
"""
Overview:
Get archive file type of the given ``filename`` and ``content_type``.
:param filename: Filename.
:param content_type: Content-Type information from remote.
    :return: Archive format name, suitable for passing to :func:`shutil.unpack_archive`.
"""
if content_type:
ext_guess = mimetypes.guess_extension(content_type)
if ext_guess:
for name, exts, _ in shutil.get_unpack_formats():
if ext_guess in exts:
return name
filename = os.path.normcase(filename)
for name, exts, _ in shutil.get_unpack_formats():
for ext in exts:
if filename.endswith(ext):
return name
    return None
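# Hedged end-to-end sketch: build a zip with the standard library, let
# get_archive_type recognise it, then unpack it with unpack_archive. All paths
# are temporary and invented for the demonstration.
def _demo_unpack_archive():
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        src_dir = os.path.join(tmp, 'payload')
        os.makedirs(src_dir)
        with open(os.path.join(src_dir, 'hello.txt'), 'w') as f:
            f.write('hello')
        archive = shutil.make_archive(os.path.join(tmp, 'payload'), 'zip', src_dir)
        print(get_archive_type(archive))   # 'zip'
        out_dir = unpack_archive(archive, os.path.join(tmp, 'out'))
        print(os.listdir(out_dir))         # ['hello.txt']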
import hashlib
import sys
import logging
"""
``logging_filters``
-------------------
Python uses `filters`_ to add contextual information to its
:mod:`~python:logging` facility.
Filters defined below are attached to :data:`settings.LOGGING` and
also :class:`~.middleware.LogSetupMiddleware`.
.. _filters:
http://docs.python.org/2.6/library/logging.html#\
adding-contextual-information-to-your-logging-output
"""
class RequestFilter(object):
"""
Filter that adds information about a *request* to the logging record.
:param request:
:type request: :class:`~django.http.HttpRequest`
Extra information can be substituted in the formatter string:
``http_user_agent``
The user agent string, provided by the client.
``path_info``
The requested HTTP path.
``remote_addr``
The remote IP address.
``request_method``
The HTTP request method (*e.g.* GET, POST, PUT, DELETE, *etc.*)
``server_protocol``
The server protocol (*e.g.* HTTP, HTTPS, *etc.*)
``username``
The username for the logged-in user.
"""
def __init__(self, request=None):
"""Saves *request* (a WSGIRequest object) for later."""
self.request = request
def filter(self, record):
"""
Adds information from the request to the logging *record*.
If certain information cannot be extracted from ``self.request``,
a hyphen ``'-'`` is substituted as a placeholder.
"""
request = self.request
# Basic
record.request_method = getattr(request, 'method', '-')
record.path_info = getattr(request, 'path_info', '-')
# User
user = getattr(request, 'user', None)
if user and not user.is_anonymous():
# Hash it
record.username = hashlib.sha1(user.username.encode()).hexdigest()[:8]
record.userid = str(user.id)
else:
record.username = '---'
record.userid = '-'
# Headers
META = getattr(request, 'META', {})
record.remote_addr = META.get('REMOTE_ADDR', '-')
record.server_protocol = META.get('SERVER_PROTOCOL', '-')
record.http_user_agent = META.get('HTTP_USER_AGENT', '-')
return True
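# Minimal sketch of using the filter outside Django: every lookup goes through
# getattr with a fallback, so even a bare `None` request yields the '-'
# placeholders, which makes it easy to see the available format fields. The
# logger name and format string are invented for illustration.
def _demo_request_filter():
    demo_logger = logging.getLogger('requestfilter.demo')
    demo_logger.propagate = False
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(
        '%(remote_addr)s %(request_method)s %(path_info)s [%(username)s] %(message)s'))
    handler.addFilter(RequestFilter(request=None))
    demo_logger.addHandler(handler)
    demo_logger.warning('demo message')  # prints "- - - [---] demo message"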
import weakref
weakref_type = type(weakref.ref(lambda: None))
def deref(x):
return x() if x and type(x) == weakref_type else x
class LogSetupMiddleware(object):
"""
Adds :class:`.logging_filters.RequestFilter` to every request.
If *root* is a module name, only look at loggers inside that
logging subtree.
This filter adds useful information about `HttpRequest`\ s to log
entries. See :class:`.logging_filters.RequestFilter` for details
about which formatter substitutions are added.
Automatically detects which handlers and logger need
RequestFilter installed, by looking for an unbound RequestFilter
attached to a handler or logger. To configure Django, in your
:envvar:`DJANGO_SETTINGS_MODULE`::
LOGGING = {
'filters': {
# Add an unbound RequestFilter.
'request': {
'()': 'django_requestlogging.logging_filters.RequestFilter',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'filters': ['request'],
},
},
'loggers': {
'myapp': {
# Add your handlers that have the unbound request filter
'handlers': ['console'],
# Optionally, add the unbound request filter to your
# application.
'filters': ['request'],
},
},
}
"""
FILTER = RequestFilter
def __init__(self, root=''):
self.root = root
def find_loggers(self):
"""
Returns a :class:`dict` of names and the associated loggers.
"""
# Extract the full logger tree from Logger.manager.loggerDict
# that are under ``self.root``.
result = {}
prefix = self.root + '.'
for name, logger in logging.Logger.manager.loggerDict.items():
if self.root and not name.startswith(prefix):
# Does not fall under self.root
continue
result[name] = logger
# Add the self.root logger
result[self.root] = logging.getLogger(self.root)
return result
def find_handlers(self):
"""
Returns a list of handlers.
"""
return list(logging._handlerList)
def _find_filterer_with_filter(self, filterers, filter_cls):
"""
Returns a :class:`dict` of filterers mapped to a list of filters.
*filterers* should be a list of filterers.
*filter_cls* should be a logging filter that should be matched.
"""
result = {}
for logger in map(deref, filterers):
filters = [f for f in map(deref, getattr(logger, 'filters', []))
if isinstance(f, filter_cls)]
if filters:
result[logger] = filters
return result
def find_loggers_with_filter(self, filter_cls):
"""
Returns a :class:`dict` of loggers mapped to a list of filters.
Looks for instances of *filter_cls* attached to each logger.
If the logger has at least one, it is included in the result.
"""
return self._find_filterer_with_filter(self.find_loggers().values(),
filter_cls)
def find_handlers_with_filter(self, filter_cls):
"""
Returns a :class:`dict` of handlers mapped to a list of filters.
Looks for instances of *filter_cls* attached to each handler.
If the handler has at least one, it is included in the result.
"""
return self._find_filterer_with_filter(self.find_handlers(),
filter_cls)
def add_filter(self, f, filter_cls=None):
"""Add filter *f* to any loggers that have *filter_cls* filters."""
if filter_cls is None:
filter_cls = type(f)
for logger in self.find_loggers_with_filter(filter_cls):
logger.addFilter(f)
for handler in self.find_handlers_with_filter(filter_cls):
handler.addFilter(f)
def remove_filter(self, f):
"""Remove filter *f* from all loggers."""
for logger in self.find_loggers_with_filter(type(f)):
logger.removeFilter(f)
for handler in self.find_handlers_with_filter(type(f)):
handler.removeFilter(f)
def process_request(self, request):
"""Adds a filter, bound to *request*, to the appropriate loggers."""
request.logging_filter = RequestFilter(request)
self.add_filter(request.logging_filter)
def process_response(self, request, response):
"""Removes this *request*'s filter from all loggers."""
f = getattr(request, 'logging_filter', None)
if f:
self.remove_filter(f)
return response
def process_exception(self, request, exception):
"""Removes this *request*'s filter from all loggers."""
f = getattr(request, 'logging_filter', None)
if f:
self.remove_filter(f)
class LoggingConfiguration(object):
def __init__(self, project='NONE'):
self.django_log_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'console': {
'format': '[DJANGO] - [' + project + '] - [%(asctime)s][%(levelname)s][%(name)s.%(funcName)s:%(lineno)d]'
'[%(username)s][%(userid)s] - %(message)s',
},
},
'filters': {
# Add an unbound RequestFilter.
'request': {
'()': 'scilogging.logging.RequestFilter',
},
},
'handlers': {
'sentry': {
'level': 'ERROR', # To capture more than ERROR, change to WARNING, INFO, etc.
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
'tags': {'custom-tag': 'x'},
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'console',
'stream': sys.stdout,
'filters': ['request'],
},
},
'root': {
'handlers': ['console'],
'level': 'DEBUG',
'filters': ['request'],
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'WARNING',
'propagate': True,
},
'raven': {
'level': 'WARNING',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'WARNING',
'handlers': ['console'],
'propagate': False,
},
},
        }
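# Hedged usage sketch: the class only assembles a dictConfig-style dictionary,
# so a Django settings module would typically expose it as LOGGING. The project
# name is invented; the 'sentry' handler additionally requires the raven
# package, and the filter path assumes this module is importable as
# 'scilogging.logging'.
def _demo_logging_configuration():
    config = LoggingConfiguration(project='my-service').django_log_config
    print(sorted(config['handlers']))  # ['console', 'sentry']
    # In a Django settings module this would typically be:
    #     LOGGING = LoggingConfiguration(project='my-service').django_log_config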
"Utility functions for handling buffers"
import sys as _sys
import numpy as _numpy
def _ord(byte):
r"""Convert a byte to an integer.
>>> buffer = b'\x00\x01\x02'
>>> [_ord(b) for b in buffer]
[0, 1, 2]
"""
if _sys.version_info >= (3,):
return byte
else:
return ord(byte)
def hex_bytes(buffer, spaces=None):
r"""Pretty-printing for binary buffers.
>>> hex_bytes(b'\x00\x01\x02\x03\x04')
'0001020304'
>>> hex_bytes(b'\x00\x01\x02\x03\x04', spaces=1)
'00 01 02 03 04'
>>> hex_bytes(b'\x00\x01\x02\x03\x04', spaces=2)
'0001 0203 04'
>>> hex_bytes(b'\x00\x01\x02\x03\x04\x05\x06', spaces=2)
'0001 0203 0405 06'
>>> hex_bytes(b'\x00\x01\x02\x03\x04\x05\x06', spaces=3)
'000102 030405 06'
"""
hex_bytes = ['{:02x}'.format(_ord(x)) for x in buffer]
if spaces is None:
return ''.join(hex_bytes)
    elif spaces == 1:
return ' '.join(hex_bytes)
for i in range(len(hex_bytes)//spaces):
hex_bytes.insert((spaces+1)*(i+1)-1, ' ')
return ''.join(hex_bytes)
def assert_null(buffer, strict=True):
r"""Ensure an input buffer is entirely zero.
>>> import sys
>>> assert_null(b'')
>>> assert_null(b'\x00\x00')
>>> assert_null(b'\x00\x01\x02\x03')
Traceback (most recent call last):
...
ValueError: 00 01 02 03
>>> stderr = sys.stderr
>>> sys.stderr = sys.stdout
>>> assert_null(b'\x00\x01\x02\x03', strict=False)
warning: post-data padding not zero: 00 01 02 03
>>> sys.stderr = stderr
"""
if buffer and _ord(max(buffer)) != 0:
hex_string = hex_bytes(buffer, spaces=1)
if strict:
raise ValueError(hex_string)
else:
_sys.stderr.write(
'warning: post-data padding not zero: {}\n'.format(hex_string))
# From ReadWave.c
def byte_order(needToReorderBytes):
little_endian = _sys.byteorder == 'little'
if needToReorderBytes:
little_endian = not little_endian
if little_endian:
return '<' # little-endian
return '>' # big-endian
# From ReadWave.c
def need_to_reorder_bytes(version):
# If the low order byte of the version field of the BinHeader
# structure is zero then the file is from a platform that uses
# different byte-ordering and therefore all data will need to be
# reordered.
return version & 0xFF == 0
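# Tiny illustration of the version-based byte-order detection, using invented
# version words: 5 stored natively keeps a non-zero low byte, whereas the same
# value written by a machine of the opposite endianness reads back as 0x0500.
def _demo_byte_order_detection():
    print(need_to_reorder_bytes(5))       # False: low byte is non-zero
    print(need_to_reorder_bytes(0x0500))  # True: low byte is zero
    print(byte_order(need_to_reorder_bytes(0x0500)))  # '>' on a little-endian host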
# From ReadWave.c
def checksum(buffer, byte_order, oldcksum, numbytes):
x = _numpy.ndarray(
        (numbytes // 2,),  # 2 bytes to a short -- ignore trailing odd byte; integer division keeps the shape an int
dtype=_numpy.dtype(byte_order+'h'),
buffer=buffer)
oldcksum += x.sum()
if oldcksum > 2**31: # fake the C implementation's int rollover
oldcksum %= 2**32
if oldcksum > 2**31:
oldcksum -= 2**31
return oldcksum & 0xffff
def _bytes(obj, encoding='utf-8'):
"""Convert bytes or strings into bytes
>>> _bytes(b'123')
'123'
>>> _bytes('123')
'123'
"""
if _sys.version_info >= (3,):
if isinstance(obj, bytes):
return obj
else:
return bytes(obj, encoding)
else:
        return bytes(obj)
"Read IGOR Binary Wave files into Numpy arrays."
# Based on WaveMetric's Technical Note 003, "Igor Binary Format"
# ftp://ftp.wavemetrics.net/IgorPro/Technical_Notes/TN003.zip
# From ftp://ftp.wavemetrics.net/IgorPro/Technical_Notes/TN000.txt
# We place no restrictions on copying Technical Notes, with the
# exception that you cannot resell them. So read, enjoy, and
# share. We hope IGOR Technical Notes will provide you with lots of
# valuable information while you are developing IGOR applications.
from __future__ import absolute_import
import array as _array
import struct as _struct
import sys as _sys
import types as _types
import numpy as np
from . import LOG as _LOG
from .struct import Structure as _Structure
from .struct import DynamicStructure as _DynamicStructure
from .struct import Field as _Field
from .struct import DynamicField as _DynamicField
from .util import assert_null as _assert_null
from .util import byte_order as _byte_order
from .util import need_to_reorder_bytes as _need_to_reorder_bytes
from .util import checksum as _checksum
# Numpy doesn't support complex integers by default, see
# http://mail.python.org/pipermail/python-dev/2002-April/022408.html
# http://mail.scipy.org/pipermail/numpy-discussion/2007-October/029447.html
# So we roll our own types. See
# http://docs.scipy.org/doc/numpy/user/basics.rec.html
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.dtype.html
complexInt8 = np.dtype([('real', np.int8), ('imag', np.int8)])
complexInt16 = np.dtype([('real', np.int16), ('imag', np.int16)])
complexInt32 = np.dtype([('real', np.int32), ('imag', np.int32)])
complexUInt8 = np.dtype([('real', np.uint8), ('imag', np.uint8)])
complexUInt16 = np.dtype(
[('real', np.uint16), ('imag', np.uint16)])
complexUInt32 = np.dtype(
[('real', np.uint32), ('imag', np.uint32)])
class StaticStringField (_DynamicField):
_null_terminated = False
_array_size_field = None
def __init__(self, *args, **kwargs):
if 'array' not in kwargs:
kwargs['array'] = True
super(StaticStringField, self).__init__(*args, **kwargs)
def post_unpack(self, parents, data):
wave_structure = parents[-1]
wave_data = self._get_structure_data(parents, data, wave_structure)
d = self._normalize_string(wave_data[self.name])
wave_data[self.name] = d
def _normalize_string(self, d):
if isinstance(d, bytes):
pass
elif hasattr(d, 'tobytes'):
d = d.tobytes()
elif hasattr(d, 'tostring'): # Python 2 compatibility
d = d.tostring()
else:
d = b''.join(d)
if self._array_size_field:
start = 0
strings = []
for count in self.counts:
end = start + count
if end > start:
strings.append(d[start:end])
if self._null_terminated:
strings[-1] = strings[-1].split(b'\x00', 1)[0]
start = end
elif self._null_terminated:
d = d.split(b'\x00', 1)[0]
return d
class NullStaticStringField (StaticStringField):
_null_terminated = True
# Begin IGOR constants and typedefs from IgorBin.h
# From IgorMath.h
TYPE_TABLE = { # (key: integer flag, value: numpy dtype)
0:None, # Text wave, not handled in ReadWave.c
1:complex, # NT_CMPLX, makes number complex.
2:np.float32, # NT_FP32, 32 bit fp numbers.
3:np.complex64,
4:np.float64, # NT_FP64, 64 bit fp numbers.
5:np.complex128,
8:np.int8, # NT_I8, 8 bit signed integer. Requires Igor Pro
# 2.0 or later.
9:complexInt8,
0x10:np.int16,# NT_I16, 16 bit integer numbers. Requires Igor
# Pro 2.0 or later.
0x11:complexInt16,
0x20:np.int32,# NT_I32, 32 bit integer numbers. Requires Igor
# Pro 2.0 or later.
0x21:complexInt32,
# 0x40:None, # NT_UNSIGNED, Makes above signed integers
# # unsigned. Requires Igor Pro 3.0 or later.
0x48:np.uint8,
0x49:complexUInt8,
0x50:np.uint16,
0x51:complexUInt16,
0x60:np.uint32,
0x61:complexUInt32,
}
# From wave.h
MAXDIMS = 4
# From binary.h
BinHeader1 = _Structure( # `version` field pulled out into Wave
name='BinHeader1',
fields=[
_Field('l', 'wfmSize', help='The size of the WaveHeader2 data structure plus the wave data plus 16 bytes of padding.'),
_Field('h', 'checksum', help='Checksum over this header and the wave header.'),
])
BinHeader2 = _Structure( # `version` field pulled out into Wave
name='BinHeader2',
fields=[
_Field('l', 'wfmSize', help='The size of the WaveHeader2 data structure plus the wave data plus 16 bytes of padding.'),
_Field('l', 'noteSize', help='The size of the note text.'),
_Field('l', 'pictSize', default=0, help='Reserved. Write zero. Ignore on read.'),
_Field('h', 'checksum', help='Checksum over this header and the wave header.'),
])
BinHeader3 = _Structure( # `version` field pulled out into Wave
name='BinHeader3',
fields=[
_Field('l', 'wfmSize', help='The size of the WaveHeader2 data structure plus the wave data plus 16 bytes of padding.'),
_Field('l', 'noteSize', help='The size of the note text.'),
_Field('l', 'formulaSize', help='The size of the dependency formula, if any.'),
_Field('l', 'pictSize', default=0, help='Reserved. Write zero. Ignore on read.'),
_Field('h', 'checksum', help='Checksum over this header and the wave header.'),
])
BinHeader5 = _Structure( # `version` field pulled out into Wave
name='BinHeader5',
fields=[
_Field('h', 'checksum', help='Checksum over this header and the wave header.'),
_Field('l', 'wfmSize', help='The size of the WaveHeader5 data structure plus the wave data.'),
_Field('l', 'formulaSize', help='The size of the dependency formula, if any.'),
_Field('l', 'noteSize', help='The size of the note text.'),
_Field('l', 'dataEUnitsSize', help='The size of optional extended data units.'),
_Field('l', 'dimEUnitsSize', help='The size of optional extended dimension units.', count=MAXDIMS, array=True),
_Field('l', 'dimLabelsSize', help='The size of optional dimension labels.', count=MAXDIMS, array=True),
_Field('l', 'sIndicesSize', help='The size of string indicies if this is a text wave.'),
_Field('l', 'optionsSize1', default=0, help='Reserved. Write zero. Ignore on read.'),
_Field('l', 'optionsSize2', default=0, help='Reserved. Write zero. Ignore on read.'),
])
# From wave.h
MAX_WAVE_NAME2 = 18 # Maximum length of wave name in version 1 and 2
# files. Does not include the trailing null.
MAX_WAVE_NAME5 = 31 # Maximum length of wave name in version 5
# files. Does not include the trailing null.
MAX_UNIT_CHARS = 3
# Header to an array of waveform data.
# `wData` field pulled out into DynamicWaveDataField1
WaveHeader2 = _DynamicStructure(
name='WaveHeader2',
fields=[
_Field('h', 'type', help='See types (e.g. NT_FP64) above. Zero for text waves.'),
_Field('P', 'next', default=0, help='Used in memory only. Write zero. Ignore on read.'),
NullStaticStringField('c', 'bname', help='Name of wave plus trailing null.', count=MAX_WAVE_NAME2+2),
_Field('h', 'whVersion', default=0, help='Write 0. Ignore on read.'),
_Field('h', 'srcFldr', default=0, help='Used in memory only. Write zero. Ignore on read.'),
_Field('P', 'fileName', default=0, help='Used in memory only. Write zero. Ignore on read.'),
_Field('c', 'dataUnits', default=0, help='Natural data units go here - null if none.', count=MAX_UNIT_CHARS+1, array=True),
_Field('c', 'xUnits', default=0, help='Natural x-axis units go here - null if none.', count=MAX_UNIT_CHARS+1, array=True),
_Field('l', 'npnts', help='Number of data points in wave.'),
_Field('h', 'aModified', default=0, help='Used in memory only. Write zero. Ignore on read.'),
_Field('d', 'hsA', help='X value for point p = hsA*p + hsB'),
_Field('d', 'hsB', help='X value for point p = hsA*p + hsB'),
_Field('h', 'wModified', default=0, help='Used in memory only. Write zero. Ignore on read.'),
_Field('h', 'swModified', default=0, help='Used in memory only. Write zero. Ignore on read.'),
_Field('h', 'fsValid', help='True if full scale values have meaning.'),
_Field('d', 'topFullScale', help='The min full scale value for wave.'), # sic, 'min' should probably be 'max'
_Field('d', 'botFullScale', help='The min full scale value for wave.'),
_Field('c', 'useBits', default=0, help='Used in memory only. Write zero. Ignore on read.'),
_Field('c', 'kindBits', default=0, help='Reserved. Write zero. Ignore on read.'),
_Field('P', 'formula', default=0, help='Used in memory only. Write zero. Ignore on read.'),
_Field('l', 'depID', default=0, help='Used in memory only. Write zero. Ignore on read.'),
_Field('L', 'creationDate', help='DateTime of creation. Not used in version 1 files.'),
_Field('c', 'wUnused', default=0, help='Reserved. Write zero. Ignore on read.', count=2, array=True),
_Field('L', 'modDate', help='DateTime of last modification.'),
_Field('P', 'waveNoteH', help='Used in memory only. Write zero. Ignore on read.'),
])
# `sIndices` pointer unset (use Wave5_data['sIndices'] instead). This
# field is filled in by DynamicStringIndicesDataField.
# `wData` field pulled out into DynamicWaveDataField5
WaveHeader5 = _DynamicStructure(
name='WaveHeader5',
fields=[
_Field('P', 'next', help='link to next wave in linked list.'),
_Field('L', 'creationDate', help='DateTime of creation.'),
_Field('L', 'modDate', help='DateTime of last modification.'),
_Field('l', 'npnts', help='Total number of points (multiply dimensions up to first zero).'),
_Field('h', 'type', help='See types (e.g. NT_FP64) above. Zero for text waves.'),
_Field('h', 'dLock', default=0, help='Reserved. Write zero. Ignore on read.'),
_Field('c', 'whpad1', default=0, help='Reserved. Write zero. Ignore on read.', count=6, array=True),
_Field('h', 'whVersion', default=1, help='Write 1. Ignore on read.'),
NullStaticStringField('c', 'bname', help='Name of wave plus trailing null.', count=MAX_WAVE_NAME5+1),
_Field('l', 'whpad2', default=0, help='Reserved. Write zero. Ignore on read.'),
_Field('P', 'dFolder', default=0, help='Used in memory only. Write zero. Ignore on read.'),
# Dimensioning info. [0] == rows, [1] == cols etc
_Field('l', 'nDim', help='Number of of items in a dimension -- 0 means no data.', count=MAXDIMS, array=True),
_Field('d', 'sfA', help='Index value for element e of dimension d = sfA[d]*e + sfB[d].', count=MAXDIMS, array=True),
_Field('d', 'sfB', help='Index value for element e of dimension d = sfA[d]*e + sfB[d].', count=MAXDIMS, array=True),
# SI units
_Field('c', 'dataUnits', default=0, help='Natural data units go here - null if none.', count=MAX_UNIT_CHARS+1, array=True),
_Field('c', 'dimUnits', default=0, help='Natural dimension units go here - null if none.', count=(MAXDIMS, MAX_UNIT_CHARS+1), array=True),
_Field('h', 'fsValid', help='TRUE if full scale values have meaning.'),
_Field('h', 'whpad3', default=0, help='Reserved. Write zero. Ignore on read.'),
_Field('d', 'topFullScale', help='The max and max full scale value for wave'), # sic, probably "max and min"
_Field('d', 'botFullScale', help='The max and max full scale value for wave.'), # sic, probably "max and min"
_Field('P', 'dataEUnits', default=0, help='Used in memory only. Write zero. Ignore on read.'),
_Field('P', 'dimEUnits', default=0, help='Used in memory only. Write zero. Ignore on read.', count=MAXDIMS, array=True),
_Field('P', 'dimLabels', default=0, help='Used in memory only. Write zero. Ignore on read.', count=MAXDIMS, array=True),
_Field('P', 'waveNoteH', default=0, help='Used in memory only. Write zero. Ignore on read.'),
_Field('l', 'whUnused', default=0, help='Reserved. Write zero. Ignore on read.', count=16, array=True),
# The following stuff is considered private to Igor.
_Field('h', 'aModified', default=0, help='Used in memory only. Write zero. Ignore on read.'),
_Field('h', 'wModified', default=0, help='Used in memory only. Write zero. Ignore on read.'),
_Field('h', 'swModified', default=0, help='Used in memory only. Write zero. Ignore on read.'),
_Field('c', 'useBits', default=0, help='Used in memory only. Write zero. Ignore on read.'),
_Field('c', 'kindBits', default=0, help='Reserved. Write zero. Ignore on read.'),
_Field('P', 'formula', default=0, help='Used in memory only. Write zero. Ignore on read.'),
_Field('l', 'depID', default=0, help='Used in memory only. Write zero. Ignore on read.'),
_Field('h', 'whpad4', default=0, help='Reserved. Write zero. Ignore on read.'),
_Field('h', 'srcFldr', default=0, help='Used in memory only. Write zero. Ignore on read.'),
_Field('P', 'fileName', default=0, help='Used in memory only. Write zero. Ignore on read.'),
_Field('P', 'sIndices', default=0, help='Used in memory only. Write zero. Ignore on read.'),
])
class DynamicWaveDataField1 (_DynamicField):
def pre_pack(self, parents, data):
raise NotImplementedError()
def pre_unpack(self, parents, data):
full_structure = parents[0]
wave_structure = parents[-1]
wave_header_structure = wave_structure.fields[1].format
wave_data = self._get_structure_data(parents, data, wave_structure)
version = data['version']
bin_header = wave_data['bin_header']
wave_header = wave_data['wave_header']
self.count = wave_header['npnts']
self.data_size = self._get_size(bin_header, wave_header_structure.size)
type_ = TYPE_TABLE.get(wave_header['type'], None)
if type_:
self.shape = self._get_shape(bin_header, wave_header)
else: # text wave
type_ = np.dtype('S1')
self.shape = (self.data_size,)
# dtype() wrapping to avoid numpy.generic and
# getset_descriptor issues with the builtin numpy types
# (e.g. int32). It has no effect on our local complex
# integers.
self.dtype = np.dtype(type_).newbyteorder(
wave_structure.byte_order)
if (version == 3 and
self.count > 0 and
bin_header['formulaSize'] > 0 and
self.data_size == 0):
"""From TN003:
Igor Pro 2.00 included support for dependency formulae. If
a wave was governed by a dependency formula then the
actual wave data was not written to disk for that wave,
because on loading the wave Igor could recalculate the
data. However,this prevented the wave from being loaded
into an experiment other than the original
experiment. Consequently, in a version of Igor Pro 3.0x,
we changed it so that the wave data was written even if
the wave was governed by a dependency formula. When
reading a binary wave file, you can detect that the wave
file does not contain the wave data by examining the
wfmSize, formulaSize and npnts fields. If npnts is greater
than zero and formulaSize is greater than zero and
the waveDataSize as calculated above is zero, then this is
a file governed by a dependency formula that was written
without the actual wave data.
"""
self.shape = (0,)
elif TYPE_TABLE.get(wave_header['type'], None) is not None:
assert self.data_size == self.count * self.dtype.itemsize, (
self.data_size, self.count, self.dtype.itemsize, self.dtype)
else:
assert self.data_size >= 0, (
bin_header['wfmSize'], wave_header_structure.size)
def _get_size(self, bin_header, wave_header_size):
return bin_header['wfmSize'] - wave_header_size - 16
def _get_shape(self, bin_header, wave_header):
return (self.count,)
def unpack(self, stream):
data_b = stream.read(self.data_size)
try:
data = np.ndarray(
shape=self.shape,
dtype=self.dtype,
buffer=data_b,
order='F',
)
except:
_LOG.error(
'could not reshape data from {} to {}'.format(
self.shape, data_b))
raise
return data
class DynamicWaveDataField5 (DynamicWaveDataField1):
"Adds support for multidimensional data."
def _get_size(self, bin_header, wave_header_size):
return bin_header['wfmSize'] - wave_header_size
def _get_shape(self, bin_header, wave_header):
return [n for n in wave_header['nDim'] if n > 0] or (0,)
# End IGOR constants and typedefs from IgorBin.h
class DynamicStringField (StaticStringField):
_size_field = None
def pre_unpack(self, parents, data):
size = self._get_size_data(parents, data)
if self._array_size_field:
self.counts = size
self.count = sum(self.counts)
else:
self.count = size
self.setup()
def _get_size_data(self, parents, data):
wave_structure = parents[-1]
wave_data = self._get_structure_data(parents, data, wave_structure)
bin_header = wave_data['bin_header']
return bin_header[self._size_field]
class DynamicWaveNoteField (DynamicStringField):
_size_field = 'noteSize'
class DynamicDependencyFormulaField (DynamicStringField):
"""Optional wave dependency formula
Excerpted from TN003:
A wave has a dependency formula if it has been bound by a
statement such as "wave0 := sin(x)". In this example, the
dependency formula is "sin(x)". The formula is stored with
no trailing null byte.
"""
_size_field = 'formulaSize'
# Except when it is stored with a trailing null byte :p. See, for
# example, test/data/mac-version3Dependent.ibw.
_null_terminated = True
class DynamicDataUnitsField (DynamicStringField):
"""Optional extended data units data
Excerpted from TN003:
dataUnits - Present in versions 1, 2, 3, 5. The dataUnits field
stores the units for the data represented by the wave. It is a C
string terminated with a null character. This field supports
units of 0 to 3 bytes. In version 1, 2 and 3 files, longer units
can not be represented. In version 5 files, longer units can be
stored using the optional extended data units section of the
file.
"""
_size_field = 'dataEUnitsSize'
class DynamicDimensionUnitsField (DynamicStringField):
"""Optional extended dimension units data
Excerpted from TN003:
xUnits - Present in versions 1, 2, 3. The xUnits field stores the
X units for a wave. It is a C string terminated with a null
character. This field supports units of 0 to 3 bytes. In
version 1, 2 and 3 files, longer units can not be represented.
dimUnits - Present in version 5 only. This field is an array of 4
strings, one for each possible wave dimension. Each string
supports units of 0 to 3 bytes. Longer units can be stored using
the optional extended dimension units section of the file.
"""
_size_field = 'dimEUnitsSize'
_array_size_field = True
class DynamicLabelsField (DynamicStringField):
"""Optional dimension label data
From TN003:
If the wave has dimension labels for dimension d then the
dimLabelsSize[d] field of the BinHeader5 structure will be
non-zero.
A wave will have dimension labels if a SetDimLabel command has
been executed on it.
A 3 point 1D wave has 4 dimension labels. The first dimension
label is the label for the dimension as a whole. The next three
dimension labels are the labels for rows 0, 1, and 2. When Igor
writes dimension labels to disk, it writes each dimension label as
a C string (null-terminated) in a field of 32 bytes.
"""
_size_field = 'dimLabelsSize'
_array_size_field = True
def post_unpack(self, parents, data):
wave_structure = parents[-1]
wave_data = self._get_structure_data(parents, data, wave_structure)
bin_header = wave_data['bin_header']
d = wave_data[self.name]
dim_labels = []
start = 0
for size in bin_header[self._size_field]:
end = start + size
if end > start:
dim_data = d[start:end]
chunks = []
for i in range(size//32):
chunks.append(dim_data[32*i:32*(i+1)])
labels = [b'']
for chunk in chunks:
labels[-1] = labels[-1] + b''.join(chunk)
if b'\x00' in chunk:
labels.append(b'')
labels.pop(-1)
start = end
else:
labels = []
dim_labels.append(labels)
wave_data[self.name] = dim_labels
class DynamicStringIndicesDataField (_DynamicField):
"""String indices used for text waves only
"""
def pre_pack(self, parents, data):
raise NotImplementedError()
def pre_unpack(self, parents, data):
wave_structure = parents[-1]
wave_data = self._get_structure_data(parents, data, wave_structure)
bin_header = wave_data['bin_header']
wave_header = wave_data['wave_header']
self.string_indices_size = bin_header['sIndicesSize']
self.count = self.string_indices_size // 4
if self.count: # make sure we're in a text wave
assert TYPE_TABLE[wave_header['type']] is None, wave_header
self.setup()
def post_unpack(self, parents, data):
if not self.count:
return
wave_structure = parents[-1]
wave_data = self._get_structure_data(parents, data, wave_structure)
wave_header = wave_data['wave_header']
wdata = wave_data['wData']
strings = []
start = 0
for i,offset in enumerate(wave_data['sIndices']):
if offset > start:
chars = wdata[start:offset]
strings.append(b''.join(chars))
start = offset
elif offset == start:
strings.append(b'')
else:
raise ValueError((offset, wave_data['sIndices']))
wdata = np.array(strings)
shape = [n for n in wave_header['nDim'] if n > 0] or (0,)
try:
wdata = wdata.reshape(shape)
except ValueError:
_LOG.error(
'could not reshape strings from {} to {}'.format(
shape, wdata.shape))
raise
wave_data['wData'] = wdata
class DynamicVersionField (_DynamicField):
def pre_pack(self, parents, byte_order):
raise NotImplementedError()
def post_unpack(self, parents, data):
wave_structure = parents[-1]
wave_data = self._get_structure_data(parents, data, wave_structure)
version = wave_data['version']
if wave_structure.byte_order in '@=':
need_to_reorder_bytes = _need_to_reorder_bytes(version)
wave_structure.byte_order = _byte_order(need_to_reorder_bytes)
_LOG.debug(
'get byte order from version: {} (reorder? {})'.format(
wave_structure.byte_order, need_to_reorder_bytes))
else:
need_to_reorder_bytes = False
old_format = wave_structure.fields[-1].format
if version == 1:
wave_structure.fields[-1].format = Wave1
elif version == 2:
wave_structure.fields[-1].format = Wave2
elif version == 3:
wave_structure.fields[-1].format = Wave3
elif version == 5:
wave_structure.fields[-1].format = Wave5
elif not need_to_reorder_bytes:
raise ValueError(
'invalid binary wave version: {}'.format(version))
if wave_structure.fields[-1].format != old_format:
_LOG.debug('change wave headers from {} to {}'.format(
old_format, wave_structure.fields[-1].format))
wave_structure.setup()
elif need_to_reorder_bytes:
wave_structure.setup()
# we might need to unpack again with the new byte order
return need_to_reorder_bytes
class DynamicWaveField (_DynamicField):
def post_unpack(self, parents, data):
return
raise NotImplementedError() # TODO
checksum_size = bin.size + wave.size
wave_structure = parents[-1]
if version == 5:
# Version 5 checksum does not include the wData field.
checksum_size -= 4
c = _checksum(b, parents[-1].byte_order, 0, checksum_size)
if c != 0:
raise ValueError(
('This does not appear to be a valid Igor binary wave file. '
'Error in checksum: should be 0, is {}.').format(c))
Wave1 = _DynamicStructure(
name='Wave1',
fields=[
_Field(BinHeader1, 'bin_header', help='Binary wave header'),
_Field(WaveHeader2, 'wave_header', help='Wave header'),
DynamicWaveDataField1('f', 'wData', help='The start of the array of waveform data.', count=0, array=True),
])
Wave2 = _DynamicStructure(
name='Wave2',
fields=[
_Field(BinHeader2, 'bin_header', help='Binary wave header'),
_Field(WaveHeader2, 'wave_header', help='Wave header'),
DynamicWaveDataField1('f', 'wData', help='The start of the array of waveform data.', count=0, array=True),
_Field('x', 'padding', help='16 bytes of padding in versions 2 and 3.', count=16, array=True),
DynamicWaveNoteField('c', 'note', help='Optional wave note data', count=0, array=True),
])
Wave3 = _DynamicStructure(
name='Wave3',
fields=[
_Field(BinHeader3, 'bin_header', help='Binary wave header'),
_Field(WaveHeader2, 'wave_header', help='Wave header'),
DynamicWaveDataField1('f', 'wData', help='The start of the array of waveform data.', count=0, array=True),
_Field('x', 'padding', help='16 bytes of padding in versions 2 and 3.', count=16, array=True),
DynamicWaveNoteField('c', 'note', help='Optional wave note data', count=0, array=True),
DynamicDependencyFormulaField('c', 'formula', help='Optional wave dependency formula', count=0, array=True),
])
Wave5 = _DynamicStructure(
name='Wave5',
fields=[
_Field(BinHeader5, 'bin_header', help='Binary wave header'),
_Field(WaveHeader5, 'wave_header', help='Wave header'),
DynamicWaveDataField5('f', 'wData', help='The start of the array of waveform data.', count=0, array=True),
DynamicDependencyFormulaField('c', 'formula', help='Optional wave dependency formula.', count=0, array=True),
DynamicWaveNoteField('c', 'note', help='Optional wave note data.', count=0, array=True),
DynamicDataUnitsField('c', 'data_units', help='Optional extended data units data.', count=0, array=True),
DynamicDimensionUnitsField('c', 'dimension_units', help='Optional dimension label data', count=0, array=True),
DynamicLabelsField('c', 'labels', help="Optional dimension label data", count=0, array=True),
DynamicStringIndicesDataField('P', 'sIndices', help='Dynamic string indices for text waves.', count=0, array=True),
])
Wave = _DynamicStructure(
name='Wave',
fields=[
DynamicVersionField('h', 'version', help='Version number for backwards compatibility.'),
DynamicWaveField(Wave1, 'wave', help='The rest of the wave data.'),
])
def load(filename):
if hasattr(filename, 'read'):
f = filename # filename is actually a stream object
else:
f = open(filename, 'rb')
try:
Wave.byte_order = '='
Wave.setup()
data = Wave.unpack_stream(f)
finally:
if not hasattr(filename, 'read'):
f.close()
return data
def save(filename):
    raise NotImplementedError
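# Hedged usage sketch: 'experiment.ibw' is a placeholder path. The key layout
# mirrors the Wave structures above: the parsed dict carries 'version' plus a
# 'wave' entry holding the bin header, the wave header and the numpy data array.
def _demo_load_ibw():
    data = load('experiment.ibw')
    wave = data['wave']
    print(wave['wave_header']['bname'])  # wave name (bytes)
    print(wave['wData'].shape)           # shape of the numpy data array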
"Read IGOR Packed Experiment files files into records."
from . import LOG as _LOG
from .struct import Structure as _Structure
from .struct import Field as _Field
from .util import byte_order as _byte_order
from .util import need_to_reorder_bytes as _need_to_reorder_bytes
from .util import _bytes
from .record import RECORD_TYPE as _RECORD_TYPE
from .record.base import UnknownRecord as _UnknownRecord
from .record.base import UnusedRecord as _UnusedRecord
from .record.folder import FolderStartRecord as _FolderStartRecord
from .record.folder import FolderEndRecord as _FolderEndRecord
from .record.variables import VariablesRecord as _VariablesRecord
from .record.wave import WaveRecord as _WaveRecord
# From PTN003:
# Igor writes other kinds of records in a packed experiment file, for
# storing things like pictures, page setup records, and miscellaneous
# settings. The format for these records is quite complex and is not
# described in PTN003. If you are writing a program to read packed
# files, you must skip any record with a record type that is not
# listed above.
PackedFileRecordHeader = _Structure(
name='PackedFileRecordHeader',
fields=[
_Field('H', 'recordType', help='Record type plus superceded flag.'),
_Field('h', 'version', help='Version information depends on the type of record.'),
_Field('l', 'numDataBytes', help='Number of data bytes in the record following this record header.'),
])
#CR_STR = '\x15' (\r)
PACKEDRECTYPE_MASK = 0x7FFF # Record type = (recordType & PACKEDREC_TYPE_MASK)
SUPERCEDED_MASK = 0x8000 # Bit is set if the record is superceded by
# a later record in the packed file.
def load(filename, strict=True, ignore_unknown=True):
_LOG.debug('loading a packed experiment file from {}'.format(filename))
records = []
if hasattr(filename, 'read'):
f = filename # filename is actually a stream object
else:
f = open(filename, 'rb')
byte_order = None
initial_byte_order = '='
try:
while True:
PackedFileRecordHeader.byte_order = initial_byte_order
PackedFileRecordHeader.setup()
b = bytes(f.read(PackedFileRecordHeader.size))
if not b:
break
if len(b) < PackedFileRecordHeader.size:
raise ValueError(
('not enough data for the next record header ({} < {})'
).format(len(b), PackedFileRecordHeader.size))
_LOG.debug('reading a new packed experiment file record')
header = PackedFileRecordHeader.unpack_from(b)
if header['version'] and not byte_order:
need_to_reorder = _need_to_reorder_bytes(header['version'])
byte_order = initial_byte_order = _byte_order(need_to_reorder)
_LOG.debug(
'get byte order from version: {} (reorder? {})'.format(
byte_order, need_to_reorder))
if need_to_reorder:
PackedFileRecordHeader.byte_order = byte_order
PackedFileRecordHeader.setup()
header = PackedFileRecordHeader.unpack_from(b)
_LOG.debug(
'reordered version: {}'.format(header['version']))
data = bytes(f.read(header['numDataBytes']))
if len(data) < header['numDataBytes']:
raise ValueError(
('not enough data for the next record ({} < {})'
                    ).format(len(data), header['numDataBytes']))
record_type = _RECORD_TYPE.get(
header['recordType'] & PACKEDRECTYPE_MASK, _UnknownRecord)
_LOG.debug('the new record has type {} ({}).'.format(
record_type, header['recordType']))
if record_type in [_UnknownRecord, _UnusedRecord
] and not ignore_unknown:
                raise KeyError('unknown record type {}'.format(
header['recordType']))
records.append(record_type(header, data, byte_order=byte_order))
finally:
_LOG.debug('finished loading {} records from {}'.format(
len(records), filename))
if not hasattr(filename, 'read'):
f.close()
filesystem = _build_filesystem(records)
return (records, filesystem)
def _build_filesystem(records):
# From PTN003:
"""The name must be a valid Igor data folder name. See Object
Names in the Igor Reference help file for name rules.
When Igor Pro reads the data folder start record, it creates a new
data folder with the specified name. Any subsequent variable, wave
or data folder start records cause Igor to create data objects in
this new data folder, until Igor Pro reads a corresponding data
folder end record."""
# From the Igor Manual, chapter 2, section 8, page II-123
# http://www.wavemetrics.net/doc/igorman/II-08%20Data%20Folders.pdf
"""Like the Macintosh file system, Igor Pro's data folders use the
colon character (:) to separate components of a path to an
object. This is analogous to Unix which uses / and Windows which
uses \. (Reminder: Igor's data folders exist wholly in memory
while an experiment is open. It is not a disk file system!)
A data folder named "root" always exists and contains all other
data folders.
"""
# From the Igor Manual, chapter 4, page IV-2
# http://www.wavemetrics.net/doc/igorman/IV-01%20Commands.pdf
"""For waves and data folders only, you can also use "liberal"
names. Liberal names can include almost any character, including
spaces and dots (see Liberal Object Names on page III-415 for
details).
"""
# From the Igor Manual, chapter 3, section 16, page III-416
# http://www.wavemetrics.net/doc/igorman/III-16%20Miscellany.pdf
"""Liberal names have the same rules as standard names except you
may use any character except control characters and the following:
" ' : ;
"""
filesystem = {'root': {}}
dir_stack = [('root', filesystem['root'])]
for record in records:
cwd = dir_stack[-1][-1]
if isinstance(record, _FolderStartRecord):
name = record.null_terminated_text
cwd[name] = {}
dir_stack.append((name, cwd[name]))
elif isinstance(record, _FolderEndRecord):
dir_stack.pop()
elif isinstance(record, (_VariablesRecord, _WaveRecord)):
if isinstance(record, _VariablesRecord):
sys_vars = record.variables['variables']['sysVars'].keys()
for filename,value in record.namespace.items():
if len(dir_stack) > 1 and filename in sys_vars:
# From PTN003:
"""When reading a packed file, any system
variables encountered while the current data
folder is not the root should be ignored.
"""
continue
_check_filename(dir_stack, filename)
cwd[filename] = value
else: # WaveRecord
filename = record.wave['wave']['wave_header']['bname']
_check_filename(dir_stack, filename)
cwd[filename] = record
return filesystem
def _check_filename(dir_stack, filename):
cwd = dir_stack[-1][-1]
if filename in cwd:
raise ValueError('collision on name {} in {}'.format(
filename, ':'.join(d for d,cwd in dir_stack)))
def walk(filesystem, callback, dirpath=None):
"""Walk a packed experiment filesystem, operating on each key,value pair.
"""
if dirpath is None:
dirpath = []
for key,value in sorted((_bytes(k),v) for k,v in filesystem.items()):
callback(dirpath, key, value)
if isinstance(value, dict):
            walk(filesystem=value, callback=callback, dirpath=dirpath+[key])
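# Hedged usage sketch: 'experiment.pxp' is a placeholder path. load() returns
# both the flat record list and a nested dict mirroring Igor's data folders;
# walk() visits every (folder path, name, value) triple in that tree.
def _demo_walk_packed_experiment():
    records, filesystem = load('experiment.pxp')
    def show(dirpath, key, value):
        print(b':'.join(dirpath + [key]), type(value).__name__)
    walk(filesystem, show)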
import io as _io
from .. import LOG as _LOG
from ..binarywave import TYPE_TABLE as _TYPE_TABLE
from ..binarywave import NullStaticStringField as _NullStaticStringField
from ..binarywave import DynamicStringField as _DynamicStringField
from ..struct import Structure as _Structure
from ..struct import DynamicStructure as _DynamicStructure
from ..struct import Field as _Field
from ..struct import DynamicField as _DynamicField
from ..util import byte_order as _byte_order
from ..util import need_to_reorder_bytes as _need_to_reorder_bytes
from .base import Record
class ListedStaticStringField (_NullStaticStringField):
"""Handle string conversions for multi-count dynamic parents.
If a field belongs to a multi-count dynamic parent, the parent is
called multiple times to parse each count, and the field's
post-unpack hook gets called after the field is unpacked during
each iteration. This requires alternative logic for getting and
setting the string data. The actual string formatting code is not
affected.
"""
def post_unpack(self, parents, data):
parent_structure = parents[-1]
parent_data = self._get_structure_data(parents, data, parent_structure)
d = self._normalize_string(parent_data[-1][self.name])
parent_data[-1][self.name] = d
class ListedDynamicStrDataField (_DynamicStringField, ListedStaticStringField):
_size_field = 'strLen'
_null_terminated = False
def _get_size_data(self, parents, data):
parent_structure = parents[-1]
parent_data = self._get_structure_data(parents, data, parent_structure)
return parent_data[-1][self._size_field]
class DynamicVarDataField (_DynamicField):
def __init__(self, *args, **kwargs):
if 'array' not in kwargs:
kwargs['array'] = True
super(DynamicVarDataField, self).__init__(*args, **kwargs)
def pre_pack(self, parents, data):
raise NotImplementedError()
def post_unpack(self, parents, data):
var_structure = parents[-1]
var_data = self._get_structure_data(parents, data, var_structure)
data = var_data[self.name]
d = {}
for i,value in enumerate(data):
key,value = self._normalize_item(i, value)
d[key] = value
var_data[self.name] = d
def _normalize_item(self, index, value):
raise NotImplementedError()
class DynamicSysVarField (DynamicVarDataField):
def _normalize_item(self, index, value):
name = 'K{}'.format(index)
return (name, value)
class DynamicUserVarField (DynamicVarDataField):
def _normalize_item(self, index, value):
name = value['name']
value = value['num']
return (name, value)
class DynamicUserStrField (DynamicVarDataField):
def _normalize_item(self, index, value):
name = value['name']
value = value['data']
return (name, value)
class DynamicVarNumField (_DynamicField):
def post_unpack(self, parents, data):
parent_structure = parents[-1]
parent_data = self._get_structure_data(parents, data, parent_structure)
d = self._normalize_numeric_variable(parent_data[-1][self.name])
parent_data[-1][self.name] = d
def _normalize_numeric_variable(self, num_var):
t = _TYPE_TABLE[num_var['numType']]
if num_var['numType'] % 2: # complex number
return t(complex(num_var['realPart'], num_var['imagPart']))
else:
return t(num_var['realPart'])
class DynamicFormulaField (_DynamicStringField):
_size_field = 'formulaLen'
_null_terminated = True
# From Variables.h
VarHeader1 = _Structure( # `version` field pulled out into VariablesRecord
name='VarHeader1',
fields=[
_Field('h', 'numSysVars', help='Number of system variables (K0, K1, ...).'),
_Field('h', 'numUserVars', help='Number of user numeric variables -- may be zero.'),
_Field('h', 'numUserStrs', help='Number of user string variables -- may be zero.'),
])
# From Variables.h
VarHeader2 = _Structure( # `version` field pulled out into VariablesRecord
name='VarHeader2',
fields=[
_Field('h', 'numSysVars', help='Number of system variables (K0, K1, ...).'),
_Field('h', 'numUserVars', help='Number of user numeric variables -- may be zero.'),
_Field('h', 'numUserStrs', help='Number of user string variables -- may be zero.'),
_Field('h', 'numDependentVars', help='Number of dependent numeric variables -- may be zero.'),
_Field('h', 'numDependentStrs', help='Number of dependent string variables -- may be zero.'),
])
# From Variables.h
UserStrVarRec1 = _DynamicStructure(
name='UserStrVarRec1',
fields=[
ListedStaticStringField('c', 'name', help='Name of the string variable.', count=32),
_Field('h', 'strLen', help='The real size of the following array.'),
ListedDynamicStrDataField('c', 'data'),
])
# From Variables.h
UserStrVarRec2 = _DynamicStructure(
name='UserStrVarRec2',
fields=[
ListedStaticStringField('c', 'name', help='Name of the string variable.', count=32),
_Field('l', 'strLen', help='The real size of the following array.'),
_Field('c', 'data'),
])
# From Variables.h
VarNumRec = _Structure(
name='VarNumRec',
fields=[
_Field('h', 'numType', help='Type from binarywave.TYPE_TABLE'),
_Field('d', 'realPart', help='The real part of the number.'),
_Field('d', 'imagPart', help='The imag part if the number is complex.'),
_Field('l', 'reserved', help='Reserved - set to zero.'),
])
# From Variables.h
UserNumVarRec = _DynamicStructure(
name='UserNumVarRec',
fields=[
ListedStaticStringField('c', 'name', help='Name of the string variable.', count=32),
_Field('h', 'type', help='0 = string, 1 = numeric.'),
DynamicVarNumField(VarNumRec, 'num', help='Type and value of the variable if it is numeric. Not used for string.'),
])
# From Variables.h
UserDependentVarRec = _DynamicStructure(
name='UserDependentVarRec',
fields=[
ListedStaticStringField('c', 'name', help='Name of the string variable.', count=32),
_Field('h', 'type', help='0 = string, 1 = numeric.'),
_Field(VarNumRec, 'num', help='Type and value of the variable if it is numeric. Not used for string.'),
_Field('h', 'formulaLen', help='The length of the dependency formula.'),
DynamicFormulaField('c', 'formula', help='Start of the dependency formula. A C string including null terminator.'),
])
class DynamicVarHeaderField (_DynamicField):
def pre_pack(self, parents, data):
raise NotImplementedError()
def post_unpack(self, parents, data):
var_structure = parents[-1]
var_data = self._get_structure_data(
parents, data, var_structure)
var_header_structure = self.format
data = var_data['var_header']
sys_vars_field = var_structure.get_field('sysVars')
sys_vars_field.count = data['numSysVars']
sys_vars_field.setup()
user_vars_field = var_structure.get_field('userVars')
user_vars_field.count = data['numUserVars']
user_vars_field.setup()
user_strs_field = var_structure.get_field('userStrs')
user_strs_field.count = data['numUserStrs']
user_strs_field.setup()
if 'numDependentVars' in data:
dependent_vars_field = var_structure.get_field('dependentVars')
dependent_vars_field.count = data['numDependentVars']
dependent_vars_field.setup()
dependent_strs_field = var_structure.get_field('dependentStrs')
dependent_strs_field.count = data['numDependentStrs']
dependent_strs_field.setup()
var_structure.setup()
Variables1 = _DynamicStructure(
name='Variables1',
fields=[
DynamicVarHeaderField(VarHeader1, 'var_header', help='Variables header'),
DynamicSysVarField('f', 'sysVars', help='System variables', count=0),
DynamicUserVarField(UserNumVarRec, 'userVars', help='User numeric variables', count=0),
DynamicUserStrField(UserStrVarRec1, 'userStrs', help='User string variables', count=0),
])
Variables2 = _DynamicStructure(
name='Variables2',
fields=[
DynamicVarHeaderField(VarHeader2, 'var_header', help='Variables header'),
DynamicSysVarField('f', 'sysVars', help='System variables', count=0),
DynamicUserVarField(UserNumVarRec, 'userVars', help='User numeric variables', count=0),
DynamicUserStrField(UserStrVarRec2, 'userStrs', help='User string variables', count=0),
_Field(UserDependentVarRec, 'dependentVars', help='Dependent numeric variables.', count=0, array=True),
_Field(UserDependentVarRec, 'dependentStrs', help='Dependent string variables.', count=0, array=True),
])
class DynamicVersionField (_DynamicField):
def pre_pack(self, parents, byte_order):
raise NotImplementedError()
def post_unpack(self, parents, data):
variables_structure = parents[-1]
variables_data = self._get_structure_data(
parents, data, variables_structure)
version = variables_data['version']
if variables_structure.byte_order in '@=':
need_to_reorder_bytes = _need_to_reorder_bytes(version)
variables_structure.byte_order = _byte_order(need_to_reorder_bytes)
_LOG.debug(
'get byte order from version: {} (reorder? {})'.format(
variables_structure.byte_order, need_to_reorder_bytes))
else:
need_to_reorder_bytes = False
old_format = variables_structure.fields[-1].format
if version == 1:
variables_structure.fields[-1].format = Variables1
elif version == 2:
variables_structure.fields[-1].format = Variables2
elif not need_to_reorder_bytes:
raise ValueError(
'invalid variables record version: {}'.format(version))
if variables_structure.fields[-1].format != old_format:
_LOG.debug('change variables record from {} to {}'.format(
old_format, variables_structure.fields[-1].format))
variables_structure.setup()
elif need_to_reorder_bytes:
variables_structure.setup()
# we might need to unpack again with the new byte order
return need_to_reorder_bytes
VariablesRecordStructure = _DynamicStructure(
name='VariablesRecord',
fields=[
DynamicVersionField('h', 'version', help='Version number for this header.'),
_Field(Variables1, 'variables', help='The rest of the variables data.'),
])
class VariablesRecord (Record):
def __init__(self, *args, **kwargs):
super(VariablesRecord, self).__init__(*args, **kwargs)
# self.header['version'] # record version always 0?
VariablesRecordStructure.byte_order = '='
VariablesRecordStructure.setup()
stream = _io.BytesIO(bytes(self.data))
self.variables = VariablesRecordStructure.unpack_stream(stream)
self.namespace = {}
for key,value in self.variables['variables'].items():
if key not in ['var_header']:
_LOG.debug('update namespace {} with {} for {}'.format(
self.namespace, value, key))
self.namespace.update(value) | /sci_memex-0.0.3-py3-none-any.whl/memex/translators/igor/record/variables.py | 0.602646 | 0.30005 | variables.py | pypi |
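# Illustrative sketch (not part of the igor reader above): the version-dispatch idea
# used by DynamicVersionField, reduced to plain struct unpacking. Only the leading
# signed 16-bit 'version' field is taken from the code above; everything else here
# is a simplified assumption for demonstration, not the real igor binary layout.
import struct

def peek_variables_version(data: bytes, byte_order: str = "=") -> int:
    (version,) = struct.unpack_from(byte_order + "h", data, 0)
    if version not in (1, 2):
        raise ValueError("invalid variables record version: {}".format(version))
    return version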
from typing import Container
import docker
import logging
import os
import typer
import subprocess
import re
from collections.abc import Mapping
import sys
_LOGGER = logging.getLogger(__name__)
def port_mapping(mapping: str, public: bool) -> Mapping:
m = re.fullmatch("^(([0-9]{1,5})(?:/(?:tcp|udp))?):([0-9]{1,5})$", mapping)
if not m:
typer.secho(
f"Invalid port specification '{mapping}'",
fg=typer.colors.RED,
)
raise typer.Exit(code=1)
container = m.group(1)
srcPort = int(m.group(2))
hostPort = int(m.group(3))
if srcPort < 0 or srcPort > 65535:
typer.secho(
f"Invalid port number '{srcPort}'",
fg=typer.colors.RED,
)
raise typer.Exit(code=1)
if hostPort < 0 or hostPort > 65535:
typer.secho(
f"Invalid port number '{srcPort}'",
fg=typer.colors.RED,
)
raise typer.Exit(code=1)
host = "0.0.0.0" if public else "127.0.0.1"
return {container: (host, hostPort) if hostPort != 0 else None}
def port_map(portList: list, public: bool) -> dict:
portMapping = {}
for p in portList:
portMapping.update(port_mapping(p, public))
return portMapping
def port_env_mapping(mapping: str) -> str:
m = re.fullmatch("^(([0-9]{1,5})(?:/(?:tcp|udp))?):([0-9]{1,5})$", mapping)
if not m:
typer.secho(
f"Invalid port specification '{mapping}'",
fg=typer.colors.RED,
)
raise typer.Exit(code=1)
srcPort = int(m.group(2))
hostPort = int(m.group(3))
if srcPort < 0 or srcPort > 65535:
typer.secho(
f"Invalid port number '{srcPort}'",
fg=typer.colors.RED,
)
raise typer.Exit(code=1)
if hostPort < 0 or hostPort > 65535:
typer.secho(
f"Invalid port number '{srcPort}'",
fg=typer.colors.RED,
)
raise typer.Exit(code=1)
return f"PORT_{srcPort}={hostPort}"
def port_env_map(portList: list) -> list:
return [port_env_mapping(p) for p in portList]
def fetch_latest(client: docker.client, repository, **kwargs):
_LOGGER.info(
f'pulling latest version of the "{repository}" docker image, this may take a while...'
)
oldImage = None
try:
oldImage = client.images.get(repository)
except:
pass
image = client.images.pull(repository)
_LOGGER.info("Done pulling the latest docker image")
if oldImage and oldImage.id != image.id:
oldImage.remove()
return image
def create_container(client: docker.client, course: dict, **kwargs):
try:
_LOGGER.info(f"checking if image {course['image']} exists locally...")
i = client.images.get(course["image"])
_LOGGER.info("Image exists locally.")
except docker.errors.ImageNotFound as e:
_LOGGER.info("Image is not found, start will take a while to pull first.")
typer.secho(
f"Course image needs to be downloaded, this may take a while...",
fg=typer.colors.YELLOW,
)
_LOGGER.info(f"starting `{course['image']}` container as `{course['name']}`...")
try:
container = client.containers.run(
course["image"],
ports=port_map(course["ports"], course.get("public", False)),
environment=port_env_map(course["ports"]),
name=f'scioer_{course["name"]}',
hostname=course["name"],
tty=True,
detach=True,
volumes=[f"{course['volume']}:/course"],
)
except docker.errors.ImageNotFound as e:
_LOGGER.error("Image not found.", e)
typer.secho(
f"Course image not found, check the config file that the image name is correct.",
fg=typer.colors.RED,
)
sys.exit(1)
except docker.errors.APIError as e:
_LOGGER.debug(f"Failed to start the container: {e}")
if e.status_code == 409:
typer.secho(
f"Container name already in use. Please delete the container with the name `scioer_{course['name']}` before trying again.",
fg=typer.colors.RED,
)
sys.exit(2)
elif e.status_code == 404:
typer.secho(
f"Course image not found, check the config file that the image name is correct.",
fg=typer.colors.RED,
)
sys.exit(3)
typer.secho(
f"Unknown error: {e.explanation}, aborting...",
fg=typer.colors.RED,
)
sys.exit(4)
return container
def start_container(client: docker.client, course: dict, **kwargs):
container = None
try:
container = client.containers.get(f'scioer_{course["name"]}')
_LOGGER.info(f'Container for `scioer_{course["name"]}` already exists.')
if container.status == "running":
_LOGGER.info("Container is already running, not restarting.")
else:
_LOGGER.info("Restarting container")
container.start()
_LOGGER.info("Successfully started")
except:
_LOGGER.info(f'Container `scioer_{course["name"]}` does not exist, starting...')
container = create_container(client, course)
return container
def stop_container(client: docker.client, courseName: str, keep: bool, **kwargs):
_LOGGER.info("stopping docker container...")
try:
container = client.containers.get(f"scioer_{courseName}")
except:
typer.secho(
f"Container for course '{courseName}' is not running",
fg=typer.colors.YELLOW,
)
return
container.stop()
typer.secho(
f"Container for course '{courseName}' is has been stopped",
fg=typer.colors.GREEN,
)
if not keep:
delete_container(container)
def attach(client: docker.client, courseName: str, **kwargs):
_LOGGER.info("attaching to docker container...")
try:
container = client.containers.get(f"scioer_{courseName}")
except:
typer.secho(
f"Container for course '{courseName}' is not running",
fg=typer.colors.YELLOW,
)
return
os.system(f"docker exec -it scioer_{courseName} cat /scripts/motd.txt")
typer.echo("Starting interactive shell in the container, type `exit` to quit.")
os.system(f"docker exec -it scioer_{courseName} bash --login")
def delete_container(container, **kwargs):
_LOGGER.info("Deleting container...")
container.remove()
def setup():
client = None
try:
client = docker.from_env()
except:
typer.secho(
"failed to connect to docker, check that Docker is running on the host.",
fg=typer.colors.RED,
)
raise typer.Exit(code=1)
return client | /sci_oer-1.3.0-py3-none-any.whl/scioer/docker.py | 0.540924 | 0.181191 | docker.py | pypi |
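# Illustrative usage sketch (not part of the package): how the port helpers defined in
# scioer/docker.py above behave for a few specifications. The expected values in the
# comments are derived from the parsing logic above; a host port of 0 means "publish on
# a random port" and therefore maps to None.
from scioer.docker import port_map, port_env_map

specs = ["3000:3000", "22:2222", "8888:0"]
print(port_map(specs, public=False))
# {'3000': ('127.0.0.1', 3000), '22': ('127.0.0.1', 2222), '8888': None}
print(port_env_map(specs))
# ['PORT_3000=3000', 'PORT_22=2222', 'PORT_8888=0']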
import typer
from collections.abc import Mapping
import click
import scioer.config.load as load
import scioer.config.parse as parser
import os
import re
from typing import Optional
from pathlib import Path
import logging
try:
import readline
except:
import sys
if sys.platform == "win32" or sys.platform == "cygwin":
try:
from pyreadline3 import Readline
except:
pass
__version__ = "UNKNOWN"
try:
from scioer.__version__ import __version__
except:
pass
import scioer.docker as docker
_LOGGER = logging.getLogger(__name__)
app = typer.Typer(
name="Self Contained Interactive Open Educational Resource Helper",
help=""" A CLI tool to help configure, start, stop course resources.
\b
Common usage commands:
1. `scioer config`
.... fill in the form
2. `scioer start <course>`
3. `scioer shell <course>`
4. `scioer stop <course>`
\f
""",
no_args_is_help=True,
context_settings={"help_option_names": ["-h", "--help"]},
)
def conf_callback(ctx: typer.Context, param: typer.CallbackParam, value: Path):
if value:
value = os.path.realpath(os.path.expanduser(str(value)))
configFiles = load.get_config_files(value)
config = load.filter_config_files(configFiles)
if not value and not config:
config = configFiles[0]
_LOGGER.info(f"No config file found, using default: {config}")
elif value and value != config:
config = value
_LOGGER.info(f"Config file does not exist yet, using anyway: {config}")
if config:
_LOGGER.info(f"Loading from config file: {config}")
data = parser.load_config_file(config)
ctx.default_map = ctx.default_map or {} # Initialize the default map
ctx.default_map.update(data) # Merge the config dict into default_map
ctx.obj = ctx.obj or {} # Initialize the object
ctx.obj.update(
{"config_file": config, "config": data}
) # Merge the config dict into object
return config
def version_callback(value: bool):
if value:
typer.echo(f"scioer CLI Version: {__version__}")
raise typer.Exit()
configOption = typer.Option(
None,
"--config",
"-c",
metavar="FILE",
dir_okay=False,
resolve_path=False,
readable=True,
writable=True,
callback=conf_callback,
is_eager=True,
help="Path to the yaml config file",
)
courseNameArgument = typer.Argument(
None, metavar="COURSE_NAME", help="The name of the course"
)
def load_course(config: Mapping, courseName: str, ask: bool = True):
course = config.get(courseName, {})
while ask and not course:
if courseName:
typer.secho(
f'Course "{courseName} is not found. use `scioer config` if you want to create it.',
fg=typer.colors.YELLOW,
)
courses = [k for k in config.keys() if isinstance(config[k], dict)]
courseName = click.prompt(
"Course not found, did you mean one of:", type=click.Choice(courses)
)
course = config.get(courseName, {})
if course:
course["name"] = courseName
_LOGGER.info(f"course content: {course}")
return course
def print_post_start_help(courseName):
typer.secho(f"Started the {courseName} course resource", fg=typer.colors.GREEN)
typer.secho(
f"Login for the wiki: '[email protected]' 'password'", fg=typer.colors.GREEN
)
typer.echo("-----")
typer.secho(f"To stop the course: scioer stop {courseName}", fg=typer.colors.YELLOW)
typer.secho(
f"To get a shell for the course: scioer shell {courseName}",
fg=typer.colors.YELLOW,
)
typer.secho(
f"To re-start the course: scioer start {courseName}", fg=typer.colors.YELLOW
)
typer.secho(
f"To get information on the courses: scioer status", fg=typer.colors.YELLOW
)
@app.command()
def start(
ctx: typer.Context,
name: Optional[str] = courseNameArgument,
pull: bool = False,
configFile: Optional[Path] = configOption,
):
"""Start a oer container"""
client = docker.setup()
config = ctx.default_map
if not name and len(config.keys()) == 1:
name = list(config.keys())[0]
course = load_course(config, name)
if course.get("auto_pull", False) or pull:
typer.secho(
f"Pulling the latest version of the {course['name']}",
fg=typer.colors.GREEN,
)
typer.secho(
f"This may take a while...",
fg=typer.colors.YELLOW,
)
docker.fetch_latest(client, course["image"])
typer.secho("Starting...", fg=typer.colors.GREEN)
docker.start_container(client, course)
print_post_start_help(course["name"])
@app.command()
def stop(
ctx: typer.Context,
name: Optional[str] = courseNameArgument,
keep: Optional[bool] = typer.Option(False, "--no-remove", "-k"),
configFile: Optional[Path] = configOption,
):
"""Stop a running course container"""
client = docker.setup()
config = ctx.default_map
if not name and len(config.keys()) == 1:
name = list(config.keys())[0]
course = load_course(config, name, ask=False)
if course:
typer.secho(
f"Stopping course container, this make take a couple seconds...",
fg=typer.colors.RED,
)
docker.stop_container(client, course["name"], keep)
else:
typer.secho(f'Course "{name}" is not running.', fg=typer.colors.YELLOW)
@app.command()
def shell(
ctx: typer.Context,
name: str = courseNameArgument,
configFile: Optional[Path] = configOption,
):
"""Start a shell in a running course container"""
client = docker.setup()
config = ctx.default_map
if not name and len(config.keys()) == 1:
name = list(config.keys())[0]
course = load_course(config, name, ask=False)
if course:
docker.attach(client, course["name"])
else:
typer.secho(f'Course "{name}" is not running.', fg=typer.colors.YELLOW)
def print_container(container):
indent = 2
print(f"Course: {container.name[7:]} ({container.status})")
print(f'{" " * indent }Ports:')
ports = list(container.ports.items())
if ports:
for port in ports[:-1]:
print(f'{" " * indent }├── {port[1][0]["HostPort"]} -> {port[0][:-4]}')
port = ports[-1]
print(f'{" " * indent }└── {port[1][0]["HostPort"]} -> {port[0][:-4]}')
print(f'{" " * indent }Volumes:')
volumes = [v for v in container.attrs["Mounts"] if v["Type"] == "bind"]
home = os.path.expanduser("~")
if volumes:
for volume in volumes[:-1]:
hostPath = volume["Source"].replace(home, "~")
print(f'{" " * indent }├── {hostPath} as {volume["Destination"]}')
volume = volumes[-1]
hostPath = volume["Source"].replace(home, "~")
print(f'{" " * indent }└── {hostPath} as {volume["Destination"]}')
@app.command()
def status(
ctx: typer.Context,
configFile: Optional[Path] = configOption,
):
"""Prints all the information about the running container"""
client = docker.setup()
config = ctx.default_map
config_file = ctx.obj["config_file"]
_LOGGER.info(f"Config contents: {config}")
names = list(config.keys())
home = os.path.expanduser("~")
configPath = config_file.replace(home, "~")
print(f"Config file: {configPath}")
# containers = client.containers.list(all=True)
# containers = [c for c in containers if c.name.replace('scioer_', '') in names ]
filtered = []
notRunning = []
for n in names:
try:
c = client.containers.get(f"scioer_{n}")
_LOGGER.debug(f"Container information for course {n}: {c.attrs}")
filtered.append(c)
except:
notRunning.append(n)
for c in filtered:
print_container(c)
for n in notRunning:
print(f"Course: {n} (Not Running)")
def prompt_port(message: str, default: int) -> int:
value = typer.prompt(
message,
default=default,
type=int,
)
while value < 0 or value > 65535:
typer.secho(
f"`{value}` is not a valid port number.",
fg=typer.colors.RED,
)
value = typer.prompt(
message,
default=default,
type=int,
)
return value
def port_mapping(mapping: str) -> Mapping:
m = re.fullmatch("^(([0-9]{1,5})(?:/(?:tcp|udp))?)(?::([0-9]{1,5}))?$", mapping)
if not m:
return {}
container = m.group(1)
srcPort = int(m.group(2))
hostPort = int(m.group(3) or m.group(2))
if srcPort < 0 or srcPort > 65535 or hostPort < 0 or hostPort > 65535:
typer.secho(
"Invalid port number.",
fg=typer.colors.RED,
)
return {}
return {container: hostPort}
def prompt_custom_ports() -> Mapping:
value = typer.prompt(
"Custom ports to expose, in the form of 'container[:host]', or no input to skip ",
default="",
value_proc=lambda v: v.strip(),
type=str,
)
mapping = port_mapping(value)
if value != "" and not mapping:
typer.secho(
"Invalid port specification, please try again.",
fg=typer.colors.RED,
)
mappings = mapping
while value != "":
value = typer.prompt(
"Custom ports to expose, in the form of 'container:host', or no input to skip ",
default="",
value_proc=lambda v: v.strip(),
type=str,
)
mapping = port_mapping(value)
if value != "" and not mapping:
typer.secho(
"Invalid port specification, please try again.",
fg=typer.colors.RED,
)
continue
if mapping:
mappings = {**mappings, **mapping}
return mappings
@app.command()
def config(
ctx: typer.Context,
configFile: Optional[Path] = configOption,
):
"""Setup a new course resource, or edit an existing one"""
if not configFile:
typer.echo(
f"Config file not found, or not specified. Make sure that file exists or use `--config=FILE` to specify the file"
)
raise typer.Exit(1)
config = ctx.default_map
_LOGGER.info(f"config contents: {config}")
course_name = typer.prompt("What's the name of the course?")
safe_course_name = "".join(
x for x in course_name.replace(" ", "_") if x.isalnum() or x == "_"
)
default_image = "scioer/oo-java:W23"
default_volume = os.path.join(os.path.expanduser("~/Desktop"), safe_course_name)
course = config.get(safe_course_name, {})
docker_image = typer.prompt(
"What docker image does the course use?",
default=course.get("image", default_image),
)
auto_pull = typer.confirm(
"Automatically fetch new versions", default=course.get("auto_pull", False)
)
course_storage = typer.prompt(
"Where should the files for the course be stored?",
default=course.get("volume", default_volume),
)
useDefaults = typer.confirm("Use the default ports", default=True)
mappings = {
"3000": 3000,
"8888": 8888,
"8000": 8000,
"22": 2222,
}
if not useDefaults:
wiki_port = prompt_port(
"Wiki port to expose, (0 to publish a random port)",
3000,
)
jupyter_port = prompt_port(
"Jupyter notebooks port to expose, (0 to publish a random port)",
8888,
)
lectures_port = prompt_port(
"Lectures port to expose, (0 to publish a random port)",
8000,
)
ssh_port = prompt_port(
"ssh port to expose, (0 to publish a random port)",
2222,
)
customPorts = prompt_custom_ports()
mappings = {
"3000": wiki_port,
"8888": jupyter_port,
"8000": lectures_port,
"22": ssh_port,
**customPorts,
}
ports = [f"{k}:{v}" for k, v in mappings.items()]
config[safe_course_name] = {
"image": docker_image,
"volume": os.path.realpath(os.path.expanduser(course_storage)),
"ports": ports,
"auto_pull": auto_pull,
"public": False,
}
parser.save_config_file(configFile, config)
@app.callback()
def setup(
verbose: Optional[bool] = typer.Option(False, "--verbose", "-v"),
debug: Optional[bool] = typer.Option(False, "--debug"),
version: Optional[bool] = typer.Option(
None, "--version", "-V", callback=version_callback, is_eager=True
),
):
level = logging.WARNING
if debug:
level = logging.DEBUG
elif verbose:
level = logging.INFO
logging.basicConfig(level=level)
if __name__ == "__main__":
app() | /sci_oer-1.3.0-py3-none-any.whl/scioer/cli.py | 0.496582 | 0.158435 | cli.py | pypi |
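# Illustrative sketch (not part of the package): the shape of the configuration mapping
# that `scioer config` builds before handing it to parser.save_config_file(). The course
# key, image tag, and paths below are made-up examples; the field names come from the
# config() command above.
example_config = {
    "my_course": {
        "image": "scioer/oo-java:W23",
        "volume": "/home/user/Desktop/my_course",
        "ports": ["3000:3000", "8888:8888", "8000:8000", "22:2222"],
        "auto_pull": False,
        "public": False,
    }
}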
PALETTES = {
"npg_nrc": {
"Cinnabar": "#E64B35",
"Shakespeare": "#4DBBD5",
"PersianGreen": "#00A087",
"Chambray": "#3C5488",
"Apricot": "#F39B7F",
"WildBlueYonder": "#8491B4",
"MonteCarlo": "#91D1C2",
"Monza": "#DC0000",
"RomanCoffee": "#7E6148",
"Sandrift": "#B09C85"
},
"aaas": {
"Chambray": "#3B4992",
"Red": "#EE0000",
"FunGreen": "#008B45",
"HoneyFlower": "#631879",
"Teal": "#008280",
"Monza": "#BB0021",
"ButterflyBush": "#5F559B",
"FreshEggplant": "#A20056",
"Stack": "#808180",
"CodGray": "#1B1919"
},
"nejm": {
"TallPoppy": "#BC3C29",
"DeepCerulean": "#0072B5",
"Zest": "#E18727",
"Eucalyptus": "#20854E",
"WildBlueYonder": "#7876B1",
"Gothic": "#6F99AD",
"Salomie": "#FFDC91",
"FrenchRose": "#EE4C97"
},
"lancet_lanonc": {
"CongressBlue": "#00468B",
"Red": "#ED0000",
"Apple": "#42B540",
"BondiBlue": "#0099B4",
"TrendyPink": "#925E9F",
"MonaLisa": "#FDAF91",
"Carmine": "#AD002A",
"Edward": "#ADB6B6",
"CodGray": "#1B1919"
},
"jama": {
"Limed Spruce": "#374E55",
"Anzac": "#DF8F44",
"Cerulean": "#00A1D5",
"Apple Blossom": "#B24745",
"Acapulco": "#79AF97",
"Kimberly": "#6A6599",
"Makara": "#80796B"
},
"jco": {
"Lochmara": "#0073C2",
"Corn": "#EFC000",
"Gray": "#868686",
"ChestnutRose": "#CD534C",
"Danube": "#7AA6DC",
"RegalBlue": "#003C67",
"Olive": "#8F7700",
"MineShaft": "#3B3B3B",
"WellRead": "#A73030",
"KashmirBlue": "#4A6990"
},
"ucscgb": {
"chr5": "#FF0000",
"chr8": "#FF9900",
"chr9": "#FFCC00",
"chr12": "#00FF00",
"chr15": "#6699FF",
"chr20": "#CC33FF",
"chr3": "#99991E",
"chrX": "#999999",
"chr6": "#FF00CC",
"chr4": "#CC0000",
"chr7": "#FFCCCC",
"chr10": "#FFFF00",
"chr11": "#CCFF00",
"chr13": "#358000",
"chr14": "#0000CC",
"chr16": "#99CCFF",
"chr17": "#00FFFF",
"chr18": "#CCFFFF",
"chr19": "#9900CC",
"chr21": "#CC99FF",
"chr1": "#996600",
"chr2": "#666600",
"chr22": "#666666",
"chrY": "#CCCCCC",
"chrUn": "#79CC3D",
"chrM": "#CCCC99"
},
"d3_category10": {
"Matisse": "#1F77B4",
"Flamenco": "#FF7F0E",
"ForestGreen": "#2CA02C",
"Punch": "#D62728",
"Wisteria": "#9467BD",
"SpicyMix": "#8C564B",
"Orchid": "#E377C2",
"Gray": "#7F7F7F",
"KeyLimePie": "#BCBD22",
"Java": "#17BECF"
},
"d3_category20": {
"Matisse": "#1F77B4",
"Flamenco": "#FF7F0E",
"ForestGreen": "#2CA02C",
"Punch": "#D62728",
"Wisteria": "#9467BD",
"SpicyMix": "#8C564B",
"Orchid": "#E377C2",
"Gray": "#7F7F7F",
"KeyLimePie": "#BCBD22",
"Java": "#17BECF",
"Spindle": "#AEC7E8",
"MaC": "#FFBB78",
"Feijoa": "#98DF8A",
"MonaLisa": "#FF9896",
"LavenderGray": "#C5B0D5",
"Quicksand": "#C49C94",
"Chantilly": "#F7B6D2",
"Silver": "#C7C7C7",
"Deco": "#DBDB8D",
"RegentStBlue": "#9EDAE5"
},
"d3_category20b": {
"EastBay": "#393B79",
"ChaletGreen": "#637939",
"Pesto": "#8C6D31",
"Lotus": "#843C39",
"CannonPink": "#7B4173",
"ButterflyBush": "#5254A3",
"ChelseaCucumber": "#8CA252",
"Tussock": "#BD9E39",
"AppleBlossom": "#AD494A",
"Tapestry": "#A55194",
"MoodyBlue": "#6B6ECF",
"WildWillow": "#B5CF6B",
"Ronchi": "#E7BA52",
"ChestnutRose": "#D6616B",
"Hopbush": "#CE6DBD",
"ColdPurple": "#9C9EDE",
"Deco": "#CEDB9C",
"Putty": "#E7CB94",
"TonysPink": "#E7969C",
"LightOrchid": "#DE9ED6"
},
"d3_category20c": {
"BostonBlue": "#3182BD",
"Christine": "#E6550D",
"SeaGreen": "#31A354",
"Deluge": "#756BB1",
"DoveGray": "#636363",
"Danube": "#6BAED6",
"NeonCarrot": "#FD8D3C",
"DeYork": "#74C476",
"BlueBell": "#9E9AC8",
"DustyGray": "#969696",
"RegentStBlue": "#9ECAE1",
"Koromiko": "#FDAE6B",
"MossGreen": "#A1D99B",
"LavenderGray": "#BCBDDC",
"Silver": "#BDBDBD",
"Spindle": "#C6DBEF",
"Flesh": "#FDD0A2",
"Celadon": "#C7E9C0",
"Snuff": "#DADAEB",
"Alto": "#D9D9D9"
},
"igv": {
"chr1": "#5050FF",
"chr2": "#CE3D32",
"chr3": "#749B58",
"chr4": "#F0E685",
"chr5": "#466983",
"chr6": "#BA6338",
"chr7": "#5DB1DD",
"chr8": "#802268",
"chr9": "#6BD76B",
"chr10": "#D595A7",
"chr11": "#924822",
"chr12": "#837B8D",
"chr13": "#C75127",
"chr14": "#D58F5C",
"chr15": "#7A65A5",
"chr16": "#E4AF69",
"chr17": "#3B1B53",
"chr18": "#CDDEB7",
"chr19": "#612A79",
"chr20": "#AE1F63",
"chr21": "#E7C76F",
"chr22": "#5A655E",
"chrX": "#CC9900",
"chrY": "#99CC00",
"chrUn": "#A9A9A9",
"chr23": "#CC9900",
"chr24": "#99CC00",
"chr25": "#33CC00",
"chr26": "#00CC33",
"chr27": "#00CC99",
"chr28": "#0099CC",
"chr29": "#0A47FF",
"chr30": "#4775FF",
"chr31": "#FFC20A",
"chr32": "#FFD147",
"chr33": "#990033",
"chr34": "#991A00",
"chr35": "#996600",
"chr36": "#809900",
"chr37": "#339900",
"chr38": "#00991A",
"chr39": "#009966",
"chr40": "#008099",
"chr41": "#003399",
"chr42": "#1A0099",
"chr43": "#660099",
"chr44": "#990080",
"chr45": "#D60047",
"chr46": "#FF1463",
"chr47": "#00D68F",
"chr48": "#14FFB1"
},
"igv_alternating": {
"Indigo": "#5773CC",
"SelectiveYellow": "#FFB900"
},
"locuszoom": {
"0.8to1.0": "#D43F3A",
"0.6to0.8": "#EEA236",
"0.4to0.6": "#5CB85C",
"0.2to0.4": "#46B8DA",
"0.0to0.2": "#357EBD",
"LDRefVar": "#9632B8",
"nodata": "#B8B8B8"
},
"uchicago": {
"Maroon": "#800000",
"DarkGray": "#767676",
"Yellow": "#FFA319",
"LightGreen": "#8A9045",
"Blue": "#155F83",
"Orange": "#C16622",
"Red": "#8F3931",
"DarkGreen": "#58593F",
"Violet": "#350E20"
},
"uchicago_light": {
"Maroon": "#800000",
"LightGray": "#D6D6CE",
"Yellow": "#FFB547",
"LightGreen": "#ADB17D",
"Blue": "#5B8FA8",
"Orange": "#D49464",
"Red": "#B1746F",
"DarkGreen": "#8A8B79",
"Violet": "#725663"
},
"uchicago_dark": {
"Maroon": "#800000",
"DarkGray": "#767676",
"Yellow": "#CC8214",
"LightGreen": "#616530",
"Blue": "#0F425C",
"Orange": "#9A5324",
"Red": "#642822",
"DarkGreen": "#3E3E23",
"Violet": "#350E20"
},
"cosmic_hallmarks_dark": {
"Invasion and Metastasis": "#171717",
"Escaping Immunic Response to Cancer": "#7D0226",
"Change of Cellular Energetics": "#300049",
"Cell Replicative Immortality": "#165459",
"Suppression of Growth": "#3F2327",
"Genome Instability and Mutations": "#0B1948",
"Angiogenesis": "#E71012",
"Escaping Programmed Cell Death": "#555555",
"Proliferative Signaling": "#193006",
"Tumour Promoting Inflammation": "#A8450C"
},
"cosmic_hallmarks_light": {
"Invasion and Metastasis": "#2E2A2B",
"Escaping Immunic Response to Cancer": "#CF4E9C",
"Change of Cellular Energetics": "#8C57A2",
"Cell Replicative Immortality": "#358DB9",
"Suppression of Growth": "#82581F",
"Genome Instability and Mutations": "#2F509E",
"Angiogenesis": "#E5614C",
"Escaping Programmed Cell Death": "#97A1A7",
"Proliferative Signaling": "#3DA873",
"Tumour Promoting Inflammation": "#DC9445"
},
"cosmic_signature_substitutions": {
"C>A": "#5ABCEB",
"C>G": "#050708",
"C>T": "#D33C32",
"T>A": "#CBCACB",
"T>C": "#ABCD72",
"T>G": "#E7C9C6"
},
"simpsons_springfield": {
"HomerYellow": "#FED439",
"HomerBlue": "#709AE1",
"HomerGrey": "#8A9197",
"HomerBrown": "#D2AF81",
"BartOrange": "#FD7446",
"MargeGreen": "#D5E4A2",
"MargeBlue": "#197EC0",
"LisaOrange": "#F05C3B",
"NedGreen": "#46732E",
"MaggieBlue": "#71D0F5",
"BurnsPurple": "#370335",
"BurnsGreen": "#075149",
"DuffRed": "#C80813",
"KentRed": "#91331F",
"BobGreen": "#1A9993",
"FrinkPink": "#FD8CC1"
},
"futurama_planetexpress": {
"FryOrange": "#FF6F00",
"FryRed": "#C71000",
"FryBlue": "#008EA0",
"LeelaPurple": "#8A4198",
"BenderIron": "#5A9599",
"ZoidbergRed": "#FF6348",
"ZoidbergBlue": "#84D7E1",
"AmyPink": "#FF95A8",
"HermesGreen": "#3D3B25",
"ProfessorBlue": "#ADE2D0",
"ScruffyGreen": "#1A5354",
"LeelaGrey": "#3F4041"
},
"rickandmorty_schwifty": {
"MortyYellow": "#FAFD7C",
"MortyBrown": "#82491E",
"MortyBlue": "#24325F",
"RickBlue": "#B7E4F9",
"BethRed": "#FB6467",
"JerryGreen": "#526E2D",
"SummerPink": "#E762D7",
"SummerOrange": "#E89242",
"BethYellow": "#FAE48B",
"RickGreen": "#A6EEE6",
"RickBrown": "#917C5D",
"MeeseeksBlue": "#69C8EC"
},
"startrek_uniform": {
"Engineering": "#CC0C00",
"Sciences": "#5C88DA",
"Senior": "#84BD00",
"Command": "#FFCD00",
"Teal": "#7C878E",
"Cerulean": "#00B5E2",
"Jade": "#00AF66"
},
"tron_legacy": {
"BlackGuard": "#FF410D",
"Sam": "#6EE2FF",
"Clu": "#F7C530",
"Underclass": "#95CC5E",
"KevinFlynn": "#D0DFE6",
"CluFollower": "#F79D1E",
"Underclass2": "#748AA6"
},
"gsea": {
"Purple": "#4500AD",
"DarkBlue": "#2700D1",
"RoyalBlue": "#6B58EF",
"Malibu": "#8888FF",
"Melrose": "#C7C1FF",
"Fog": "#D5D5FF",
"CottonCandy": "#FFC0E5",
"VividTangerine": "#FF8989",
"BrinkPink": "#FF7080",
"Persimmon": "#FF5A5A",
"Flamingo": "#EF4040",
"GuardsmanRed": "#D60C00"
},
"material_red": {
"Red50": "#FFEBEE",
"Red100": "#FFCDD2",
"Red200": "#EF9A9A",
"Red300": "#E57373",
"Red400": "#EF5350",
"Red500": "#F44336",
"Red600": "#E53935",
"Red700": "#D32F2F",
"Red800": "#C62828",
"Red900": "#B71C1C"
},
"material_pink": {
"Pink50": "#FCE4EC",
"Pink100": "#F8BBD0",
"Pink200": "#F48FB1",
"Pink300": "#F06292",
"Pink400": "#EC407A",
"Pink500": "#E91E63",
"Pink600": "#D81B60",
"Pink700": "#C2185B",
"Pink800": "#AD1457",
"Pink900": "#880E4F"
},
"material_purple": {
"Purple50": "#F3E5F5",
"Purple100": "#E1BEE7",
"Purple200": "#CE93D8",
"Purple300": "#BA68C8",
"Purple400": "#AB47BC",
"Purple500": "#9C27B0",
"Purple600": "#8E24AA",
"Purple700": "#7B1FA2",
"Purple800": "#6A1B9A",
"Purple900": "#4A148C"
},
"material_indigo": {
"Indigo50": "#E8EAF6",
"Indigo100": "#C5CAE9",
"Indigo200": "#9FA8DA",
"Indigo300": "#7986CB",
"Indigo400": "#5C6BC0",
"Indigo500": "#3F51B5",
"Indigo600": "#3949AB",
"Indigo700": "#303F9F",
"Indigo800": "#283593",
"Indigo900": "#1A237E"
},
"material_blue": {
"Blue50": "#E3F2FD",
"Blue100": "#BBDEFB",
"Blue200": "#90CAF9",
"Blue300": "#64B5F6",
"Blue400": "#42A5F5",
"Blue500": "#2196F3",
"Blue600": "#1E88E5",
"Blue700": "#1976D2",
"Blue800": "#1565C0",
"Blue900": "#0D47A1"
},
"material_cyan": {
"Cyan50": "#E0F7FA",
"Cyan100": "#B2EBF2",
"Cyan200": "#80DEEA",
"Cyan300": "#4DD0E1",
"Cyan400": "#26C6DA",
"Cyan500": "#00BCD4",
"Cyan600": "#00ACC1",
"Cyan700": "#0097A7",
"Cyan800": "#00838F",
"Cyan900": "#006064"
},
"material_teal": {
"Teal50": "#E0F2F1",
"Teal100": "#B2DFDB",
"Teal200": "#80CBC4",
"Teal300": "#4DB6AC",
"Teal400": "#26A69A",
"Teal500": "#009688",
"Teal600": "#00897B",
"Teal700": "#00796B",
"Teal800": "#00695C",
"Teal900": "#004D40"
},
"material_green": {
"Green50": "#E8F5E9",
"Green100": "#C8E6C9",
"Green200": "#A5D6A7",
"Green300": "#81C784",
"Green400": "#66BB6A",
"Green500": "#4CAF50",
"Green600": "#43A047",
"Green700": "#388E3C",
"Green800": "#2E7D32",
"Green900": "#1B5E20"
},
"material_lime": {
"Lime50": "#F9FBE7",
"Lime100": "#F0F4C3",
"Lime200": "#E6EE9C",
"Lime300": "#DCE775",
"Lime400": "#D4E157",
"Lime500": "#CDDC39",
"Lime600": "#C0CA33",
"Lime700": "#AFB42B",
"Lime800": "#9E9D24",
"Lime900": "#827717"
},
"material_yellow": {
"Yellow50": "#FFFDE7",
"Yellow100": "#FFF9C4",
"Yellow200": "#FFF59D",
"Yellow300": "#FFF176",
"Yellow400": "#FFEE58",
"Yellow500": "#FFEB3B",
"Yellow600": "#FDD835",
"Yellow700": "#FBC02D",
"Yellow800": "#F9A825",
"Yellow900": "#F57F17"
},
"material_amber": {
"Amber50": "#FFF8E1",
"Amber100": "#FFECB3",
"Amber200": "#FFE082",
"Amber300": "#FFD54F",
"Amber400": "#FFCA28",
"Amber500": "#FFC107",
"Amber600": "#FFB300",
"Amber700": "#FFA000",
"Amber800": "#FF8F00",
"Amber900": "#FF6F00"
},
"material_orange": {
"Orange50": "#FFF3E0",
"Orange100": "#FFE0B2",
"Orange200": "#FFCC80",
"Orange300": "#FFB74D",
"Orange400": "#FFA726",
"Orange500": "#FF9800",
"Orange600": "#FB8C00",
"Orange700": "#F57C00",
"Orange800": "#EF6C00",
"Orange900": "#E65100"
},
"material_brown": {
"Brown50": "#EFEBE9",
"Brown100": "#D7CCC8",
"Brown200": "#BCAAA4",
"Brown300": "#A1887F",
"Brown400": "#8D6E63",
"Brown500": "#795548",
"Brown600": "#6D4C41",
"Brown700": "#5D4037",
"Brown800": "#4E342E",
"Brown900": "#3E2723"
},
"material_grey": {
"Grey50": "#FAFAFA",
"Grey100": "#F5F5F5",
"Grey200": "#EEEEEE",
"Grey300": "#E0E0E0",
"Grey400": "#BDBDBD",
"Grey500": "#9E9E9E",
"Grey600": "#757575",
"Grey700": "#616161",
"Grey800": "#424242",
"Grey900": "#212121"
}
} | /sci-palettes-1.0.1.tar.gz/sci-palettes-1.0.1/sci_palettes/palettes.py | 0.432902 | 0.561696 | palettes.py | pypi |
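# Illustrative sketch (not part of the package): using one PALETTES entry above as a
# matplotlib color cycle. The plotting code is an assumption for demonstration, not the
# package's own API.
import matplotlib.pyplot as plt
from sci_palettes.palettes import PALETTES

colors = list(PALETTES["npg_nrc"].values())
fig, ax = plt.subplots()
ax.set_prop_cycle(color=colors)
for i, name in enumerate(PALETTES["npg_nrc"]):
    ax.plot([0, 1], [i, i + 1], label=name)
ax.legend(fontsize=6)
plt.show()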
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu = 0, sigma = 1):
Distribution.__init__(self,mu,sigma)
def calculate_mean(self):
"""Method to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
self.mean = sum(self.data)/len(self.data)
return self.mean
def calculate_stdev(self, sample=True):
"""Method to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
data = [(x-self.mean)**2 for x in self.data]
summation = sum(data)
n = len(self.data)
if sample:
self.stdev=math.sqrt(summation/(n -1))
else:
self.stdev = math.sqrt(summation/n)
return self.stdev
def read_data_file(self, file_name, sample=True):
"""Method to read in data from a txt file. The txt file should have
one number (float) per line. The numbers are stored in the data attribute.
After reading in the file, the mean and standard deviation are calculated
Args:
file_name (string): name of a file to read from
Returns:
None
"""
Distribution.read_data_file(self,file_name,sample)
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev(sample)
def plot_histogram(self):
"""Method to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.xlabel("data")
plt.ylabel("count")
plt.title("data distribution")
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
        coefficient = 1 / (self.stdev * math.sqrt(2 * math.pi))
        euler_exponent = -0.5 * ((x - self.mean) / self.stdev) ** 2
        return coefficient * math.exp(euler_exponent)
def plot_histogram_pdf(self, n_spaces = 50):
"""Method to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Magic method to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean+other.mean
result.stdev = math.sqrt(self.stdev**2 + other.stdev**2)
return result
def __repr__(self):
"""Magic method to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean,self.stdev) | /sci_stats_dist-0.0.2.tar.gz/sci_stats_dist-0.0.2/sci_stats_dist/Gaussiandistribution.py | 0.807916 | 0.804598 | Gaussiandistribution.py | pypi |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
""" Binomial distribution class for calculating and
visualizing a Binomial distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats to be extracted from the data file
p (float) representing the probability of an event occurring
n (int) the total number of trials
"""
def __init__(self, prob=.5, size=20):
self.p = prob
self.n=size
Distribution.__init__(self,self.calculate_mean() , self.calculate_stdev())
def calculate_mean(self):
"""Function to calculate the mean from p and n
Args:
None
Returns:
float: mean of the data set
"""
self.mean = self.n * self.p
return self.mean
def calculate_stdev(self):
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set
"""
self.stdev=math.sqrt(self.n *self.p*(1-self.p))
return self.stdev
def replace_stats_with_data(self):
"""Function to calculate p and n from the data set
Args:
None
Returns:
float: the p value
float: the n value
"""
self.n = len(self.data)
self.p = self.data.count(1)/self.n
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev()
return self.p,self.n
def plot_bar(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.bar(["0","1"] , [self.n * (1-self.p) , self.n*self.p])
plt.xlabel("data")
plt.ylabel("counts")
plt.show()
def __nCk(self,n,k):
k = min(k,n-k)
nchoosek=1
for i in range(1,k+1):
nchoosek*=(n-i+1)
nchoosek /=i
return nchoosek
def pdf(self, k):
"""Probability density function calculator for the gaussian distribution.
Args:
k (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return self.__nCk(self.n,k) * self.p**k * (1-self.p)**(self.n-k)
def plot_bar_pdf(self):
"""Function to plot the pdf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
        x = list(range(0, self.n + 1))
        y = [self.pdf(k) for k in x]
        plt.bar(x, y)
        plt.title('Distribution of Outcomes')
        plt.ylabel('Probability')
        plt.xlabel('Outcome')
        plt.show()
        return x, y
def __add__(self, other):
"""Function to add together two Binomial distributions with equal p
Args:
other (Binomial): Binomial instance
Returns:
Binomial: Binomial distribution
"""
try:
assert self.p == other.p, 'p values are not equal'
except AssertionError as error:
raise
return Binomial(self.p,self.n+other.n)
def __repr__(self):
"""Function to output the characteristics of the Binomial instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return f"mean {self.mean}, standard deviation {self.stdev}, p {self.p}, n {self.n}" | /sci_stats_dist-0.0.2.tar.gz/sci_stats_dist-0.0.2/sci_stats_dist/Binomialdistribution.py | 0.830044 | 0.804598 | Binomialdistribution.py | pypi |
import traceback
from typing import Union
import pandas as pd
import numpy as np
def combine_csv_files(from_files: list, to_file: str, wanted_cols: Union[list, str, None] = None, *args, **kwargs) -> pd.DataFrame:
"""
Covert several csv files to ONE csv file with specified columns.
:param na_vals:
:param sep:
:param from_files: a list of csv file paths which represent the source files to combine, <br>
eg. ['path/to/source_file_1.csv', 'path/to/source_file_2.csv'] <br> <br>
:param to_file: the csv file path which designate the destinate location to store the result, <br>
eg. 'path/to/save_file.csv' <br> <br>
:param wanted_cols: the filter columns, which will be the result csv columns, <br>
no data in this column and this column will be empty, <br>
no wanted_cols provided (None), all columns will be preserved. <br>
:return: pd.DataFrame which is the data content store in the "to_file" csv file.
"""
if from_files is None:
raise ValueError('from_files cannot be None')
elif type(from_files) is not list:
raise ValueError('from_files must be <type: list>')
elif len(from_files) == 0:
raise ValueError('from_files cannot be empty')
if to_file is None:
raise ValueError('to_file cannot be None')
elif type(to_file) is not str:
raise ValueError('to_file must be <type: str>')
elif len(to_file) == 0:
raise ValueError('to_file cannot be empty')
dfs = []
for _from_file in from_files:
try:
_df = pd.read_csv(_from_file, *args, **kwargs)
dfs.append(_df)
except:
print('*'*32)
print(f'- pd.read_csv error with input file: "{_from_file}"')
traceback.print_exc()
print('*'*32)
continue
# combine all dfs with concat 'outer' join,
# ignore_index will allow concat directly and add columns automatically,
# axis=0 means concat follow vertical direction.
final_combined_df = pd.concat(dfs, axis=0, ignore_index=True, sort=False)
if wanted_cols is None \
or (type(wanted_cols) is list and len(wanted_cols) == 0) \
or (type(wanted_cols) is not list and type(wanted_cols) is not str):
final_combined_df = final_combined_df
else:
current_cols = final_combined_df.columns.to_list()
if type(wanted_cols) is list:
for _col in wanted_cols:
if _col not in current_cols:
final_combined_df[_col] = np.nan
elif type(wanted_cols) is str:
if wanted_cols not in current_cols:
final_combined_df[wanted_cols] = np.nan
final_combined_df = final_combined_df[wanted_cols]
final_combined_df.to_csv(to_file, header=True)
return final_combined_df
if __name__ == '__main__':
d1 = {'A': [2, 3, 4], 'B': ['a', 'b', 'c'], 'C': ['10002', 'sss', 'msc23d']}
d2 = {'A': [12, 13, 4, 15], 'B': ['1a', 'b', 'c', '1Z'], 'Z': ['333', '444', '555', 'ZZZ']}
df1 = pd.DataFrame(d1)
df2 = pd.DataFrame(d2)
df1_pth = 'df1_test.csv'
df2_pth = 'df2_test.csv'
df1.to_csv(df1_pth, index=False)
df2.to_csv(df2_pth, index=False)
# dfNone = combine_csv_files(from_files=[df1_pth, df2_pth], to_file='dfcombine_test_None.csv', wanted_cols=None)
# dfAZC = combine_csv_files(from_files=[df1_pth, df2_pth], to_file='dfcombine_test_AZC.csv', wanted_cols=['A', 'Z', 'C'])
dfNone = combine_csv_files([df1_pth, df2_pth], 'dfcombine_test_None.csv', None)
dfAZC = combine_csv_files([df1_pth, df2_pth], 'dfcombine_test_AZC.csv', ['A', 'Z', 'C'])
dfZ = combine_csv_files([df1_pth, df2_pth], 'dfcombine_test_Z.csv', 'Z')
dfZZZ = combine_csv_files([df1_pth, df2_pth], 'dfcombine_test_ZZZ.csv', 'ZZZ')
print('df1 === \n', df1)
print('df2 === \n', df2)
print('dfNone === \n', dfNone)
print('dfAZC === \n', dfAZC)
print('dfZ === \n', dfZ)
print('dfZZZ === \n', dfZZZ) | /sci-util-1.2.7.tar.gz/sci-util-1.2.7/sci_util/pd/csv.py | 0.702632 | 0.382459 | csv.py | pypi |
def cnt_split(tar_list, cnt_per_slice):
"""
Yield successive n-sized(cnt_per_slice) chunks from l(tar_list).
>>> x = list(range(34))
>>> for i in cnt_split(x, 5):
>>> print(i)
<<< print result ...
<<< [0, 1, 2, 3, 4]
<<< [5, 6, 7, 8, 9]
<<< [10, 11, 12, 13, 14]
<<< [15, 16, 17, 18, 19]
<<< [20, 21, 22, 23, 24]
<<< [25, 26, 27, 28, 29]
<<< [30, 31, 32, 33]
    The target list is split into slices with a MAX size of 'cnt_per_slice'.
:param tar_list: target list to split
:param cnt_per_slice: slice per max size...
:return: yield one result.
"""
for i in range(0, len(tar_list), cnt_per_slice):
yield tar_list[i:i + cnt_per_slice]
def n_split(tar_list, n):
"""
    Split tar_list into n slices of nearly equal size and yield them one by one.
>>> x = list(range(33))
>>> for i in n_split(x, 5):
>>> print(i)
<<< print result ...
<<< [0, 1, 2, 3, 4, 5, 6]
<<< [7, 8, 9, 10, 11, 12, 13]
<<< [14, 15, 16, 17, 18, 19, 20]
<<< [21, 22, 23, 24, 25, 26]
<<< [27, 28, 29, 30, 31, 32]
    The target list is split into n slices whose sizes differ by at most one.
    :param tar_list: target list to split
    :param n: number of slices
    :return: yield one slice at a time.
"""
slice_len = int(len(tar_list) / n)
slice_len_1 = slice_len + 1
slice_remain = int(len(tar_list) % n)
cur_idx = 0
for i in range(n):
# print(f'{i} < {slice_remain} : [{cur_idx}: {cur_idx+(slice_len_1 if i < slice_remain else slice_len)}]')
yield tar_list[cur_idx: cur_idx+(slice_len_1 if i < slice_remain else slice_len)]
cur_idx += slice_len_1 if i < slice_remain else slice_len
def n_split_idx(tar_list_len, n):
"""
    Compute the slice lengths obtained when a list of length tar_list_len is split into n slices.
>>> x = list(range(33))
>>> n_split_idx(len(x), 3)
<<< [11, 11, 11]
>>> n_split_idx(len(x), 4)
<<< [9, 8, 8, 8]
>>> n_split_idx(len(x), 5)
<<< [7, 7, 7, 6, 6]
>>> n_split_idx(len(x), 6)
<<< [6, 6, 6, 5, 5, 5]
>>> n_split_idx(len(x), 7)
<<< [5, 5, 5, 5, 5, 4, 4]
    The slice sizes differ by at most one, matching the behaviour of n_split().
    :param tar_list_len: length of the target list to split
    :param n: number of slices
    :return: list of each slice length.
"""
slice_len = int(tar_list_len / n)
slice_remain = int(tar_list_len % n)
res = []
for i in range(n):
if i<slice_remain:
res.append(slice_len+1)
else:
res.append(slice_len)
return res | /sci-util-1.2.7.tar.gz/sci-util-1.2.7/sci_util/list_util/split_util.py | 0.459561 | 0.480052 | split_util.py | pypi |
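# Illustrative usage sketch (not part of the module): the three helpers above applied to
# the same 10-element list.
from sci_util.list_util.split_util import cnt_split, n_split, n_split_idx

data = list(range(10))
print(list(cnt_split(data, 4)))   # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
print(list(n_split(data, 3)))     # [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
print(n_split_idx(len(data), 3))  # [4, 3, 3]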
from sklearn.metrics import (
accuracy_score,
confusion_matrix,
classification_report,
roc_curve,
roc_auc_score,
)
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
def show_classification(y_test, y_pred):
r"""
Confusion matrix
- Binary:
- y_test: [1, 0, 1, 0, 1]
- y_pred: [0.1, 0.2, 0.9, 0, 0.8]
- Multi:
- y_test: [1, 2, 1, 0, 1]
- y_pred: [[0.1, 0.8, 0.1], [0.1, 0.2, 0.7], [0.1, 0.6, 0.3], [0.5, 0.3. 0.2], [0.1, 0.6, 0.4]]
"""
cm = confusion_matrix(y_test, y_pred)
TN = cm[0, 0]
TP = cm[1, 1]
FP = cm[0, 1]
FN = cm[1, 0]
print(sum(y_test), sum(y_pred))
print("Confusion matrix\n\n", cm)
print("\nTrue Negatives(TN) = ", TN)
print("\nTrue Positives(TP) = ", TP)
print("\nFalse Positives(FP) = ", FP)
print("\nFalse Negatives(FN) = ", FN)
classification_accuracy = (TP + TN) / float(TP + TN + FP + FN)
print("Classification accuracy : {0:0.4f}".format(classification_accuracy))
classification_error = (FP + FN) / float(TP + TN + FP + FN)
print("Classification error : {0:0.4f}".format(classification_error))
precision = TP / float(TP + FP)
print("Precision : {0:0.4f}".format(precision))
recall = TP / float(TP + FN)
print("Recall or Sensitivity : {0:0.4f}".format(recall))
true_positive_rate = TP / float(TP + FN)
print("True Positive Rate : {0:0.4f}".format(true_positive_rate))
false_positive_rate = FP / float(FP + TN)
print("False Positive Rate : {0:0.4f}".format(false_positive_rate))
specificity = TN / (TN + FP)
print("Specificity : {0:0.4f}".format(specificity))
cm_matrix = pd.DataFrame(
data=cm.T,
columns=["Actual Negative:0", "Actual Positive:1"],
index=["Predict Negative:0", "Predict Positive:1"],
)
sns.heatmap(cm_matrix, annot=True, fmt="d", cmap="YlGnBu")
fpr, tpr, thresholds = roc_curve(y_test, y_pred)
plt.figure(figsize=(6, 4))
plt.plot(fpr, tpr, linewidth=2)
plt.plot([0, 1], [0, 1], "k--")
plt.rcParams["font.size"] = 12
plt.title("ROC curve for Predicting a Pulsar Star classifier")
plt.xlabel("False Positive Rate (1 - Specificity)")
plt.ylabel("True Positive Rate (Sensitivity)")
plt.show()
ROC_AUC = roc_auc_score(y_test, y_pred)
print("ROC AUC : {:.4f}".format(ROC_AUC)) | /sci_ztools-0.1.4-py3-none-any.whl/z/metrics.py | 0.805096 | 0.590012 | metrics.py | pypi |
from pathlib import Path
import shutil
from typing import Optional, Union, List
import gzip
import tarfile
def get_path(path: Union[Path, str]) -> Path:
"""Transform to `Path`.
Args:
path (str): The path to be transformed.
Returns:
Path: the `pathlib.Path` class
"""
if isinstance(path, Path):
return path
else:
return Path(path)
def get_path_out(
path_in: Path, rename: str, path_out: Optional[Union[Path, str]] = None
):
r"""
Adaptor pathout to path_in
"""
if path_out is None:
return path_in.parent / rename
else:
_path_out = get_path(path_out)
if _path_out.is_dir():
return _path_out / rename
        else:
            # any non-directory path is treated as the output file path (it may not exist yet)
            return _path_out
def zip(path_in: Union[Path, str], path_out: Optional[Union[Path, str]] = None):
r""" """
_path_in = get_path(path_in)
assert _path_in.is_file(), f"{path_in} is not a file"
rename = _path_in.name + ".gz"
_path_out = get_path_out(_path_in, rename, path_out)
with open(_path_in, "rb") as f_in:
with gzip.open(_path_out, "wb") as f_out:
f_out.write(f_in.read())
def unzip(path_in: Union[Path, str], path_out: Optional[Union[Path, str]] = None):
_path_in = get_path(path_in)
assert _path_in.is_file(), f"{path_in} is not a file"
assert _path_in.suffix == ".gz", f"not .gz file name"
rename = _path_in.name.rstrip(".gz") # rip
_path_out = get_path_out(_path_in, rename, path_out)
with gzip.open(_path_in, "rb") as f_in:
with open(_path_out, "wb") as f_out:
f_out.write(f_in.read())
def tar(
path: Union[Path, str], staffs: Union[List[Union[Path, str]], Union[Path, str]]
):
_path = get_path(path)
with tarfile.open(_path, "w:gz") as tar:
if isinstance(staffs, (str, Path)):
tar.add(staffs)
print(f"add {staffs}")
elif isinstance(staffs, List):
for staff in staffs:
tar.add(staff)
print(f"add {staff}")
def untar(path_in: Union[Path, str], path_out: Optional[Union[Path, str]] = None):
_path_in = get_path(path_in)
assert _path_in.is_file(), f"{path_in} is not a file"
rename = _path_in.name.rstrip(".tar.gz") # rip
_path_out = get_path_out(_path_in, rename, path_out)
with tarfile.open(_path_in, "r:gz") as tar:
        tar.extractall(path=_path_out) | /sci_ztools-0.1.4-py3-none-any.whl/z/sh.py | 0.900157 | 0.292709 | sh.py | pypi |
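# Illustrative usage sketch (not part of the package): round-tripping files with the
# helpers above. All file names are made-up examples.
from pathlib import Path
from z.sh import zip, unzip, tar, untar

zip("notes.txt")                           # writes notes.txt.gz next to the input
unzip("notes.txt.gz")                      # restores notes.txt next to the archive
tar(Path("bundle.tar.gz"), ["notes.txt"])  # pack one or more paths into a gzipped tar
untar("bundle.tar.gz")                     # extracts into a 'bundle' directory next to the archive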
import os
import random
from itertools import takewhile, repeat
from pathlib import Path
from typing import Union, List, Optional
import numpy as np
import pandas as pd
import torch
from rich import console
from rich.table import Table
from sklearn.model_selection import KFold # Kfold cross validation
import logging
from rich.logging import RichHandler
from logging import FileHandler
from typing import Optional
from sklearn.utils import shuffle
def get_logger(
name: Optional[str] = None, filename: Optional[str] = None, level: str = "INFO"
) -> logging.Logger:
"""Get glorified Rich Logger"""
name = name if name else __name__
handlers = [
RichHandler(
rich_tracebacks=True,
)
]
if filename:
handlers.append(FileHandler(filename))
logging.basicConfig(format="%(name)s: %(message)s", handlers=handlers)
log = logging.getLogger(name)
log.setLevel(level)
return log
log = get_logger()
def read_excel(
paths: Union[Path, List[Path]], drop_by: Optional[str] = None
) -> pd.DataFrame:
"""Read excel and get pandas.DataFrame"""
if isinstance(paths, List):
# use openpyxl for better excel
df = pd.concat([pd.read_excel(path, engine="openpyxl") for path in paths])
elif isinstance(paths, Path):
df = pd.read_excel(paths, engine="openpyxl")
else:
raise NotImplementedError
# remove blank lines in the tail of xlsx
# use drop to make sure the index order
if drop_by:
df.dropna(subset=[drop_by], inplace=True)
df.reset_index(drop=True, inplace=True)
return df
def get_device():
"get device (CPU or GPU)"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
print("%s (%d GPUs)" % (device, n_gpu))
return device
def iter_count(file_name):
"""Text lines counter"""
buffer = 1024 * 1024
with open(file_name) as f:
buf_gen = takewhile(lambda x: x, (f.read(buffer) for _ in repeat(None)))
return sum(buf.count("\n") for buf in buf_gen)
def df_to_table(
pandas_dataframe: pd.DataFrame,
rich_table: Table,
show_index: bool = True,
index_name: Optional[str] = None,
) -> Table:
"""Convert a pandas.DataFrame obj into a rich.Table obj.
Args:
pandas_dataframe (DataFrame): A Pandas DataFrame to be converted to a rich Table.
rich_table (Table): A rich Table that should be populated by the DataFrame values.
show_index (bool): Add a column with a row count to the table. Defaults to True.
index_name (str, optional): The column name to give to the index column. Defaults to None, showing no value.
Returns:
Table: The rich Table instance passed, populated with the DataFrame values."""
if show_index:
index_name = str(index_name) if index_name else ""
rich_table.add_column(index_name)
for column in pandas_dataframe.columns:
rich_table.add_column(str(column))
for index, value_list in enumerate(pandas_dataframe.values.tolist()):
row = [str(index)] if show_index else []
row += [str(x) for x in value_list]
rich_table.add_row(*row)
return rich_table
def print_df(
pandas_dataframe: pd.DataFrame,
title: str = None,
):
console.Console().print(df_to_table(pandas_dataframe, Table(title=title)))
def set_seeds(seed):
"set random seeds"
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def show_ratio(df: pd.DataFrame, label="label", sort=None, n=5) -> None:
"""print the label proportion in pd.DataFrame
Args:
sort: 'value' or 'label'
"""
n_all = len(df)
if sort == "value":
n_classes = (
df[label]
.value_counts()
.reset_index()
.sort_values(by=label, ascending=False)
)
elif sort == "label":
n_classes = df[label].value_counts().reset_index().sort_values(by="index")
else:
n_classes = df[label].value_counts().reset_index()
n_classes = n_classes[:n]
for i in n_classes.index:
log.info(
f'Label {n_classes.at[i, "index"]} takes: {n_classes.at[i, label] / n_all * 100:.2f}%, Nums: {n_classes.at[i, label]}'
)
def split_df(df: pd.DataFrame, shuf=True, val=True, random_state=42):
"""Split df into train/val/test set and write into files
ratio: 8:1:1 or 9:1
Args:
- df (DataFrame): some data
- shuf (bool, default=True): shuffle the DataFrame
- val (bool, default=True): split into three set, train/val/test
"""
if shuf:
df = shuffle(df, random_state=random_state)
sep = int(len(df) * 0.1)
if val:
test_df = df.iloc[:sep]
val_df = df.iloc[sep : sep * 2]
train_df = df.iloc[sep * 2 :]
return train_df, val_df, test_df
else:
test_df = df.iloc[:sep]
train_df = df.iloc[sep:]
return train_df, test_df
def kfold(df: pd.DataFrame, n_splits=5, shuffle=True, random_state=42) -> pd.DataFrame:
"""
:param df: make sure the index correct
:param n_splits:
:param shuffle:
:param random_state:
:return:
"""
_df = df.copy()
if shuffle:
kf = KFold(n_splits=n_splits, shuffle=shuffle, random_state=random_state)
else:
kf = KFold(n_splits=n_splits)
for fold in range(n_splits):
_df[f"fold{fold}"] = False
fold = 0
for train_idxs, test_idxs in kf.split(_df):
print(train_idxs, test_idxs)
for i in test_idxs:
_df.loc[i, f"fold{fold}"] = True
fold += 1
return _df
def get_CV(
df: pd.DataFrame,
n_splits=5,
dir: Path = Path("CV"),
header=True,
index=True,
cols=None,
):
os.makedirs(dir, exist_ok=True)
for fold in range(n_splits):
_df = df.copy()
df_fold_test = _df[_df[f"fold{fold}"]]
df_fold_train = _df[~_df[f"fold{fold}"]]
if cols:
df_fold_test = df_fold_test[cols]
df_fold_train = df_fold_train[cols]
_df = _df[cols]
fold_dir = dir / f"fold{fold}"
os.makedirs(fold_dir, exist_ok=True)
df_fold_test.to_csv(fold_dir / "test.csv", header=header, index=index)
df_fold_train.to_csv(fold_dir / "train.csv", header=header, index=index)
_df.to_csv(fold_dir / "all.csv", header=header, index=index)
if __name__ == "__main__":
df = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 1, 1], "b": [4, 5, 6, 7, 8, 9, 10, 2, 1]}
)
print(kfold(df)) | /sci_ztools-0.1.4-py3-none-any.whl/z/utils.py | 0.826116 | 0.332581 | utils.py | pypi |
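# Illustrative usage sketch (not part of the package): writing 5-fold CV splits to disk
# with the helpers above. The DataFrame contents and the output directory are made up.
from pathlib import Path
import pandas as pd
from z.utils import kfold, get_CV

df = pd.DataFrame({"text": [f"sample {i}" for i in range(20)],
                   "label": [i % 2 for i in range(20)]})
folded = kfold(df, n_splits=5)
get_CV(folded, n_splits=5, dir=Path("CV"), cols=["text", "label"])
# -> CV/fold0/{train,test,all}.csv ... CV/fold4/{train,test,all}.csv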
import pandas as pd
from sklearn.utils import shuffle
from sklearn.model_selection import (
StratifiedShuffleSplit,
StratifiedKFold,
KFold,
train_test_split,
)
from typing import Optional, Union, List, Tuple
from pathlib import Path
import copy
class DataFrame():
def __init__(
self, df: pd.DataFrame, random_state: int = 42, *args, **kargs
) -> None:
super(DataFrame, self).__init__()
self.df = df.copy(deep=True) # get a copy of original dataframe
self.random_state = random_state
def __repr__(self) -> str:
return repr(self.df)
def split_df(
self,
val: bool = False,
test_size: float = 0.1,
val_size: Optional[float] = None,
):
        if val:
            assert val_size is not None, "val size needed"
            val_num = len(self.df) * val_size
            test_num = len(self.df) * test_size
            train_df, val_df = train_test_split(
                self.df, test_size=int(val_num), random_state=self.random_state
            )
            train_df, test_df = train_test_split(
                train_df, test_size=int(test_num), random_state=self.random_state
            )
            return train_df, val_df, test_df
        else:
            train_df, test_df = train_test_split(
                self.df, test_size=test_size, random_state=self.random_state
            )
            return train_df, test_df
def stratified_split_df(
self, labels: Union[List[str], str], n_splits: int = 1, test_size: float = 0.1
) -> Union[List, Tuple]:
split = StratifiedShuffleSplit(
n_splits=n_splits, test_size=test_size, random_state=self.random_state
)
df_trains = []
df_tests = []
for train_index, test_index in split.split(self.df, self.df[labels]):
strat_train_set = self.df.loc[train_index]
strat_test_set = self.df.loc[test_index]
df_trains.append(strat_train_set)
df_tests.append(strat_test_set)
return (
(strat_train_set, strat_test_set)
if n_splits == 1
else (df_trains, df_tests)
)
def stratified_kfold_split_df(
self, labels: Union[List[str], str], n_splits: int = 2
) -> Tuple:
assert n_splits >= 2, "At least 2 fold"
skf = StratifiedKFold(
n_splits=n_splits, shuffle=True, random_state=self.random_state
)
for train_index, test_index in skf.split(self.df, self.df[labels]):
strat_train_set = self.df.loc[train_index]
strat_test_set = self.df.loc[test_index]
yield strat_train_set, strat_test_set
def kfold_split_df(self, labels: Union[List[str], str], n_splits: int = 2) -> Tuple:
assert n_splits >= 2, "At least 2 fold"
df_trains = []
df_tests = []
skf = StratifiedKFold(
n_splits=n_splits, shuffle=True, random_state=self.random_state
)
for train_index, test_index in skf.split(self.df, self.df[labels]):
strat_train_set = self.df.loc[train_index]
strat_test_set = self.df.loc[test_index]
df_trains.append(strat_train_set)
df_tests.append(strat_test_set)
return df_trains, df_tests
def show_ratio(self, label="label", sort=None, n: Optional[int] = None):
"""print the label proportion in pd.DataFrame
Args:
sort: 'value' or 'label'
"""
n_all = len(self.df)
if sort == "value":
n_classes = (
self.df[label]
.value_counts()
.reset_index()
.sort_values(by=label, ascending=False)
)
elif sort == "label":
n_classes = (
self.df[label].value_counts().reset_index().sort_values(by="index")
)
else:
n_classes = self.df[label].value_counts().reset_index()
if n:
n_classes = n_classes[:n]
for i in n_classes.index:
print(
f'Label, {n_classes.at[i, "index"]} takes: {n_classes.at[i, label] / n_all * 100:.2f}%, Nums: {n_classes.at[i, label]}'
)
def read_csv(path: str = "", random_state: int = 42, *args, **kargs):
_path = Path(path)
assert _path.is_file(), "not a file"
return DataFrame(df=pd.read_csv(path, *args, **kargs), random_state=random_state)
def split_df(df: pd.DataFrame, shuf=True, val=True, random_state=42):
"""Split df into train/val/test set and write into files
ratio: 8:1:1 or 9:1
Args:
- df (DataFrame): some data
- shuf (bool, default=True): shuffle the DataFrame
- val (bool, default=True): split into three set, train/val/test
"""
if shuf:
df = shuffle(df, random_state=random_state)
sep = int(len(df) * 0.1)
if val:
test_df = df.iloc[:sep]
val_df = df.iloc[sep : sep * 2]
train_df = df.iloc[sep * 2 :]
return train_df, val_df, test_df
else:
test_df = df.iloc[:sep]
train_df = df.iloc[sep:]
return train_df, test_df
def show_ratio(df: pd.DataFrame, label="label", sort=None, n=5) -> None:
"""print the label proportion in pd.DataFrame
Args:
sort: 'value' or 'label'
"""
n_all = len(df)
if sort == "value":
n_classes = (
df[label]
.value_counts()
.reset_index()
.sort_values(by=label, ascending=False)
)
elif sort == "label":
n_classes = df[label].value_counts().reset_index().sort_values(by="index")
else:
n_classes = df[label].value_counts().reset_index()
n_classes = n_classes[:n]
for i in n_classes.index:
print(
f'Label {n_classes.at[i, "index"]} takes: {n_classes.at[i, label] / n_all * 100:.2f}%, Nums: {n_classes.at[i, label]}'
) | /sci_ztools-0.1.4-py3-none-any.whl/z/pandas.py | 0.813979 | 0.419648 | pandas.py | pypi |
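# Illustrative usage sketch (not part of the package): wrapping a pandas DataFrame with
# the helper class above and doing a stratified 90/10 split. The data is made up.
import pandas as pd
from z.pandas import DataFrame

raw = pd.DataFrame({"x": range(100), "label": [i % 2 for i in range(100)]})
zdf = DataFrame(raw, random_state=42)
train_df, test_df = zdf.stratified_split_df(labels="label", test_size=0.1)
print(len(train_df), len(test_df))  # 90 10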
from paraview.simple import *
import paraview as pv
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
# get active source.
resultfoam = GetActiveSource()
# resultfoam.SkipZeroTime = 0
# check whether the temperature field T exists
alldata = pv.servermanager.Fetch(resultfoam)
convert_T = alldata.GetBlock(0).GetPointData().GetArray("T") is not None
renderView1 = GetActiveViewOrCreate('RenderView')
if(convert_T):
# create a new 'Calculator'
calculator1 = Calculator(Input=resultfoam)
calculator1.Function = 'T-273.15'
calculator1.ResultArrayName = 'T_degC'
RenameSource('K2degC', calculator1)
# SetActiveSource(calculator1)
# get active view
renderView1 = GetActiveViewOrCreate('RenderView')
# show data in view
resultfoamDisplay = Show(GetActiveSource(), renderView1)
# get color transfer function/color map for 'p'
pLUT = GetColorTransferFunction('T_degC')
# get opacity transfer function/opacity map for 'p'
pPWF = GetOpacityTransferFunction('T_degC')
# trace defaults for the display properties.
resultfoamDisplay.Representation = 'Surface'
# reset view to fit data
renderView1.ResetCamera()
# show color bar/color legend
resultfoamDisplay.SetScalarBarVisibility(renderView1, True)
# update the view to ensure updated data information
renderView1.Update()
# set scalar coloring
ColorBy(resultfoamDisplay, ('POINTS', 'T_degC'))
# Hide the scalar bar for this color map if no visible data is colored by it.
HideScalarBarIfNotNeeded(pLUT, renderView1)
# rescale color and/or opacity maps used to include current data range
resultfoamDisplay.RescaleTransferFunctionToDataRange(True, False)
# show color bar/color legend
resultfoamDisplay.SetScalarBarVisibility(renderView1, True)
tsteps = resultfoam.TimestepValues
name_time='Time_second'
if(len(tsteps)>1):
# create a new 'Annotate Time Filter'
annotateTimeFilter1 = AnnotateTimeFilter(Input=resultfoam)
# get active view
renderView1 = GetActiveViewOrCreate('RenderView')
# show data in view
annotateTimeFilter1Display = Show(annotateTimeFilter1, renderView1)
# update the view to ensure updated data information
renderView1.Update()
# Properties modified on annotateTimeFilter1
dt=(tsteps[-1]-tsteps[0])/(len(tsteps)-1)
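    # dt is the mean interval between outputs in seconds; pick a display unit accordingly.
    # The Scale factors below convert seconds to that unit, e.g. 3.17e-08 ~ 1/(365*86400) for years.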
if(dt>(86400*365)):
annotateTimeFilter1.Format = 'Time: %.0f years'
annotateTimeFilter1.Scale = 3.17e-08
name_time='Time_year'
elif(dt>86400):
annotateTimeFilter1.Format = 'Time: %.0f days'
annotateTimeFilter1.Scale = 1.1574074074074073e-05
name_time='Time_day'
elif(dt>3600):
annotateTimeFilter1.Format = 'Time: %.0f hours'
annotateTimeFilter1.Scale = 0.0002777777777777778
name_time='Time_hour'
elif(dt>60):
annotateTimeFilter1.Format = 'Time: %.0f minutes'
annotateTimeFilter1.Scale = 0.016666666666666666
name_time='Time_minute'
else:
annotateTimeFilter1.Format = 'Time: %.2f seconds'
annotateTimeFilter1.Scale = 1
name_time='Time_second'
# Properties modified on annotateTimeFilter1Display
annotateTimeFilter1Display.Bold = 1
annotateTimeFilter1Display.FontSize = 5
# update the view to ensure updated data information
renderView1.Update()
# rename source object
RenameSource(name_time, annotateTimeFilter1)
# set active source
if(convert_T):
SetActiveSource(calculator1)
renderView1.ResetCamera()
# current camera placement for renderView1
renderView1.CameraPosition = [2000.0, -3000.0, 7965.728650875111]
renderView1.CameraFocalPoint = [2000.0, -3000.0, 0.5]
renderView1.CameraParallelScale = 2061.5528734427357
# #### uncomment the following to render all views
# # RenderAllViews()
# # alternatively, if you want to write images, you can use SaveScreenshot(...).
renderView1.Update()
Hide(resultfoam, renderView1) | /sciPyFoam-0.4.1.tar.gz/sciPyFoam-0.4.1/example/cases/blockMesh/showTimeYear.py | 0.559049 | 0.399812 | showTimeYear.py | pypi |
# SCIAMACHY data tools
[](https://github.com/st-bender/sciapy/actions/workflows/ci_build_and_test.yml)
[](https://sciapy.rtfd.io/en/latest/?badge=latest)
[](https://coveralls.io/github/st-bender/sciapy)
[](https://scrutinizer-ci.com/g/st-bender/sciapy/?branch=master)
[](https://doi.org/10.5281/zenodo.1401370)
[](https://doi.org/10.5281/zenodo.1342701)
## Overview
These SCIAMACHY tools are provided as convenience tools for handling
SCIAMACHY level 1c limb spectra and retrieved level 2 trace-gas densities.
More extensive documentation is provided on [sciapy.rtfd.io](https://sciapy.rtfd.io).
### Level 1c tools
The `sciapy.level1c` submodule provides a few
[conversion tools](sciapy/level1c/README.md) for [SCIAMACHY](http://www.sciamachy.org)
level 1c calibrated spectra, to be used as input for trace gas retrieval with
[scia\_retrieval\_2d](https://github.com/st-bender/scia_retrieval_2d).
**Note that this is *not* a level 1b to level 1c calibration tool.**
For calibrating level 1b spectra (for example SCI\_NL\_\_1P version 8.02
provided by ESA via the
[ESA data browser](https://earth.esa.int/web/guest/data-access/browse-data-products))
to level 1c spectra, use the
[SciaL1C](https://earth.esa.int/web/guest/software-tools/content/-/article/scial1c-command-line-tool-4073)
command line tool or the free software
[nadc\_tools](https://github.com/rmvanhees/nadc_tools).
The first produces `.child` files, the second can output to HDF5 (`.h5`).
**Further note**: `.child` files are currently not supported.
### Level 2 tools
The `sciapy.level2` submodule provides
post-processing tools for trace-gas densities retrieved from SCIAMACHY limb scans.
They support simple operations such as combining files into *netcdf*, calculating and noting
the local solar time at the retrieval grid points, geomagnetic latitudes, etc.
The level 2 tools also include a simple binning algorithm.
### Regression
The `sciapy.regress` submodule can be used for regression analysis of SCIAMACHY
level 2 trace gas density time series, either directly or as daily zonal means.
It uses the [`regressproxy`](https://regressproxy.readthedocs.io) package
for modelling the proxy input with lag and lifetime decay.
The regression tools support various parameter fitting methods using
[`scipy.optimize`](https://docs.scipy.org/doc/scipy/reference/optimize.html)
and uncertainty evaluation using Markov-Chain Monte-Carlo sampling with
[`emcee`](https://emcee.readthedocs.io).
It further supports covariance modelling via
[`celerite`](https://celerite.readthedocs.io)
and [`george`](https://george.readthedocs.io).
## Install
### Prerequisites
Sciapy uses features from a lot of different packages.
All dependencies will be automatically installed when using
`pip install` or `python setup.py`, see below.
However, to speed up the install or for use
within a `conda` environment, it may be advantageous to
install some of the important packages beforehand:
- `numpy` at least version 1.13.0 for general numerics,
- `scipy` at least version 0.17.0 for scientific numerics,
- `matplotlib` at least version 2.2 for plotting,
- `netCDF4` for the low level netcdf4 interfaces,
- `h5py` for the low level hdf5 interfaces,
- `dask`,
- `toolz`,
- `pandas` and
- `xarray` for the higher level data interfaces,
- `astropy` for (astronomical) time conversions,
- `parse` for ASCII text parsing in `level1c`,
- `pybind11` for the C++ interface needed by `celerite`,
- `celerite` at least version 0.3.0 and
- `george` for Gaussian process modelling,
- `emcee` for MCMC sampling and
- `corner` for the sample histogram plots,
- `regressproxy` for the regression proxy modelling.
Out of these packages, `numpy` is probably the most important one
to be installed first because at least `celerite` needs it for setup.
It may also be a good idea to install
[`pybind11`](https://pybind11.readthedocs.io)
because both `celerite` and `george` use its interface,
and both may fail to install without `pybind11`.
Depending on the setup, `numpy` and `pybind11` can be installed
via `pip`:
```sh
pip install numpy pybind11
```
or [`conda`](https://conda.io):
```sh
conda install numpy pybind11
```
### sciapy
Official releases are available as `pip` packages from the main package repository,
to be found at <https://pypi.org/project/sciapy/>, and can be installed with:
```sh
$ pip install sciapy
```
The latest development version of
sciapy can be installed with [`pip`](https://pip.pypa.io) directly
from github (see <https://pip.pypa.io/en/stable/reference/pip_install/#vcs-support>
and <https://pip.pypa.io/en/stable/reference/pip_install/#git>):
```sh
$ pip install [-e] git+https://github.com/st-bender/sciapy.git
```
The other option is to use a local clone:
```sh
$ git clone https://github.com/st-bender/sciapy.git
$ cd sciapy
```
and then using `pip` (optionally using `-e`, see
<https://pip.pypa.io/en/stable/reference/pip_install/#install-editable>):
```sh
$ pip install [-e] .
```
or using `setup.py`:
```sh
$ python setup.py install
```
## Usage
The whole module as well as the individual submodules can be loaded as usual:
```python
>>> import sciapy
>>> import sciapy.level1c
>>> import sciapy.level2
>>> import sciapy.regress
```
Basic class and method documentation is accessible via `pydoc`:
```sh
$ pydoc sciapy
```
The submodules' documentation can be accessed with `pydoc` as well:
```sh
$ pydoc sciapy.level1c
$ pydoc sciapy.level2
$ pydoc sciapy.regress
```
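As a quick sketch (mirroring the regression tutorial), the daily mean proxy
time series used by `sciapy.regress` can be loaded directly:
```python
>>> from sciapy.regress.load_data import load_dailymeanAE, load_dailymeanLya
>>> lya_times, lya_vals = load_dailymeanLya()  # Lyman-alpha proxy
>>> ae_times, ae_vals = load_dailymeanAE()     # AE index proxy
```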
## License
This python package is free software: you can redistribute it or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 2 (GPLv2), see [local copy](./LICENSE)
or [online version](http://www.gnu.org/licenses/gpl-2.0.html).
| /sciapy-0.0.8.tar.gz/sciapy-0.0.8/README.md | 0.532668 | 0.957477 | README.md | pypi |
# Regression model intro
## Standard imports
First, setup some standard modules and matplotlib.
```
%matplotlib inline
%config InlineBackend.figure_format = 'png'
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
```
Load the main `sciapy` module and its wrappers for easy access to the used proxy timeseries.
```
import regressproxy
import sciapy
from sciapy.regress.load_data import load_dailymeanAE, load_dailymeanLya
plt.rcParams["figure.dpi"] = 120
```
## Model interface
The model is set up part by part, beginning with the more involved proxy models.
### Lyman-$\alpha$ proxy
We start with the Lyman-$\alpha$ proxy; it is not centered (mean-subtracted), and we set all the remaining parameters except `ltscan` to zero.
```
# load proxy data
plat, plap = load_dailymeanLya()
# setup the model
lya_model = regressproxy.ProxyModel(plat,
plap["Lya"],
center=False,
amp=0,
lag=0,
tau0=0,
taucos1=0, tausin1=0,
taucos2=0, tausin2=0,
ltscan=60)
```
### AE proxy with lifetime
The AE proxy is also not centered and we start with the same parameters as above.
```
# load proxy data
paet, paep = load_dailymeanAE()
# setup the model
ae_model = regressproxy.ProxyModel(paet,
paep["AE"],
center=False,
amp=0,
lag=0,
tau0=0,
taucos1=0, tausin1=0,
taucos2=0, tausin2=0,
ltscan=60)
```
### Offset
We use the `ConstantModel` (inherited from `celerite`) for the constant offset.
```
offset_model = regressproxy.ConstantModel(value=0.)
```
### Optional harmonic terms
The harmonic terms are not used here but we include them to show how to set them up.
```
harm1 = regressproxy.HarmonicModelCosineSine(freq=1, cos=0, sin=0)
harm2 = regressproxy.HarmonicModelCosineSine(freq=2, cos=0, sin=0)
# frequencies should not be fitted
harm1.freeze_parameter("freq")
harm2.freeze_parameter("freq")
```
### Combined model
We then combine the separate models into a `ModelSet`.
```
model = regressproxy.ProxyModelSet([("offset", offset_model),
("Lya", lya_model), ("GM", ae_model),
("f1", harm1), ("f2", harm2)])
```
The full model has the following parameters:
```
model.get_parameter_dict()
```
But we don't need all of them, so we freeze all parameters and thaw the ones we need.
This is easier than the other way around (freezing all unused parameters).
```
model.freeze_all_parameters()
model.thaw_parameter("offset:value")
model.thaw_parameter("Lya:amp")
model.thaw_parameter("GM:amp")
model.thaw_parameter("GM:tau0")
model.thaw_parameter("GM:taucos1")
model.thaw_parameter("GM:tausin1")
```
Cross check that only the used parameters are really active:
```
model.get_parameter_dict()
```
## Model parameters
### Manually setting the parameters
Now we set the model parameters to something non-trivial, in the same order as listed above:
```
model.set_parameter_vector([-25.6, 6.26, 0.0874, 1.54, 10.52, -0.714])
model.get_parameter_dict()
```
With the parameters properly set, we can now "predict" the density for any time we wish.
Here we take 25 years half-daily:
```
times = np.arange(1992, 2017.01, 0.5 / 365.25)
pred = model.get_value(times)
```
and then plot the result:
```
plt.plot(times, pred, label="model")
plt.xlabel("time [Julian epoch]")
# The data were scaled by 10^-6 before fitting
plt.ylabel("NO number density [10$^6$ cm$^{{-3}}$]")
plt.legend();
```
### Setting the parameters from file
Instead of making up some numbers for the parameters, we can take "real" ones.
We use the ones determined by fitting the model to actual data,
in this case SCIAMACHY nitric oxide daily zonal mean data.
We connect to zenodo and load the contents into memory.
It's a rather small file so that should be no problem, but we need the requests and netCDF4 modules for that.
The alternative would be to download a copy into the same folder as this notebook.
```
import requests
import netCDF4
def load_data_store(store, variables=None):
with xr.open_dataset(store, chunks={"lat": 9, "alt": 8}) as data_ds:
if variables is not None:
data_ds = data_ds[variables]
data_ds.load()
return data_ds
def load_data_url(url, variables=None):
with requests.get(url, stream=True) as response:
nc4_ds = netCDF4.Dataset("data", memory=response.content)
store = xr.backends.NetCDF4DataStore(nc4_ds)
return load_data_store(store, variables)
zenodo_url = "https://zenodo.org/record/1342701/files/NO_regress_quantiles_pGM_Lya_ltcs_exp1dscan60d_km32.nc"
# If you downloaded a copy, use load_data_store()
# and replace the url by "/path/to/<filename.nc>"
quants = load_data_url(zenodo_url)
```
The data file contains the median together with the (0.16, 0.84), (0.025, 0.975),
and (0.001, 0.999) quantiles corresponding to the 1$\sigma$, 2$\sigma$, and 3$\sigma$ confidence regions.
In particular, the contents of the quantiles dataset are:
```
quants
```
The dimensions of the available parameters are:
```
quants.lat, quants.alt
```
We loop over the parameter names and set the parameters to the median values (`quantile=0.5`)
for the selected altitude and latitude bin.
The variables in the quantiles file were created using [celerite](https://github.com/dfm/celerite)
which prepends "mean:" to the variables from the mean model.
```
# select latitude and altitude first
latitude = 65
altitude = 70
for v in model.get_parameter_names():
model.set_parameter(v, quants["mean:{0}".format(v)].sel(alt=altitude, lat=latitude, quantile=0.5))
```
The parameters from the file are (actually pretty close to the ones above):
```
model.get_parameter_dict()
```
We take the same times as above (25 years half-daily) to predict the model values:
```
pred = model.get_value(times)
```
and then plot the result again:
```
plt.plot(times, pred, label="model")
plt.xlabel("time [Julian epoch]")
# Again, the data were scaled by 10^-6 before fitting, so adjust the y-axis label accordingly
plt.ylabel("NO number density [10$^6$ cm$^{{-3}}$]")
plt.legend();
```
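The stored quantiles also allow a rough uncertainty band. As a sketch (not a formal
prediction interval), one can evaluate the model at the 0.025 and 0.975 parameter
quantiles and shade the region between the two curves:
```
preds = {}
for q in [0.025, 0.5, 0.975]:
    # rough band: model evaluated at the parameter quantiles, not a proper posterior predictive
    for v in model.get_parameter_names():
        model.set_parameter(v, quants["mean:{0}".format(v)].sel(alt=altitude, lat=latitude, quantile=q))
    preds[q] = model.get_value(times)
plt.fill_between(times, preds[0.025], preds[0.975], alpha=0.3, label="parameter 95% region")
plt.plot(times, preds[0.5], label="median model")
plt.xlabel("time [Julian epoch]")
plt.ylabel("NO number density [10$^6$ cm$^{{-3}}$]")
plt.legend();
```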
| /sciapy-0.0.8.tar.gz/sciapy-0.0.8/docs/tutorials/regress_intro.ipynb | 0.629775 | 0.940463 | regress_intro.ipynb | pypi |
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from scib_metrics.utils import compute_simpson_index, convert_knn_graph_to_idx
def lisi_knn(X: csr_matrix, labels: np.ndarray, perplexity: float = None) -> np.ndarray:
"""Compute the local inverse simpson index (LISI) for each cell :cite:p:`korsunsky2019harmony`.
Parameters
----------
X
Array of shape (n_cells, n_cells) with non-zero values
representing distances to exactly each cell's k nearest neighbors.
labels
Array of shape (n_cells,) representing label values
for each cell.
perplexity
Parameter controlling effective neighborhood size. If None, the
perplexity is set to the number of neighbors // 3.
Returns
-------
lisi
Array of shape (n_cells,) with the LISI score for each cell.
"""
labels = np.asarray(pd.Categorical(labels).codes)
knn_dists, knn_idx = convert_knn_graph_to_idx(X)
if perplexity is None:
perplexity = np.floor(knn_idx.shape[1] / 3)
n_labels = len(np.unique(labels))
simpson = compute_simpson_index(knn_dists, knn_idx, labels, n_labels, perplexity=perplexity)
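    # LISI is the inverse Simpson index, i.e. the effective number of labels among each cell's neighbors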
return 1 / simpson
def ilisi_knn(X: csr_matrix, batches: np.ndarray, perplexity: float = None, scale: bool = True) -> float:
"""Compute the integration local inverse simpson index (iLISI) for each cell :cite:p:`korsunsky2019harmony`.
Returns a scaled version of the iLISI score for each cell, by default :cite:p:`luecken2022benchmarking`.
Parameters
----------
X
Array of shape (n_cells, n_cells) with non-zero values
representing distances to exactly each cell's k nearest neighbors.
batches
Array of shape (n_cells,) representing batch values
for each cell.
perplexity
Parameter controlling effective neighborhood size. If None, the
perplexity is set to the number of neighbors // 3.
scale
Scale lisi into the range [0, 1]. If True, higher values are better.
Returns
-------
ilisi
        Median iLISI score over cells, scaled to the range [0, 1] if ``scale=True``.
"""
batches = np.asarray(pd.Categorical(batches).codes)
lisi = lisi_knn(X, batches, perplexity=perplexity)
ilisi = np.nanmedian(lisi)
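    # rescale the median iLISI from [1, n_batches] to [0, 1], where 1 indicates well mixed batches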
if scale:
nbatches = len(np.unique(batches))
ilisi = (ilisi - 1) / (nbatches - 1)
return ilisi
def clisi_knn(X: csr_matrix, labels: np.ndarray, perplexity: float = None, scale: bool = True) -> float:
"""Compute the cell-type local inverse simpson index (cLISI) for each cell :cite:p:`korsunsky2019harmony`.
Returns a scaled version of the cLISI score for each cell, by default :cite:p:`luecken2022benchmarking`.
Parameters
----------
X
Array of shape (n_cells, n_cells) with non-zero values
representing distances to exactly each cell's k nearest neighbors.
labels
Array of shape (n_cells,) representing cell type label values
for each cell.
perplexity
Parameter controlling effective neighborhood size. If None, the
perplexity is set to the number of neighbors // 3.
scale
Scale lisi into the range [0, 1]. If True, higher values are better.
Returns
-------
clisi
        Median cLISI score over cells, scaled to the range [0, 1] if ``scale=True``.
"""
labels = np.asarray(pd.Categorical(labels).codes)
lisi = lisi_knn(X, labels, perplexity=perplexity)
clisi = np.nanmedian(lisi)
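    # rescale so that 1 corresponds to perfect label separation and 0 to complete mixing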
if scale:
nlabels = len(np.unique(labels))
clisi = (nlabels - clisi) / (nlabels - 1)
return clisi | /scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/_lisi.py | 0.928498 | 0.759002 | _lisi.py | pypi |
import logging
from typing import Optional, Union
import numpy as np
import pandas as pd
from ._silhouette import silhouette_label
logger = logging.getLogger(__name__)
def isolated_labels(
X: np.ndarray,
labels: np.ndarray,
batch: np.ndarray,
iso_threshold: Optional[int] = None,
) -> float:
"""Isolated label score :cite:p:`luecken2022benchmarking`.
    Score how well cells of isolated labels are distinguished in the dataset by
    the average silhouette width (ASW) of the isolated label vs all other labels.
The default of the original scib package is to use a cluster-based F1 scoring
procedure, but here we use the ASW for speed and simplicity.
Parameters
----------
X
Array of shape (n_cells, n_features).
labels
Array of shape (n_cells,) representing label values
batch
Array of shape (n_cells,) representing batch values
iso_threshold
        Maximum number of batches per label for a label to be considered
        isolated, if an integer is given. If `None`, the minimum number of
        batches in which any label is present is used as the threshold.
Returns
-------
isolated_label_score
"""
scores = {}
isolated_labels = _get_isolated_labels(labels, batch, iso_threshold)
for label in isolated_labels:
score = _score_isolated_label(X, labels, label)
scores[label] = score
scores = pd.Series(scores)
return scores.mean()
def _score_isolated_label(
X: np.ndarray,
labels: np.ndarray,
isolated_label: Union[str, float, int],
):
"""Compute label score for a single label."""
mask = labels == isolated_label
score = silhouette_label(X, mask.astype(np.float32))
logging.info(f"{isolated_label}: {score}")
return score
def _get_isolated_labels(labels: np.ndarray, batch: np.ndarray, iso_threshold: float):
"""Get labels that are isolated depending on the number of batches."""
tmp = pd.DataFrame()
label_key = "label"
batch_key = "batch"
tmp[label_key] = labels
tmp[batch_key] = batch
tmp = tmp.drop_duplicates()
batch_per_lab = tmp.groupby(label_key).agg({batch_key: "count"})
# threshold for determining when label is considered isolated
if iso_threshold is None:
iso_threshold = batch_per_lab.min().tolist()[0]
logging.info(f"isolated labels: no more than {iso_threshold} batches per label")
labels = batch_per_lab[batch_per_lab[batch_key] <= iso_threshold].index.tolist()
if len(labels) == 0:
logging.info(f"no isolated labels with less than {iso_threshold} batches")
return np.array(labels) | /scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/_isolated_labels.py | 0.935125 | 0.570271 | _isolated_labels.py | pypi |
import logging
import warnings
from typing import Dict, Tuple
import numpy as np
import scanpy as sc
from scipy.sparse import spmatrix
from sklearn.metrics.cluster import adjusted_rand_score, normalized_mutual_info_score
from sklearn.utils import check_array
from .utils import KMeans, check_square
logger = logging.getLogger(__name__)
def _compute_clustering_kmeans(X: np.ndarray, n_clusters: int) -> np.ndarray:
kmeans = KMeans(n_clusters)
kmeans.fit(X)
return kmeans.labels_
def _compute_clustering_leiden(connectivity_graph: spmatrix, resolution: float) -> np.ndarray:
g = sc._utils.get_igraph_from_adjacency(connectivity_graph)
clustering = g.community_leiden(objective_function="modularity", weights="weight", resolution_parameter=resolution)
clusters = clustering.membership
return np.asarray(clusters)
def _compute_nmi_ari_cluster_labels(
X: np.ndarray,
labels: np.ndarray,
resolution: float = 1.0,
) -> Tuple[float, float]:
labels_pred = _compute_clustering_leiden(X, resolution)
nmi = normalized_mutual_info_score(labels, labels_pred, average_method="arithmetic")
ari = adjusted_rand_score(labels, labels_pred)
return nmi, ari
def nmi_ari_cluster_labels_kmeans(X: np.ndarray, labels: np.ndarray) -> Dict[str, float]:
"""Compute nmi and ari between k-means clusters and labels.
This deviates from the original implementation in scib by using k-means
with k equal to the known number of cell types/labels. This leads to
a more efficient computation of the nmi and ari scores.
Parameters
----------
X
Array of shape (n_cells, n_features).
labels
Array of shape (n_cells,) representing label values
Returns
-------
nmi
Normalized mutual information score
ari
Adjusted rand index score
"""
X = check_array(X, accept_sparse=False, ensure_2d=True)
n_clusters = len(np.unique(labels))
labels_pred = _compute_clustering_kmeans(X, n_clusters)
nmi = normalized_mutual_info_score(labels, labels_pred, average_method="arithmetic")
ari = adjusted_rand_score(labels, labels_pred)
return {"nmi": nmi, "ari": ari}
def nmi_ari_cluster_labels_leiden(
X: spmatrix, labels: np.ndarray, optimize_resolution: bool = True, resolution: float = 1.0, n_jobs: int = 1
) -> Dict[str, float]:
"""Compute nmi and ari between leiden clusters and labels.
This deviates from the original implementation in scib by using leiden instead of
    louvain clustering. Installing joblib allows for parallelization of the leiden
    resolution optimization.
Parameters
----------
X
Array of shape (n_cells, n_cells) representing a connectivity graph.
Values should represent weights between pairs of neighbors, with a higher weight
indicating more connected.
labels
Array of shape (n_cells,) representing label values
optimize_resolution
Whether to optimize the resolution parameter of leiden clustering by searching over
10 values
resolution
Resolution parameter of leiden clustering. Only used if optimize_resolution is False.
n_jobs
Number of jobs for parallelizing resolution optimization via joblib. If -1, all CPUs
are used.
Returns
-------
nmi
Normalized mutual information score
ari
Adjusted rand index score
"""
X = check_array(X, accept_sparse=True, ensure_2d=True)
check_square(X)
if optimize_resolution:
n = 10
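        # candidate Leiden resolutions 0.2, 0.4, ..., 2.0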
resolutions = np.array([2 * x / n for x in range(1, n + 1)])
try:
from joblib import Parallel, delayed
out = Parallel(n_jobs=n_jobs)(delayed(_compute_nmi_ari_cluster_labels)(X, labels, r) for r in resolutions)
except ImportError:
warnings.warn("Using for loop over clustering resolutions. `pip install joblib` for parallelization.")
out = [_compute_nmi_ari_cluster_labels(X, labels, r) for r in resolutions]
nmi_ari = np.array(out)
nmi_ind = np.argmax(nmi_ari[:, 0])
nmi, ari = nmi_ari[nmi_ind, :]
return {"nmi": nmi, "ari": ari}
else:
nmi, ari = _compute_nmi_ari_cluster_labels(X, labels, resolution)
return {"nmi": nmi, "ari": ari} | /scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/_nmi_ari.py | 0.920016 | 0.636155 | _nmi_ari.py | pypi |
import numpy as np
import pandas as pd
from scib_metrics.utils import silhouette_samples
def silhouette_label(X: np.ndarray, labels: np.ndarray, rescale: bool = True, chunk_size: int = 256) -> float:
"""Average silhouette width (ASW) :cite:p:`luecken2022benchmarking`.
Parameters
----------
X
Array of shape (n_cells, n_features).
labels
Array of shape (n_cells,) representing label values
rescale
Scale asw into the range [0, 1].
chunk_size
Size of chunks to process at a time for distance computation.
Returns
-------
silhouette score
"""
asw = np.mean(silhouette_samples(X, labels, chunk_size=chunk_size))
if rescale:
asw = (asw + 1) / 2
return np.mean(asw)
def silhouette_batch(
X: np.ndarray, labels: np.ndarray, batch: np.ndarray, rescale: bool = True, chunk_size: int = 256
) -> float:
"""Average silhouette width (ASW) with respect to batch ids within each label :cite:p:`luecken2022benchmarking`.
Parameters
----------
X
Array of shape (n_cells, n_features).
labels
Array of shape (n_cells,) representing label values
batch
Array of shape (n_cells,) representing batch values
rescale
Scale asw into the range [0, 1]. If True, higher values are better.
chunk_size
Size of chunks to process at a time for distance computation.
Returns
-------
silhouette score
"""
sil_dfs = []
unique_labels = np.unique(labels)
for group in unique_labels:
labels_mask = labels == group
X_subset = X[labels_mask]
batch_subset = batch[labels_mask]
n_batches = len(np.unique(batch_subset))
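        # silhouette is undefined when the label has a single batch or every cell is its own batch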
if (n_batches == 1) or (n_batches == X_subset.shape[0]):
continue
sil_per_group = silhouette_samples(X_subset, batch_subset, chunk_size=chunk_size)
# take only absolute value
sil_per_group = np.abs(sil_per_group)
if rescale:
# scale s.t. highest number is optimal
sil_per_group = 1 - sil_per_group
sil_dfs.append(
pd.DataFrame(
{
"group": [group] * len(sil_per_group),
"silhouette_score": sil_per_group,
}
)
)
sil_df = pd.concat(sil_dfs).reset_index(drop=True)
sil_means = sil_df.groupby("group").mean()
asw = sil_means["silhouette_score"].mean()
return asw | /scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/_silhouette.py | 0.924108 | 0.721449 | _silhouette.py | pypi |
import os
import warnings
from dataclasses import asdict, dataclass
from enum import Enum
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Union
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scanpy as sc
from anndata import AnnData
from plottable import ColumnDefinition, Table
from plottable.cmap import normed_cmap
from plottable.plots import bar
from sklearn.preprocessing import MinMaxScaler
from tqdm import tqdm
import scib_metrics
from scib_metrics.nearest_neighbors import NeighborsOutput, pynndescent
Kwargs = Dict[str, Any]
MetricType = Union[bool, Kwargs]
_LABELS = "labels"
_BATCH = "batch"
_X_PRE = "X_pre"
_METRIC_TYPE = "Metric Type"
_AGGREGATE_SCORE = "Aggregate score"
# Mapping of metric fn names to clean DataFrame column names
metric_name_cleaner = {
"silhouette_label": "Silhouette label",
"silhouette_batch": "Silhouette batch",
"isolated_labels": "Isolated labels",
"nmi_ari_cluster_labels_leiden_nmi": "Leiden NMI",
"nmi_ari_cluster_labels_leiden_ari": "Leiden ARI",
"nmi_ari_cluster_labels_kmeans_nmi": "KMeans NMI",
"nmi_ari_cluster_labels_kmeans_ari": "KMeans ARI",
"clisi_knn": "cLISI",
"ilisi_knn": "iLISI",
"kbet_per_label": "KBET",
"graph_connectivity": "Graph connectivity",
"pcr_comparison": "PCR comparison",
}
@dataclass(frozen=True)
class BioConservation:
"""Specification of bio conservation metrics to run in the pipeline.
Metrics can be included using a boolean flag. Custom keyword args can be
used by passing a dictionary here. Keyword args should not set data-related
parameters, such as `X` or `labels`.
"""
isolated_labels: MetricType = True
nmi_ari_cluster_labels_leiden: MetricType = False
nmi_ari_cluster_labels_kmeans: MetricType = True
silhouette_label: MetricType = True
clisi_knn: MetricType = True
@dataclass(frozen=True)
class BatchCorrection:
"""Specification of which batch correction metrics to run in the pipeline.
Metrics can be included using a boolean flag. Custom keyword args can be
used by passing a dictionary here. Keyword args should not set data-related
parameters, such as `X` or `labels`.
"""
silhouette_batch: MetricType = True
ilisi_knn: MetricType = True
kbet_per_label: MetricType = True
graph_connectivity: MetricType = True
pcr_comparison: MetricType = True
class MetricAnnDataAPI(Enum):
"""Specification of the AnnData API for a metric."""
isolated_labels = lambda ad, fn: fn(ad.X, ad.obs[_LABELS], ad.obs[_BATCH])
nmi_ari_cluster_labels_leiden = lambda ad, fn: fn(ad.obsp["15_connectivities"], ad.obs[_LABELS])
nmi_ari_cluster_labels_kmeans = lambda ad, fn: fn(ad.X, ad.obs[_LABELS])
silhouette_label = lambda ad, fn: fn(ad.X, ad.obs[_LABELS])
clisi_knn = lambda ad, fn: fn(ad.obsp["90_distances"], ad.obs[_LABELS])
graph_connectivity = lambda ad, fn: fn(ad.obsp["15_distances"], ad.obs[_LABELS])
silhouette_batch = lambda ad, fn: fn(ad.X, ad.obs[_LABELS], ad.obs[_BATCH])
pcr_comparison = lambda ad, fn: fn(ad.obsm[_X_PRE], ad.X, ad.obs[_BATCH], categorical=True)
ilisi_knn = lambda ad, fn: fn(ad.obsp["90_distances"], ad.obs[_BATCH])
kbet_per_label = lambda ad, fn: fn(ad.obsp["50_connectivities"], ad.obs[_BATCH], ad.obs[_LABELS])
class Benchmarker:
"""Benchmarking pipeline for the single-cell integration task.
Parameters
----------
adata
AnnData object containing the raw count data and integrated embeddings as obsm keys.
batch_key
Key in `adata.obs` that contains the batch information.
label_key
Key in `adata.obs` that contains the cell type labels.
embedding_obsm_keys
List of obsm keys that contain the embeddings to be benchmarked.
bio_conservation_metrics
Specification of which bio conservation metrics to run in the pipeline.
batch_correction_metrics
Specification of which batch correction metrics to run in the pipeline.
pre_integrated_embedding_obsm_key
Obsm key containing a non-integrated embedding of the data. If `None`, the embedding will be computed
in the prepare step. See the notes below for more information.
n_jobs
Number of jobs to use for parallelization of neighbor search.
Notes
-----
`adata.X` should contain a form of the data that is not integrated, but is normalized. The `prepare` method will
use `adata.X` for PCA via :func:`~scanpy.tl.pca`, which also only uses features masked via `adata.var['highly_variable']`.
See further usage examples in the following tutorial:
1. :doc:`/notebooks/lung_example`
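    Examples
    --------
    A minimal sketch; the key names below are placeholders for your own ``obs``/``obsm`` keys:
    >>> bm = Benchmarker(
    ...     adata,
    ...     batch_key="batch",
    ...     label_key="cell_type",
    ...     embedding_obsm_keys=["X_pca", "X_scvi"],
    ... )
    >>> bm.benchmark()
    >>> results = bm.get_results()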
"""
def __init__(
self,
adata: AnnData,
batch_key: str,
label_key: str,
embedding_obsm_keys: List[str],
bio_conservation_metrics: Optional[BioConservation] = None,
batch_correction_metrics: Optional[BatchCorrection] = None,
pre_integrated_embedding_obsm_key: Optional[str] = None,
n_jobs: int = 1,
):
self._adata = adata
self._embedding_obsm_keys = embedding_obsm_keys
self._pre_integrated_embedding_obsm_key = pre_integrated_embedding_obsm_key
self._bio_conservation_metrics = bio_conservation_metrics if bio_conservation_metrics else BioConservation()
self._batch_correction_metrics = batch_correction_metrics if batch_correction_metrics else BatchCorrection()
self._results = pd.DataFrame(columns=list(self._embedding_obsm_keys) + [_METRIC_TYPE])
self._emb_adatas = {}
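        # kNN graph sizes required by the metrics: 15 (clustering, graph connectivity), 50 (kBET), 90 (LISI)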
self._neighbor_values = (15, 50, 90)
self._prepared = False
self._benchmarked = False
self._batch_key = batch_key
self._label_key = label_key
self._n_jobs = n_jobs
self._metric_collection_dict = {
"Bio conservation": self._bio_conservation_metrics,
"Batch correction": self._batch_correction_metrics,
}
def prepare(self, neighbor_computer: Optional[Callable[[np.ndarray, int], NeighborsOutput]] = None) -> None:
"""Prepare the data for benchmarking.
Parameters
----------
neighbor_computer
Function that computes the neighbors of the data. If `None`, the neighbors will be computed
with :func:`~scib_metrics.utils.nearest_neighbors.pynndescent`. The function should take as input
the data and the number of neighbors to compute and return a :class:`~scib_metrics.utils.nearest_neighbors.NeighborsOutput`
object.
"""
# Compute PCA
if self._pre_integrated_embedding_obsm_key is None:
# This is how scib does it
# https://github.com/theislab/scib/blob/896f689e5fe8c57502cb012af06bed1a9b2b61d2/scib/metrics/pcr.py#L197
sc.tl.pca(self._adata, use_highly_variable=False)
self._pre_integrated_embedding_obsm_key = "X_pca"
for emb_key in self._embedding_obsm_keys:
self._emb_adatas[emb_key] = AnnData(self._adata.obsm[emb_key], obs=self._adata.obs)
self._emb_adatas[emb_key].obs[_BATCH] = np.asarray(self._adata.obs[self._batch_key].values)
self._emb_adatas[emb_key].obs[_LABELS] = np.asarray(self._adata.obs[self._label_key].values)
self._emb_adatas[emb_key].obsm[_X_PRE] = self._adata.obsm[self._pre_integrated_embedding_obsm_key]
# Compute neighbors
for ad in tqdm(self._emb_adatas.values(), desc="Computing neighbors"):
if neighbor_computer is not None:
neigh_output = neighbor_computer(ad.X, max(self._neighbor_values))
else:
neigh_output = pynndescent(
ad.X, n_neighbors=max(self._neighbor_values), random_state=0, n_jobs=self._n_jobs
)
indices, distances = neigh_output.indices, neigh_output.distances
for n in self._neighbor_values:
sp_distances, sp_conns = sc.neighbors._compute_connectivities_umap(
indices[:, :n], distances[:, :n], ad.n_obs, n_neighbors=n
)
ad.obsp[f"{n}_connectivities"] = sp_conns
ad.obsp[f"{n}_distances"] = sp_distances
self._prepared = True
def benchmark(self) -> None:
"""Run the pipeline."""
if self._benchmarked:
warnings.warn(
"The benchmark has already been run. Running it again will overwrite the previous results.",
UserWarning,
)
if not self._prepared:
self.prepare()
num_metrics = sum(
[sum([v is not False for v in asdict(met_col)]) for met_col in self._metric_collection_dict.values()]
)
for emb_key, ad in tqdm(self._emb_adatas.items(), desc="Embeddings", position=0, colour="green"):
pbar = tqdm(total=num_metrics, desc="Metrics", position=1, leave=False, colour="blue")
for metric_type, metric_collection in self._metric_collection_dict.items():
for metric_name, use_metric_or_kwargs in asdict(metric_collection).items():
if use_metric_or_kwargs:
pbar.set_postfix_str(f"{metric_type}: {metric_name}")
metric_fn = getattr(scib_metrics, metric_name)
if isinstance(use_metric_or_kwargs, dict):
# Kwargs in this case
metric_fn = partial(metric_fn, **use_metric_or_kwargs)
metric_value = getattr(MetricAnnDataAPI, metric_name)(ad, metric_fn)
# nmi/ari metrics return a dict
if isinstance(metric_value, dict):
for k, v in metric_value.items():
self._results.loc[f"{metric_name}_{k}", emb_key] = v
self._results.loc[f"{metric_name}_{k}", _METRIC_TYPE] = metric_type
else:
self._results.loc[metric_name, emb_key] = metric_value
self._results.loc[metric_name, _METRIC_TYPE] = metric_type
pbar.update(1)
self._benchmarked = True
def get_results(self, min_max_scale: bool = True, clean_names: bool = True) -> pd.DataFrame:
"""Return the benchmarking results.
Parameters
----------
min_max_scale
Whether to min max scale the results.
clean_names
Whether to clean the metric names.
Returns
-------
The benchmarking results.
"""
df = self._results.transpose()
df.index.name = "Embedding"
df = df.loc[df.index != _METRIC_TYPE]
if min_max_scale:
# Use sklearn to min max scale
df = pd.DataFrame(
MinMaxScaler().fit_transform(df),
columns=df.columns,
index=df.index,
)
if clean_names:
df = df.rename(columns=metric_name_cleaner)
df = df.transpose()
df[_METRIC_TYPE] = self._results[_METRIC_TYPE].values
# Compute scores
per_class_score = df.groupby(_METRIC_TYPE).mean().transpose()
# This is the default scIB weighting from the manuscript
per_class_score["Total"] = 0.4 * per_class_score["Batch correction"] + 0.6 * per_class_score["Bio conservation"]
df = pd.concat([df.transpose(), per_class_score], axis=1)
df.loc[_METRIC_TYPE, per_class_score.columns] = _AGGREGATE_SCORE
return df
def plot_results_table(
self, min_max_scale: bool = True, show: bool = True, save_dir: Optional[str] = None
) -> Table:
"""Plot the benchmarking results.
Parameters
----------
min_max_scale
Whether to min max scale the results.
show
Whether to show the plot.
save_dir
The directory to save the plot to. If `None`, the plot is not saved.
"""
num_embeds = len(self._embedding_obsm_keys)
cmap_fn = lambda col_data: normed_cmap(col_data, cmap=matplotlib.cm.PRGn, num_stds=2.5)
df = self.get_results(min_max_scale=min_max_scale)
# Do not want to plot what kind of metric it is
plot_df = df.drop(_METRIC_TYPE, axis=0)
# Sort by total score
plot_df = plot_df.sort_values(by="Total", ascending=False).astype(np.float64)
plot_df["Method"] = plot_df.index
# Split columns by metric type, using df as it doesn't have the new method col
score_cols = df.columns[df.loc[_METRIC_TYPE] == _AGGREGATE_SCORE]
other_cols = df.columns[df.loc[_METRIC_TYPE] != _AGGREGATE_SCORE]
column_definitions = [
ColumnDefinition("Method", width=1.5, textprops={"ha": "left", "weight": "bold"}),
]
# Circles for the metric values
column_definitions += [
ColumnDefinition(
col,
title=col.replace(" ", "\n", 1),
width=1,
textprops={
"ha": "center",
"bbox": {"boxstyle": "circle", "pad": 0.25},
},
cmap=cmap_fn(plot_df[col]),
group=df.loc[_METRIC_TYPE, col],
formatter="{:.2f}",
)
for i, col in enumerate(other_cols)
]
# Bars for the aggregate scores
column_definitions += [
ColumnDefinition(
col,
width=1,
title=col.replace(" ", "\n", 1),
plot_fn=bar,
plot_kw={
"cmap": matplotlib.cm.YlGnBu,
"plot_bg_bar": False,
"annotate": True,
"height": 0.9,
"formatter": "{:.2f}",
},
group=df.loc[_METRIC_TYPE, col],
border="left" if i == 0 else None,
)
for i, col in enumerate(score_cols)
]
# Allow to manipulate text post-hoc (in illustrator)
with matplotlib.rc_context({"svg.fonttype": "none"}):
fig, ax = plt.subplots(figsize=(len(df.columns) * 1.25, 3 + 0.3 * num_embeds))
tab = Table(
plot_df,
cell_kw={
"linewidth": 0,
"edgecolor": "k",
},
column_definitions=column_definitions,
ax=ax,
row_dividers=True,
footer_divider=True,
textprops={"fontsize": 10, "ha": "center"},
row_divider_kw={"linewidth": 1, "linestyle": (0, (1, 5))},
col_label_divider_kw={"linewidth": 1, "linestyle": "-"},
column_border_kw={"linewidth": 1, "linestyle": "-"},
index_col="Method",
).autoset_fontcolors(colnames=plot_df.columns)
if show:
plt.show()
if save_dir is not None:
fig.savefig(os.path.join(save_dir, "scib_results.svg"), facecolor=ax.get_facecolor(), dpi=300)
return tab | /scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/benchmark/_core.py | 0.913857 | 0.370595 | _core.py | pypi |
from typing import Optional
import jax
import jax.numpy as jnp
import numpy as np
import pandas as pd
from jax import jit
from scib_metrics._types import NdArray
from ._pca import pca
from ._utils import one_hot
def principal_component_regression(
X: NdArray,
covariate: NdArray,
categorical: bool = False,
n_components: Optional[int] = None,
) -> float:
"""Principal component regression (PCR) :cite:p:`buttner2018`.
Parameters
----------
X
Array of shape (n_cells, n_features).
covariate
Array of shape (n_cells,) or (n_cells, 1) representing batch/covariate values.
categorical
If True, batch will be treated as categorical and one-hot encoded.
n_components:
Number of components to compute, passed into :func:`~scib_metrics.utils.pca`.
If None, all components are used.
Returns
-------
pcr: float
Principal component regression using the first n_components principal components.
"""
if len(X.shape) != 2:
raise ValueError("Dimension mismatch: X must be 2-dimensional.")
if X.shape[0] != covariate.shape[0]:
raise ValueError("Dimension mismatch: X and batch must have the same number of samples.")
if categorical:
covariate = np.asarray(pd.Categorical(covariate).codes)
else:
covariate = np.asarray(covariate)
covariate = one_hot(covariate) if categorical else covariate.reshape((covariate.shape[0], 1))
pca_results = pca(X, n_components=n_components)
# Center inputs for no intercept
covariate = covariate - jnp.mean(covariate, axis=0)
pcr = _pcr(pca_results.coordinates, covariate, pca_results.variance)
return float(pcr)
@jit
def _pcr(
X_pca: NdArray,
covariate: NdArray,
var: NdArray,
) -> NdArray:
"""Principal component regression.
Parameters
----------
X_pca
Array of shape (n_cells, n_components) containing PCA coordinates. Must be standardized.
covariate
Array of shape (n_cells, 1) or (n_cells, n_classes) containing batch/covariate values. Must be standardized
if not categorical (one-hot).
var
Array of shape (n_components,) containing the explained variance of each PC.
"""
def r2(pc, batch):
residual_sum = jnp.linalg.lstsq(batch, pc)[1]
total_sum = jnp.sum((pc - jnp.mean(pc)) ** 2)
return jnp.maximum(0, 1 - residual_sum / total_sum)
# Index PCs on axis = 1, don't index batch
r2_ = jax.vmap(r2, in_axes=(1, None))(X_pca, covariate)
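    # average the per-PC R^2 values, weighted by each PC's explained variance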
return jnp.dot(jnp.ravel(r2_), var) / jnp.sum(var) | /scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/utils/_pcr.py | 0.958876 | 0.484441 | _pcr.py | pypi |
from functools import partial
from typing import Tuple, Union
import chex
import jax
import jax.numpy as jnp
import numpy as np
from ._utils import get_ndarray
NdArray = Union[np.ndarray, jnp.ndarray]
@chex.dataclass
class _NeighborProbabilityState:
H: float
P: chex.ArrayDevice
Hdiff: float
beta: float
betamin: float
betamax: float
tries: int
@jax.jit
def _Hbeta(knn_dists_row: jnp.ndarray, beta: float) -> Tuple[jnp.ndarray, jnp.ndarray]:
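    # Entropy H and normalized neighbor probabilities P for a Gaussian kernel with precision beta,
    # used by the t-SNE-style perplexity calibration below.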
P = jnp.exp(-knn_dists_row * beta)
sumP = jnp.nansum(P)
H = jnp.where(sumP == 0, 0, jnp.log(sumP) + beta * jnp.nansum(knn_dists_row * P) / sumP)
P = jnp.where(sumP == 0, jnp.zeros_like(knn_dists_row), P / sumP)
return H, P
@jax.jit
def _get_neighbor_probability(
knn_dists_row: jnp.ndarray, perplexity: float, tol: float
) -> Tuple[jnp.ndarray, jnp.ndarray]:
beta = 1
betamin = -jnp.inf
betamax = jnp.inf
H, P = _Hbeta(knn_dists_row, beta)
Hdiff = H - jnp.log(perplexity)
def _get_neighbor_probability_step(state):
Hdiff = state.Hdiff
beta = state.beta
betamin = state.betamin
betamax = state.betamax
tries = state.tries
new_betamin = jnp.where(Hdiff > 0, beta, betamin)
new_betamax = jnp.where(Hdiff > 0, betamax, beta)
new_beta = jnp.where(
Hdiff > 0,
jnp.where(betamax == jnp.inf, beta * 2, (beta + betamax) / 2),
jnp.where(betamin == -jnp.inf, beta / 2, (beta + betamin) / 2),
)
new_H, new_P = _Hbeta(knn_dists_row, new_beta)
new_Hdiff = new_H - jnp.log(perplexity)
return _NeighborProbabilityState(
H=new_H, P=new_P, Hdiff=new_Hdiff, beta=new_beta, betamin=new_betamin, betamax=new_betamax, tries=tries + 1
)
def _get_neighbor_probability_convergence(state):
Hdiff, tries = state.Hdiff, state.tries
return jnp.logical_and(jnp.abs(Hdiff) > tol, tries < 50)
init_state = _NeighborProbabilityState(H=H, P=P, Hdiff=Hdiff, beta=beta, betamin=betamin, betamax=betamax, tries=0)
final_state = jax.lax.while_loop(_get_neighbor_probability_convergence, _get_neighbor_probability_step, init_state)
return final_state.H, final_state.P
def _compute_simpson_index_cell(
knn_dists_row: jnp.ndarray, knn_labels_row: jnp.ndarray, n_batches: int, perplexity: float, tol: float
) -> jnp.ndarray:
H, P = _get_neighbor_probability(knn_dists_row, perplexity, tol)
def _non_zero_H_simpson():
sumP = jnp.bincount(knn_labels_row, weights=P, length=n_batches)
return jnp.where(knn_labels_row.shape[0] == P.shape[0], jnp.dot(sumP, sumP), 1)
return jnp.where(H == 0, -1, _non_zero_H_simpson())
def compute_simpson_index(
knn_dists: NdArray,
knn_idx: NdArray,
labels: NdArray,
n_labels: int,
perplexity: float = 30,
tol: float = 1e-5,
) -> np.ndarray:
"""Compute the Simpson index for each cell.
Parameters
----------
knn_dists
KNN distances of size (n_cells, n_neighbors).
knn_idx
KNN indices of size (n_cells, n_neighbors) corresponding to distances.
labels
Cell labels of size (n_cells,).
n_labels
Number of labels.
perplexity
Measure of the effective number of neighbors.
tol
Tolerance for binary search.
Returns
-------
simpson_index
Simpson index of size (n_cells,).
"""
knn_dists = jnp.array(knn_dists)
knn_idx = jnp.array(knn_idx)
labels = jnp.array(labels)
knn_labels = labels[knn_idx]
simpson_fn = partial(_compute_simpson_index_cell, n_batches=n_labels, perplexity=perplexity, tol=tol)
out = jax.vmap(simpson_fn)(knn_dists, knn_labels)
return get_ndarray(out) | /scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/utils/_lisi.py | 0.935626 | 0.644267 | _lisi.py | pypi |
from functools import partial
from typing import Literal
import jax
import jax.numpy as jnp
import numpy as np
from sklearn.utils import check_array
from scib_metrics._types import IntOrKey
from ._dist import cdist
from ._utils import get_ndarray, validate_seed
def _initialize_random(X: jnp.ndarray, n_clusters: int, key: jax.random.KeyArray) -> jnp.ndarray:
"""Initialize cluster centroids randomly."""
n_obs = X.shape[0]
indices = jax.random.choice(key, n_obs, (n_clusters,), replace=False)
initial_state = X[indices]
return initial_state
@partial(jax.jit, static_argnums=1)
def _initialize_plus_plus(X: jnp.ndarray, n_clusters: int, key: jax.random.KeyArray) -> jnp.ndarray:
"""Initialize cluster centroids with k-means++ algorithm."""
n_obs = X.shape[0]
key, subkey = jax.random.split(key)
initial_centroid_idx = jax.random.choice(subkey, n_obs, (1,), replace=False)
initial_centroid = X[initial_centroid_idx].ravel()
dist_sq = jnp.square(cdist(initial_centroid[jnp.newaxis, :], X)).ravel()
initial_state = {"min_dist_sq": dist_sq, "centroid": initial_centroid, "key": key}
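    # number of candidate centers tried per step; same heuristic as scikit-learn's k-means++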
n_local_trials = 2 + int(np.log(n_clusters))
def _step(state, _):
prob = state["min_dist_sq"] / jnp.sum(state["min_dist_sq"])
# note that observations already chosen as centers will have 0 probability
# and will not be chosen again
state["key"], subkey = jax.random.split(state["key"])
next_centroid_idx_candidates = jax.random.choice(subkey, n_obs, (n_local_trials,), replace=False, p=prob)
next_centroid_candidates = X[next_centroid_idx_candidates]
# candidates by observations
dist_sq_candidates = jnp.square(cdist(next_centroid_candidates, X))
dist_sq_candidates = jnp.minimum(state["min_dist_sq"][jnp.newaxis, :], dist_sq_candidates)
candidates_pot = dist_sq_candidates.sum(axis=1)
# Decide which candidate is the best
best_candidate = jnp.argmin(candidates_pot)
min_dist_sq = dist_sq_candidates[best_candidate]
best_candidate = next_centroid_idx_candidates[best_candidate]
state["min_dist_sq"] = min_dist_sq.ravel()
state["centroid"] = X[best_candidate].ravel()
return state, state["centroid"]
_, centroids = jax.lax.scan(_step, initial_state, jnp.arange(n_clusters - 1))
return centroids
@jax.jit
def _get_dist_labels(X: jnp.ndarray, centroids: jnp.ndarray) -> jnp.ndarray:
"""Get the distance and labels for each observation."""
dist = cdist(X, centroids)
labels = jnp.argmin(dist, axis=1)
return dist, labels
class KMeans:
"""Jax implementation of :class:`sklearn.cluster.KMeans`.
This implementation is limited to Euclidean distance.
Parameters
----------
n_clusters
Number of clusters.
init
Cluster centroid initialization method. One of the following:
* ``'k-means++'``: Sample initial cluster centroids based on an
empirical distribution of the points' contributions to the
overall inertia.
* ``'random'``: Uniformly sample observations as initial centroids
n_init
Number of times the k-means algorithm will be initialized.
max_iter
Maximum number of iterations of the k-means algorithm for a single run.
tol
Relative tolerance with regards to inertia to declare convergence.
seed
Random seed.
"""
def __init__(
self,
n_clusters: int = 8,
init: Literal["k-means++", "random"] = "k-means++",
n_init: int = 10,
max_iter: int = 300,
tol: float = 1e-4,
seed: IntOrKey = 0,
):
self.n_clusters = n_clusters
self.n_init = n_init
self.max_iter = max_iter
self.tol = tol
self.seed: jax.random.KeyArray = validate_seed(seed)
if init not in ["k-means++", "random"]:
raise ValueError("Invalid init method, must be one of ['k-means++' or 'random'].")
if init == "k-means++":
self._initialize = _initialize_plus_plus
else:
self._initialize = _initialize_random
def fit(self, X: np.ndarray):
"""Fit the model to the data."""
X = check_array(X, dtype=np.float32, order="C")
# Subtract mean for numerical accuracy
mean = X.mean(axis=0)
X -= mean
self._fit(X)
X += mean
self.cluster_centroids_ += mean
return self
def _fit(self, X: np.ndarray):
all_centroids, all_inertias = jax.lax.map(
lambda key: self._kmeans_full_run(X, key), jax.random.split(self.seed, self.n_init)
)
i = jnp.argmin(all_inertias)
self.cluster_centroids_ = get_ndarray(all_centroids[i])
self.inertia_ = get_ndarray(all_inertias[i])
_, labels = _get_dist_labels(X, self.cluster_centroids_)
self.labels_ = get_ndarray(labels)
@partial(jax.jit, static_argnums=(0,))
def _kmeans_full_run(self, X: jnp.ndarray, key: jnp.ndarray) -> jnp.ndarray:
def _kmeans_step(state):
old_inertia = state[1]
centroids, _, _, n_iter = state
# TODO(adamgayoso): Efficiently compute argmin and min simultaneously.
dist, new_labels = _get_dist_labels(X, centroids)
# From https://colab.research.google.com/drive/1AwS4haUx6swF82w3nXr6QKhajdF8aSvA?usp=sharing
counts = (new_labels[jnp.newaxis, :] == jnp.arange(self.n_clusters)[:, jnp.newaxis]).sum(
axis=1, keepdims=True
)
counts = jnp.clip(counts, a_min=1, a_max=None)
# Sum over points in a centroid by zeroing others out
new_centroids = (
jnp.sum(
jnp.where(
# axes: (data points, clusters, data dimension)
new_labels[:, jnp.newaxis, jnp.newaxis]
== jnp.arange(self.n_clusters)[jnp.newaxis, :, jnp.newaxis],
X[:, jnp.newaxis, :],
0.0,
),
axis=0,
)
/ counts
)
new_inertia = jnp.mean(jnp.min(dist, axis=1))
n_iter = n_iter + 1
return new_centroids, new_inertia, old_inertia, n_iter
def _kmeans_convergence(state):
_, new_inertia, old_inertia, n_iter = state
cond1 = jnp.abs(old_inertia - new_inertia) < self.tol
cond2 = n_iter > self.max_iter
return jnp.logical_or(cond1, cond2)[0]
centroids = self._initialize(X, self.n_clusters, key)
# centroids, new_inertia, old_inertia, n_iter
state = (centroids, jnp.inf, jnp.inf, jnp.array([0.0]))
state = _kmeans_step(state)
state = jax.lax.while_loop(_kmeans_convergence, _kmeans_step, state)
return state[0], state[1] | /scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/utils/_kmeans.py | 0.880026 | 0.44071 | _kmeans.py | pypi |
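A minimal usage sketch of the `KMeans` class above, mirroring the scikit-learn
interface it emulates. The import path is an assumption for illustration and may
differ from the package's public API.

```
import numpy as np

from scib_metrics.utils import KMeans  # assumed import path

X = np.random.default_rng(0).normal(size=(500, 16)).astype(np.float32)
km = KMeans(n_clusters=8, init="k-means++", n_init=10, seed=0).fit(X)
print(km.cluster_centroids_.shape)  # (8, 16)
print(km.labels_.shape)             # (500,)
print(float(km.inertia_))           # objective value of the best of the n_init runs
```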
from typing import Optional, Tuple
import jax.numpy as jnp
from chex import dataclass
from jax import jit
from scib_metrics._types import NdArray
from ._utils import get_ndarray
@dataclass
class _SVDResult:
"""SVD result.
Attributes
----------
u
Array of shape (n_cells, n_components) containing the left singular vectors.
s
Array of shape (n_components,) containing the singular values.
v
Array of shape (n_components, n_features) containing the right singular vectors.
"""
u: NdArray
s: NdArray
v: NdArray
@dataclass
class _PCAResult:
"""PCA result.
Attributes
----------
coordinates
Array of shape (n_cells, n_components) containing the PCA coordinates.
components
Array of shape (n_components, n_features) containing the PCA components.
variance
Array of shape (n_components,) containing the explained variance of each PC.
variance_ratio
Array of shape (n_components,) containing the explained variance ratio of each PC.
svd
Dataclass containing the SVD data.
"""
coordinates: NdArray
components: NdArray
variance: NdArray
variance_ratio: NdArray
svd: Optional[_SVDResult] = None
def _svd_flip(
u: NdArray,
v: NdArray,
u_based_decision: bool = True,
):
"""Sign correction to ensure deterministic output from SVD.
Jax implementation of :func:`~sklearn.utils.extmath.svd_flip`.
Parameters
----------
u
Left singular vectors of shape (M, K).
v
Right singular vectors of shape (K, N).
u_based_decision
If True, use the columns of u as the basis for sign flipping.
"""
if u_based_decision:
max_abs_cols = jnp.argmax(jnp.abs(u), axis=0)
signs = jnp.sign(u[max_abs_cols, jnp.arange(u.shape[1])])
else:
max_abs_rows = jnp.argmax(jnp.abs(v), axis=1)
signs = jnp.sign(v[jnp.arange(v.shape[0]), max_abs_rows])
u_ = u * signs
v_ = v * signs[:, None]
return u_, v_
def pca(
X: NdArray,
n_components: Optional[int] = None,
return_svd: bool = False,
) -> _PCAResult:
"""Principal component analysis (PCA).
Parameters
----------
X
Array of shape (n_cells, n_features).
n_components
Number of components to keep. If None, all components are kept.
return_svd
If True, also return the results from SVD.
Returns
-------
    results: _PCAResult
"""
max_components = min(X.shape)
if n_components and n_components > max_components:
raise ValueError(f"n_components = {n_components} must be <= min(n_cells, n_features) = {max_components}")
n_components = n_components or max_components
u, s, v, variance, variance_ratio = _pca(X)
# Select n_components
coordinates = u[:, :n_components] * s[:n_components]
components = v[:n_components]
variance_ = variance[:n_components]
variance_ratio_ = variance_ratio[:n_components]
results = _PCAResult(
coordinates=get_ndarray(coordinates),
components=get_ndarray(components),
variance=get_ndarray(variance_),
variance_ratio=get_ndarray(variance_ratio_),
svd=_SVDResult(u=get_ndarray(u), s=get_ndarray(s), v=get_ndarray(v)) if return_svd else None,
)
return results
@jit
def _pca(
X: NdArray,
) -> Tuple[NdArray, NdArray, NdArray, NdArray, NdArray]:
"""Principal component analysis.
Parameters
----------
X
Array of shape (n_cells, n_features).
Returns
-------
u: NdArray
Left singular vectors of shape (M, K).
s: NdArray
Singular values of shape (K,).
v: NdArray
Right singular vectors of shape (K, N).
variance: NdArray
Array of shape (K,) containing the explained variance of each PC.
variance_ratio: NdArray
Array of shape (K,) containing the explained variance ratio of each PC.
"""
X_ = X - jnp.mean(X, axis=0)
u, s, v = jnp.linalg.svd(X_, full_matrices=False)
u, v = _svd_flip(u, v)
variance = (s**2) / (X.shape[0] - 1)
total_variance = jnp.sum(variance)
variance_ratio = variance / total_variance
return u, s, v, variance, variance_ratio | /scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/utils/_pca.py | 0.975012 | 0.662223 | _pca.py | pypi |
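For reference, a short sketch of how the `pca` function above might be called;
the import path is assumed rather than taken from this file.

```
import numpy as np

from scib_metrics.utils import pca  # assumed import path

X = np.random.default_rng(0).normal(size=(200, 50)).astype(np.float32)
res = pca(X, n_components=10, return_svd=True)
print(res.coordinates.shape)     # (200, 10) PCA coordinates
print(res.components.shape)      # (10, 50) principal axes
print(res.variance_ratio.sum())  # <= 1, since only 10 of 50 PCs are kept
print(res.svd.s.shape)           # (50,) full set of singular values
```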
import logging
from typing import Literal
import numpy as np
import pynndescent
import scipy
from scipy.sparse import csr_matrix, issparse
logger = logging.getLogger(__name__)
def _compute_transitions(X: csr_matrix, density_normalize: bool = True):
"""Code from scanpy.
https://github.com/scverse/scanpy/blob/2e98705347ea484c36caa9ba10de1987b09081bf/scanpy/neighbors/__init__.py#L899
"""
# TODO(adamgayoso): Refactor this with Jax
# density normalization as of Coifman et al. (2005)
# ensures that kernel matrix is independent of sampling density
if density_normalize:
# q[i] is an estimate for the sampling density at point i
# it's also the degree of the underlying graph
q = np.asarray(X.sum(axis=0))
if not issparse(X):
Q = np.diag(1.0 / q)
else:
Q = scipy.sparse.spdiags(1.0 / q, 0, X.shape[0], X.shape[0])
K = Q @ X @ Q
else:
K = X
# z[i] is the square root of the row sum of K
z = np.sqrt(np.asarray(K.sum(axis=0)))
if not issparse(K):
Z = np.diag(1.0 / z)
else:
Z = scipy.sparse.spdiags(1.0 / z, 0, K.shape[0], K.shape[0])
transitions_sym = Z @ K @ Z
return transitions_sym
def _compute_eigen(
transitions_sym: csr_matrix,
n_comps: int = 15,
sort: Literal["decrease", "increase"] = "decrease",
):
"""Compute eigen decomposition of transition matrix.
https://github.com/scverse/scanpy/blob/2e98705347ea484c36caa9ba10de1987b09081bf/scanpy/neighbors/__init__.py
"""
# TODO(adamgayoso): Refactor this with Jax
matrix = transitions_sym
# compute the spectrum
if n_comps == 0:
evals, evecs = scipy.linalg.eigh(matrix)
else:
n_comps = min(matrix.shape[0] - 1, n_comps)
# ncv = max(2 * n_comps + 1, int(np.sqrt(matrix.shape[0])))
ncv = None
which = "LM" if sort == "decrease" else "SM"
# it pays off to increase the stability with a bit more precision
matrix = matrix.astype(np.float64)
evals, evecs = scipy.sparse.linalg.eigsh(matrix, k=n_comps, which=which, ncv=ncv)
evals, evecs = evals.astype(np.float32), evecs.astype(np.float32)
if sort == "decrease":
evals = evals[::-1]
evecs = evecs[:, ::-1]
return evals, evecs
def _get_sparse_matrix_from_indices_distances_numpy(indices, distances, n_obs, n_neighbors):
"""Code from scanpy."""
n_nonzero = n_obs * n_neighbors
indptr = np.arange(0, n_nonzero + 1, n_neighbors)
D = csr_matrix(
(
distances.copy().ravel(), # copy the data, otherwise strange behavior here
indices.copy().ravel(),
indptr,
),
shape=(n_obs, n_obs),
)
D.eliminate_zeros()
D.sort_indices()
return D
def diffusion_nn(X: csr_matrix, k: int, n_comps: int = 100):
"""Diffusion-based neighbors.
This function generates a nearest neighbour list from a connectivities matrix.
This allows us to select a consistent number of nearest neighbors across all methods.
    This differs from the original scIB implementation by leveraging diffusion maps. Here we
    embed the data with diffusion maps, in which Euclidean distance closely approximates the
    diffusion distance. We then use pynndescent to find the nearest neighbours in this
    embedding space.
Parameters
----------
X
Array of shape (n_cells, n_cells) with non-zero values
representing connectivities.
k
Number of nearest neighbours to select.
n_comps
Number of components for diffusion map
Returns
-------
Neighbors graph
"""
transitions = _compute_transitions(X)
evals, evecs = _compute_eigen(transitions, n_comps=n_comps)
evals += 1e-8 # Avoid division by zero
# Multiscale such that the number of steps t gets "integrated out"
embedding = evecs
scaled_evals = np.array([e if e == 1 else e / (1 - e) for e in evals])
embedding *= scaled_evals
nn_obj = pynndescent.NNDescent(embedding, n_neighbors=k + 1)
neigh_inds, neigh_distances = nn_obj.neighbor_graph
# We purposely ignore the first neighbor as it is the cell itself
# It gets added back inside the kbet internal function
neigh_graph = _get_sparse_matrix_from_indices_distances_numpy(
neigh_inds[:, 1:], neigh_distances[:, 1:], X.shape[0], k
)
return neigh_graph | /scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/utils/_diffusion_nn.py | 0.674587 | 0.588416 | _diffusion_nn.py | pypi |
import warnings
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from chex import ArrayDevice
from jax import nn
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import check_array
from scib_metrics._types import ArrayLike, IntOrKey, NdArray
def get_ndarray(x: ArrayDevice) -> np.ndarray:
"""Convert Jax device array to Numpy array."""
return np.array(jax.device_get(x))
def one_hot(y: NdArray, n_classes: Optional[int] = None) -> jnp.ndarray:
"""One-hot encode an array. Wrapper around :func:`~jax.nn.one_hot`.
Parameters
----------
y
Array of shape (n_cells,) or (n_cells, 1).
n_classes
Number of classes. If None, inferred from the data.
Returns
-------
one_hot: jnp.ndarray
Array of shape (n_cells, n_classes).
"""
n_classes = n_classes or jnp.max(y) + 1
return nn.one_hot(jnp.ravel(y), n_classes)
def validate_seed(seed: IntOrKey) -> jax.random.KeyArray:
"""Validate a seed and return a Jax random key."""
return jax.random.PRNGKey(seed) if isinstance(seed, int) else seed
def check_square(X: ArrayLike):
"""Check if a matrix is square."""
if X.shape[0] != X.shape[1]:
raise ValueError("X must be a square matrix")
def convert_knn_graph_to_idx(X: csr_matrix) -> Tuple[np.ndarray, np.ndarray]:
"""Convert a kNN graph to indices and distances."""
check_array(X, accept_sparse="csr")
check_square(X)
n_neighbors = np.unique(X.nonzero()[0], return_counts=True)[1]
if len(np.unique(n_neighbors)) > 1:
raise ValueError("Each cell must have the same number of neighbors.")
n_neighbors = int(np.unique(n_neighbors)[0])
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="Precomputed sparse input")
nn_obj = NearestNeighbors(n_neighbors=n_neighbors, metric="precomputed").fit(X)
kneighbors = nn_obj.kneighbors(X)
return kneighbors | /scib_metrics-0.3.3-py3-none-any.whl/scib_metrics/utils/_utils.py | 0.918441 | 0.585012 | _utils.py | pypi |
from pint.quantity import _Quantity
from sci import units
from pint.errors import UndefinedUnitError
def check_units(value, dimension: str):
""" Check if units are of a certain dimension
Parameters
----------
value: `pint.quantity._Quantity`
The pint :class:`pint.quantity._Quantity` to check
    dimension: `str`
Desired dimensionality of value
Returns
-------
result: `bool`
If the units are of the desired dimension, returns True.
Raises
------
ValueError
        Raised if the unit dimensions are incorrect or the
        value is not a pint unit quantity.
Examples
--------
>>> check_units(100 * units.millilters, '[length]^3')
True
Notes
-----
See the pint_ documentation for more examples on dimensionality.
    .. _pint: https://pint.readthedocs.io/en/latest/wrapping.html#checking-dimensionality
"""
try:
if value.check(dimension):
return True
else:
raise ValueError(f'{value} must contain pint units of dimension {dimension}.')
except AttributeError:
        raise ValueError(f'{value} does not contain pint units (must be of dimension {dimension}).')
def filter_dict_values(input: dict, filter):
''' Filter dictionary values through a function called filter
This function will look recursively through nested dictionaries
and call filter(value) on all dictionary values.
Parameters
----------
input: `dict`
Input dictionary to filter
    filter: `callable`
        Function for filtering dictionary values.
        This is called in the form filter(value)
Returns
-------
filtered: `dict`
Returns filtered dictionary
'''
for k, v in input.items():
if isinstance(v, dict):
input[k] = filter_dict_values(v, filter)
else:
input[k] = filter(v)
return input
def stringify(input):
'''Convert pint quantities into strings
Parameters
----------
input: `pint.quantity._Quantity`
Pint unit quantity
Returns
-------
    output: `str`
input as a string
'''
if isinstance(input, _Quantity):
return str(input)
else:
return input
def pintify(input: str):
''' Convert strings into pint quantities
Parameters
----------
input: `str`
String to be converted to pint quantity
Returns
-------
result: `pint.quantity._Quantity`
input as a pint quantity
'''
try:
return units(input)
except UndefinedUnitError:
return input
def check_kwargs(key, caller, **kwargs):
''' Check if kwargs has a needed field
Parameters
----------
key: `str`
keyword to look for in kwargs
Returns
-------
value
The value of the kwargs[key]
    params: `dict`
The params dictionary (without the returned key/value pair)
Raises
------
ValueError
Raised if the key does not exist in kwargs
'''
if not kwargs.get(key):
        raise ValueError('''{} needs to be an argument when instantiating a {}.'''
.format(key, caller))
else:
value = kwargs.pop(key)
return value, kwargs | /scici-0.1.0.tar.gz/scici-0.1.0/sci/utils.py | 0.895451 | 0.620507 | utils.py | pypi |
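An illustrative round trip through the helpers above, converting pint quantities
to strings and back; the import path and example values are assumptions, not part
of the original module.

```
from sci import units
from sci.utils import check_units, filter_dict_values, pintify, stringify  # assumed path

params = {"liquid_volume": units("10 milliliters"), "label": "acid"}
as_strings = filter_dict_values(dict(params), stringify)  # quantities -> strings
restored = filter_dict_values(as_strings, pintify)        # strings -> quantities
check_units(restored["liquid_volume"], "[length]^3")      # returns True
```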
from sci import units
from sci.utils import check_units, filter_dict_values, stringify, check_kwargs, pintify
from pint.quantity import _Quantity
from interface import implements, Interface
from typing import Type, Union, List
class _Ref:
''' Base Class for Refs
Refs are physical containers (e.g., syringes, microplates).
This class should not be used directly. Instead, it should be inherited
by another class.
Parameters
----------
name: `str`
Reference name for the ref (e.g., 0.5M NaOH solution)
**params
The type parameter must be passed in as a keyword argument to all refs.
- ``type``: Ref type
'''
def __init__(self, name: str, **params):
self.type, self.params = check_kwargs('type', 'Ref', **dict(params))
self.name = name
def to_dict(self):
''' Convert ref to a dictionary ready for json serialization
'''
str_params = filter_dict_values(self.params, stringify)
return {"type": self.type, "name": self.name, "params": str_params}
def __repr__(self):
return f"{self.name} ({self.type.lower()})"
#Create interface for refs
_RefInterface = Interface.from_class(_Ref, ['__init__'])
ref_type = Type[_Ref]
def ref_from_dict(input: dict):
''' Create a instance of a ref from a dictionary
Parameters
----------
input: `dict`
Input dictionary for the ref
Returns
-------
ref: `_Ref`
One of the subclasses of ref (e.g., Syringe)
Raises
------
ValueError
Raised if the "type" field not passed in input or
if the passed type is not a valid ref class
Examples
--------
>>> input = {'type': 'Syringe', 'name': '0.5M Citric Acid', 'params': {'liquid_volume': '10 millilters'}}
>>> my_syringe = from_dict(input)
See also
--------
_Ref.to_dict
'''
#Check if "type" field in input
if "type" not in input:
raise ValueError(f"The 'type' field was not passed, which is required.")
#Error handling when checking issubclass
def check_subclass(subclass, superclass):
try:
if issubclass(subclass, superclass): return True
except TypeError:
return False
#Find subclasses of _Ref
subclasses = [cls.__name__ for key, cls
in list(globals().items())
if check_subclass(cls, _Ref)]
subclasses.remove(_Ref.__name__)
#Convert dimensional values to pint quantities
params = filter_dict_values(input["params"], pintify)
#Create instance of class
ref_type = input.get("type")
ref_name = input.pop("name")
if ref_type in subclasses:
ref = globals()[ref_type]
new_ref = ref(name=ref_name, **params)
return new_ref
else:
raise ValueError(f"sci saying hi: {type} is not one of the available refs.")
class Syringe(_Ref, implements(_RefInterface),):
''' Ref for syringes
Parameters
----------
name: `str`
Reference name for the syringe (e.g., 0.5M NaOH solution)
**kwargs
        - ``liquid_volume``: Volume of liquid in the syringe, not the total volume of syringe (`pint.quantity._Quantity`)
'''
def __init__(self, name: str, **params):
#Make sure liquid volume is keyword arg and that units are correct
liquid_volume, _ = check_kwargs('liquid_volume', 'Syringe', **params)
check_units(liquid_volume, '[length]^3')
#Add type to params dictionary
params.update({'type': 'Syringe'})
        #Inherit superclass __init__ method
super().__init__(name, **params) | /scici-0.1.0.tar.gz/scici-0.1.0/sci/refs.py | 0.881538 | 0.32118 | refs.py | pypi |
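A hypothetical end-to-end sketch of the classes above: build a `Syringe`,
serialize it with `to_dict`, and rebuild it with `ref_from_dict`. Import paths
are assumptions for illustration.

```
from sci import units
from sci.refs import Syringe, ref_from_dict  # assumed import path

syringe = Syringe("0.5M Citric Acid", liquid_volume=10 * units.milliliters)
d = syringe.to_dict()    # {'type': 'Syringe', 'name': ..., 'params': {...}} with string values
copy = ref_from_dict(d)  # params are converted back to pint quantities
print(copy)              # "0.5M Citric Acid (syringe)"
```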
"""
Copyright (c) 2014 `Science Automation Inc. <http://www.scivm.com>`_. All rights reserved.
email: [email protected]
Copyright (c) 2009 `PiCloud, Inc. <http://www.picloud.com>`_. All rights reserved.
email: [email protected]
The cloud package is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this package; if not, see
http://www.gnu.org/licenses/lgpl-2.1.html
"""
from scicloud import CloudTimeoutError
from . import _getcloud
import multiprocessing
class AsyncResult(object):
"""Result object that emulates multiprocessing.pool.AsyncResult"""
_jid = None #internal - jid (or jid list) associated with this result
def __init__(self, jid):
self._jid = jid
def get(self, timeout=None):
"""
Return result when it arrives.
If timeout is not None and none arrives,
raise multiprocessing.TimeoutError in *timeout* seconds
"""
return _getcloud().result(self._jid)
def wait(self, timeout=None):
"""
Wait until result is available or *timeout* seconds pass
"""
try:
_getcloud().join(self._jid)
except CloudTimeoutError:
pass
def ready(self):
"""Returns true if the job finished (done or errored)"""
c = _getcloud()
status = c.status(self._jid)
if not hasattr(status, '__iter__'):
return status in c.finished_statuses
else:
for s in status:
if s not in c.finished_statuses:
return False
return True
def successful(self):
"""Returns true if job finished successfully.
Asserts that job has finished"""
assert(self.ready())
status = _getcloud().status(self._jid)
if not hasattr(status, '__iter__'):
return status == 'done'
else:
for s in status:
if s != 'done':
return False
return True
def apply(func, args=()):
"""
Equivalent to Multiprocessing apply.
keyword arguments are not supported
"""
c = _getcloud()
jid = c.call(func, *args)
return c.result(jid)
def apply_async(func, args=(), callback=None):
"""
Equivalent to Multiprocessing apply_async
keyword arguments are not supported
callback is a list of functions that should be run on the callee's computer once this job finishes successfully.
Each callback will be invoked with one argument - the jid of the complete job
"""
c = _getcloud()
jid = c.call(func, _callback = callback, *args)
return AsyncResult(jid)
def map(func, iterable, chunksize=None):
"""
Equivalent to Multiprocessing map
chunksize is not used here
"""
c = _getcloud()
jids = c.map(func, iterable)
return c.result(jids)
def map_async(func, iterable, chunksize=None):
"""
Equivalent to Multiprocessing map_async
chunksize is not used here
"""
c = _getcloud()
jids = c.map(func, iterable)
return AsyncResult(jids)
def imap(func, iterable, chunksize = None):
"""
Equivalent to Multiprocessing imap
chunksize is used only to control the cloud.iresult stage
"""
c = _getcloud()
jids = c.map(func, iterable)
return c.iresult(jids,chunksize)
def imap_unordered(func, iterable, chunksize = None):
"""
Same as imap
"""
return imap(func, iterable, chunksize) | /scicloud-3.0.4.tar.gz/scicloud-3.0.4/src/pool_interface.py | 0.803212 | 0.152001 | pool_interface.py | pypi |
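The functions above deliberately mirror `multiprocessing.Pool`; a sketch of the
intended call pattern is shown below. Running it requires a configured scicloud
account, and the import path is an assumption.

```
from scicloud import pool_interface as pool  # assumed import path

def square(x):
    return x * x

res = pool.apply_async(square, (3,))
res.wait()
if res.successful():
    print(res.get())               # 9

print(pool.map(square, range(5)))  # [0, 1, 4, 9, 16]
```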
from ..cloud import CloudException
class CloudConnection(object):
"""Abstract connection class to deal with low-level communication of cloud adapter"""
_isopen = False
_adapter = None
@property
def opened(self):
"""Returns whether the connection is open"""
return self._isopen
def open(self):
"""called when this connection is to be used"""
if self._adapter and not self._adapter.opened:
self._adapter.open()
self._isopen = True
def close(self):
"""called when this connection is no longer needed"""
if not self.opened:
raise CloudException("%s: Cannot close a closed connection", str(self))
self._isopen = False
@property
def adapter(self):
return self._adapter
def needs_restart(self, **kwargs):
"""Called to determine if the cloud must be restarted due to different connection parameters"""
return False
def job_add(self, params, logdata = None):
raise NotImplementedError
def jobs_join(self, jids, timeout = None):
"""
Allows connection to manage joining
If connection manages joining, it should return a list of statuses
describing the finished job
Else, return False
"""
return False
def jobs_map(self, params, mapargs, mapkwargs = None, logdata = None):
raise NotImplementedError
def jobs_result(self, jids):
raise NotImplementedError
def jobs_kill(self, jids):
raise NotImplementedError
def jobs_delete(self, jids):
raise NotImplementedError
def jobs_info(self, jids, info_requested):
raise NotImplementedError
def is_simulated(self):
raise NotImplementedError
def connection_info(self):
return {'opened': self.opened, 'connection_type' :None}
def modules_check(self, modules):
pass
def modules_add(self, modules):
pass
def packages_list(self):
"""
Get list of packages from server
"""
return []
def force_adapter_report(self):
"""
Should the SerializationReport for the SerializationAdapter be coerced to be instantiated?
"""
return False
def report_name(self):
raise NotImplementedError
def get_report_dir(self):
raise TypeError('get_report_dir is only valid on connection hooks') | /scicloud-3.0.4.tar.gz/scicloud-3.0.4/src/transport/connection.py | 0.743447 | 0.153042 | connection.py | pypi |
Why SCICO?
==========
Advantages of JAX-based Design
------------------------------
The vast majority of scientific computing packages in Python are based
on `NumPy <https://numpy.org/>`__ and `SciPy <https://scipy.org/>`__.
SCICO, in contrast, is based on `JAX
<https://jax.readthedocs.io/en/latest/>`__, which provides most of the
same features, but with the addition of automatic differentiation, GPU
support, and just-in-time (JIT) compilation. (The availability of
these features in SCICO is subject to some :ref:`caveats
<non_jax_dep>`.) SCICO users and developers are advised to become
familiar with the `differences between JAX and
NumPy <https://jax.readthedocs.io/en/latest/notebooks/thinking_in_jax.html>`_.
While recent advances in automatic differentiation have primarily been
driven by its important role in deep learning, it is also invaluable in
a functional minimization framework such as SCICO. The most obvious
advantage is allowing the use of gradient-based minimization methods
without the need for tedious mathematical derivation of an expression
for the gradient. Equally valuable, though, is the ability to
automatically compute the adjoint operator of a linear operator, the
manual derivation of which is often time-consuming.
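As a minimal illustration of these two capabilities in plain JAX (shown here as
a sketch rather than SCICO code), both a gradient and the action of an adjoint
can be obtained without any manual derivation::

  import jax
  import jax.numpy as jnp

  A = jnp.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])  # linear operator as a matrix
  y = jnp.array([1.0, 0.0, -1.0])

  def op(x):  # forward operator
      return A @ x

  def loss(x):  # scalar functional
      return 0.5 * jnp.sum((op(x) - y) ** 2)

  grad = jax.grad(loss)(jnp.zeros(2))           # gradient via automatic differentiation
  adj = jax.linear_transpose(op, jnp.zeros(2))  # adjoint of the linear operator
  (ATy,) = adj(y)                               # action of the adjoint on y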
GPU support and JIT compilation both offer the potential for significant
code acceleration, with the speed gains that can be obtained depending
on the algorithm/function to be executed. In many cases, a speed
improvement by an order of magnitude or more can be obtained by running
the same code on a GPU rather than a CPU, and similar speed gains can
sometimes also be obtained via JIT compilation.
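In practice, JIT compilation is enabled by a single function transformation,
e.g. (an illustrative fragment, not code from SCICO itself)::

  import jax
  import jax.numpy as jnp

  @jax.jit
  def tv_like_penalty(x):
      # sum of absolute finite differences along both axes of a 2D array
      return jnp.sum(jnp.abs(jnp.diff(x, axis=0))) + jnp.sum(jnp.abs(jnp.diff(x, axis=1)))

  x = jnp.ones((512, 512))
  tv_like_penalty(x)  # first call compiles; later calls reuse the compiled kernel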
The figure below shows timing results obtained on a compute server
with an Intel Xeon Gold 6230 CPU and NVIDIA GeForce RTX 2080 Ti
GPU. It is interesting to note that for :class:`.FiniteDifference` the
GPU provides no acceleration, while JIT provides more than an order of
magnitude of speed improvement on both CPU and GPU. For :class:`.DFT`
and :class:`.Convolve`, significant JIT acceleration is limited to the
GPU, which also provides significant acceleration over the CPU.
.. image:: /figures/jax-timing.png
:align: center
:width: 95%
:alt: Timing results for SCICO operators on CPU and GPU with and without JIT
Related Packages
----------------
Many elements of SCICO are partially available in other packages. We
briefly review them here, highlighting some of the main differences with
SCICO.
`GlobalBioIm <https://biomedical-imaging-group.github.io/GlobalBioIm/>`__
is similar in structure to SCICO (and a major inspiration for SCICO),
providing linear operators and solvers for inverse problems in imaging.
However, it is written in MATLAB and is thus not usable in a completely
free environment. It also lacks the automatic adjoint calculation and
simple GPU support offered by SCICO.
`PyLops <https://pylops.readthedocs.io>`__ provides a linear operator
class and many built-in linear operators. These operators are compatible
with many `SciPy <https://scipy.org/>`__ solvers. GPU support is
provided via `CuPy <https://cupy.dev>`__, which has the disadvantage
that switching from a CPU to a GPU requires code changes, unlike SCICO and
`JAX <https://jax.readthedocs.io/en/latest/>`__. SCICO is more focused
on computational imaging than PyLops and has several specialized
operators that PyLops lacks.
`Pycsou <https://matthieumeo.github.io/pycsou/html/index>`__, like
SCICO, is a Python project inspired by GlobalBioIm. Since it is based on
PyLops, it shares that project's disadvantages with respect to SCICO.
`ODL <https://odlgroup.github.io/odl/>`__ provides a variety of
operators and related infrastructure for prototyping of inverse
problems. It is built on top of
`NumPy <https://numpy.org/>`__/`SciPy <https://scipy.org/>`__, and does
not support any of the advanced features of
`JAX <https://jax.readthedocs.io/en/latest/>`__.
`ProxImaL <http://www.proximal-lang.org/en/latest/>`__ is a Python
package for image optimization problems. Like SCICO and many of the
other projects listed here, problems are specified by combining objects
representing operators, functionals, and solvers. It does not support
any of the advanced features of
`JAX <https://jax.readthedocs.io/en/latest/>`__.
`ProxMin <https://github.com/pmelchior/proxmin>`__ provides a set of
proximal optimization algorithms for minimizing non-smooth functionals.
It is built on top of
`NumPy <https://numpy.org/>`__/`SciPy <https://scipy.org/>`__, and does
not support any of the advanced features of
`JAX <https://jax.readthedocs.io/en/latest/>`__ (however, an open issue
suggests that `JAX <https://jax.readthedocs.io/en/latest/>`__
compatibility is planned).
`CVXPY <https://www.cvxpy.org>`__ provides a flexible language for
defining optimization problems and a wide selection of solvers, but has
limited support for matrix-free methods.
Other related projects that may be of interest include:
- `ToMoBAR <https://github.com/dkazanc/ToMoBAR>`__
- `CCPi-Regularisation Toolkit <https://github.com/vais-ral/CCPi-Regularisation-Toolkit>`__
- `SPORCO <https://github.com/lanl/sporco>`__
- `SigPy <https://github.com/mikgroup/sigpy>`__
- `MIRT <https://github.com/JeffFessler/MIRT.jl>`__
- `BART <http://mrirecon.github.io/bart/>`__
| /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/advantages.rst | 0.940463 | 0.909947 | advantages.rst | pypi |
import importlib
import inspect
import os
import pkgutil
import sys
from glob import glob
from runpy import run_path
def run_conf_files(vardict=None, path=None):
"""Execute Python files in conf directory.
Args:
vardict: Dictionary into which variable names should be inserted.
Defaults to empty dict.
path: Path to conf directory. Defaults to path to this module.
Returns:
A dict populated with variables defined during execution of the
configuration files.
"""
if vardict is None:
vardict = {}
if path is None:
path = os.path.dirname(__file__)
files = os.path.join(path, "conf", "*.py")
for f in sorted(glob(files)):
conf = run_path(f, init_globals=vardict)
for k, v in conf.items():
if len(k) >= 4 and k[0:2] == "__" and k[-2:] == "__": # ignore __<name>__ variables
continue
vardict[k] = v
return vardict
def package_classes(package):
"""Get a list of classes in a package.
Return a list of qualified names of classes in the specified
package. Classes in modules with names beginning with an "_" are
omitted, as are classes whose internal module name record is not
the same as the module in which they are found (i.e. indicating
that they have been imported from elsewhere).
Args:
package: Reference to package for which classes are to be listed
(not package name string).
Returns:
A list of qualified names of classes in the specified package.
"""
classes = []
# Iterate over modules in package
for importer, modname, _ in pkgutil.walk_packages(
path=package.__path__, prefix=(package.__name__ + "."), onerror=lambda x: None
):
# Skip modules whose names begin with a "_"
if modname.split(".")[-1][0] == "_":
continue
importlib.import_module(modname)
# Iterate over module members
for name, obj in inspect.getmembers(sys.modules[modname]):
if inspect.isclass(obj):
# Get internal module name of class for comparison with working module name
try:
objmodname = getattr(sys.modules[modname], obj.__name__).__module__
except Exception:
objmodname = None
if objmodname == modname:
classes.append(modname + "." + obj.__name__)
return classes
def get_text_indentation(text, skiplines=0):
"""Compute the leading whitespace indentation in a block of text.
Args:
        text: A block of text as a string.
        skiplines: Number of leading lines to skip before computing the
            indentation.
    Returns:
Indentation length.
"""
min_indent = len(text)
lines = text.splitlines()
if len(lines) > skiplines:
lines = lines[skiplines:]
else:
return None
for line in lines:
if len(line) > 0:
indent = len(line) - len(line.lstrip())
if indent < min_indent:
min_indent = indent
return min_indent
def add_text_indentation(text, indent):
"""Insert leading whitespace into a block of text.
Args:
text: A block of text as a string.
indent: Number of leading spaces to insert on each line.
Returns:
Text with additional indentation.
"""
lines = text.splitlines()
for n, line in enumerate(lines):
if len(line) > 0:
lines[n] = (" " * indent) + line
return "\n".join(lines)
def insert_inheritance_diagram(clsqname, parts=None, default_nparts=2):
"""Insert an inheritance diagram into a class docstring.
    No action is taken for classes without a base class or for classes
    without a docstring.
Args:
clsqname: Qualified name (i.e. including module name path) of class.
parts: A dict mapping qualified class names to custom values for
the ":parts:" directive.
default_nparts: Default value for the ":parts:" directive.
"""
# Extract module name and class name from qualified class name
clspth = clsqname.split(".")
modname = ".".join(clspth[0:-1])
clsname = clspth[-1]
# Get reference to class
cls = getattr(sys.modules[modname], clsname)
# Return immediately if class has no base classes
if getattr(cls, "__bases__") == (object,):
return
# Get current docstring
docstr = getattr(cls, "__doc__")
# Return immediately if class has no docstring
if docstr is None:
return
# Use class-specific parts or default parts directive value
if parts and clsqname in parts:
nparts = parts[clsqname]
else:
nparts = default_nparts
# Split docstring into individual lines
lines = docstr.splitlines()
# Return immediately if there are no lines
if not lines:
return
# Cut leading whitespace lines
n = 0
for n, line in enumerate(lines):
if line != "":
break
lines = lines[n:]
# Define inheritance diagram insertion text
idstr = f"""
.. inheritance-diagram:: {clsname}
:parts: {nparts}
"""
docstr_indent = get_text_indentation(docstr, skiplines=1)
if docstr_indent is not None and docstr_indent > 4:
idstr = add_text_indentation(idstr, docstr_indent - 4)
# Insert inheritance diagram after summary line and whitespace line following it
lines.insert(2, idstr)
# Construct new docstring and attach it to the class
extdocstr = "\n".join(lines)
setattr(cls, "__doc__", extdocstr) | /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/docsutil.py | 0.567457 | 0.2709 | docsutil.py | pypi |
# Usage Examples
## Organized by Application
### Computed Tomography
> - [TV-Regularized Abel Inversion](ct_abel_tv_admm.ipynb)
> - [Parameter Tuning for TV-Regularized Abel
> Inversion](ct_abel_tv_admm_tune.ipynb)
> - [CT Reconstruction with CG and PCG](ct_astra_noreg_pcg.ipynb)
> - [3D TV-Regularized Sparse-View CT
> Reconstruction](ct_astra_3d_tv_admm.ipynb)
> - [TV-Regularized Sparse-View CT
> Reconstruction](ct_astra_tv_admm.ipynb)
> - [TV-Regularized Low-Dose CT
> Reconstruction](ct_astra_weighted_tv_admm.ipynb)
> - [TV-Regularized CT Reconstruction (Multiple
> Algorithms)](ct_svmbir_tv_multi.ipynb)
> - [PPP (with BM3D) CT Reconstruction (ADMM with CG Subproblem
> Solver)](ct_svmbir_ppp_bm3d_admm_cg.ipynb)
> - [PPP (with BM3D) CT Reconstruction (ADMM with Fast SVMBIR
> Prox)](ct_svmbir_ppp_bm3d_admm_prox.ipynb)
> - [PPP (with BM3D) Fan-Beam CT
> Reconstruction](ct_fan_svmbir_ppp_bm3d_admm_prox.ipynb)
> - [CT Training and Reconstructions with
> MoDL](ct_astra_modl_train_foam2.ipynb)
> - [CT Training and Reconstructions with
> ODP](ct_astra_odp_train_foam2.ipynb)
> - [CT Training and Reconstructions with
> UNet](ct_astra_unet_train_foam2.ipynb)
### Deconvolution
> - [Circulant Blur Image Deconvolution with TV
> Regularization](deconv_circ_tv_admm.ipynb)
> - [Image Deconvolution with TV Regularization (ADMM
> Solver)](deconv_tv_admm.ipynb)
> - [Image Deconvolution with TV Regularization (Proximal ADMM
> Solver)](deconv_tv_padmm.ipynb)
> - [Parameter Tuning for Image Deconvolution with TV Regularization
> (ADMM Solver)](deconv_tv_admm_tune.ipynb)
> - [Deconvolution Microscopy (Single
> Channel)](deconv_microscopy_tv_admm.ipynb)
> - [Deconvolution Microscopy (All
> Channels)](deconv_microscopy_allchn_tv_admm.ipynb)
> - [PPP (with BM3D) Image Deconvolution (ADMM
> Solver)](deconv_ppp_bm3d_admm.ipynb)
> - [PPP (with BM3D) Image Deconvolution (APGM
> Solver)](deconv_ppp_bm3d_pgm.ipynb)
> - [PPP (with DnCNN) Image Deconvolution (ADMM
> Solver)](deconv_ppp_dncnn_admm.ipynb)
> - [PPP (with DnCNN) Image Deconvolution (Proximal ADMM
> Solver)](deconv_ppp_dncnn_padmm.ipynb)
> - [PPP (with BM4D) Volume Deconvolution](deconv_ppp_bm4d_admm.ipynb)
> - [Deconvolution Training and Reconstructions with
> MoDL](deconv_modl_train_foam1.ipynb)
> - [Deconvolution Training and Reconstructions with
> ODP](deconv_odp_train_foam1.ipynb)
### Sparse Coding
> - [Non-Negative Basis Pursuit DeNoising
> (ADMM)](sparsecode_admm.ipynb)
> - [Convolutional Sparse Coding (ADMM)](sparsecode_conv_admm.ipynb)
> - [Convolutional Sparse Coding with Mask Decoupling
> (ADMM)](sparsecode_conv_md_admm.ipynb)
> - [Basis Pursuit DeNoising (APGM)](sparsecode_pgm.ipynb)
> - [Non-negative Poisson Loss Reconstruction
> (APGM)](sparsecode_poisson_pgm.ipynb)
### Miscellaneous
> - [PPP (with BM3D) Image Demosaicing](demosaic_ppp_bm3d_admm.ipynb)
> - [PPP (with DnCNN) Image
> Superresolution](superres_ppp_dncnn_admm.ipynb)
> - [ℓ1 Total Variation Denoising](denoise_l1tv_admm.ipynb)
> - [Total Variation Denoising (ADMM)](denoise_tv_admm.ipynb)
> - [Total Variation Denoising with Constraint
> (APGM)](denoise_tv_pgm.ipynb)
> - [Comparison of Optimization Algorithms for Total Variation
> Denoising](denoise_tv_multi.ipynb)
> - [Complex Total Variation Denoising with NLPADMM
> Solver](denoise_cplx_tv_nlpadmm.ipynb)
> - [Complex Total Variation Denoising with PDHG
> Solver](denoise_cplx_tv_pdhg.ipynb)
> - [Comparison of DnCNN Variants for Image
> Denoising](denoise_dncnn_universal.ipynb)
> - [TV-Regularized 3D DiffuserCam
> Reconstruction](diffusercam_tv_admm.ipynb)
> - [Video Decomposition via Robust PCA](video_rpca_admm.ipynb)
> - [CT Data Generation for NN Training](ct_astra_datagen_foam2.ipynb)
> - [Blurred Data Generation (Natural Images) for NN
> Training](deconv_datagen_bsds.ipynb)
> - [Blurred Data Generation (Foams) for NN
> Training](deconv_datagen_foam1.ipynb)
> - [Noisy Data Generation for NN
> Training](denoise_datagen_bsds.ipynb)
## Organized by Regularization
### Plug and Play Priors
> - [PPP (with BM3D) CT Reconstruction (ADMM with CG Subproblem
> Solver)](ct_svmbir_ppp_bm3d_admm_cg.ipynb)
> - [PPP (with BM3D) CT Reconstruction (ADMM with Fast SVMBIR
> Prox)](ct_svmbir_ppp_bm3d_admm_prox.ipynb)
> - [PPP (with BM3D) Fan-Beam CT
> Reconstruction](ct_fan_svmbir_ppp_bm3d_admm_prox.ipynb)
> - [PPP (with BM3D) Image Deconvolution (ADMM
> Solver)](deconv_ppp_bm3d_admm.ipynb)
> - [PPP (with BM3D) Image Deconvolution (APGM
> Solver)](deconv_ppp_bm3d_pgm.ipynb)
> - [PPP (with DnCNN) Image Deconvolution (ADMM
> Solver)](deconv_ppp_dncnn_admm.ipynb)
> - [PPP (with DnCNN) Image Deconvolution (Proximal ADMM
> Solver)](deconv_ppp_dncnn_padmm.ipynb)
> - [PPP (with BM4D) Volume Deconvolution](deconv_ppp_bm4d_admm.ipynb)
> - [PPP (with BM3D) Image Demosaicing](demosaic_ppp_bm3d_admm.ipynb)
> - [PPP (with DnCNN) Image
> Superresolution](superres_ppp_dncnn_admm.ipynb)
### Total Variation
> - [TV-Regularized Abel Inversion](ct_abel_tv_admm.ipynb)
> - [Parameter Tuning for TV-Regularized Abel
> Inversion](ct_abel_tv_admm_tune.ipynb)
> - [TV-Regularized Sparse-View CT
> Reconstruction](ct_astra_tv_admm.ipynb)
> - [3D TV-Regularized Sparse-View CT
> Reconstruction](ct_astra_3d_tv_admm.ipynb)
> - [TV-Regularized Low-Dose CT
> Reconstruction](ct_astra_weighted_tv_admm.ipynb)
> - [TV-Regularized CT Reconstruction (Multiple
> Algorithms)](ct_svmbir_tv_multi.ipynb)
> - [Circulant Blur Image Deconvolution with TV
> Regularization](deconv_circ_tv_admm.ipynb)
> - [Image Deconvolution with TV Regularization (ADMM
> Solver)](deconv_tv_admm.ipynb)
> - [Parameter Tuning for Image Deconvolution with TV Regularization
> (ADMM Solver)](deconv_tv_admm_tune.ipynb)
> - [Image Deconvolution with TV Regularization (Proximal ADMM
> Solver)](deconv_tv_padmm.ipynb)
> - [Deconvolution Microscopy (Single
> Channel)](deconv_microscopy_tv_admm.ipynb)
> - [Deconvolution Microscopy (All
> Channels)](deconv_microscopy_allchn_tv_admm.ipynb)
> - [ℓ1 Total Variation Denoising](denoise_l1tv_admm.ipynb)
> - [Total Variation Denoising (ADMM)](denoise_tv_admm.ipynb)
> - [Total Variation Denoising with Constraint
> (APGM)](denoise_tv_pgm.ipynb)
> - [Comparison of Optimization Algorithms for Total Variation
> Denoising](denoise_tv_multi.ipynb)
> - [Complex Total Variation Denoising with NLPADMM
> Solver](denoise_cplx_tv_nlpadmm.ipynb)
> - [Complex Total Variation Denoising with PDHG
> Solver](denoise_cplx_tv_pdhg.ipynb)
> - [TV-Regularized 3D DiffuserCam
> Reconstruction](diffusercam_tv_admm.ipynb)
### Sparsity
> - [TV-Regularized 3D DiffuserCam
> Reconstruction](diffusercam_tv_admm.ipynb)
> - [Non-Negative Basis Pursuit DeNoising
> (ADMM)](sparsecode_admm.ipynb)
> - [Convolutional Sparse Coding (ADMM)](sparsecode_conv_admm.ipynb)
> - [Convolutional Sparse Coding with Mask Decoupling
> (ADMM)](sparsecode_conv_md_admm.ipynb)
> - [Basis Pursuit DeNoising (APGM)](sparsecode_pgm.ipynb)
> - [Non-negative Poisson Loss Reconstruction
> (APGM)](sparsecode_poisson_pgm.ipynb)
> - [Video Decomposition via Robust PCA](video_rpca_admm.ipynb)
### Machine Learning
> - [CT Data Generation for NN Training](ct_astra_datagen_foam2.ipynb)
> - [CT Training and Reconstructions with
> MoDL](ct_astra_modl_train_foam2.ipynb)
> - [CT Training and Reconstructions with
> ODP](ct_astra_odp_train_foam2.ipynb)
> - [CT Training and Reconstructions with
> UNet](ct_astra_unet_train_foam2.ipynb)
> - [Blurred Data Generation (Natural Images) for NN
> Training](deconv_datagen_bsds.ipynb)
> - [Blurred Data Generation (Foams) for NN
> Training](deconv_datagen_foam1.ipynb)
> - [Deconvolution Training and Reconstructions with
> MoDL](deconv_modl_train_foam1.ipynb)
> - [Deconvolution Training and Reconstructions with
> ODP](deconv_odp_train_foam1.ipynb)
> - [Noisy Data Generation for NN
> Training](denoise_datagen_bsds.ipynb)
> - [Training of DnCNN for Denoising](denoise_dncnn_train_bsds.ipynb)
> - [Comparison of DnCNN Variants for Image
> Denoising](denoise_dncnn_universal.ipynb)
## Organized by Optimization Algorithm
### ADMM
> - [TV-Regularized Abel Inversion](ct_abel_tv_admm.ipynb)
> - [Parameter Tuning for TV-Regularized Abel
> Inversion](ct_abel_tv_admm_tune.ipynb)
> - [TV-Regularized Sparse-View CT
> Reconstruction](ct_astra_tv_admm.ipynb)
> - [3D TV-Regularized Sparse-View CT
> Reconstruction](ct_astra_3d_tv_admm.ipynb)
> - [TV-Regularized Low-Dose CT
> Reconstruction](ct_astra_weighted_tv_admm.ipynb)
> - [TV-Regularized CT Reconstruction (Multiple
> Algorithms)](ct_svmbir_tv_multi.ipynb)
> - [PPP (with BM3D) CT Reconstruction (ADMM with CG Subproblem
> Solver)](ct_svmbir_ppp_bm3d_admm_cg.ipynb)
> - [PPP (with BM3D) CT Reconstruction (ADMM with Fast SVMBIR
> Prox)](ct_svmbir_ppp_bm3d_admm_prox.ipynb)
> - [PPP (with BM3D) Fan-Beam CT
> Reconstruction](ct_fan_svmbir_ppp_bm3d_admm_prox.ipynb)
> - [Circulant Blur Image Deconvolution with TV
> Regularization](deconv_circ_tv_admm.ipynb)
> - [Image Deconvolution with TV Regularization (ADMM
> Solver)](deconv_tv_admm.ipynb)
> - [Parameter Tuning for Image Deconvolution with TV Regularization
> (ADMM Solver)](deconv_tv_admm_tune.ipynb)
> - [Deconvolution Microscopy (Single
> Channel)](deconv_microscopy_tv_admm.ipynb)
> - [Deconvolution Microscopy (All
> Channels)](deconv_microscopy_allchn_tv_admm.ipynb)
> - [PPP (with BM3D) Image Deconvolution (ADMM
> Solver)](deconv_ppp_bm3d_admm.ipynb)
> - [PPP (with DnCNN) Image Deconvolution (ADMM
> Solver)](deconv_ppp_dncnn_admm.ipynb)
> - [PPP (with BM4D) Volume Deconvolution](deconv_ppp_bm4d_admm.ipynb)
> - [TV-Regularized 3D DiffuserCam
> Reconstruction](diffusercam_tv_admm.ipynb)
> - [Non-Negative Basis Pursuit DeNoising
> (ADMM)](sparsecode_admm.ipynb)
> - [Convolutional Sparse Coding (ADMM)](sparsecode_conv_admm.ipynb)
> - [Convolutional Sparse Coding with Mask Decoupling
> (ADMM)](sparsecode_conv_md_admm.ipynb)
> - [PPP (with BM3D) Image Demosaicing](demosaic_ppp_bm3d_admm.ipynb)
> - [PPP (with DnCNN) Image
> Superresolution](superres_ppp_dncnn_admm.ipynb)
> - [ℓ1 Total Variation Denoising](denoise_l1tv_admm.ipynb)
> - [Total Variation Denoising (ADMM)](denoise_tv_admm.ipynb)
> - [Comparison of Optimization Algorithms for Total Variation
> Denoising](denoise_tv_multi.ipynb)
> - [Video Decomposition via Robust PCA](video_rpca_admm.ipynb)
### Linearized ADMM
> - [TV-Regularized CT Reconstruction (Multiple
> Algorithms)](ct_svmbir_tv_multi.ipynb)
> - [Comparison of Optimization Algorithms for Total Variation
> Denoising](denoise_tv_multi.ipynb)
### Proximal ADMM
> - [Image Deconvolution with TV Regularization (Proximal ADMM
> Solver)](deconv_tv_padmm.ipynb)
> - [Comparison of Optimization Algorithms for Total Variation
> Denoising](denoise_tv_multi.ipynb)
> - [Complex Total Variation Denoising with NLPADMM
> Solver](denoise_cplx_tv_nlpadmm.ipynb)
> - [PPP (with DnCNN) Image Deconvolution (Proximal ADMM
> Solver)](deconv_ppp_dncnn_padmm.ipynb)
### Non-linear Proximal ADMM
> - [Complex Total Variation Denoising with NLPADMM
> Solver](denoise_cplx_tv_nlpadmm.ipynb)
### PDHG
> - [TV-Regularized CT Reconstruction (Multiple
> Algorithms)](ct_svmbir_tv_multi.ipynb)
> - [Comparison of Optimization Algorithms for Total Variation
> Denoising](denoise_tv_multi.ipynb)
> - [Complex Total Variation Denoising with PDHG
> Solver](denoise_cplx_tv_pdhg.ipynb)
### PGM
> - [PPP (with BM3D) Image Deconvolution (APGM
> Solver)](deconv_ppp_bm3d_pgm.ipynb)
> - [Basis Pursuit DeNoising (APGM)](sparsecode_pgm.ipynb)
> - [Non-negative Poisson Loss Reconstruction
> (APGM)](sparsecode_poisson_pgm.ipynb)
> - [Total Variation Denoising with Constraint
> (APGM)](denoise_tv_pgm.ipynb)
### PCG
> - [CT Reconstruction with CG and PCG](ct_astra_noreg_pcg.ipynb)
| /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples/index.ipynb | 0.708818 | 0.633694 | index.ipynb | pypi |
Noisy Data Generation for NN Training
=====================================
This example demonstrates how to generate noisy image data for
training neural network models for denoising. The original images are
part of the
[BSDS500 dataset](http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/)
provided by the Berkeley Segmentation Dataset and Benchmark project.
```
import numpy as np
from scico import plot
from scico.flax.examples import load_image_data
plot.config_notebook_plotting()
```
Read data from cache or generate if not available.
```
size = 40 # patch size
train_nimg = 400 # number of training images
test_nimg = 64 # number of testing images
nimg = train_nimg + test_nimg
gray = True # use gray scale images
data_mode = "dn" # Denoising problem
noise_level = 0.1 # Standard deviation of noise
noise_range = False # Use fixed noise level
stride = 23 # Stride to sample multiple patches from each image
train_ds, test_ds = load_image_data(
train_nimg,
test_nimg,
size,
gray,
data_mode,
verbose=True,
noise_level=noise_level,
noise_range=noise_range,
stride=stride,
)
```
Plot a randomly selected sample. Note that the patches are small, so the
plots may show unidentifiable image fragments.
```
indx_tr = np.random.randint(0, train_nimg)
indx_te = np.random.randint(0, test_nimg)
fig, axes = plot.subplots(nrows=2, ncols=2, figsize=(7, 7))
plot.imview(
train_ds["label"][indx_tr, ..., 0],
title="Ground truth - Training Sample",
fig=fig,
ax=axes[0, 0],
)
plot.imview(
train_ds["image"][indx_tr, ..., 0],
title="Noisy Image - Training Sample",
fig=fig,
ax=axes[0, 1],
)
plot.imview(
test_ds["label"][indx_te, ..., 0],
title="Ground truth - Testing Sample",
fig=fig,
ax=axes[1, 0],
)
plot.imview(
test_ds["image"][indx_te, ..., 0], title="Noisy Image - Testing Sample", fig=fig, ax=axes[1, 1]
)
fig.suptitle(r"Training and Testing samples")
fig.tight_layout()
fig.colorbar(
axes[0, 1].get_images()[0],
ax=axes,
shrink=0.5,
pad=0.05,
)
fig.show()
```
| /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples/denoise_datagen_bsds.ipynb | 0.696268 | 0.97066 | denoise_datagen_bsds.ipynb | pypi |
Non-Negative Basis Pursuit DeNoising (ADMM)
===========================================
This example demonstrates the solution of a non-negative sparse coding
problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - D \mathbf{x} \|_2^2
+ \lambda \| \mathbf{x} \|_1 + I(\mathbf{x} \geq 0) \;,$$
where $D$ is the dictionary, $\mathbf{y}$ is the signal to be represented,
$\mathbf{x}$ is the sparse representation, and $I(\mathbf{x} \geq 0)$
is the non-negative indicator.
```
import numpy as np
import jax
from scico import functional, linop, loss, plot
from scico.optimize.admm import ADMM, MatrixSubproblemSolver
from scico.util import device_info
plot.config_notebook_plotting()
```
Create random dictionary, reference random sparse representation, and
test signal consisting of the synthesis of the reference sparse
representation.
```
m = 32 # signal size
n = 128 # dictionary size
s = 10 # sparsity level
np.random.seed(1)
D = np.random.randn(m, n)
D = D / np.linalg.norm(D, axis=0, keepdims=True) # normalize dictionary
xt = np.zeros(n) # true signal
idx = np.random.randint(low=0, high=n, size=s) # support of xt
xt[idx] = np.random.rand(s)
y = D @ xt + 5e-2 * np.random.randn(m) # synthetic signal
xt = jax.device_put(xt) # convert to jax array, push to GPU
y = jax.device_put(y) # convert to jax array, push to GPU
```
Set up the forward operator and ADMM solver object.
```
lmbda = 1e-1
A = linop.MatrixOperator(D)
f = loss.SquaredL2Loss(y=y, A=A)
g_list = [lmbda * functional.L1Norm(), functional.NonNegativeIndicator()]
C_list = [linop.Identity((n)), linop.Identity((n))]
rho_list = [1.0, 1.0]
maxiter = 100 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=g_list,
C_list=C_list,
rho_list=rho_list,
x0=A.adj(y),
maxiter=maxiter,
subproblem_solver=MatrixSubproblemSolver(),
itstat_options={"display": True, "period": 10},
)
```
Run the solver.
```
print(f"Solving on {device_info()}\n")
x = solver.solve()
```
Plot the recovered coefficients and signal.
```
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
np.vstack((xt, solver.x)).T,
title="Coefficients",
lgnd=("Ground Truth", "Recovered"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.vstack((D @ xt, y, D @ solver.x)).T,
title="Signal",
lgnd=("Ground Truth", "Noisy", "Recovered"),
fig=fig,
ax=ax[1],
)
fig.show()
```
| /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples/sparsecode_admm.ipynb | 0.626467 | 0.944944 | sparsecode_admm.ipynb | pypi |
Parameter Tuning for Image Deconvolution with TV Regularization (ADMM Solver)
=============================================================================
This example demonstrates the use of
[scico.ray.tune](../_autosummary/scico.ray.tune.rst) to tune parameters
for the companion [example script](deconv_tv_admm.rst). The `ray.tune`
function API is used in this example.
This script is hard-coded to run on CPU only to avoid the large number of
warnings that are emitted when GPU resources are requested but not available,
and due to the difficulty of suppressing these warnings in a way that does
not force use of the CPU only. To enable GPU usage, comment out the
`os.environ` statements near the beginning of the script, and change the
value of the "gpu" entry in the `resources` dict from 0 to 1. Note that
two environment variables are set to suppress the warnings because
`JAX_PLATFORMS` was intended to replace `JAX_PLATFORM_NAME` but this change
has yet to be correctly implemented
(see [google/jax#6805](https://github.com/google/jax/issues/6805) and
[google/jax#10272](https://github.com/google/jax/pull/10272)).
```
# isort: off
import os
os.environ["JAX_PLATFORM_NAME"] = "cpu"
os.environ["JAX_PLATFORMS"] = "cpu"
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, metric, plot
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.ray import report, tune
plot.config_notebook_plotting()
```
Create a ground truth image.
```
phantom = SiemensStar(32)
N = 256 # image size
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
```
Set up the forward operator and create a test signal consisting of a
blurred signal with additive Gaussian noise.
```
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n)) / (n * n)
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
noise, key = scico.random.randn(Ax.shape, seed=0)
y = Ax + σ * noise
```
Define performance evaluation function.
```
def eval_params(config, x_gt, psf, y):
"""Parameter evaluation function. The `config` parameter is a
dict of specific parameters for evaluation of a single parameter
set (a pair of parameters in this case). The remaining parameters
are objects that are passed to the evaluation function via the
ray object store.
"""
# Extract solver parameters from config dict.
λ, ρ = config["lambda"], config["rho"]
# Put main arrays on jax device.
x_gt, psf, y = jax.device_put([x_gt, psf, y])
# Set up problem to be solved.
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
f = loss.SquaredL2Loss(y=y, A=A)
g = λ * functional.L21Norm()
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
# Define solver.
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=A.adj(y),
maxiter=10,
subproblem_solver=LinearSubproblemSolver(),
)
# Perform 50 iterations, reporting performance to ray.tune every 10 iterations.
for step in range(5):
x_admm = solver.solve()
report({"psnr": float(metric.psnr(x_gt, x_admm))})
```
Define parameter search space and resources per trial.
```
config = {"lambda": tune.loguniform(1e-3, 1e-1), "rho": tune.loguniform(1e-2, 1e0)}
resources = {"cpu": 4, "gpu": 0} # cpus per trial, gpus per trial
```
Run parameter search.
```
tuner = tune.Tuner(
tune.with_parameters(eval_params, x_gt=x_gt, psf=psf, y=y),
param_space=config,
resources=resources,
metric="psnr",
mode="max",
num_samples=100, # perform 100 parameter evaluations
)
results = tuner.fit()
```
Display best parameters and corresponding performance.
```
best_result = results.get_best_result()
best_config = best_result.config
print(f"Best PSNR: {best_result.metrics['psnr']:.2f} dB")
print("Best config: " + ", ".join([f"{k}: {v:.2e}" for k, v in best_config.items()]))
```
Plot parameter values visited during parameter search. Marker sizes are
proportional to number of iterations run at each parameter pair. The best
point in the parameter space is indicated in red.
```
fig = plot.figure(figsize=(8, 8))
trials = results.get_dataframe()
for t in trials.iloc:
n = t["training_iteration"]
plot.plot(
t["config/lambda"],
t["config/rho"],
ptyp="loglog",
lw=0,
ms=(0.5 + 1.5 * n),
marker="o",
mfc="blue",
mec="blue",
fig=fig,
)
plot.plot(
best_config["lambda"],
best_config["rho"],
ptyp="loglog",
title="Parameter search sampling locations\n(marker size proportional to number of iterations)",
xlbl=r"$\rho$",
ylbl=r"$\lambda$",
lw=0,
ms=5.0,
marker="o",
mfc="red",
mec="red",
fig=fig,
)
ax = fig.axes[0]
ax.set_xlim([config["rho"].lower, config["rho"].upper])
ax.set_ylim([config["lambda"].lower, config["lambda"].upper])
fig.show()
```
Plot parameter values visited during parameter search and corresponding
reconstruction PSNRs.The best point in the parameter space is indicated
in red.
```
𝜌 = [t["config/rho"] for t in trials.iloc]
𝜆 = [t["config/lambda"] for t in trials.iloc]
psnr = [t["psnr"] for t in trials.iloc]
minpsnr = min(max(psnr), 18.0)
𝜌, 𝜆, psnr = zip(*filter(lambda x: x[2] >= minpsnr, zip(𝜌, 𝜆, psnr)))
fig, ax = plot.subplots(figsize=(10, 8))
sc = ax.scatter(𝜌, 𝜆, c=psnr, cmap=plot.cm.plasma_r)
fig.colorbar(sc)
plot.plot(
best_config["lambda"],
best_config["rho"],
ptyp="loglog",
lw=0,
ms=12.0,
marker="2",
mfc="red",
mec="red",
fig=fig,
ax=ax,
)
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel(r"$\rho$")
ax.set_ylabel(r"$\lambda$")
ax.set_title("PSNR at each sample location\n(values below 18 dB omitted)")
fig.show()
```
| /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples/deconv_tv_admm_tune.ipynb | 0.734786 | 0.953579 | deconv_tv_admm_tune.ipynb | pypi |
PPP (with BM4D) Volume Deconvolution
====================================
This example demonstrates the solution of a 3D image deconvolution problem
(involving recovering a 3D volume that has been convolved with a 3D kernel
and corrupted by noise) using the ADMM Plug-and-Play Priors (PPP)
algorithm <cite data-cite="venkatakrishnan-2013-plugandplay2"/>, with the BM4D
<cite data-cite="maggioni-2012-nonlocal"/> denoiser.
```
import numpy as np
import jax
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot, random
from scico.examples import create_3d_foam_phantom, downsample_volume, tile_volume_slices
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
plot.config_notebook_plotting()
```
Create a ground truth image.
```
np.random.seed(1234)
N = 128 # phantom size
Nx, Ny, Nz = N, N, N // 4
upsamp = 2
x_gt_hires = create_3d_foam_phantom((upsamp * Nz, upsamp * Ny, upsamp * Nx), N_sphere=100)
x_gt = downsample_volume(x_gt_hires, upsamp)
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
```
Set up forward operator and test signal consisting of blurred signal with
additive Gaussian noise.
```
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n, n)) / (n**3)
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
noise, key = random.randn(Ax.shape)
y = Ax + σ * noise
```
Set up ADMM solver.
```
f = loss.SquaredL2Loss(y=y, A=A)
C = linop.Identity(x_gt.shape)
λ = 40.0 / 255 # BM4D regularization strength
g = λ * functional.BM4D()
ρ = 1.0 # ADMM penalty parameter
maxiter = 10 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=A.T @ y,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 100}),
itstat_options={"display": True},
)
```
Run the solver.
```
print(f"Solving on {device_info()}\n")
x = solver.solve()
x = snp.clip(x, 0, 1)
hist = solver.itstat_object.history(transpose=True)
```
Show slices of the recovered 3D volume.
```
show_id = Nz // 2
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(tile_volume_slices(x_gt), title="Ground truth", fig=fig, ax=ax[0])
nc = n // 2
yc = y[nc:-nc, nc:-nc, nc:-nc]
yc = snp.clip(yc, 0, 1)
plot.imview(
tile_volume_slices(yc),
title="Slices of blurred, noisy volume: %.2f (dB)" % metric.psnr(x_gt, yc),
fig=fig,
ax=ax[1],
)
plot.imview(
tile_volume_slices(x),
title="Slices of deconvolved volume: %.2f (dB)" % metric.psnr(x_gt, x),
fig=fig,
ax=ax[2],
)
fig.show()
```
Plot convergence statistics.
```
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
)
```
| /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples/deconv_ppp_bm4d_admm.ipynb | 0.723016 | 0.965996 | deconv_ppp_bm4d_admm.ipynb | pypi |
Parameter Tuning for TV-Regularized Abel Inversion
==================================================
This example demonstrates the use of
[scico.ray.tune](../_autosummary/scico.ray.tune.rst) to tune
parameters for the companion [example script](ct_abel_tv_admm.rst). The
`ray.tune` class API is used in this example.
This script is hard-coded to run on CPU only to avoid the large number of
warnings that are emitted when GPU resources are requested but not available,
and due to the difficulty of suppressing these warnings in a way that does
not force use of the CPU only. To enable GPU usage, comment out the
`os.environ` statements near the beginning of the script, and change the
value of the "gpu" entry in the `resources` dict from 0 to 1. Note that
two environment variables are set to suppress the warnings because
`JAX_PLATFORMS` was intended to replace `JAX_PLATFORM_NAME` but this change
has yet to be correctly implemented
(see [google/jax#6805](https://github.com/google/jax/issues/6805) and
[google/jax#10272](https://github.com/google/jax/pull/10272)).
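For reference, a minimal sketch of the modifications described above for enabling
GPU usage (assuming a GPU-capable JAX installation) is
```
# Comment out (or remove) the statements forcing CPU-only execution:
# os.environ["JAX_PLATFORM_NAME"] = "cpu"
# os.environ["JAX_PLATFORMS"] = "cpu"

# Request one GPU (and one CPU) per parameter evaluation:
resources = {"gpu": 1, "cpu": 1}
```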
```
# isort: off
import os
os.environ["JAX_PLATFORM_NAME"] = "cpu"
os.environ["JAX_PLATFORMS"] = "cpu"
import numpy as np
import jax
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.examples import create_circular_phantom
from scico.linop.abel import AbelProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.ray import tune
plot.config_notebook_plotting()
```
Create a ground truth image.
```
N = 256 # image size
x_gt = create_circular_phantom((N, N), [0.4 * N, 0.2 * N, 0.1 * N], [1, 0, 0.5])
```
Set up the forward operator and create a test measurement.
```
A = AbelProjector(x_gt.shape)
y = A @ x_gt
np.random.seed(12345)
y = y + np.random.normal(size=y.shape).astype(np.float32)
```
Compute inverse Abel transform solution for use as initial solution.
```
x_inv = A.inverse(y)
x0 = snp.clip(x_inv, 0.0, 1.0)
```
Define performance evaluation class.
```
class Trainable(tune.Trainable):
"""Parameter evaluation class."""
def setup(self, config, x_gt, x0, y):
"""This method initializes a new parameter evaluation object. It
is called once when a new parameter evaluation object is created.
The `config` parameter is a dict of specific parameters for
evaluation of a single parameter set (a pair of parameters in
this case). The remaining parameters are objects that are passed
to the evaluation function via the ray object store.
"""
# Put main arrays on jax device.
self.x_gt, self.x0, self.y = jax.device_put([x_gt, x0, y])
# Set up problem to be solved.
self.A = AbelProjector(self.x_gt.shape)
self.f = loss.SquaredL2Loss(y=self.y, A=self.A)
self.C = linop.FiniteDifference(input_shape=self.x_gt.shape)
self.reset_config(config)
def reset_config(self, config):
"""This method is only required when `scico.ray.tune.Tuner` is
initialized with `reuse_actors` set to ``True`` (the default). In
this case, a set of parameter evaluation processes and
corresponding objects are created once (including initialization
via a call to the `setup` method), and this method is called when
switching to evaluation of a different parameter configuration.
If `reuse_actors` is set to ``False``, then a new process and
object are created for each parameter configuration, and this
method is not used.
"""
# Extract solver parameters from config dict.
λ, ρ = config["lambda"], config["rho"]
# Set up parameter-dependent functional.
g = λ * functional.L1Norm()
# Define solver.
cg_tol = 1e-4
cg_maxiter = 25
self.solver = ADMM(
f=self.f,
g_list=[g],
C_list=[self.C],
rho_list=[ρ],
x0=self.x0,
maxiter=10,
subproblem_solver=LinearSubproblemSolver(
cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}
),
)
return True
def step(self):
"""This method is called for each step in the evaluation of a
single parameter configuration. The maximum number of times it
can be called is controlled by the `num_iterations` parameter
in the initialization of a `scico.ray.tune.Tuner` object.
"""
# Perform 10 solver steps for every ray.tune step
x_tv = snp.clip(self.solver.solve(), 0.0, 1.0)
return {"psnr": float(metric.psnr(self.x_gt, x_tv))}
```
Define parameter search space and resources per trial.
```
config = {"lambda": tune.loguniform(1e0, 1e2), "rho": tune.loguniform(1e1, 1e3)}
resources = {"gpu": 0, "cpu": 1} # gpus per trial, cpus per trial
```
Run parameter search.
```
tuner = tune.Tuner(
tune.with_parameters(Trainable, x_gt=x_gt, x0=x0, y=y),
param_space=config,
resources=resources,
metric="psnr",
mode="max",
num_samples=100, # perform 100 parameter evaluations
num_iterations=10, # perform at most 10 steps for each parameter evaluation
)
results = tuner.fit()
```
Display best parameters and corresponding performance.
```
best_result = results.get_best_result()
best_config = best_result.config
print(f"Best PSNR: {best_result.metrics['psnr']:.2f} dB")
print("Best config: " + ", ".join([f"{k}: {v:.2e}" for k, v in best_config.items()]))
```
Plot parameter values visited during parameter search. Marker sizes are
proportional to number of iterations run at each parameter pair. The best
point in the parameter space is indicated in red.
```
fig = plot.figure(figsize=(8, 8))
trials = results.get_dataframe()
for t in trials.iloc:
n = t["training_iteration"]
plot.plot(
t["config/lambda"],
t["config/rho"],
ptyp="loglog",
lw=0,
ms=(0.5 + 1.5 * n),
marker="o",
mfc="blue",
mec="blue",
fig=fig,
)
plot.plot(
best_config["lambda"],
best_config["rho"],
ptyp="loglog",
title="Parameter search sampling locations\n(marker size proportional to number of iterations)",
xlbl=r"$\rho$",
ylbl=r"$\lambda$",
lw=0,
ms=5.0,
marker="o",
mfc="red",
mec="red",
fig=fig,
)
ax = fig.axes[0]
ax.set_xlim([config["rho"].lower, config["rho"].upper])
ax.set_ylim([config["lambda"].lower, config["lambda"].upper])
fig.show()
```
Plot parameter values visited during parameter search and corresponding
reconstruction PSNRs. The best point in the parameter space is indicated
in red.
```
𝜌 = [t["config/rho"] for t in trials.iloc]
𝜆 = [t["config/lambda"] for t in trials.iloc]
psnr = [t["psnr"] for t in trials.iloc]
minpsnr = min(max(psnr), 20.0)
𝜌, 𝜆, psnr = zip(*filter(lambda x: x[2] >= minpsnr, zip(𝜌, 𝜆, psnr)))
fig, ax = plot.subplots(figsize=(10, 8))
sc = ax.scatter(𝜌, 𝜆, c=psnr, cmap=plot.cm.plasma_r)
fig.colorbar(sc)
plot.plot(
best_config["lambda"],
best_config["rho"],
ptyp="loglog",
lw=0,
ms=12.0,
marker="2",
mfc="red",
mec="red",
fig=fig,
ax=ax,
)
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel(r"$\rho$")
ax.set_ylabel(r"$\lambda$")
ax.set_title("PSNR at each sample location\n(values below 20 dB omitted)")
fig.show()
```
| /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples/ct_abel_tv_admm_tune.ipynb | 0.720467 | 0.944842 | ct_abel_tv_admm_tune.ipynb | pypi |
ℓ1 Total Variation Denoising
============================
This example demonstrates impulse noise removal via ℓ1 total variation
<cite data-cite="alliney-1992-digital"/> <cite data-cite="esser-2010-primal"/> (Sec. 2.4.4)
(i.e. total variation regularization with an ℓ1 data fidelity term),
minimizing the functional
$$\mathrm{argmin}_{\mathbf{x}} \; \| \mathbf{y} - \mathbf{x}
\|_1 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $\mathbf{y}$ is the noisy image, $C$ is a 2D finite difference
operator, and $\mathbf{x}$ is the denoised image.
```
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.examples import spnoise
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
from scipy.ndimage import median_filter
plot.config_notebook_plotting()
```
Create a ground truth image and impose salt & pepper noise to create a
noisy test image.
```
N = 256 # image size
phantom = SiemensStar(16)
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
x_gt = 0.5 * x_gt / x_gt.max()
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
y = spnoise(x_gt, 0.5)
```
Denoise with median filtering.
```
x_med = median_filter(y, size=(5, 5))
```
Denoise with ℓ1 total variation.
```
λ = 1.5e0
g_loss = loss.Loss(y=y, f=functional.L1Norm())
g_tv = λ * functional.L21Norm()
# The append=0 option makes the results of horizontal and vertical finite
# differences the same shape, which is required for the L21Norm.
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
solver = ADMM(
f=None,
g_list=[g_loss, g_tv],
C_list=[linop.Identity(input_shape=y.shape), C],
rho_list=[5e0, 5e0],
x0=y,
maxiter=100,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 20}),
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
x_tv = solver.solve()
hist = solver.itstat_object.history(transpose=True)
```
Plot results.
```
plt_args = dict(norm=plot.matplotlib.colors.Normalize(vmin=0, vmax=1.0))
fig, ax = plot.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(13, 12))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0, 0], **plt_args)
plot.imview(y, title="Noisy image", fig=fig, ax=ax[0, 1], **plt_args)
plot.imview(
x_med,
title=f"Median filtering: {metric.psnr(x_gt, x_med):.2f} (dB)",
fig=fig,
ax=ax[1, 0],
**plt_args,
)
plot.imview(
x_tv,
title=f"ℓ1-TV denoising: {metric.psnr(x_gt, x_tv):.2f} (dB)",
fig=fig,
ax=ax[1, 1],
**plt_args,
)
fig.show()
```
Plot convergence statistics.
```
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
```
| /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples/denoise_l1tv_admm.ipynb | 0.806662 | 0.96856 | denoise_l1tv_admm.ipynb | pypi |
3D TV-Regularized Sparse-View CT Reconstruction
===============================================
This example demonstrates solution of a sparse-view, 3D CT
reconstruction problem with isotropic total variation (TV)
regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $A$ is the Radon transform, $\mathbf{y}$ is the sinogram, $C$ is
a 3D finite difference operator, and $\mathbf{x}$ is the desired
image.
```
import numpy as np
import jax
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scico import functional, linop, loss, metric, plot
from scico.examples import create_tangle_phantom
from scico.linop.radon_astra import TomographicProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
plot.config_notebook_plotting()
```
Create a ground truth image and projector.
```
Nx = 128
Ny = 256
Nz = 64
tangle = create_tangle_phantom(Nx, Ny, Nz)
tangle = jax.device_put(tangle)
n_projection = 10 # number of projections
angles = np.linspace(0, np.pi, n_projection) # evenly spaced projection angles
A = TomographicProjector(
tangle.shape, [1.0, 1.0], [Nz, max(Nx, Ny)], angles
) # Radon transform operator
y = A @ tangle # sinogram
```
Set up ADMM solver object.
```
λ = 2e0 # L1 norm regularization parameter
ρ = 5e0 # ADMM penalty parameter
maxiter = 25 # number of ADMM iterations
cg_tol = 1e-4 # CG relative tolerance
cg_maxiter = 25 # maximum CG iterations per ADMM iteration
# The append=0 option makes the results of the finite differences along
# each axis the same shape, which is required for the L21Norm, which is
# used so that g(Cx) corresponds to isotropic TV.
C = linop.FiniteDifference(input_shape=tangle.shape, append=0)
g = λ * functional.L21Norm()
f = loss.SquaredL2Loss(y=y, A=A)
x0 = A.T(y)
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=x0,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}),
itstat_options={"display": True, "period": 5},
)
```
Run the solver.
```
print(f"Solving on {device_info()}\n")
solver.solve()
hist = solver.itstat_object.history(transpose=True)
tangle_recon = solver.x
print(
"TV Restruction\nSNR: %.2f (dB), MAE: %.3f"
% (metric.snr(tangle, tangle_recon), metric.mae(tangle, tangle_recon))
)
```
Show the recovered image.
```
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(7, 5))
plot.imview(tangle[32], title="Ground truth (central slice)", cbar=None, fig=fig, ax=ax[0])
plot.imview(
tangle_recon[32],
title="TV Reconstruction (central slice)\nSNR: %.2f (dB), MAE: %.3f"
% (metric.snr(tangle, tangle_recon), metric.mae(tangle, tangle_recon)),
fig=fig,
ax=ax[1],
)
divider = make_axes_locatable(ax[1])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[1].get_images()[0], cax=cax, label="arbitrary units")
fig.show()
```
| /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples/ct_astra_3d_tv_admm.ipynb | 0.778649 | 0.973844 | ct_astra_3d_tv_admm.ipynb | pypi |
CT Reconstruction with CG and PCG
=================================
This example demonstrates a simple iterative CT reconstruction using
conjugate gradient (CG) and preconditioned conjugate gradient (PCG)
algorithms to solve the problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 \;,$$
where $A$ is the Radon transform, $\mathbf{y}$ is the sinogram, and
$\mathbf{x}$ is the reconstructed image.
```
from time import time
import numpy as np
import jax
import jax.numpy as jnp
from xdesign import Foam, discrete_phantom
from scico import loss, plot
from scico.linop import CircularConvolve
from scico.linop.radon_astra import TomographicProjector
from scico.solver import cg
plot.config_notebook_plotting()
```
Create a ground truth image.
```
N = 256 # phantom size
x_gt = discrete_phantom(Foam(size_range=[0.075, 0.0025], gap=1e-3, porosity=1), size=N)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
```
Configure a CT projection operator and generate synthetic measurements.
```
n_projection = N # matches the phantom size so this is not few-view CT
angles = np.linspace(0, np.pi, n_projection) # evenly spaced projection angles
A = 1 / N * TomographicProjector(x_gt.shape, 1, N, angles) # Radon transform operator
y = A @ x_gt # sinogram
```
Forward and back project a single pixel (Kronecker delta) to compute
an approximate impulse response for $\mathbf{A}^T \mathbf{A}$.
```
H = CircularConvolve.from_operator(A.T @ A)
```
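The `from_operator` call above is assumed to construct the approximation by
probing the operator with an impulse; a minimal illustrative sketch of that idea
(not used in the reconstruction below, and the choice of impulse location is an
assumption made only for illustration) is
```
# Apply A^T A to a centered Kronecker delta to obtain an approximate impulse
# response (illustration only; CircularConvolve.from_operator is assumed to
# perform an equivalent computation internally).
delta = jnp.zeros(x_gt.shape).at[N // 2, N // 2].set(1.0)
impulse_response = A.T @ A @ delta
```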
Invert in the Fourier domain to form a preconditioner $\mathbf{M}
\approx (\mathbf{A}^T \mathbf{A})^{-1}$. See
<cite data-cite="clinthorne-1993-preconditioning"/> Section V.A. for more details.
```
# γ limits the gain of the preconditioner; higher gives a weaker filter.
γ = 1e-2
# The imaginary part comes from numerical errors in A.T and needs to be
# removed to ensure H is symmetric, positive definite.
frequency_response = np.real(H.h_dft)
inv_frequency_response = 1 / (frequency_response + γ)
# Using circular convolution without padding is sufficient here because
# M is approximate anyway.
M = CircularConvolve(inv_frequency_response, x_gt.shape, h_is_dft=True)
```
Check that $\mathbf{M}$ does approximately invert $\mathbf{A}^T \mathbf{A}$.
```
plot_args = dict(norm=plot.matplotlib.colors.Normalize(vmin=0, vmax=1.5))
fig, axes = plot.subplots(nrows=1, ncols=3, figsize=(12, 4.5))
plot.imview(x_gt, title="Ground truth, $x_{gt}$", fig=fig, ax=axes[0], **plot_args)
plot.imview(
A.T @ A @ x_gt, title=r"$\mathbf{A}^T \mathbf{A} x_{gt}$", fig=fig, ax=axes[1], **plot_args
)
plot.imview(
M @ A.T @ A @ x_gt,
title=r"$\mathbf{M} \mathbf{A}^T \mathbf{A} x_{gt}$",
fig=fig,
ax=axes[2],
**plot_args,
)
fig.suptitle(r"$\mathbf{M}$ approximately inverts $\mathbf{A}^T \mathbf{A}$")
fig.tight_layout()
fig.colorbar(
axes[2].get_images()[0],
ax=axes,
location="right",
shrink=1.0,
pad=0.05,
label="Arbitrary Units",
)
fig.show()
```
Reconstruct with both standard and preconditioned conjugate gradient.
```
start_time = time()
x_cg, info_cg = cg(
A.T @ A,
A.T @ y,
jnp.zeros(A.input_shape, dtype=A.input_dtype),
tol=1e-5,
info=True,
)
time_cg = time() - start_time
start_time = time()
x_pcg, info_pcg = cg(
A.T @ A,
A.T @ y,
jnp.zeros(A.input_shape, dtype=A.input_dtype),
tol=2e-5, # preconditioning affects the problem scaling so tol differs between CG and PCG
info=True,
M=M,
)
time_pcg = time() - start_time
```
Compare CG and PCG in terms of reconstruction time and data fidelity.
```
f_cg = loss.SquaredL2Loss(y=A.T @ y, A=A.T @ A)
f_data = loss.SquaredL2Loss(y=y, A=A)
print(
f"{'Method':10s}{'Iterations':>15s}{'Time (s)':>15s}{'||ATAx - ATy||':>15s}{'||Ax - y||':>15s}"
)
print(
f"{'CG':10s}{info_cg['num_iter']:>15d}{time_cg:>15.2f}{f_cg(x_cg):>15.2e}{f_data(x_cg):>15.2e}"
)
print(
f"{'PCG':10s}{info_pcg['num_iter']:>15d}{time_pcg:>15.2f}{f_cg(x_pcg):>15.2e}"
f"{f_data(x_pcg):>15.2e}"
)
```
| /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples/ct_astra_noreg_pcg.ipynb | 0.793946 | 0.9838 | ct_astra_noreg_pcg.ipynb | pypi |
Convolutional Sparse Coding with Mask Decoupling (ADMM)
=======================================================
This example demonstrates the solution of a convolutional sparse coding
problem
$$\mathrm{argmin}_{\mathbf{x}} \; \frac{1}{2} \Big\| \mathbf{y} -
B \Big( \sum_k \mathbf{h}_k \ast \mathbf{x}_k \Big) \Big\|_2^2 +
\lambda \sum_k ( \| \mathbf{x}_k \|_1 - \| \mathbf{x}_k \|_2 ) \;,$$
where the $\mathbf{h}_k$ are the filters comprising the dictionary,
the $\mathbf{x}_k$ are the corresponding coefficient maps,
$\mathbf{y}$ is the signal to be represented, and $B$ is a cropping
operator that allows the boundary artifacts resulting from circular
convolution to be avoided. Following the mask decoupling approach
<cite data-cite="almeida-2013-deconvolving"/>, the problem is posed in ADMM form
as
$$\mathrm{argmin}_{\mathbf{x}, \mathbf{z}_0, \mathbf{z}_1} \; (1/2) \|
\mathbf{y} - B \mathbf{z}_0 \|_2^2 + \lambda \sum_k ( \| \mathbf{z}_{1,k}
\|_1 - \| \mathbf{z}_{1,k} \|_2 ) \\ \;\; \text{s.t.} \;\;
\mathbf{z}_0 = \sum_k \mathbf{h}_k \ast \mathbf{x}_k \;\; \text{and} \;\;
\mathbf{z}_{1,k} = \mathbf{x}_k \;.$$
The most computationally expensive step in the ADMM algorithm is solved
using the frequency-domain approach proposed in
<cite data-cite="wohlberg-2014-efficient"/>.
```
import numpy as np
import jax
import scico.numpy as snp
from scico import plot
from scico.examples import create_conv_sparse_phantom
from scico.functional import L1MinusL2Norm, ZeroFunctional
from scico.linop import CircularConvolve, Crop, Identity, Sum
from scico.loss import SquaredL2Loss
from scico.optimize.admm import ADMM, G0BlockCircularConvolveSolver
from scico.util import device_info
plot.config_notebook_plotting()
```
Set problem size and create random convolutional dictionary (a set of
filters) and a corresponding sparse random set of coefficient maps.
```
N = 121 # image size
Nnz = 128 # number of non-zeros in coefficient maps
h, x0 = create_conv_sparse_phantom(N, Nnz)
```
Normalize dictionary filters and scale coefficient maps accordingly.
```
hnorm = np.sqrt(np.sum(h**2, axis=(1, 2), keepdims=True))
h /= hnorm
x0 *= hnorm
```
Convert numpy arrays to jax arrays.
```
h = jax.device_put(h)
x0 = jax.device_put(x0)
```
Set up required padding and corresponding crop operator.
```
h_center = (h.shape[1] // 2, h.shape[2] // 2)
pad_width = ((0, 0), (h_center[0], h_center[0]), (h_center[1], h_center[1]))
x0p = snp.pad(x0, pad_width=pad_width)
B = Crop(pad_width[1:], input_shape=x0p.shape[1:])
```
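An optional illustrative check (an assumption about what is worth inspecting,
not part of the original script) confirms that the crop operator maps the padded
spatial shape back to the original image size:
```
# The crop operator should undo the spatial padding applied above.
print("Padded coefficient map shape:", x0p.shape[1:])
print("Cropped (output) shape:      ", B.output_shape)
```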
Set up sum-of-convolutions forward operator.
```
C = CircularConvolve(h, input_shape=x0p.shape, ndims=2, h_center=h_center)
S = Sum(input_shape=C.output_shape, axis=0)
A = S @ C
```
Construct test image from dictionary $\mathbf{h}$ and padded version of
coefficient maps $\mathbf{x}_0$.
```
y = B(A(x0p))
```
Set functional and solver parameters.
```
λ = 1e0 # l1-l2 norm regularization parameter
ρ0 = 1e0 # ADMM penalty parameters
ρ1 = 3e0
maxiter = 200 # number of ADMM iterations
```
Define loss function and regularization. Note the use of the
$\ell_1 - \ell_2$ norm, which has been found to provide slightly better
performance than the $\ell_1$ norm in this type of problem
<cite data-cite="wohlberg-2021-psf"/>.
```
f = ZeroFunctional()
g0 = SquaredL2Loss(y=y, A=B)
g1 = λ * L1MinusL2Norm()
C0 = A
C1 = Identity(input_shape=x0p.shape)
```
Initialize ADMM solver.
```
solver = ADMM(
f=f,
g_list=[g0, g1],
C_list=[C0, C1],
rho_list=[ρ0, ρ1],
alpha=1.8,
maxiter=maxiter,
subproblem_solver=G0BlockCircularConvolveSolver(check_solve=True),
itstat_options={"display": True, "period": 10},
)
```
Run the solver.
```
print(f"Solving on {device_info()}\n")
x1 = solver.solve()
hist = solver.itstat_object.history(transpose=True)
```
Show the recovered coefficient maps.
```
fig, ax = plot.subplots(nrows=2, ncols=3, figsize=(12, 8.6))
plot.imview(x0[0], title="Coef. map 0", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 0])
ax[0, 0].set_ylabel("Ground truth")
plot.imview(x0[1], title="Coef. map 1", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 1])
plot.imview(x0[2], title="Coef. map 2", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 2])
plot.imview(x1[0], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 0])
ax[1, 0].set_ylabel("Recovered")
plot.imview(x1[1], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 1])
plot.imview(x1[2], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 2])
fig.tight_layout()
fig.show()
```
Show test image and reconstruction from recovered coefficient maps. Note
the absence of the wrap-around effects at the boundary that can be seen
in the corresponding images in the [related example](sparsecode_conv_admm.rst).
```
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 6))
plot.imview(y, title="Test image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[0])
plot.imview(B(A(x1)), title="Reconstructed image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[1])
fig.show()
```
Plot convergence statistics.
```
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
```
| /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples/sparsecode_conv_md_admm.ipynb | 0.689096 | 0.975785 | sparsecode_conv_md_admm.ipynb | pypi |
Convolutional Sparse Coding (ADMM)
==================================
This example demonstrates the solution of a simple convolutional sparse
coding problem
$$\mathrm{argmin}_{\mathbf{x}} \; \frac{1}{2} \Big\| \mathbf{y} -
\sum_k \mathbf{h}_k \ast \mathbf{x}_k \Big\|_2^2 + \lambda \sum_k
( \| \mathbf{x}_k \|_1 - \| \mathbf{x}_k \|_2 ) \;,$$
where the $\mathbf{h}_k$ are the filters comprising the dictionary,
the $\mathbf{x}_k$ are the corresponding coefficient maps, and
$\mathbf{y}$ is the signal to be represented. The problem is solved via
an ADMM algorithm using the frequency-domain approach proposed in
<cite data-cite="wohlberg-2014-efficient"/>.
```
import numpy as np
import jax
import scico.numpy as snp
from scico import plot
from scico.examples import create_conv_sparse_phantom
from scico.functional import L1MinusL2Norm
from scico.linop import CircularConvolve, Identity, Sum
from scico.loss import SquaredL2Loss
from scico.optimize.admm import ADMM, FBlockCircularConvolveSolver
from scico.util import device_info
plot.config_notebook_plotting()
```
Set problem size and create random convolutional dictionary (a set of
filters) and a corresponding sparse random set of coefficient maps.
```
N = 128 # image size
Nnz = 128 # number of non-zeros in coefficient maps
h, x0 = create_conv_sparse_phantom(N, Nnz)
```
Normalize dictionary filters and scale coefficient maps accordingly.
```
hnorm = np.sqrt(np.sum(h**2, axis=(1, 2), keepdims=True))
h /= hnorm
x0 *= hnorm
```
Convert numpy arrays to jax arrays.
```
h = jax.device_put(h)
x0 = jax.device_put(x0)
```
Set up sum-of-convolutions forward operator.
```
C = CircularConvolve(h, input_shape=x0.shape, ndims=2)
S = Sum(input_shape=C.output_shape, axis=0)
A = S @ C
```
Construct test image from dictionary $\mathbf{h}$ and coefficient maps
$\mathbf{x}_0$.
```
y = A(x0)
```
Set functional and solver parameters.
```
λ = 1e0 # l1-l2 norm regularization parameter
ρ = 2e0 # ADMM penalty parameter
maxiter = 200 # number of ADMM iterations
```
Define loss function and regularization. Note the use of the
$\ell_1 - \ell_2$ norm, which has been found to provide slightly better
performance than the $\ell_1$ norm in this type of problem
<cite data-cite="wohlberg-2021-psf"/>.
```
f = SquaredL2Loss(y=y, A=A)
g0 = λ * L1MinusL2Norm()
C0 = Identity(input_shape=x0.shape)
```
Initialize ADMM solver.
```
solver = ADMM(
f=f,
g_list=[g0],
C_list=[C0],
rho_list=[ρ],
alpha=1.8,
maxiter=maxiter,
subproblem_solver=FBlockCircularConvolveSolver(check_solve=True),
itstat_options={"display": True, "period": 10},
)
```
Run the solver.
```
print(f"Solving on {device_info()}\n")
x1 = solver.solve()
hist = solver.itstat_object.history(transpose=True)
```
Show the recovered coefficient maps.
```
fig, ax = plot.subplots(nrows=2, ncols=3, figsize=(12, 8.6))
plot.imview(x0[0], title="Coef. map 0", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 0])
ax[0, 0].set_ylabel("Ground truth")
plot.imview(x0[1], title="Coef. map 1", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 1])
plot.imview(x0[2], title="Coef. map 2", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 2])
plot.imview(x1[0], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 0])
ax[1, 0].set_ylabel("Recovered")
plot.imview(x1[1], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 1])
plot.imview(x1[2], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 2])
fig.tight_layout()
fig.show()
```
Show test image and reconstruction from recovered coefficient maps.
```
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 6))
plot.imview(y, title="Test image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[0])
plot.imview(A(x1), title="Reconstructed image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[1])
fig.show()
```
Plot convergence statistics.
```
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
```
| /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples/sparsecode_conv_admm.ipynb | 0.719581 | 0.970882 | sparsecode_conv_admm.ipynb | pypi |
Basis Pursuit DeNoising (APGM)
==============================
This example demonstrates the solution of the sparse coding problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - D \mathbf{x}
\|_2^2 + \lambda \| \mathbf{x} \|_1\;,$$
where $D$ is the dictionary, $\mathbf{y}$ is the signal to be represented,
and $\mathbf{x}$ is the sparse representation.
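As a brief reminder (a sketch only; the `AcceleratedPGM` solver used below adds
a Nesterov-style momentum step), the basic proximal gradient update for this
problem with step size $1/L_0$, where $L_0 \geq \| D \|_2^2$, is
$$\mathbf{x}^{(k+1)} = \mathrm{prox}_{(\lambda / L_0) \| \cdot \|_1} \left(
\mathbf{x}^{(k)} - (1/L_0) \, D^T ( D \mathbf{x}^{(k)} - \mathbf{y} ) \right) \;,$$
where the proximal operator of the $\ell_1$ norm is elementwise soft
thresholding.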
```
import numpy as np
import jax
from scico import functional, linop, loss, plot
from scico.optimize.pgm import AcceleratedPGM
from scico.util import device_info
plot.config_notebook_plotting()
```
Construct a random dictionary, a reference random sparse
representation, and a test signal consisting of the synthesis of the
reference sparse representation.
```
m = 512 # Signal size
n = 4 * m # Dictionary size
s = 32 # Sparsity level (number of non-zeros)
σ = 0.5 # Noise level
np.random.seed(12345)
D = np.random.randn(m, n)
L0 = np.linalg.norm(D, 2) ** 2
x_gt = np.zeros(n) # true signal
idx = np.random.permutation(list(range(0, n - 1)))
x_gt[idx[0:s]] = np.random.randn(s)
y = D @ x_gt + σ * np.random.randn(m) # synthetic signal
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
y = jax.device_put(y) # convert to jax array, push to GPU
```
Set up the forward operator and AcceleratedPGM solver object.
```
maxiter = 100
λ = 2.98e1
A = linop.MatrixOperator(D)
f = loss.SquaredL2Loss(y=y, A=A)
g = λ * functional.L1Norm()
solver = AcceleratedPGM(
f=f, g=g, L0=L0, x0=A.adj(y), maxiter=maxiter, itstat_options={"display": True, "period": 10}
)
```
Run the solver.
```
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
```
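An optional illustrative check (the thresholds are assumptions chosen only for
display) compares the support of the recovered representation with that of the
ground truth:
```
# Count entries with magnitude above a small threshold (illustration only).
print("Ground truth non-zeros:", int(np.sum(np.abs(x_gt) > 1e-6)))
print("Recovered non-zeros:   ", int(np.sum(np.abs(x) > 1e-2)))
```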
Plot the recovered coefficients and convergence statistics.
```
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
np.vstack((x_gt, x)).T,
title="Coefficients",
lgnd=("Ground Truth", "Recovered"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.vstack((hist.Objective, hist.Residual)).T,
ptyp="semilogy",
title="Convergence",
xlbl="Iteration",
lgnd=("Objective", "Residual"),
fig=fig,
ax=ax[1],
)
fig.show()
```
| /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples/sparsecode_pgm.ipynb | 0.659734 | 0.971402 | sparsecode_pgm.ipynb | pypi |
Training of DnCNN for Denoising
===============================
This example demonstrates the training and application of the DnCNN model
from <cite data-cite="zhang-2017-dncnn"/> to denoise images that have been
corrupted with additive Gaussian noise.
```
import os
from time import time
import numpy as np
import jax
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scico import flax as sflax
from scico import metric, plot
from scico.flax.examples import load_image_data
plot.config_notebook_plotting()
```
Prepare parallel processing. Set an arbitrary processor count (only
applies if GPU is not available).
```
os.environ["XLA_FLAGS"] = "--xla_force_host_platform_device_count=8"
platform = jax.lib.xla_bridge.get_backend().platform
print("Platform: ", platform)
```
Read data from cache or generate if not available.
```
size = 40 # patch size
train_nimg = 400 # number of training images
test_nimg = 16 # number of testing images
nimg = train_nimg + test_nimg
gray = True # use gray scale images
data_mode = "dn" # Denoising problem
noise_level = 0.1 # Standard deviation of noise
noise_range = False # Use fixed noise level
stride = 23 # Stride to sample multiple patches from each image
train_ds, test_ds = load_image_data(
train_nimg,
test_nimg,
size,
gray,
data_mode,
verbose=True,
noise_level=noise_level,
noise_range=noise_range,
stride=stride,
)
```
Define configuration dictionary for model and training loop.
Parameters have been selected for demonstration purposes and relatively
short training. The depth of the model has been reduced to 6, instead of
the 17 of the original model. The suggested settings can be found in the
original paper.
```
# model configuration
model_conf = {
"depth": 6,
"num_filters": 64,
}
# training configuration
train_conf: sflax.ConfigDict = {
"seed": 0,
"opt_type": "ADAM",
"batch_size": 128,
"num_epochs": 50,
"base_learning_rate": 1e-3,
"warmup_epochs": 0,
"log_every_steps": 5000,
"log": True,
}
```
Construct DnCNN model.
```
channels = train_ds["image"].shape[-1]
model = sflax.DnCNNNet(
depth=model_conf["depth"],
channels=channels,
num_filters=model_conf["num_filters"],
)
```
Run training loop.
```
workdir = os.path.join(os.path.expanduser("~"), ".cache", "scico", "examples", "dncnn_out")
train_conf["workdir"] = workdir
print(f"{'JAX process: '}{jax.process_index()}{' / '}{jax.process_count()}")
print(f"{'JAX local devices: '}{jax.local_devices()}")
trainer = sflax.BasicFlaxTrainer(
train_conf,
model,
train_ds,
test_ds,
)
start_time = time()
modvar, stats_object = trainer.train()
time_train = time() - start_time
```
Evaluate on testing data.
```
test_patches = 720
start_time = time()
fmap = sflax.FlaxMap(model, modvar)
output = fmap(test_ds["image"][:test_patches])
time_eval = time() - start_time
output = np.clip(output, a_min=0, a_max=1.0)
```
Compare trained model in terms of reconstruction time and data fidelity.
```
snr_eval = metric.snr(test_ds["label"][:test_patches], output)
psnr_eval = metric.psnr(test_ds["label"][:test_patches], output)
print(
f"{'DnCNNNet training':18s}{'epochs:':2s}{train_conf['num_epochs']:>5d}"
f"{'':21s}{'time[s]:':10s}{time_train:>7.2f}"
)
print(
f"{'DnCNNNet testing':18s}{'SNR:':5s}{snr_eval:>5.2f}{' dB'}{'':3s}"
f"{'PSNR:':6s}{psnr_eval:>5.2f}{' dB'}{'':3s}{'time[s]:':10s}{time_eval:>7.2f}"
)
```
Plot comparison. Note that since the patches are small, the plots may
correspond to unidentifiable image fragments.
```
np.random.seed(123)
indx = np.random.randint(0, high=test_patches)
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(test_ds["label"][indx, ..., 0], title="Ground truth", cbar=None, fig=fig, ax=ax[0])
plot.imview(
test_ds["image"][indx, ..., 0],
title="Noisy: \nSNR: %.2f (dB), PSNR: %.2f"
% (
metric.snr(test_ds["label"][indx, ..., 0], test_ds["image"][indx, ..., 0]),
metric.psnr(test_ds["label"][indx, ..., 0], test_ds["image"][indx, ..., 0]),
),
cbar=None,
fig=fig,
ax=ax[1],
)
plot.imview(
output[indx, ..., 0],
title="DnCNNNet Reconstruction\nSNR: %.2f (dB), PSNR: %.2f"
% (
metric.snr(test_ds["label"][indx, ..., 0], output[indx, ..., 0]),
metric.psnr(test_ds["label"][indx, ..., 0], output[indx, ..., 0]),
),
fig=fig,
ax=ax[2],
)
divider = make_axes_locatable(ax[2])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[2].get_images()[0], cax=cax, label="arbitrary units")
fig.show()
```
Plot convergence statistics. Statistics are only generated if a training
cycle was run (i.e. the final epoch results were not simply read from a checkpoint).
```
if stats_object is not None:
hist = stats_object.history(transpose=True)
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
np.vstack((hist.Train_Loss, hist.Eval_Loss)).T,
x=hist.Epoch,
ptyp="semilogy",
title="Loss function",
xlbl="Epoch",
ylbl="Loss value",
lgnd=("Train", "Test"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.vstack((hist.Train_SNR, hist.Eval_SNR)).T,
x=hist.Epoch,
title="Metric",
xlbl="Epoch",
ylbl="SNR (dB)",
lgnd=("Train", "Test"),
fig=fig,
ax=ax[1],
)
fig.show()
```
| /scico-0.0.4.tar.gz/scico-0.0.4/docs/source/examples/denoise_dncnn_train_bsds.ipynb | 0.657428 | 0.894329 | denoise_dncnn_train_bsds.ipynb | pypi |