repo_name stringlengths 7-90 | path stringlengths 5-191 | copies stringlengths 1-3 | size stringlengths 4-6 | content stringlengths 976-581k | license stringclasses 15 values |
---|---|---|---|---|---|
ghorn/rawesome | studies/pendulum_collocation.py | 2 | 5071 | # Copyright 2012-2013 Greg Horn
#
# This file is part of rawesome.
#
# rawesome is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# rawesome is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with rawesome. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import zmq
import numpy
from numpy import pi
import copy
import casadi as C
import rawe

def main():
    nk = 50
    print "creating model"
    dae = rawe.models.pendulum2()
    dae.addP('endTime')
    print "setting up OCP"
    ocp = rawe.collocation.Coll(dae, nk=nk,nicp=1,deg=4, collPoly='RADAU')
    print "setting up collocation"
    ocp.setupCollocation( ocp.lookup('endTime') )

    # constrain invariants
    ocp.constrain(ocp.lookup('c',timestep=0),'==',0)
    ocp.constrain(ocp.lookup('cdot',timestep=0),'==',0)

    # bounds
    r = 0.3
    ocp.bound('x',(-2*r,2*r))
    ocp.bound('z',(-2*r,0.01*r))
    ocp.bound('dx',(-5,5))
    ocp.bound('dz',(-5,5))
    ocp.bound('torque',(-50,50))
    ocp.bound('m',(0.3,0.3))
    ocp.bound('endTime',(0.1,3.5))

    # boundary conditions
    ocp.bound('x',(r,r),timestep=0)
    ocp.bound('z',(0,0),timestep=0)
    ocp.bound('x',(0,0),timestep=-1)
    ocp.bound('z',(-10*r,0.01*r),timestep=-1)
    ocp.bound('dx',(0,0),timestep=0)
    ocp.bound('dz',(0,0),timestep=0)
    ocp.bound('dx',(0,0),timestep=-1)
    ocp.bound('dz',(-0.5,0.5),timestep=-1)

    # make the solver
    obj = 0
    for k in range(ocp.nk):
        t = ocp.lookup('torque',timestep=k)
        obj += t*t
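    # objective: minimum time, plus a tiny torque-squared term to regularize the controls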
    ocp.setObjective(ocp.lookup('endTime') + 1e-6*obj/float(nk))
    # ocp.setObjective(1e-6*obj/float(nk))

    context = zmq.Context(1)
    publisher = context.socket(zmq.PUB)
    publisher.bind("tcp://*:5563")

    # callback function
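    # (invoked on each NLP iteration; publishes the current iterate over
    #  ZeroMQ so an external viewer can plot the trajectory live)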
    class MyCallback:
        def __init__(self):
            self.iter = 0
        def __call__(self,f,*args):
            xOpt = numpy.array(f.input(C.NLP_X_OPT))
            self.iter = self.iter + 1
            traj = rawe.collocation.trajectory.Trajectory(ocp,xOpt)
            po = rawe.kite_pb2.PendulumOpt()
            po.x.extend(list([traj.lookup('x',timestep=k) for k in range(ocp.nk+1)]))
            po.z.extend(list([traj.lookup('z',timestep=k) for k in range(ocp.nk+1)]))
            po.messages.append('endTime: %.3f'% traj.lookup('endTime'))
            po.messages.append('mass: %.3f'% traj.lookup('m'))
            po.messages.append('iters: %d' % self.iter)
            publisher.send_multipart(["pendulum-opt", po.SerializeToString()])

    # solver
    solverOptions = [ ("linear_solver","ma57")
                    # , ("derivative_test","first-order")
                    , ("expand_f",True)
                    , ("expand_g",True)
                    , ("generate_hessian",True)
                    , ("max_iter",10000)
                    , ("tol",1e-8)
                    ]
    # solverOptions = [ ("Timeout", 1e6),
    #                   ("UserHM", True)]
    #                   ("ScaleConIter",True),
    #                   ("ScaledFD",True),
    #                   ("ScaledKKT",True),
    #                   ("ScaledObj",True),
    #                   ("ScaledQP",True)]
    constraintFunOptions = [('numeric_jacobian',False)]

    # initial conditions
    endTime = 0.3
    xOld = r
    zOld = 0
    dt0 = endTime/nk
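    # initial guess: positions trace a circular arc of radius r (theta from 0 to pi/8),
    # velocities approximated by finite differences of the guessed positions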
    for k in range(nk+1):
        theta = float(k)/nk*C.pi/8.0
        x = r*C.cos(theta)
        z = -r*C.sin(theta)
        ocp.guess('x',x,timestep=k)
        ocp.guess('z',z,timestep=k)
        ocp.guess('dx',(x-xOld)/dt0,timestep=k)
        ocp.guess('dz',(z-zOld)/dt0,timestep=k)
        xOld = x
        zOld = z

    for k in range(ocp.nk):
        if k < ocp.nk:
            ocp.guess('torque',0,timestep=k)
        else:
            ocp.guess('torque',-0,timestep=k)

    ocp.guess('m',0.3)
    ocp.guess('endTime',endTime)
    ## ocp.guess('dx',0)
    ## ocp.guess('dz',0)
    # ocp.guessX([r,0,0,0])
    # ocp.guessX([0,-r,0,0])
    # ocp.guess('torque',0)
    # ocp.interpolateInitialGuess("data/pendulum_opt.dat",force=True,quiet=True)

    print "setting up solver"
    ocp.setupSolver( solverOpts=solverOptions,
                     constraintFunOpts=constraintFunOptions,
                     callback=MyCallback() )

    print "solving"
    traj = ocp.solve()
    print "endTime: "+str(traj.lookup('endTime'))
    print "mass: "+str(traj.lookup('m'))

    print "saving optimal trajectory"
    traj.save("data/pendulum_opt.dat")

    # Plot the results
    traj.subplot([['x','z'],['dx','dz']])
    traj.plot('torque')
    traj.plot('tau')
    plt.show()
if __name__=='__main__':
    main()
| lgpl-3.0 |
mne-tools/mne-python | mne/io/fiff/tests/test_raw_fiff.py | 4 | 72663 | # -*- coding: utf-8 -*-
# Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
from copy import deepcopy
from functools import partial
from io import BytesIO
import os
import os.path as op
import pathlib
import pickle
import shutil
import sys
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose)
import pytest
from mne.datasets import testing
from mne.filter import filter_data
from mne.io.constants import FIFF
from mne.io import RawArray, concatenate_raws, read_raw_fif, base
from mne.io.open import read_tag, read_tag_info
from mne.io.tag import _read_tag_header
from mne.io.tests.test_raw import _test_concat, _test_raw_reader
from mne import (concatenate_events, find_events, equalize_channels,
compute_proj_raw, pick_types, pick_channels, create_info,
pick_info)
from mne.utils import (requires_pandas, assert_object_equal, _dt_to_stamp,
requires_mne, run_subprocess,
assert_and_remove_boundary_annot)
from mne.annotations import Annotations
testing_path = testing.data_path(download=False)
data_dir = op.join(testing_path, 'MEG', 'sample')
fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
ms_fname = op.join(testing_path, 'SSS', 'test_move_anon_raw.fif')
skip_fname = op.join(testing_path, 'misc', 'intervalrecording_raw.fif')
base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
test_fif_fname = op.join(base_dir, 'test_raw.fif')
test_fif_gz_fname = op.join(base_dir, 'test_raw.fif.gz')
ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
fif_bad_marked_fname = op.join(base_dir, 'test_withbads_raw.fif')
bad_file_works = op.join(base_dir, 'test_bads.txt')
bad_file_wrong = op.join(base_dir, 'test_wrong_bads.txt')
hp_fname = op.join(base_dir, 'test_chpi_raw_hp.txt')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
@testing.requires_testing_data
def test_acq_skip(tmpdir):
"""Test treatment of acquisition skips."""
raw = read_raw_fif(skip_fname, preload=True)
picks = [1, 2, 10]
assert len(raw.times) == 17000
annotations = raw.annotations
assert len(annotations) == 3 # there are 3 skips
assert_allclose(annotations.onset, [14, 19, 23])
assert_allclose(annotations.duration, [2., 2., 3.]) # inclusive!
data, times = raw.get_data(
picks, reject_by_annotation='omit', return_times=True)
expected_data, expected_times = zip(raw[picks, :2000],
raw[picks, 4000:7000],
raw[picks, 9000:11000],
raw[picks, 14000:17000])
expected_times = np.concatenate(list(expected_times), axis=-1)
assert_allclose(times, expected_times)
expected_data = list(expected_data)
assert_allclose(data, np.concatenate(expected_data, axis=-1), atol=1e-22)
# Check that acquisition skips are handled properly in filtering
kwargs = dict(l_freq=None, h_freq=50., fir_design='firwin')
raw_filt = raw.copy().filter(picks=picks, **kwargs)
for data in expected_data:
filter_data(data, raw.info['sfreq'], copy=False, **kwargs)
data = raw_filt.get_data(picks, reject_by_annotation='omit')
assert_allclose(data, np.concatenate(expected_data, axis=-1), atol=1e-22)
# Check that acquisition skips are handled properly during I/O
fname = tmpdir.join('test_raw.fif')
raw.save(fname, fmt=raw.orig_format)
# first: file size should not increase much (orig data is missing
# 7 of 17 buffers, so if we write them out it should increase the file
# size quite a bit.
orig_size = op.getsize(skip_fname)
new_size = op.getsize(fname)
max_size = int(1.05 * orig_size) # almost the same + annotations
assert new_size < max_size, (new_size, max_size)
raw_read = read_raw_fif(fname)
assert raw_read.annotations is not None
assert_allclose(raw.times, raw_read.times)
assert_allclose(raw_read[:][0], raw[:][0], atol=1e-17)
# Saving with a bad buffer length emits warning
raw.pick_channels(raw.ch_names[:2])
with pytest.warns(None) as w:
raw.save(fname, buffer_size_sec=0.5, overwrite=True)
assert len(w) == 0
with pytest.warns(RuntimeWarning, match='did not fit evenly'):
raw.save(fname, buffer_size_sec=2., overwrite=True)
def test_fix_types():
"""Test fixing of channel types."""
for fname, change in ((hp_fif_fname, True), (test_fif_fname, False),
(ctf_fname, False)):
raw = read_raw_fif(fname)
mag_picks = pick_types(raw.info, meg='mag')
other_picks = np.setdiff1d(np.arange(len(raw.ch_names)), mag_picks)
# we don't actually have any files suffering from this problem, so
# fake it
if change:
for ii in mag_picks:
raw.info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T2
orig_types = np.array([ch['coil_type'] for ch in raw.info['chs']])
raw.fix_mag_coil_types()
new_types = np.array([ch['coil_type'] for ch in raw.info['chs']])
if not change:
assert_array_equal(orig_types, new_types)
else:
assert_array_equal(orig_types[other_picks], new_types[other_picks])
assert ((orig_types[mag_picks] != new_types[mag_picks]).all())
assert ((new_types[mag_picks] ==
FIFF.FIFFV_COIL_VV_MAG_T3).all())
def test_concat(tmpdir):
"""Test RawFIF concatenation."""
# we trim the file to save lots of memory and some time
raw = read_raw_fif(test_fif_fname)
raw.crop(0, 2.)
test_name = tmpdir.join('test_raw.fif')
raw.save(test_name)
# now run the standard test
_test_concat(partial(read_raw_fif), test_name)
@testing.requires_testing_data
def test_hash_raw():
"""Test hashing raw objects."""
raw = read_raw_fif(fif_fname)
pytest.raises(RuntimeError, raw.__hash__)
raw = read_raw_fif(fif_fname).crop(0, 0.5)
raw_size = raw._size
raw.load_data()
raw_load_size = raw._size
assert (raw_size < raw_load_size)
raw_2 = read_raw_fif(fif_fname).crop(0, 0.5)
raw_2.load_data()
assert hash(raw) == hash(raw_2)
# do NOT use assert_equal here, failing output is terrible
assert pickle.dumps(raw) == pickle.dumps(raw_2)
raw_2._data[0, 0] -= 1
assert hash(raw) != hash(raw_2)
@testing.requires_testing_data
def test_maxshield():
"""Test maxshield warning."""
with pytest.warns(RuntimeWarning, match='Internal Active Shielding') as w:
read_raw_fif(ms_fname, allow_maxshield=True)
assert ('test_raw_fiff.py' in w[0].filename)
@testing.requires_testing_data
def test_subject_info(tmpdir):
"""Test reading subject information."""
raw = read_raw_fif(fif_fname).crop(0, 1)
assert (raw.info['subject_info'] is None)
# fake some subject data
keys = ['id', 'his_id', 'last_name', 'first_name', 'birthday', 'sex',
'hand']
vals = [1, 'foobar', 'bar', 'foo', (1901, 2, 3), 0, 1]
subject_info = dict()
for key, val in zip(keys, vals):
subject_info[key] = val
raw.info['subject_info'] = subject_info
out_fname = tmpdir.join('test_subj_info_raw.fif')
raw.save(out_fname, overwrite=True)
raw_read = read_raw_fif(out_fname)
for key in keys:
assert subject_info[key] == raw_read.info['subject_info'][key]
assert raw.info['meas_date'] == raw_read.info['meas_date']
for key in ['secs', 'usecs', 'version']:
assert raw.info['meas_id'][key] == raw_read.info['meas_id'][key]
assert_array_equal(raw.info['meas_id']['machid'],
raw_read.info['meas_id']['machid'])
@testing.requires_testing_data
def test_copy_append():
"""Test raw copying and appending combinations."""
raw = read_raw_fif(fif_fname, preload=True).copy()
raw_full = read_raw_fif(fif_fname)
raw_full.append(raw)
data = raw_full[:, :][0]
assert data.shape[1] == 2 * raw._data.shape[1]
@testing.requires_testing_data
def test_output_formats(tmpdir):
"""Test saving and loading raw data using multiple formats."""
formats = ['short', 'int', 'single', 'double']
tols = [1e-4, 1e-7, 1e-7, 1e-15]
# let's fake a raw file with different formats
raw = read_raw_fif(test_fif_fname).crop(0, 1)
temp_file = tmpdir.join('raw.fif')
for ii, (fmt, tol) in enumerate(zip(formats, tols)):
# Let's test the overwriting error throwing while we're at it
if ii > 0:
pytest.raises(IOError, raw.save, temp_file, fmt=fmt)
raw.save(temp_file, fmt=fmt, overwrite=True)
raw2 = read_raw_fif(temp_file)
raw2_data = raw2[:, :][0]
assert_allclose(raw2_data, raw[:, :][0], rtol=tol, atol=1e-25)
assert raw2.orig_format == fmt
def _compare_combo(raw, new, times, n_times):
"""Compare data."""
for ti in times: # let's do a subset of points for speed
orig = raw[:, ti % n_times][0]
# these are almost_equals because of possible dtype differences
assert_allclose(orig, new[:, ti][0])
@pytest.mark.slowtest
@testing.requires_testing_data
def test_multiple_files(tmpdir):
"""Test loading multiple files simultaneously."""
# split file
raw = read_raw_fif(fif_fname).crop(0, 10)
raw.load_data()
raw.load_data() # test no operation
split_size = 3. # in seconds
sfreq = raw.info['sfreq']
nsamp = (raw.last_samp - raw.first_samp)
tmins = np.round(np.arange(0., nsamp, split_size * sfreq))
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp]))
tmaxs /= sfreq
tmins /= sfreq
assert raw.n_times == len(raw.times)
# going in reverse order so the last fname is the first file (need later)
raws = [None] * len(tmins)
for ri in range(len(tmins) - 1, -1, -1):
fname = tmpdir.join('test_raw_split-%d_raw.fif' % ri)
raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri])
raws[ri] = read_raw_fif(fname)
assert (len(raws[ri].times) ==
int(round((tmaxs[ri] - tmins[ri]) *
raw.info['sfreq'])) + 1) # + 1 b/c inclusive
events = [find_events(r, stim_channel='STI 014') for r in raws]
last_samps = [r.last_samp for r in raws]
first_samps = [r.first_samp for r in raws]
# test concatenation of split file
pytest.raises(ValueError, concatenate_raws, raws, True, events[1:])
all_raw_1, events1 = concatenate_raws(raws, preload=False,
events_list=events)
assert_allclose(all_raw_1.times, raw.times)
assert raw.first_samp == all_raw_1.first_samp
assert raw.last_samp == all_raw_1.last_samp
assert_allclose(raw[:, :][0], all_raw_1[:, :][0])
raws[0] = read_raw_fif(fname)
all_raw_2 = concatenate_raws(raws, preload=True)
assert_allclose(raw[:, :][0], all_raw_2[:, :][0])
# test proper event treatment for split files
events2 = concatenate_events(events, first_samps, last_samps)
events3 = find_events(all_raw_2, stim_channel='STI 014')
assert_array_equal(events1, events2)
assert_array_equal(events1, events3)
# test various methods of combining files
raw = read_raw_fif(fif_fname, preload=True)
n_times = raw.n_times
# make sure that all our data match
times = list(range(0, 2 * n_times, 999))
# add potentially problematic points
times.extend([n_times - 1, n_times, 2 * n_times - 1])
raw_combo0 = concatenate_raws([read_raw_fif(f)
for f in [fif_fname, fif_fname]],
preload=True)
_compare_combo(raw, raw_combo0, times, n_times)
raw_combo = concatenate_raws([read_raw_fif(f)
for f in [fif_fname, fif_fname]],
preload=False)
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([read_raw_fif(f)
for f in [fif_fname, fif_fname]],
preload='memmap8.dat')
_compare_combo(raw, raw_combo, times, n_times)
assert raw[:, :][0].shape[1] * 2 == raw_combo0[:, :][0].shape[1]
assert raw_combo0[:, :][0].shape[1] == raw_combo0.n_times
# with all data preloaded, result should be preloaded
raw_combo = read_raw_fif(fif_fname, preload=True)
raw_combo.append(read_raw_fif(fif_fname, preload=True))
assert (raw_combo.preload is True)
assert raw_combo.n_times == raw_combo._data.shape[1]
_compare_combo(raw, raw_combo, times, n_times)
# with any data not preloaded, don't set result as preloaded
raw_combo = concatenate_raws([read_raw_fif(fif_fname, preload=True),
read_raw_fif(fif_fname, preload=False)])
assert (raw_combo.preload is False)
assert_array_equal(find_events(raw_combo, stim_channel='STI 014'),
find_events(raw_combo0, stim_channel='STI 014'))
_compare_combo(raw, raw_combo, times, n_times)
# user should be able to force data to be preloaded upon concat
raw_combo = concatenate_raws([read_raw_fif(fif_fname, preload=False),
read_raw_fif(fif_fname, preload=True)],
preload=True)
assert (raw_combo.preload is True)
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([read_raw_fif(fif_fname, preload=False),
read_raw_fif(fif_fname, preload=True)],
preload='memmap3.dat')
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([
read_raw_fif(fif_fname, preload=True),
read_raw_fif(fif_fname, preload=True)], preload='memmap4.dat')
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([
read_raw_fif(fif_fname, preload=False),
read_raw_fif(fif_fname, preload=False)], preload='memmap5.dat')
_compare_combo(raw, raw_combo, times, n_times)
# verify that combining raws with different projectors throws an exception
raw.add_proj([], remove_existing=True)
pytest.raises(ValueError, raw.append,
read_raw_fif(fif_fname, preload=True))
# now test event treatment for concatenated raw files
events = [find_events(raw, stim_channel='STI 014'),
find_events(raw, stim_channel='STI 014')]
last_samps = [raw.last_samp, raw.last_samp]
first_samps = [raw.first_samp, raw.first_samp]
events = concatenate_events(events, first_samps, last_samps)
events2 = find_events(raw_combo0, stim_channel='STI 014')
assert_array_equal(events, events2)
# check out the len method
assert len(raw) == raw.n_times
assert len(raw) == raw.last_samp - raw.first_samp + 1
@testing.requires_testing_data
@pytest.mark.parametrize('on_mismatch', ('ignore', 'warn', 'raise'))
def test_concatenate_raws(on_mismatch):
"""Test error handling during raw concatenation."""
raw = read_raw_fif(fif_fname).crop(0, 10)
raws = [raw, raw.copy()]
raws[1].info['dev_head_t']['trans'] += 0.1
kws = dict(raws=raws, on_mismatch=on_mismatch)
if on_mismatch == 'ignore':
concatenate_raws(**kws)
elif on_mismatch == 'warn':
with pytest.warns(RuntimeWarning, match='different head positions'):
concatenate_raws(**kws)
elif on_mismatch == 'raise':
with pytest.raises(ValueError, match='different head positions'):
concatenate_raws(**kws)
@testing.requires_testing_data
@pytest.mark.parametrize('mod', (
'meg',
pytest.param('raw', marks=[pytest.mark.filterwarnings(
'ignore:.*naming conventions.*:RuntimeWarning')]),
))
def test_split_files(tmpdir, mod, monkeypatch):
"""Test writing and reading of split raw files."""
raw_1 = read_raw_fif(fif_fname, preload=True)
# Test a very close corner case
raw_crop = raw_1.copy().crop(0, 1.)
assert_allclose(raw_1.buffer_size_sec, 10., atol=1e-2) # samp rate
split_fname = tmpdir.join(f'split_raw_{mod}.fif')
# intended filenames
split_fname_elekta_part2 = tmpdir.join(f'split_raw_{mod}-1.fif')
split_fname_bids_part1 = tmpdir.join(f'split_raw_split-01_{mod}.fif')
split_fname_bids_part2 = tmpdir.join(f'split_raw_split-02_{mod}.fif')
raw_1.set_annotations(Annotations([2.], [5.5], 'test'))
# Check that if BIDS is used and no split is needed it defaults to
# simple writing without _split- entity.
raw_1.save(split_fname, split_naming='bids', verbose=True)
assert op.isfile(split_fname)
assert not op.isfile(split_fname_bids_part1)
for split_naming in ('neuromag', 'bids'):
with pytest.raises(FileExistsError, match='Destination file'):
raw_1.save(split_fname, split_naming=split_naming, verbose=True)
os.remove(split_fname)
with open(split_fname_bids_part1, 'w'):
pass
with pytest.raises(FileExistsError, match='Destination file'):
raw_1.save(split_fname, split_naming='bids', verbose=True)
assert not op.isfile(split_fname)
raw_1.save(split_fname, split_naming='neuromag', verbose=True) # okay
os.remove(split_fname)
os.remove(split_fname_bids_part1)
raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB',
verbose=True)
# check that the filenames match the intended pattern
assert op.isfile(split_fname)
assert op.isfile(split_fname_elekta_part2)
# check that filenames are being formatted correctly for BIDS
raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB',
split_naming='bids', overwrite=True, verbose=True)
assert op.isfile(split_fname_bids_part1)
assert op.isfile(split_fname_bids_part2)
annot = Annotations(np.arange(20), np.ones((20,)), 'test')
raw_1.set_annotations(annot)
split_fname = op.join(tmpdir, 'split_raw.fif')
raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB')
raw_2 = read_raw_fif(split_fname)
assert_allclose(raw_2.buffer_size_sec, 1., atol=1e-2) # samp rate
assert_allclose(raw_1.annotations.onset, raw_2.annotations.onset)
assert_allclose(raw_1.annotations.duration, raw_2.annotations.duration,
rtol=0.001 / raw_2.info['sfreq'])
assert_array_equal(raw_1.annotations.description,
raw_2.annotations.description)
data_1, times_1 = raw_1[:, :]
data_2, times_2 = raw_2[:, :]
assert_array_equal(data_1, data_2)
assert_array_equal(times_1, times_2)
raw_bids = read_raw_fif(split_fname_bids_part1)
data_bids, times_bids = raw_bids[:, :]
assert_array_equal(data_1, data_bids)
assert_array_equal(times_1, times_bids)
del raw_bids
# split missing behaviors
os.remove(split_fname_bids_part2)
with pytest.raises(ValueError, match='manually renamed'):
read_raw_fif(split_fname_bids_part1, on_split_missing='raise')
with pytest.warns(RuntimeWarning, match='Split raw file detected'):
read_raw_fif(split_fname_bids_part1, on_split_missing='warn')
read_raw_fif(split_fname_bids_part1, on_split_missing='ignore')
# test the case where we only end up with one buffer to write
# (GH#3210). These tests rely on writing meas info and annotations
# taking up a certain number of bytes, so if we change those functions
# somehow, the numbers below for e.g. split_size might need to be
# adjusted.
raw_crop = raw_1.copy().crop(0, 5)
raw_crop.set_annotations(Annotations([2.], [5.5], 'test'),
emit_warning=False)
with pytest.raises(ValueError,
match='after writing measurement information'):
raw_crop.save(split_fname, split_size='1MB', # too small a size
buffer_size_sec=1., overwrite=True)
with pytest.raises(ValueError,
match='too large for the given split size'):
raw_crop.save(split_fname,
split_size=3003000, # still too small, now after Info
buffer_size_sec=1., overwrite=True)
# just barely big enough here; the right size to write exactly one buffer
# at a time so we hit GH#3210 if we aren't careful
raw_crop.save(split_fname, split_size='4.5MB',
buffer_size_sec=1., overwrite=True)
raw_read = read_raw_fif(split_fname)
assert_allclose(raw_crop[:][0], raw_read[:][0], atol=1e-20)
# Check our buffer arithmetic
# 1 buffer required
raw_crop = raw_1.copy().crop(0, 1)
raw_crop.save(split_fname, buffer_size_sec=1., overwrite=True)
raw_read = read_raw_fif(split_fname)
assert_array_equal(np.diff(raw_read._raw_extras[0]['bounds']), (301,))
assert_allclose(raw_crop[:][0], raw_read[:][0])
# 2 buffers required
raw_crop.save(split_fname, buffer_size_sec=0.5, overwrite=True)
raw_read = read_raw_fif(split_fname)
assert_array_equal(np.diff(raw_read._raw_extras[0]['bounds']), (151, 150))
assert_allclose(raw_crop[:][0], raw_read[:][0])
# 2 buffers required
raw_crop.save(split_fname,
buffer_size_sec=1. - 1.01 / raw_crop.info['sfreq'],
overwrite=True)
raw_read = read_raw_fif(split_fname)
assert_array_equal(np.diff(raw_read._raw_extras[0]['bounds']), (300, 1))
assert_allclose(raw_crop[:][0], raw_read[:][0])
raw_crop.save(split_fname,
buffer_size_sec=1. - 2.01 / raw_crop.info['sfreq'],
overwrite=True)
raw_read = read_raw_fif(split_fname)
assert_array_equal(np.diff(raw_read._raw_extras[0]['bounds']), (299, 2))
assert_allclose(raw_crop[:][0], raw_read[:][0])
# proper ending
assert op.isdir(tmpdir)
with pytest.raises(ValueError, match='must end with an underscore'):
raw_crop.save(
tmpdir.join('test.fif'), split_naming='bids', verbose='error')
# reserved file is deleted
fname = tmpdir.join('test_raw.fif')
monkeypatch.setattr(base, '_write_raw_fid', _err)
with pytest.raises(RuntimeError, match='Killed mid-write'):
raw_1.save(fname, split_size='10MB', split_naming='bids')
assert op.isfile(fname)
assert not op.isfile(tmpdir.join('test_split-01_raw.fif'))
def _err(*args, **kwargs):
raise RuntimeError('Killed mid-write')
def _no_write_file_name(fid, kind, data):
assert kind == FIFF.FIFF_REF_FILE_NAME # the only string we actually write
return
def test_split_numbers(tmpdir, monkeypatch):
"""Test handling of split files using numbers instead of names."""
monkeypatch.setattr(base, 'write_string', _no_write_file_name)
raw = read_raw_fif(test_fif_fname).pick('eeg')
# gh-8339
dashes_fname = tmpdir.join('sub-1_ses-2_task-3_raw.fif')
raw.save(dashes_fname, split_size='5MB',
buffer_size_sec=1.)
assert op.isfile(dashes_fname)
next_fname = str(dashes_fname)[:-4] + '-1.fif'
assert op.isfile(next_fname)
raw_read = read_raw_fif(dashes_fname)
assert_allclose(raw.times, raw_read.times)
assert_allclose(raw.get_data(), raw_read.get_data(), atol=1e-16)
def test_load_bad_channels(tmpdir):
"""Test reading/writing of bad channels."""
# Load correctly marked file (manually done in mne_process_raw)
raw_marked = read_raw_fif(fif_bad_marked_fname)
correct_bads = raw_marked.info['bads']
raw = read_raw_fif(test_fif_fname)
# Make sure it starts clean
assert_array_equal(raw.info['bads'], [])
# Test normal case
raw.load_bad_channels(bad_file_works)
# Write it out, read it in, and check
raw.save(tmpdir.join('foo_raw.fif'))
raw_new = read_raw_fif(tmpdir.join('foo_raw.fif'))
assert correct_bads == raw_new.info['bads']
# Reset it
raw.info['bads'] = []
# Test bad case
pytest.raises(ValueError, raw.load_bad_channels, bad_file_wrong)
# Test forcing the bad case
with pytest.warns(RuntimeWarning, match='1 bad channel'):
raw.load_bad_channels(bad_file_wrong, force=True)
# write it out, read it in, and check
raw.save(tmpdir.join('foo_raw.fif'), overwrite=True)
raw_new = read_raw_fif(tmpdir.join('foo_raw.fif'))
assert correct_bads == raw_new.info['bads']
# Check that bad channels are cleared
raw.load_bad_channels(None)
raw.save(tmpdir.join('foo_raw.fif'), overwrite=True)
raw_new = read_raw_fif(tmpdir.join('foo_raw.fif'))
assert raw_new.info['bads'] == []
@pytest.mark.slowtest
@testing.requires_testing_data
def test_io_raw(tmpdir):
"""Test IO for raw data (Neuromag)."""
rng = np.random.RandomState(0)
# test unicode io
for chars in [u'äöé', 'a']:
with read_raw_fif(fif_fname) as r:
assert ('Raw' in repr(r))
assert (op.basename(fif_fname) in repr(r))
r.info['description'] = chars
temp_file = tmpdir.join('raw.fif')
r.save(temp_file, overwrite=True)
with read_raw_fif(temp_file) as r2:
desc2 = r2.info['description']
assert desc2 == chars
# Let's construct a simple test for IO first
raw = read_raw_fif(fif_fname).crop(0, 3.5)
raw.load_data()
# put in some data that we know the values of
data = rng.randn(raw._data.shape[0], raw._data.shape[1])
raw._data[:, :] = data
# save it somewhere
fname = tmpdir.join('test_copy_raw.fif')
raw.save(fname, buffer_size_sec=1.0)
# read it in, make sure the whole thing matches
raw = read_raw_fif(fname)
assert_allclose(data, raw[:, :][0], rtol=1e-6, atol=1e-20)
# let's read portions across the 1-sec tag boundary, too
inds = raw.time_as_index([1.75, 2.25])
sl = slice(inds[0], inds[1])
assert_allclose(data[:, sl], raw[:, sl][0], rtol=1e-6, atol=1e-20)
@pytest.mark.parametrize('fname_in, fname_out', [
(test_fif_fname, 'raw.fif'),
(test_fif_gz_fname, 'raw.fif.gz'),
(ctf_fname, 'raw.fif')])
def test_io_raw_additional(fname_in, fname_out, tmpdir):
"""Test IO for raw data (Neuromag + CTF + gz)."""
fname_out = tmpdir.join(fname_out)
raw = read_raw_fif(fname_in).crop(0, 2)
nchan = raw.info['nchan']
ch_names = raw.info['ch_names']
meg_channels_idx = [k for k in range(nchan)
if ch_names[k][0] == 'M']
n_channels = 100
meg_channels_idx = meg_channels_idx[:n_channels]
start, stop = raw.time_as_index([0, 5], use_rounding=True)
data, times = raw[meg_channels_idx, start:(stop + 1)]
meg_ch_names = [ch_names[k] for k in meg_channels_idx]
# Set up pick list: MEG + STI 014 - bad channels
include = ['STI 014']
include += meg_ch_names
picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
misc=True, ref_meg=True, include=include,
exclude='bads')
# Writing with drop_small_buffer True
raw.save(fname_out, picks, tmin=0, tmax=4, buffer_size_sec=3,
drop_small_buffer=True, overwrite=True)
raw2 = read_raw_fif(fname_out)
sel = pick_channels(raw2.ch_names, meg_ch_names)
data2, times2 = raw2[sel, :]
assert (times2.max() <= 3)
# Writing
raw.save(fname_out, picks, tmin=0, tmax=5, overwrite=True)
if fname_in in (fif_fname, fif_fname + '.gz'):
assert len(raw.info['dig']) == 146
raw2 = read_raw_fif(fname_out)
sel = pick_channels(raw2.ch_names, meg_ch_names)
data2, times2 = raw2[sel, :]
assert_allclose(data, data2, rtol=1e-6, atol=1e-20)
assert_allclose(times, times2)
assert_allclose(raw.info['sfreq'], raw2.info['sfreq'], rtol=1e-5)
# check transformations
for trans in ['dev_head_t', 'dev_ctf_t', 'ctf_head_t']:
if raw.info[trans] is None:
assert (raw2.info[trans] is None)
else:
assert_array_equal(raw.info[trans]['trans'],
raw2.info[trans]['trans'])
# check transformation 'from' and 'to'
if trans.startswith('dev'):
from_id = FIFF.FIFFV_COORD_DEVICE
else:
from_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
if trans[4:8] == 'head':
to_id = FIFF.FIFFV_COORD_HEAD
else:
to_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
for raw_ in [raw, raw2]:
assert raw_.info[trans]['from'] == from_id
assert raw_.info[trans]['to'] == to_id
if fname_in == fif_fname or fname_in == fif_fname + '.gz':
assert_allclose(raw.info['dig'][0]['r'], raw2.info['dig'][0]['r'])
# test warnings on bad filenames
raw_badname = tmpdir.join('test-bad-name.fif.gz')
with pytest.warns(RuntimeWarning, match='raw.fif'):
raw.save(raw_badname)
with pytest.warns(RuntimeWarning, match='raw.fif'):
read_raw_fif(raw_badname)
@testing.requires_testing_data
def test_io_complex(tmpdir):
"""Test IO with complex data types."""
rng = np.random.RandomState(0)
dtypes = [np.complex64, np.complex128]
raw = _test_raw_reader(partial(read_raw_fif),
fname=fif_fname)
picks = np.arange(5)
start, stop = raw.time_as_index([0, 5])
data_orig, _ = raw[picks, start:stop]
for di, dtype in enumerate(dtypes):
imag_rand = np.array(1j * rng.randn(data_orig.shape[0],
data_orig.shape[1]), dtype)
raw_cp = raw.copy()
raw_cp._data = np.array(raw_cp._data, dtype)
raw_cp._data[picks, start:stop] += imag_rand
with pytest.warns(RuntimeWarning, match='Saving .* complex data.'):
raw_cp.save(tmpdir.join('raw.fif'), picks, tmin=0, tmax=5,
overwrite=True)
raw2 = read_raw_fif(tmpdir.join('raw.fif'))
raw2_data, _ = raw2[picks, :]
n_samp = raw2_data.shape[1]
assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
# with preloading
raw2 = read_raw_fif(tmpdir.join('raw.fif'), preload=True)
raw2_data, _ = raw2[picks, :]
n_samp = raw2_data.shape[1]
assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
@testing.requires_testing_data
def test_getitem():
"""Test getitem/indexing of Raw."""
for preload in [False, True, 'memmap.dat']:
raw = read_raw_fif(fif_fname, preload=preload)
data, times = raw[0, :]
data1, times1 = raw[0]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
data, times = raw[0:2, :]
data1, times1 = raw[0:2]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
data1, times1 = raw[[0, 1]]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
assert_array_equal(raw[raw.ch_names[0]][0][0], raw[0][0][0])
assert_array_equal(
raw[-10:-1, :][0],
raw[len(raw.ch_names) - 10:len(raw.ch_names) - 1, :][0])
with pytest.raises(ValueError, match='No appropriate channels'):
raw[slice(-len(raw.ch_names) - 1), slice(None)]
with pytest.raises(ValueError, match='must be'):
raw[-1000]
@testing.requires_testing_data
def test_proj(tmpdir):
"""Test SSP proj operations."""
for proj in [True, False]:
raw = read_raw_fif(fif_fname, preload=False)
if proj:
raw.apply_proj()
assert (all(p['active'] == proj for p in raw.info['projs']))
data, times = raw[0:2, :]
data1, times1 = raw[0:2]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
# test adding / deleting proj
if proj:
pytest.raises(ValueError, raw.add_proj, [],
{'remove_existing': True})
pytest.raises(ValueError, raw.del_proj, 0)
else:
projs = deepcopy(raw.info['projs'])
n_proj = len(raw.info['projs'])
raw.del_proj(0)
assert len(raw.info['projs']) == n_proj - 1
raw.add_proj(projs, remove_existing=False)
# Test that already existing projections are not added.
assert len(raw.info['projs']) == n_proj
raw.add_proj(projs[:-1], remove_existing=True)
assert len(raw.info['projs']) == n_proj - 1
# test apply_proj() with and without preload
for preload in [True, False]:
raw = read_raw_fif(fif_fname, preload=preload)
data, times = raw[:, 0:2]
raw.apply_proj()
data_proj_1 = np.dot(raw._projector, data)
# load the file again without proj
raw = read_raw_fif(fif_fname, preload=preload)
# write the file with proj. activated, make sure proj has been applied
raw.save(tmpdir.join('raw.fif'), proj=True, overwrite=True)
raw2 = read_raw_fif(tmpdir.join('raw.fif'))
data_proj_2, _ = raw2[:, 0:2]
assert_allclose(data_proj_1, data_proj_2)
assert (all(p['active'] for p in raw2.info['projs']))
# read orig file with proj. active
raw2 = read_raw_fif(fif_fname, preload=preload)
raw2.apply_proj()
data_proj_2, _ = raw2[:, 0:2]
assert_allclose(data_proj_1, data_proj_2)
assert (all(p['active'] for p in raw2.info['projs']))
# test that apply_proj works
raw.apply_proj()
data_proj_2, _ = raw[:, 0:2]
assert_allclose(data_proj_1, data_proj_2)
assert_allclose(data_proj_2, np.dot(raw._projector, data_proj_2))
# Test that picking removes projectors ...
raw = read_raw_fif(fif_fname)
n_projs = len(raw.info['projs'])
raw.pick_types(meg=False, eeg=True)
assert len(raw.info['projs']) == n_projs - 3
# ... but only if it doesn't apply to any channels in the dataset anymore.
raw = read_raw_fif(fif_fname)
n_projs = len(raw.info['projs'])
raw.pick_types(meg='mag', eeg=True)
assert len(raw.info['projs']) == n_projs
# I/O roundtrip of an MEG projector with a Raw that only contains EEG
# data.
out_fname = tmpdir.join('test_raw.fif')
raw = read_raw_fif(test_fif_fname, preload=True).crop(0, 0.002)
proj = raw.info['projs'][-1]
raw.pick_types(meg=False, eeg=True)
raw.info['projs'] = [proj] # Restore, because picking removed it!
raw._data.fill(0)
raw._data[-1] = 1.
raw.save(out_fname)
raw = read_raw_fif(out_fname, preload=False)
raw.apply_proj()
assert_allclose(raw[:, :][0][:1], raw[0, :][0])
@testing.requires_testing_data
@pytest.mark.parametrize('preload', [False, True, 'memmap.dat'])
def test_preload_modify(preload, tmpdir):
"""Test preloading and modifying data."""
rng = np.random.RandomState(0)
raw = read_raw_fif(fif_fname, preload=preload)
nsamp = raw.last_samp - raw.first_samp + 1
picks = pick_types(raw.info, meg='grad', exclude='bads')
data = rng.randn(len(picks), nsamp // 2)
try:
raw[picks, :nsamp // 2] = data
except RuntimeError:
if not preload:
return
else:
raise
tmp_fname = tmpdir.join('raw.fif')
raw.save(tmp_fname, overwrite=True)
raw_new = read_raw_fif(tmp_fname)
data_new, _ = raw_new[picks, :nsamp // 2]
assert_allclose(data, data_new)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_filter():
"""Test filtering (FIR and IIR) and Raw.apply_function interface."""
raw = read_raw_fif(fif_fname).crop(0, 7)
raw.load_data()
sig_dec_notch = 12
sig_dec_notch_fit = 12
picks_meg = pick_types(raw.info, meg=True, exclude='bads')
picks = picks_meg[:4]
trans = 2.0
filter_params = dict(picks=picks, filter_length='auto',
h_trans_bandwidth=trans, l_trans_bandwidth=trans,
fir_design='firwin')
raw_lp = raw.copy().filter(None, 8.0, **filter_params)
raw_hp = raw.copy().filter(16.0, None, **filter_params)
raw_bp = raw.copy().filter(8.0 + trans, 16.0 - trans, **filter_params)
raw_bs = raw.copy().filter(16.0, 8.0, **filter_params)
data, _ = raw[picks, :]
lp_data, _ = raw_lp[picks, :]
hp_data, _ = raw_hp[picks, :]
bp_data, _ = raw_bp[picks, :]
bs_data, _ = raw_bs[picks, :]
tols = dict(atol=1e-20, rtol=1e-5)
assert_allclose(bs_data, lp_data + hp_data, **tols)
assert_allclose(data, lp_data + bp_data + hp_data, **tols)
assert_allclose(data, bp_data + bs_data, **tols)
filter_params_iir = dict(picks=picks, n_jobs=2, method='iir',
iir_params=dict(output='ba'))
raw_lp_iir = raw.copy().filter(None, 4.0, **filter_params_iir)
raw_hp_iir = raw.copy().filter(8.0, None, **filter_params_iir)
raw_bp_iir = raw.copy().filter(4.0, 8.0, **filter_params_iir)
del filter_params_iir
lp_data_iir, _ = raw_lp_iir[picks, :]
hp_data_iir, _ = raw_hp_iir[picks, :]
bp_data_iir, _ = raw_bp_iir[picks, :]
summation = lp_data_iir + hp_data_iir + bp_data_iir
assert_array_almost_equal(data[:, 100:-100], summation[:, 100:-100], 11)
# make sure we didn't touch other channels
data, _ = raw[picks_meg[4:], :]
bp_data, _ = raw_bp[picks_meg[4:], :]
assert_array_equal(data, bp_data)
bp_data_iir, _ = raw_bp_iir[picks_meg[4:], :]
assert_array_equal(data, bp_data_iir)
# ... and that inplace changes are inplace
raw_copy = raw.copy()
assert np.may_share_memory(raw._data, raw._data)
assert not np.may_share_memory(raw_copy._data, raw._data)
# this could be assert_array_equal but we do this to mirror the call below
assert (raw._data[0] == raw_copy._data[0]).all()
raw_copy.filter(None, 20., n_jobs=2, **filter_params)
assert not (raw._data[0] == raw_copy._data[0]).all()
assert_array_equal(raw.copy().filter(None, 20., **filter_params)._data,
raw_copy._data)
# do a very simple check on line filtering
raw_bs = raw.copy().filter(60.0 + trans, 60.0 - trans, **filter_params)
data_bs, _ = raw_bs[picks, :]
raw_notch = raw.copy().notch_filter(
60.0, picks=picks, n_jobs=2, method='fir',
trans_bandwidth=2 * trans)
data_notch, _ = raw_notch[picks, :]
assert_array_almost_equal(data_bs, data_notch, sig_dec_notch)
# now use the sinusoidal fitting
assert raw.times[-1] < 10 # catch error with filter_length > n_times
raw_notch = raw.copy().notch_filter(
None, picks=picks, n_jobs=2, method='spectrum_fit',
filter_length='10s')
data_notch, _ = raw_notch[picks, :]
data, _ = raw[picks, :]
assert_array_almost_equal(data, data_notch, sig_dec_notch_fit)
# filter should set the "lowpass" and "highpass" parameters
raw = RawArray(np.random.randn(3, 1000),
create_info(3, 1000., ['eeg'] * 2 + ['stim']))
raw.info['lowpass'] = raw.info['highpass'] = None
for kind in ('none', 'lowpass', 'highpass', 'bandpass', 'bandstop'):
print(kind)
h_freq = l_freq = None
if kind in ('lowpass', 'bandpass'):
h_freq = 70
if kind in ('highpass', 'bandpass'):
l_freq = 30
if kind == 'bandstop':
l_freq, h_freq = 70, 30
assert (raw.info['lowpass'] is None)
assert (raw.info['highpass'] is None)
kwargs = dict(l_trans_bandwidth=20, h_trans_bandwidth=20,
filter_length='auto', phase='zero', fir_design='firwin')
raw_filt = raw.copy().filter(l_freq, h_freq, picks=np.arange(1),
**kwargs)
assert (raw.info['lowpass'] is None)
assert (raw.info['highpass'] is None)
raw_filt = raw.copy().filter(l_freq, h_freq, **kwargs)
wanted_h = h_freq if kind != 'bandstop' else None
wanted_l = l_freq if kind != 'bandstop' else None
assert raw_filt.info['lowpass'] == wanted_h
assert raw_filt.info['highpass'] == wanted_l
# Using all data channels should still set the params (GH#3259)
raw_filt = raw.copy().filter(l_freq, h_freq, picks=np.arange(2),
**kwargs)
assert raw_filt.info['lowpass'] == wanted_h
assert raw_filt.info['highpass'] == wanted_l
def test_filter_picks():
"""Test filtering default channel picks."""
ch_types = ['mag', 'grad', 'eeg', 'seeg', 'dbs', 'misc', 'stim', 'ecog',
'hbo', 'hbr']
info = create_info(ch_names=ch_types, ch_types=ch_types, sfreq=256)
raw = RawArray(data=np.zeros((len(ch_types), 1000)), info=info)
# -- Deal with meg mag grad and fnirs exceptions
ch_types = ('misc', 'stim', 'meg', 'eeg', 'seeg', 'dbs', 'ecog')
# -- Filter data channels
for ch_type in ('mag', 'grad', 'eeg', 'seeg', 'dbs', 'ecog', 'hbo', 'hbr'):
picks = {ch: ch == ch_type for ch in ch_types}
picks['meg'] = ch_type if ch_type in ('mag', 'grad') else False
picks['fnirs'] = ch_type if ch_type in ('hbo', 'hbr') else False
raw_ = raw.copy().pick_types(**picks)
raw_.filter(10, 30, fir_design='firwin')
# -- Error if no data channel
for ch_type in ('misc', 'stim'):
picks = {ch: ch == ch_type for ch in ch_types}
raw_ = raw.copy().pick_types(**picks)
pytest.raises(ValueError, raw_.filter, 10, 30)
@testing.requires_testing_data
def test_crop():
"""Test cropping raw files."""
# split a concatenated file to test a difficult case
raw = concatenate_raws([read_raw_fif(f)
for f in [fif_fname, fif_fname]])
split_size = 10. # in seconds
sfreq = raw.info['sfreq']
nsamp = (raw.last_samp - raw.first_samp + 1)
# do an annoying case (off-by-one splitting)
tmins = np.r_[1., np.round(np.arange(0., nsamp - 1, split_size * sfreq))]
tmins = np.sort(tmins)
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
tmaxs /= sfreq
tmins /= sfreq
raws = [None] * len(tmins)
for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
raws[ri] = raw.copy().crop(tmin, tmax)
if ri < len(tmins) - 1:
assert_allclose(
raws[ri].times,
raw.copy().crop(tmin, tmins[ri + 1], include_tmax=False).times)
assert raws[ri]
all_raw_2 = concatenate_raws(raws, preload=False)
assert raw.first_samp == all_raw_2.first_samp
assert raw.last_samp == all_raw_2.last_samp
assert_array_equal(raw[:, :][0], all_raw_2[:, :][0])
tmins = np.round(np.arange(0., nsamp - 1, split_size * sfreq))
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
tmaxs /= sfreq
tmins /= sfreq
# going in revere order so the last fname is the first file (need it later)
raws = [None] * len(tmins)
for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
raws[ri] = raw.copy().crop(tmin, tmax)
# test concatenation of split file
all_raw_1 = concatenate_raws(raws, preload=False)
all_raw_2 = raw.copy().crop(0, None)
for ar in [all_raw_1, all_raw_2]:
assert raw.first_samp == ar.first_samp
assert raw.last_samp == ar.last_samp
assert_array_equal(raw[:, :][0], ar[:, :][0])
# test shape consistency of cropped raw
data = np.zeros((1, 1002001))
info = create_info(1, 1000)
raw = RawArray(data, info)
for tmin in range(0, 1001, 100):
raw1 = raw.copy().crop(tmin=tmin, tmax=tmin + 2)
assert raw1[:][0].shape == (1, 2001)
# degenerate
with pytest.raises(ValueError, match='No samples.*when include_tmax=Fals'):
raw.crop(0, 0, include_tmax=False)
@testing.requires_testing_data
def test_resample_equiv():
"""Test resample (with I/O and multiple files)."""
raw = read_raw_fif(fif_fname).crop(0, 1)
raw_preload = raw.copy().load_data()
for r in (raw, raw_preload):
r.resample(r.info['sfreq'] / 4.)
assert_allclose(raw._data, raw_preload._data)
@testing.requires_testing_data
@pytest.mark.parametrize('preload, n, npad', [
(True, 512, 'auto'),
(False, 512, 0),
])
def test_resample(tmpdir, preload, n, npad):
"""Test resample (with I/O and multiple files)."""
raw = read_raw_fif(fif_fname)
raw.crop(0, raw.times[n - 1])
assert len(raw.times) == n
if preload:
raw.load_data()
raw_resamp = raw.copy()
sfreq = raw.info['sfreq']
# test parallel on upsample
raw_resamp.resample(sfreq * 2, n_jobs=2, npad=npad)
assert raw_resamp.n_times == len(raw_resamp.times)
raw_resamp.save(tmpdir.join('raw_resamp-raw.fif'))
raw_resamp = read_raw_fif(tmpdir.join('raw_resamp-raw.fif'),
preload=True)
assert sfreq == raw_resamp.info['sfreq'] / 2
assert raw.n_times == raw_resamp.n_times // 2
assert raw_resamp.get_data().shape[1] == raw_resamp.n_times
assert raw.get_data().shape[0] == raw_resamp._data.shape[0]
# test non-parallel on downsample
raw_resamp.resample(sfreq, n_jobs=1, npad=npad)
assert raw_resamp.info['sfreq'] == sfreq
assert raw.get_data().shape == raw_resamp._data.shape
assert raw.first_samp == raw_resamp.first_samp
assert raw.last_samp == raw.last_samp
# upsampling then downsampling doubles resampling error, but this still
# works (hooray). Note that the stim channels had to be sub-sampled
# without filtering to be accurately preserved
# note we have to treat MEG and EEG+STIM channels differently (tols)
assert_allclose(raw.get_data()[:306, 200:-200],
raw_resamp._data[:306, 200:-200],
rtol=1e-2, atol=1e-12)
assert_allclose(raw.get_data()[306:, 200:-200],
raw_resamp._data[306:, 200:-200],
rtol=1e-2, atol=1e-7)
# now check multiple file support w/resampling, as order of operations
# (concat, resample) should not affect our data
raw1 = raw.copy()
raw2 = raw.copy()
raw3 = raw.copy()
raw4 = raw.copy()
raw1 = concatenate_raws([raw1, raw2])
raw1.resample(10., npad=npad)
raw3.resample(10., npad=npad)
raw4.resample(10., npad=npad)
raw3 = concatenate_raws([raw3, raw4])
assert_array_equal(raw1._data, raw3._data)
assert_array_equal(raw1._first_samps, raw3._first_samps)
assert_array_equal(raw1._last_samps, raw3._last_samps)
assert_array_equal(raw1._raw_lengths, raw3._raw_lengths)
assert raw1.first_samp == raw3.first_samp
assert raw1.last_samp == raw3.last_samp
assert raw1.info['sfreq'] == raw3.info['sfreq']
# smoke test crop after resample
raw4.crop(tmin=raw4.times[1], tmax=raw4.times[-1])
# test resampling of stim channel
# basic decimation
stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
assert_allclose(raw.resample(8., npad=npad)._data,
[[1, 1, 0, 0, 1, 1, 0, 0]])
# decimation of multiple stim channels
raw = RawArray(2 * [stim], create_info(2, len(stim), 2 * ['stim']))
assert_allclose(raw.resample(8., npad=npad, verbose='error')._data,
[[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0]])
# decimation that could potentially drop events if the decimation is
# done naively
stim = [0, 0, 0, 1, 1, 0, 0, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
assert_allclose(raw.resample(4., npad=npad)._data,
[[0, 1, 1, 0]])
# two events are merged in this case (warning)
stim = [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
with pytest.warns(RuntimeWarning, match='become unreliable'):
raw.resample(8., npad=npad)
# events are dropped in this case (warning)
stim = [0, 1, 1, 0, 0, 1, 1, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
with pytest.warns(RuntimeWarning, match='become unreliable'):
raw.resample(4., npad=npad)
# test resampling events: this should no longer give a warning
# we often have first_samp != 0, include it here too
stim = [0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1] # an event at end
# test is on half the sfreq, but should work with trickier ones too
o_sfreq, sfreq_ratio = len(stim), 0.5
n_sfreq = o_sfreq * sfreq_ratio
first_samp = len(stim) // 2
raw = RawArray([stim], create_info(1, o_sfreq, ['stim']),
first_samp=first_samp)
events = find_events(raw)
raw, events = raw.resample(n_sfreq, events=events, npad=npad)
# Try index into raw.times with resampled events:
raw.times[events[:, 0] - raw.first_samp]
n_fsamp = int(first_samp * sfreq_ratio) # how it's calc'd in base.py
# NB np.round used for rounding event times, which has 0.5 as corner case:
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.around.html
assert_array_equal(
events,
np.array([[np.round(1 * sfreq_ratio) + n_fsamp, 0, 1],
[np.round(10 * sfreq_ratio) + n_fsamp, 0, 1],
[np.minimum(np.round(15 * sfreq_ratio),
raw._data.shape[1] - 1) + n_fsamp, 0, 1]]))
# test copy flag
stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
raw = RawArray([stim], create_info(1, len(stim), ['stim']))
raw_resampled = raw.copy().resample(4., npad=npad)
assert (raw_resampled is not raw)
raw_resampled = raw.resample(4., npad=npad)
assert (raw_resampled is raw)
# resample should still work even when no stim channel is present
raw = RawArray(np.random.randn(1, 100), create_info(1, 100, ['eeg']))
raw.info['lowpass'] = 50.
raw.resample(10, npad=npad)
assert raw.info['lowpass'] == 5.
assert len(raw) == 10
def test_resample_stim():
"""Test stim_picks argument."""
data = np.ones((2, 1000))
info = create_info(2, 1000., ('eeg', 'misc'))
raw = RawArray(data, info)
raw.resample(500., stim_picks='misc')
@testing.requires_testing_data
def test_hilbert():
"""Test computation of analytic signal using hilbert."""
raw = read_raw_fif(fif_fname, preload=True)
picks_meg = pick_types(raw.info, meg=True, exclude='bads')
picks = picks_meg[:4]
raw_filt = raw.copy()
raw_filt.filter(10, 20, picks=picks, l_trans_bandwidth='auto',
h_trans_bandwidth='auto', filter_length='auto',
phase='zero', fir_window='blackman', fir_design='firwin')
raw_filt_2 = raw_filt.copy()
raw2 = raw.copy()
raw3 = raw.copy()
raw.apply_hilbert(picks, n_fft='auto')
raw2.apply_hilbert(picks, n_fft='auto', envelope=True)
# Test custom n_fft
raw_filt.apply_hilbert(picks, n_fft='auto')
n_fft = 2 ** int(np.ceil(np.log2(raw_filt_2.n_times + 1000)))
raw_filt_2.apply_hilbert(picks, n_fft=n_fft)
assert raw_filt._data.shape == raw_filt_2._data.shape
assert_allclose(raw_filt._data[:, 50:-50], raw_filt_2._data[:, 50:-50],
atol=1e-13, rtol=1e-2)
with pytest.raises(ValueError, match='n_fft.*must be at least the number'):
raw3.apply_hilbert(picks, n_fft=raw3.n_times - 100)
env = np.abs(raw._data[picks, :])
assert_allclose(env, raw2._data[picks, :], rtol=1e-2, atol=1e-13)
@testing.requires_testing_data
def test_raw_copy():
"""Test Raw copy."""
raw = read_raw_fif(fif_fname, preload=True)
data, _ = raw[:, :]
copied = raw.copy()
copied_data, _ = copied[:, :]
assert_array_equal(data, copied_data)
assert sorted(raw.__dict__.keys()) == sorted(copied.__dict__.keys())
raw = read_raw_fif(fif_fname, preload=False)
data, _ = raw[:, :]
copied = raw.copy()
copied_data, _ = copied[:, :]
assert_array_equal(data, copied_data)
assert sorted(raw.__dict__.keys()) == sorted(copied.__dict__.keys())
@requires_pandas
def test_to_data_frame():
"""Test raw Pandas exporter."""
from pandas import Timedelta
raw = read_raw_fif(test_fif_fname, preload=True)
_, times = raw[0, :10]
df = raw.to_data_frame(index='time')
assert ((df.columns == raw.ch_names).all())
assert_array_equal(np.round(times * 1e3), df.index.values[:10])
df = raw.to_data_frame(index=None)
assert ('time' in df.columns)
assert_array_equal(df.values[:, 1], raw._data[0] * 1e13)
assert_array_equal(df.values[:, 3], raw._data[2] * 1e15)
# test long format
df_long = raw.to_data_frame(long_format=True)
assert(len(df_long) == raw.get_data().size)
expected = ('time', 'channel', 'ch_type', 'value')
assert set(expected) == set(df_long.columns)
# test bad time format
with pytest.raises(ValueError, match='not a valid time format. Valid'):
raw.to_data_frame(time_format='foo')
# test time format error handling
raw.set_meas_date(None)
with pytest.warns(RuntimeWarning, match='Cannot convert to Datetime when'):
df = raw.to_data_frame(time_format='datetime')
assert isinstance(df['time'].iloc[0], Timedelta)
@requires_pandas
@pytest.mark.parametrize('time_format', (None, 'ms', 'timedelta', 'datetime'))
def test_to_data_frame_time_format(time_format):
"""Test time conversion in epochs Pandas exporter."""
from pandas import Timedelta, Timestamp
raw = read_raw_fif(test_fif_fname, preload=True)
# test time_format
df = raw.to_data_frame(time_format=time_format)
dtypes = {None: np.float64, 'ms': np.int64, 'timedelta': Timedelta,
'datetime': Timestamp}
assert isinstance(df['time'].iloc[0], dtypes[time_format])
def test_add_channels():
"""Test raw splitting / re-appending channel types."""
rng = np.random.RandomState(0)
raw = read_raw_fif(test_fif_fname).crop(0, 1).load_data()
raw_nopre = read_raw_fif(test_fif_fname, preload=False)
raw_eeg_meg = raw.copy().pick_types(meg=True, eeg=True)
raw_eeg = raw.copy().pick_types(meg=False, eeg=True)
raw_meg = raw.copy().pick_types(meg=True, eeg=False)
raw_stim = raw.copy().pick_types(meg=False, eeg=False, stim=True)
raw_new = raw_meg.copy().add_channels([raw_eeg, raw_stim])
assert (
all(ch in raw_new.ch_names
for ch in list(raw_stim.ch_names) + list(raw_meg.ch_names))
)
raw_new = raw_meg.copy().add_channels([raw_eeg])
assert (ch in raw_new.ch_names for ch in raw.ch_names)
assert_array_equal(raw_new[:, :][0], raw_eeg_meg[:, :][0])
assert_array_equal(raw_new[:, :][1], raw[:, :][1])
assert (all(ch not in raw_new.ch_names for ch in raw_stim.ch_names))
# Testing force updates
raw_arr_info = create_info(['1', '2'], raw_meg.info['sfreq'], 'eeg')
orig_head_t = raw_arr_info['dev_head_t']
raw_arr = rng.randn(2, raw_eeg.n_times)
raw_arr = RawArray(raw_arr, raw_arr_info)
# This should error because of conflicts in Info
raw_arr.info['dev_head_t'] = orig_head_t
with pytest.raises(ValueError, match='mutually inconsistent dev_head_t'):
raw_meg.copy().add_channels([raw_arr])
raw_meg.copy().add_channels([raw_arr], force_update_info=True)
# Make sure that values didn't get overwritten
assert_object_equal(raw_arr.info['dev_head_t'], orig_head_t)
# Make sure all variants work
for simult in (False, True): # simultaneous adding or not
raw_new = raw_meg.copy()
if simult:
raw_new.add_channels([raw_eeg, raw_stim])
else:
raw_new.add_channels([raw_eeg])
raw_new.add_channels([raw_stim])
for other in (raw_meg, raw_stim, raw_eeg):
assert_allclose(
raw_new.copy().pick_channels(other.ch_names).get_data(),
other.get_data())
# Now test errors
raw_badsf = raw_eeg.copy()
raw_badsf.info['sfreq'] = 3.1415927
raw_eeg.crop(.5)
pytest.raises(RuntimeError, raw_meg.add_channels, [raw_nopre])
pytest.raises(RuntimeError, raw_meg.add_channels, [raw_badsf])
pytest.raises(AssertionError, raw_meg.add_channels, [raw_eeg])
pytest.raises(ValueError, raw_meg.add_channels, [raw_meg])
pytest.raises(TypeError, raw_meg.add_channels, raw_badsf)
@testing.requires_testing_data
def test_save(tmpdir):
"""Test saving raw."""
temp_fname = tmpdir.join('test_raw.fif')
shutil.copyfile(fif_fname, temp_fname)
raw = read_raw_fif(temp_fname, preload=False)
# can't write over file being read
with pytest.raises(ValueError, match='to the same file'):
raw.save(temp_fname)
raw.load_data()
# can't overwrite file without overwrite=True
with pytest.raises(IOError, match='file exists'):
raw.save(fif_fname)
# test abspath support and annotations
orig_time = _dt_to_stamp(raw.info['meas_date'])[0] + raw._first_time
annot = Annotations([10], [5], ['test'], orig_time=orig_time)
raw.set_annotations(annot)
annot = raw.annotations
new_fname = tmpdir.join('break_raw.fif')
raw.save(new_fname, overwrite=True)
new_raw = read_raw_fif(new_fname, preload=False)
pytest.raises(ValueError, new_raw.save, new_fname)
assert_array_almost_equal(annot.onset, new_raw.annotations.onset)
assert_array_equal(annot.duration, new_raw.annotations.duration)
assert_array_equal(annot.description, new_raw.annotations.description)
assert annot.orig_time == new_raw.annotations.orig_time
# test set_meas_date(None)
raw.set_meas_date(None)
raw.save(new_fname, overwrite=True)
new_raw = read_raw_fif(new_fname, preload=False)
assert new_raw.info['meas_date'] is None
@testing.requires_testing_data
def test_annotation_crop(tmpdir):
"""Test annotation sync after cropping and concatenating."""
annot = Annotations([5., 11., 15.], [2., 1., 3.], ['test', 'test', 'test'])
raw = read_raw_fif(fif_fname, preload=False)
raw.set_annotations(annot)
r1 = raw.copy().crop(2.5, 7.5)
r2 = raw.copy().crop(12.5, 17.5)
r3 = raw.copy().crop(10., 12.)
raw = concatenate_raws([r1, r2, r3]) # segments reordered
assert_and_remove_boundary_annot(raw, 2)
onsets = raw.annotations.onset
durations = raw.annotations.duration
# 2*5s clips combined with annotations at 2.5s + 2s clip, annotation at 1s
assert_array_almost_equal(onsets[:3], [47.95, 52.95, 56.46], decimal=2)
assert_array_almost_equal([2., 2.5, 1.], durations[:3], decimal=2)
# test annotation clipping
orig_time = _dt_to_stamp(raw.info['meas_date'])
orig_time = orig_time[0] + orig_time[1] * 1e-6 + raw._first_time - 1.
annot = Annotations([0., raw.times[-1]], [2., 2.], 'test', orig_time)
with pytest.warns(RuntimeWarning, match='Limited .* expanding outside'):
raw.set_annotations(annot)
assert_allclose(raw.annotations.duration,
[1., 1. + 1. / raw.info['sfreq']], atol=1e-3)
# make sure we can overwrite the file we loaded when preload=True
new_fname = tmpdir.join('break_raw.fif')
raw.save(new_fname)
new_raw = read_raw_fif(new_fname, preload=True)
new_raw.save(new_fname, overwrite=True)
@testing.requires_testing_data
def test_with_statement():
"""Test with statement."""
for preload in [True, False]:
with read_raw_fif(fif_fname, preload=preload) as raw_:
print(raw_)
def test_compensation_raw(tmpdir):
"""Test Raw compensation."""
raw_3 = read_raw_fif(ctf_comp_fname)
assert raw_3.compensation_grade == 3
data_3, times = raw_3[:, :]
# data come with grade 3
for ii in range(2):
raw_3_new = raw_3.copy()
if ii == 0:
raw_3_new.load_data()
raw_3_new.apply_gradient_compensation(3)
assert raw_3_new.compensation_grade == 3
data_new, times_new = raw_3_new[:, :]
assert_array_equal(times, times_new)
assert_array_equal(data_3, data_new)
# change to grade 0
raw_0 = raw_3.copy().apply_gradient_compensation(0)
assert raw_0.compensation_grade == 0
data_0, times_new = raw_0[:, :]
assert_array_equal(times, times_new)
assert (np.mean(np.abs(data_0 - data_3)) > 1e-12)
# change to grade 1
raw_1 = raw_0.copy().apply_gradient_compensation(1)
assert raw_1.compensation_grade == 1
data_1, times_new = raw_1[:, :]
assert_array_equal(times, times_new)
assert (np.mean(np.abs(data_1 - data_3)) > 1e-12)
pytest.raises(ValueError, raw_1.apply_gradient_compensation, 33)
raw_bad = raw_0.copy()
raw_bad.add_proj(compute_proj_raw(raw_0, duration=0.5, verbose='error'))
raw_bad.apply_proj()
pytest.raises(RuntimeError, raw_bad.apply_gradient_compensation, 1)
# with preload
tols = dict(rtol=1e-12, atol=1e-25)
raw_1_new = raw_3.copy().load_data().apply_gradient_compensation(1)
assert raw_1_new.compensation_grade == 1
data_1_new, times_new = raw_1_new[:, :]
assert_array_equal(times, times_new)
assert (np.mean(np.abs(data_1_new - data_3)) > 1e-12)
assert_allclose(data_1, data_1_new, **tols)
# change back
raw_3_new = raw_1.copy().apply_gradient_compensation(3)
data_3_new, times_new = raw_3_new[:, :]
assert_allclose(data_3, data_3_new, **tols)
raw_3_new = raw_1.copy().load_data().apply_gradient_compensation(3)
data_3_new, times_new = raw_3_new[:, :]
assert_allclose(data_3, data_3_new, **tols)
for load in (False, True):
for raw in (raw_0, raw_1):
raw_3_new = raw.copy()
if load:
raw_3_new.load_data()
raw_3_new.apply_gradient_compensation(3)
assert raw_3_new.compensation_grade == 3
data_3_new, times_new = raw_3_new[:, :]
assert_array_equal(times, times_new)
assert (np.mean(np.abs(data_3_new - data_1)) > 1e-12)
assert_allclose(data_3, data_3_new, **tols)
# Try IO with compensation
temp_file = tmpdir.join('raw.fif')
raw_3.save(temp_file, overwrite=True)
for preload in (True, False):
raw_read = read_raw_fif(temp_file, preload=preload)
assert raw_read.compensation_grade == 3
data_read, times_new = raw_read[:, :]
assert_array_equal(times, times_new)
assert_allclose(data_3, data_read, **tols)
raw_read.apply_gradient_compensation(1)
data_read, times_new = raw_read[:, :]
assert_array_equal(times, times_new)
assert_allclose(data_1, data_read, **tols)
# Now save the file that has modified compensation
# and make sure the compensation is the same as it was,
# but that we can undo it
# These channels have norm 1e-11/1e-12, so atol=1e-18 isn't awesome,
# but it's due to the single precision of the info['comps'] leading
# to inexact inversions with saving/loading (casting back to single)
# in between (e.g., 1->3->1 will degrade like this)
looser_tols = dict(rtol=1e-6, atol=1e-18)
raw_1.save(temp_file, overwrite=True)
for preload in (True, False):
raw_read = read_raw_fif(temp_file, preload=preload, verbose=True)
assert raw_read.compensation_grade == 1
data_read, times_new = raw_read[:, :]
assert_array_equal(times, times_new)
assert_allclose(data_1, data_read, **looser_tols)
raw_read.apply_gradient_compensation(3, verbose=True)
data_read, times_new = raw_read[:, :]
assert_array_equal(times, times_new)
assert_allclose(data_3, data_read, **looser_tols)
@requires_mne
def test_compensation_raw_mne(tmpdir):
"""Test Raw compensation by comparing with MNE-C."""
def compensate_mne(fname, grad):
tmp_fname = tmpdir.join('mne_ctf_test_raw.fif')
cmd = ['mne_process_raw', '--raw', fname, '--save', tmp_fname,
'--grad', str(grad), '--projoff', '--filteroff']
run_subprocess(cmd)
return read_raw_fif(tmp_fname, preload=True)
for grad in [0, 2, 3]:
raw_py = read_raw_fif(ctf_comp_fname, preload=True)
raw_py.apply_gradient_compensation(grad)
raw_c = compensate_mne(ctf_comp_fname, grad)
assert_allclose(raw_py._data, raw_c._data, rtol=1e-6, atol=1e-17)
assert raw_py.info['nchan'] == raw_c.info['nchan']
for ch_py, ch_c in zip(raw_py.info['chs'], raw_c.info['chs']):
for key in ('ch_name', 'coil_type', 'scanno', 'logno', 'unit',
'coord_frame', 'kind'):
assert ch_py[key] == ch_c[key]
for key in ('loc', 'unit_mul', 'range', 'cal'):
assert_allclose(ch_py[key], ch_c[key])
@testing.requires_testing_data
def test_drop_channels_mixin():
"""Test channels-dropping functionality."""
raw = read_raw_fif(fif_fname, preload=True)
drop_ch = raw.ch_names[:3]
ch_names = raw.ch_names[3:]
ch_names_orig = raw.ch_names
dummy = raw.copy().drop_channels(drop_ch)
assert ch_names == dummy.ch_names
assert ch_names_orig == raw.ch_names
assert len(ch_names_orig) == raw._data.shape[0]
raw.drop_channels(drop_ch)
assert ch_names == raw.ch_names
assert len(ch_names) == len(raw._cals)
assert len(ch_names) == raw._data.shape[0]
# Test that dropping all channels a projector applies to will lead to the
# removal of said projector.
raw = read_raw_fif(fif_fname)
n_projs = len(raw.info['projs'])
eeg_names = raw.info['projs'][-1]['data']['col_names']
with pytest.raises(RuntimeError, match='loaded'):
raw.copy().apply_proj().drop_channels(eeg_names)
raw.load_data().drop_channels(eeg_names) # EEG proj
assert len(raw.info['projs']) == n_projs - 1
@testing.requires_testing_data
@pytest.mark.parametrize('preload', (True, False))
def test_pick_channels_mixin(preload):
"""Test channel-picking functionality."""
raw = read_raw_fif(fif_fname, preload=preload)
raw_orig = raw.copy()
ch_names = raw.ch_names[:3]
ch_names_orig = raw.ch_names
dummy = raw.copy().pick_channels(ch_names)
assert ch_names == dummy.ch_names
assert ch_names_orig == raw.ch_names
assert len(ch_names_orig) == raw.get_data().shape[0]
raw.pick_channels(ch_names) # copy is False
assert ch_names == raw.ch_names
assert len(ch_names) == len(raw._cals)
assert len(ch_names) == raw.get_data().shape[0]
with pytest.raises(ValueError, match='must be'):
raw.pick_channels(ch_names[0])
assert_allclose(raw[:][0], raw_orig[:3][0])
@testing.requires_testing_data
def test_equalize_channels():
"""Test equalization of channels."""
raw1 = read_raw_fif(fif_fname, preload=True)
raw2 = raw1.copy()
ch_names = raw1.ch_names[2:]
raw1.drop_channels(raw1.ch_names[:1])
raw2.drop_channels(raw2.ch_names[1:2])
my_comparison = [raw1, raw2]
my_comparison = equalize_channels(my_comparison)
for e in my_comparison:
assert ch_names == e.ch_names
def test_memmap(tmpdir):
"""Test some interesting memmapping cases."""
# concatenate_raw
memmaps = [tmpdir.join(str(ii)) for ii in range(3)]
raw_0 = read_raw_fif(test_fif_fname, preload=memmaps[0])
assert raw_0._data.filename == memmaps[0]
raw_1 = read_raw_fif(test_fif_fname, preload=memmaps[1])
assert raw_1._data.filename == memmaps[1]
raw_0.append(raw_1, preload=memmaps[2])
assert raw_0._data.filename == memmaps[2]
# add_channels
orig_data = raw_0[:][0]
new_ch_info = pick_info(raw_0.info, [0])
new_ch_info['chs'][0]['ch_name'] = 'foo'
new_ch_info._update_redundant()
new_data = np.linspace(0, 1, len(raw_0.times))[np.newaxis]
ch = RawArray(new_data, new_ch_info)
raw_0.add_channels([ch])
if sys.platform == 'darwin':
assert not hasattr(raw_0._data, 'filename')
else:
assert raw_0._data.filename == memmaps[2]
assert_allclose(orig_data, raw_0[:-1][0], atol=1e-7)
assert_allclose(new_data, raw_0[-1][0], atol=1e-7)
# now let's see if .copy() actually works; it does, but eventually
# we should make it optionally memmap to a new filename rather than
# create an in-memory version (filename=None)
raw_0 = read_raw_fif(test_fif_fname, preload=memmaps[0])
assert raw_0._data.filename == memmaps[0]
assert raw_0._data[:1, 3:5].all()
raw_1 = raw_0.copy()
assert isinstance(raw_1._data, np.memmap)
assert raw_1._data.filename is None
raw_0._data[:] = 0.
assert not raw_0._data.any()
assert raw_1._data[:1, 3:5].all()
# other things like drop_channels and crop work but do not use memmapping,
# eventually we might want to add support for some of these as users
# require them.
@pytest.mark.parametrize('split', (False, True))
@pytest.mark.parametrize('kind', ('file', 'bytes'))
@pytest.mark.parametrize('preload', (True, str))
def test_file_like(kind, preload, split, tmpdir):
"""Test handling with file-like objects."""
if split:
fname = tmpdir.join('test_raw.fif')
read_raw_fif(test_fif_fname).save(fname, split_size='5MB')
assert op.isfile(fname)
assert op.isfile(str(fname)[:-4] + '-1.fif')
else:
fname = test_fif_fname
if preload is str:
preload = str(tmpdir.join('memmap'))
with open(str(fname), 'rb') as file_fid:
fid = BytesIO(file_fid.read()) if kind == 'bytes' else file_fid
assert not fid.closed
assert not file_fid.closed
with pytest.raises(ValueError, match='preload must be used with file'):
read_raw_fif(fid)
assert not fid.closed
assert not file_fid.closed
# Use test_preloading=False but explicitly pass the preload type
# so that we don't bother testing preload=False
kwargs = dict(fname=fid, preload=preload, on_split_missing='ignore',
test_preloading=False, test_kwargs=False)
_test_raw_reader(read_raw_fif, **kwargs)
assert not fid.closed
assert not file_fid.closed
assert file_fid.closed
def test_str_like():
"""Test handling with str-like objects."""
fname = pathlib.Path(test_fif_fname)
raw_path = read_raw_fif(fname, preload=True)
raw_str = read_raw_fif(test_fif_fname, preload=True)
assert_allclose(raw_path._data, raw_str._data)
@pytest.mark.parametrize('fname', [
test_fif_fname,
testing._pytest_param(fif_fname),
testing._pytest_param(ms_fname),
])
def test_bad_acq(fname):
"""Test handling of acquisition errors."""
# see gh-7844
raw = read_raw_fif(fname, allow_maxshield='yes').load_data()
with open(fname, 'rb') as fid:
for ent in raw._raw_extras[0]['ent']:
fid.seek(ent.pos, 0)
tag = _read_tag_header(fid)
# hack these, others (kind, type) should be correct
tag.pos, tag.next = ent.pos, ent.next
assert tag == ent
@testing.requires_testing_data
@pytest.mark.skipif(sys.platform not in ('darwin', 'linux'),
reason='Needs proper symlinking')
def test_split_symlink(tmpdir):
"""Test split files with symlinks."""
# regression test for gh-9221
first = str(tmpdir.mkdir('first').join('test_raw.fif'))
raw = read_raw_fif(fif_fname).pick('meg').load_data()
raw.save(first, buffer_size_sec=1, split_size='10MB', verbose=True)
second = first[:-4] + '-1.fif'
assert op.isfile(second)
assert not op.isfile(first[:-4] + '-2.fif')
new_first = tmpdir.mkdir('a').join('test_raw.fif')
new_second = tmpdir.mkdir('b').join('test_raw-1.fif')
shutil.move(first, new_first)
shutil.move(second, new_second)
os.symlink(new_first, first)
os.symlink(new_second, second)
raw_new = read_raw_fif(first)
assert_allclose(raw_new.get_data(), raw.get_data())
@testing.requires_testing_data
def test_corrupted(tmpdir):
"""Test that a corrupted file can still be read."""
# Must be a file written by Neuromag, not us, since we don't write the dir
# at the end, so use the skip one (straight from acq).
raw = read_raw_fif(skip_fname)
with open(skip_fname, 'rb') as fid:
tag = read_tag_info(fid)
tag = read_tag(fid)
dirpos = int(tag.data)
assert dirpos == 12641532
fid.seek(0)
data = fid.read(dirpos)
bad_fname = tmpdir.join('test_raw.fif')
with open(bad_fname, 'wb') as fid:
fid.write(data)
with pytest.warns(RuntimeWarning, match='.*tag directory.*corrupt.*'):
raw_bad = read_raw_fif(bad_fname)
assert_allclose(raw.get_data(), raw_bad.get_data())
| bsd-3-clause |
taspinar/twitterscraper | examples/get_twitter_user_data.py | 1 | 1970 | from twitterscraper.query import query_user_info
import pandas as pd
from multiprocessing import Pool
import time
from IPython.display import display
global twitter_user_info
twitter_user_info=[]
def get_user_info(twitter_user):
"""
An example of using the query_user_info method
:param twitter_user: the twitter user to capture user data
    :return: twitter_user_data, a dictionary of twitter user data for the given handle
"""
    user_info = query_user_info(user=twitter_user)
twitter_user_data = {}
twitter_user_data["user"] = user_info.user
twitter_user_data["fullname"] = user_info.full_name
twitter_user_data["location"] = user_info.location
twitter_user_data["blog"] = user_info.blog
twitter_user_data["date_joined"] = user_info.date_joined
twitter_user_data["id"] = user_info.id
twitter_user_data["num_tweets"] = user_info.tweets
twitter_user_data["following"] = user_info.following
twitter_user_data["followers"] = user_info.followers
twitter_user_data["likes"] = user_info.likes
twitter_user_data["lists"] = user_info.lists
return twitter_user_data
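# Single-user sketch (a hypothetical call, not part of the script: it needs
# network access and assumes the profile can still be scraped; the handle is
# one of those queried in main() below):
#
#     info = get_user_info('BarackObama')
#     print(info['followers'], info['date_joined'])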
def main():
start = time.time()
users = ['Carlos_F_Enguix', 'mmtung', 'dremio', 'MongoDB', 'JenWike', 'timberners_lee','ataspinar2', 'realDonaldTrump',
'BarackObama', 'elonmusk', 'BillGates', 'BillClinton','katyperry','KimKardashian']
pool = Pool(8)
for user in pool.map(get_user_info,users):
twitter_user_info.append(user)
cols=['id','fullname','date_joined','location','blog', 'num_tweets','following','followers','likes','lists']
data_frame = pd.DataFrame(twitter_user_info, index=users, columns=cols)
data_frame.index.name = "Users"
data_frame.sort_values(by="followers", ascending=False, inplace=True, kind='quicksort', na_position='last')
elapsed = time.time() - start
print(f"Elapsed time: {elapsed}")
display(data_frame)
if __name__ == '__main__':
main() | mit |
bendudson/BOUT | tools/tokamak_grids/pyGridGen/geqdsk.py | 4 | 7335 | #!/usr/bin/env python
import re
import numpy
"""
@brief G-Eqdsk reader class
@version $Id$
Copyright © 2006-2008, Tech-X Corporation, Boulder, CO
See LICENSE file for conditions of use.
The official document describing g-eqdsk files:
http://fusion.gat.com/conferences/snowmass/working/mfe/physics/p3/equilibria/g_eqdsk_s.pdf
"""
class Geqdsk:
def __init__(self):
"""
Constructor
"""
self.data = {}
def openFile(self, filename):
"""
open geqdsk file and parse its content
"""
lines = open(filename, 'r').readlines()
# first line
m = re.search(r'^\s*(.*)\s+\d+\s+(\d+)\s+(\d+)\s*$', lines[0])
self.data['case'] = m.group(1), "Identification character string"
self.data['nw'] = int(m.group(2)), "Number of horizontal R grid points"
self.data['nh'] = int(m.group(3)), "Number of vertical Z grid points"
fltsPat = r'^\s*([ \-]\d\.\d+[Ee][\+\-]\d\d)([ \-]\d\.\d+[Ee][\+\-]\d\d)([ \-]\d\.\d+[Ee][\+\-]\d\d)([ \-]\d\.\d+[Ee][\+\-]\d\d)([ \-]\d\.\d+[Ee][\+\-]\d\d)\s*$'
# 2nd line
m = re.search(fltsPat, lines[1])
self.data['rdim'] = float(m.group(1)), "Horizontal dimension in meter of computational box"
self.data['zdim'] = float(m.group(2)), "Vertical dimension in meter of computational box"
self.data['rcentr'] = float(m.group(3)), "R in meter of vacuum toroidal magnetic field BCENTR"
self.data['rleft'] = float(m.group(4)), "Minimum R in meter of rectangular computational box"
self.data['zmid'] = float(m.group(5)), "Z of center of computational box in meter"
# 3rd line
m = re.search(fltsPat, lines[2])
self.data['rmaxis'] = float(m.group(1)), "R of magnetic axis in meter"
self.data['zmaxis'] = float(m.group(2)), "Z of magnetic axis in meter"
self.data['simag'] = float(m.group(3)), "poloidal flux at magnetic axis in Weber /rad"
self.data['sibry'] = float(m.group(4)), "poloidal flux at the plasma boundary in Weber /rad"
self.data['bcentr'] = float(m.group(5)), "Vacuum toroidal magnetic field in Tesla at RCENTR"
# 4th line
m = re.search(fltsPat, lines[3])
self.data['current'] = float(m.group(1)), "Plasma current in Ampere"
#self.data['simag'] = float(m.group(2)), ""
#self.data['rmaxis'] = float(m.group(4)), ""
# 5th line
m = re.search(fltsPat, lines[4])
#self.data['zmaxis'] = float(m.group(1)), ""
#self.data['sibry'] = float(m.group(3)), ""
# read remaining data
data = []
counter = 5
while 1:
line = lines[counter]
m = re.match(r'^\s*[ \-]\d\.\d+[Ee][\+\-]\d\d', line)
if not m: break
data += eval('[' + re.sub(r'(\d)([ \-]\d\.)', '\\1,\\2', line) + ']')
counter += 1
nw = self.data['nw'][0]
nh = self.data['nh'][0]
self.data['fpol'] = numpy.array(data[0:nw]), "Poloidal current function in m-T, F = RBT on flux grid"
self.data['pres'] = numpy.array(data[nw:2*nw]), "Plasma pressure in nt / m 2 on uniform flux grid"
self.data['ffprime'] = numpy.array(data[2*nw:3*nw]), "FF'(psi) in (mT)^2/(Weber/rad) on uniform flux grid"
self.data['pprime'] = numpy.array(data[3*nw:4*nw]), "P'(psi) in (nt/m2)/(Weber/rad) on uniform flux grid"
self.data['psirz'] = numpy.reshape( data[4*nw:4*nw+nw*nh], (nh, nw) ), "Poloidal flux in Weber / rad on the rectangular grid points"
self.data['qpsi'] = numpy.array(data[4*nw+nw*nh:5*nw+nw*nh]), "q values on uniform flux grid from axis to boundary"
line = lines[counter]
m = re.search(r'^\s*(\d+)\s+(\d+)', line)
print line
nbbbs = int(m.group(1))
limitr = int(m.group(2))
self.data['nbbbs'] = nbbbs, "Number of boundary points"
self.data['limitr'] = limitr, "Number of limiter points"
counter += 1
data = []
while 1:
line = lines[counter]
m = re.search(r'^\s*[ \-]\d\.\d+[Ee][\+\-]\d\d', line)
counter += 1
if not m: break
data += eval('[' + re.sub(r'(\d)([ \-]\d\.)', '\\1,\\2', line) + ']')
self.data['rbbbs'] = numpy.zeros( (nbbbs,), numpy.float64 ), "R of boundary points in meter"
self.data['zbbbs'] = numpy.zeros( (nbbbs,), numpy.float64 ), "Z of boundary points in meter"
for i in range(nbbbs):
self.data['rbbbs'][0][i] = data[2*i]
self.data['zbbbs'][0][i] = data[2*i + 1]
self.data['rlim'] = numpy.zeros( (limitr,), numpy.float64 ), "R of surrounding limiter contour in meter"
self.data['zlim'] = numpy.zeros( (limitr,), numpy.float64 ), "Z of surrounding limiter contour in meter"
for i in range(limitr):
            self.data['rlim'][0][i] = data[2*nbbbs + 2*i]
            self.data['zlim'][0][i] = data[2*nbbbs + 2*i + 1]
def getAll(self):
return self.data
def getAllVars(self):
return self.data.keys()
def get(self, varname):
return self.data[varname.lower()][0]
def getDescriptor(self, varname):
return self.data[varname.lower()][1]
################################
def main():
import sys
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="g-eqdsk file", default="")
parser.add_option("-a", "--all", dest="all",
help="display all variables", action="store_true",)
parser.add_option("-v", "--vars", dest="vars",
help="comma separated list of variables (use '-v \"*\"' for all)", default="*")
parser.add_option("-p", "--plot", dest="plot",
help="plot all variables", action="store_true",)
parser.add_option("-i", "--inquire", dest="inquire",
help="inquire list of variables", action="store_true",)
options, args = parser.parse_args()
if not options.filename:
parser.error("MUST provide filename (type -h for list of options)")
geq = Geqdsk()
geq.openFile(options.filename)
if options.inquire:
print geq.getAllVars()
if options.all:
print geq.getAll()
vs = geq.getAllVars()
if options.vars != '*':
vs = options.vars.split(',')
for v in vs:
print '%s: %s'% (v, str(geq.get(v)))
if options.plot:
from matplotlib import pylab
if options.vars == '*':
options.vars = geq.getAllVars()
print options.vars
else:
vs = options.vars.split(',')
options.vars = vs
xmin = geq.get('simag')
xmax = geq.get('sibry')
nx = geq.get('nw')
dx = (xmax - xmin)/float(nx - 1)
x = numpy.arange(xmin, xmin + (xmax-xmin)*(1.+1.e-6), dx)
for v in options.vars:
if v[0] != 'r' and v[0] != 'z':
data = geq.get(v)
if len(numpy.shape(data)) == 1:
pylab.figure()
pylab.plot(x, data)
pylab.xlabel('psi poloidal')
pylab.ylabel(v)
pylab.title(geq.getDescriptor(v))
# 2d plasma plot
nw = geq.get('nw')
nh = geq.get('nh')
rmin = geq.get('rleft')
rmax = rmin + geq.get('rdim')
dr = (rmax - rmin)/float(nw - 1)
zmin = geq.get('zmid') - geq.get('zdim')/2.0
zmax = geq.get('zmid') + geq.get('zdim')/2.0
dz = (zmax - zmin)/float(nh - 1)
rs = numpy.arange(rmin, rmin + (rmax-rmin)*(1.+1.e-10), dr)
zs = numpy.arange(zmin, zmin + (zmax-zmin)*(1.+1.e-10), dz)
pylab.figure()
pylab.pcolor(rs, zs, geq.get('psirz'), shading='interp')
pylab.plot(geq.get('rbbbs'), geq.get('zbbbs'), 'w-')
#pylab.plot(geq.get('rlim'), geq.get('zlim'), 'k--')
pylab.axis('image')
pylab.title('poloidal flux')
pylab.xlabel('R')
pylab.ylabel('Z')
pylab.show()
if __name__ == '__main__': main()
| gpl-3.0 |
chapmanb/bcbio-nextgen | bcbio/qc/multiqc.py | 1 | 26725 | """High level summaries of samples and programs with MultiQC.
https://github.com/ewels/MultiQC
"""
import collections
import glob
import io
import json
import mimetypes
import os
import pandas as pd
import shutil
import numpy as np
from collections import OrderedDict
import pybedtools
import six
import toolz as tz
import yaml
from bcbio import utils
from bcbio.cwl import cwlutils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.log import logger
from bcbio.provenance import do, programs
from bcbio.provenance import data as provenancedata
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.bam import ref
from bcbio.qc.qsignature import get_qsig_multiqc_files
from bcbio.structural import annotate
from bcbio.utils import walk_json
from bcbio.variation import bedutils
from bcbio.qc.variant import get_active_vcinfo
from bcbio.upload import get_all_upload_paths_from_sample
from bcbio.variation import coverage
from bcbio.chipseq import atac
def summary(*samples):
"""Summarize all quality metrics together"""
samples = list(utils.flatten(samples))
work_dir = dd.get_work_dir(samples[0])
multiqc = config_utils.get_program("multiqc", samples[0]["config"])
if not multiqc:
logger.debug("multiqc not found. Update bcbio_nextgen.py tools to fix this issue.")
out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "multiqc"))
out_data = os.path.join(out_dir, "multiqc_data")
out_file = os.path.join(out_dir, "multiqc_report.html")
file_list = os.path.join(out_dir, "list_files.txt")
work_samples = cwlutils.unpack_tarballs([utils.deepish_copy(x) for x in samples], samples[0])
work_samples = _summarize_inputs(work_samples, out_dir)
if not utils.file_exists(out_file):
with tx_tmpdir(samples[0], work_dir) as tx_out:
in_files = _get_input_files(work_samples, out_dir, tx_out)
in_files += _merge_metrics(work_samples, out_dir)
if _one_exists(in_files):
with utils.chdir(out_dir):
config_file = _create_config_file(out_dir, work_samples)
input_list_file = _create_list_file(in_files, file_list)
if dd.get_tmp_dir(samples[0]):
export_tmp = "export TMPDIR=%s && " % dd.get_tmp_dir(samples[0])
else:
export_tmp = ""
locale_export = utils.locale_export()
path_export = utils.local_path_export()
other_opts = config_utils.get_resources("multiqc", samples[0]["config"]).get("options", [])
other_opts = " ".join([str(x) for x in other_opts])
cmd = ("{path_export}{export_tmp}{locale_export} "
"{multiqc} -c {config_file} -f -l {input_list_file} {other_opts} -o {tx_out}")
do.run(cmd.format(**locals()), "Run multiqc")
if utils.file_exists(os.path.join(tx_out, "multiqc_report.html")):
shutil.move(os.path.join(tx_out, "multiqc_report.html"), out_file)
shutil.move(os.path.join(tx_out, "multiqc_data"), out_data)
samples = _group_by_sample_and_batch(samples)
if utils.file_exists(out_file) and samples:
data_files = set()
for i, data in enumerate(samples):
data_files.add(os.path.join(out_dir, "report", "metrics", dd.get_sample_name(data) + "_bcbio.txt"))
data_files.add(os.path.join(out_dir, "report", "metrics", "target_info.yaml"))
data_files.add(os.path.join(out_dir, "multiqc_config.yaml"))
[data_files.add(f) for f in glob.glob(os.path.join(out_dir, "multiqc_data", "*"))]
data_files = [f for f in data_files if f and utils.file_exists(f)]
if "summary" not in samples[0]:
samples[0]["summary"] = {}
samples[0]["summary"]["multiqc"] = {"base": out_file, "secondary": data_files}
data_json = os.path.join(out_dir, "multiqc_data", "multiqc_data.json")
data_json_final = _save_uploaded_data_json(samples, data_json, os.path.join(out_dir, "multiqc_data"))
if data_json_final:
samples[0]["summary"]["multiqc"]["secondary"].append(data_json_final)
# Prepare final file list and inputs for downstream usage
file_list_final = _save_uploaded_file_list(samples, file_list, out_dir)
if file_list_final:
samples[0]["summary"]["multiqc"]["secondary"].append(file_list_final)
if any([cwlutils.is_cwl_run(d) for d in samples]):
for indir in ["inputs", "report"]:
tarball = os.path.join(out_dir, "multiqc-%s.tar.gz" % (indir))
if not utils.file_exists(tarball):
with utils.chdir(out_dir):
cmd = ["tar", "-czvpf", tarball, indir]
do.run(cmd, "Compress multiqc inputs: %s" % indir)
samples[0]["summary"]["multiqc"]["secondary"].append(tarball)
if any([cwlutils.is_cwl_run(d) for d in samples]):
samples = _add_versions(samples)
return [[data] for data in samples]
def _add_versions(samples):
"""Add tool and data versions to the summary.
"""
samples[0]["versions"] = {"tools": programs.write_versions(samples[0]["dirs"], samples[0]["config"]),
"data": provenancedata.write_versions(samples[0]["dirs"], samples)}
return samples
def _summarize_inputs(samples, out_dir):
"""Summarize inputs for MultiQC reporting in display.
"""
logger.info("summarize target information")
if samples[0].get("analysis", "").lower() in ["variant", "variant2"]:
metrics_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics"))
samples = _merge_target_information(samples, metrics_dir)
logger.info("summarize fastqc")
out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "fastqc"))
with utils.chdir(out_dir):
_merge_fastqc(samples)
preseq_samples = [s for s in samples if tz.get_in(["config", "algorithm", "preseq"], s)]
if preseq_samples:
logger.info("summarize preseq")
out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "preseq"))
with utils.chdir(out_dir):
_merge_preseq(preseq_samples)
return samples
def _save_uploaded_data_json(samples, data_json_work, out_dir):
""" Fixes all absolute work-rooted paths to relative final-rooted paths
"""
if not utils.file_exists(data_json_work):
return None
upload_path_mapping = dict()
for sample in samples:
upload_path_mapping.update(get_all_upload_paths_from_sample(sample))
if not upload_path_mapping:
return data_json_work
with io.open(data_json_work, encoding="utf-8") as f:
data = json.load(f, object_pairs_hook=OrderedDict)
upload_base = samples[0]["upload"]["dir"]
data = walk_json(data, lambda s: _work_path_to_rel_final_path(s, upload_path_mapping, upload_base))
data_json_final = os.path.join(out_dir, "multiqc_data_final.json")
with io.open(data_json_final, "w", encoding="utf-8") as f:
json.dump(data, f, indent=4)
return data_json_final
def _save_uploaded_file_list(samples, file_list_work, out_dir):
""" Fixes all absolute work-rooted paths to relative final-rooted paths
For CWL, prepare paths relative to output directory.
"""
if not utils.file_exists(file_list_work):
return None
if any([cwlutils.is_cwl_run(d) for d in samples]):
upload_paths = []
with open(file_list_work) as f:
for p in (l.strip() for l in f.readlines() if os.path.exists(l.strip())):
if p.startswith(out_dir):
upload_paths.append(p.replace(out_dir + "/", ""))
else:
upload_path_mapping = dict()
for sample in samples:
upload_path_mapping.update(get_all_upload_paths_from_sample(sample))
if not upload_path_mapping:
return None
with open(file_list_work) as f:
paths = [l.strip() for l in f.readlines() if os.path.exists(l.strip())]
upload_paths = [p for p in [
_work_path_to_rel_final_path(path, upload_path_mapping, samples[0]["upload"]["dir"])
for path in paths
] if p]
if not upload_paths:
return None
file_list_final = os.path.join(out_dir, "list_files_final.txt")
with open(file_list_final, "w") as f:
for path in upload_paths:
f.write(path + '\n')
return file_list_final
def _work_path_to_rel_final_path(path, upload_path_mapping, upload_base_dir):
""" Check if `path` is a work-rooted path, and convert to a relative final-rooted path
"""
if not path or not isinstance(path, str):
return path
upload_path = None
# First, check in the mapping: if it's there is a direct reference and
# it's a file, we immediately return it (saves lots of iterations)
if upload_path_mapping.get(path) is not None and os.path.isfile(path):
upload_path = upload_path_mapping[path]
else:
# Not a file: check for elements in the mapping that contain
# it
paths_to_check = [key for key in upload_path_mapping
if path.startswith(key)]
if paths_to_check:
for work_path in paths_to_check:
if os.path.isdir(work_path):
final_path = upload_path_mapping[work_path]
upload_path = path.replace(work_path, final_path)
break
if upload_path is not None:
return os.path.relpath(upload_path, upload_base_dir)
else:
return None
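# Illustration of the mapping logic above (paths are hypothetical; the work
# directory is assumed to exist on disk so the isdir() branch is taken):
#   upload_path_mapping = {"/work/qc/S1": "/final/S1/qc"}
#   _work_path_to_rel_final_path("/work/qc/S1/report.txt",
#                                upload_path_mapping, "/final")
#   # -> "S1/qc/report.txt"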
def _one_exists(input_files):
"""
at least one file must exist for multiqc to run properly
"""
for f in input_files:
if os.path.exists(f):
return True
return False
def _get_input_files(samples, base_dir, tx_out_dir):
"""Retrieve input files, keyed by sample and QC method name.
Stages files into the work directory to ensure correct names for
MultiQC sample assessment when running with CWL.
"""
in_files = collections.defaultdict(list)
for data in samples:
sum_qc = tz.get_in(["summary", "qc"], data, {})
if sum_qc in [None, "None"]:
sum_qc = {}
elif isinstance(sum_qc, six.string_types):
sum_qc = {dd.get_algorithm_qc(data)[0]: sum_qc}
elif not isinstance(sum_qc, dict):
raise ValueError("Unexpected summary qc: %s" % sum_qc)
for program, pfiles in sum_qc.items():
if isinstance(pfiles, dict):
pfiles = [pfiles["base"]] + pfiles.get("secondary", [])
# CWL: presents output files as single file plus associated secondary files
elif isinstance(pfiles, six.string_types):
if os.path.exists(pfiles):
pfiles = [os.path.join(basedir, f) for basedir, subdir, filenames in os.walk(os.path.dirname(pfiles)) for f in filenames]
else:
pfiles = []
in_files[(dd.get_sample_name(data), program)].extend(pfiles)
staged_files = []
for (sample, program), files in in_files.items():
cur_dir = utils.safe_makedir(os.path.join(base_dir, "inputs", sample, program))
for f in files:
if _check_multiqc_input(f) and _is_good_file_for_multiqc(f):
if _in_temp_directory(f) or any([cwlutils.is_cwl_run(d) for d in samples]):
staged_f = os.path.join(cur_dir, os.path.basename(f))
shutil.copy(f, staged_f)
staged_files.append(staged_f)
else:
staged_files.append(f)
staged_files.extend(get_qsig_multiqc_files(samples))
# Back compatible -- to migrate to explicit specifications in input YAML
if not any([cwlutils.is_cwl_run(d) for d in samples]):
staged_files += ["trimmed", "htseq-count/*summary"]
# Add in created target_info file
if os.path.isfile(os.path.join(base_dir, "report", "metrics", "target_info.yaml")):
staged_files += [os.path.join(base_dir, "report", "metrics", "target_info.yaml")]
return sorted(list(set(staged_files)))
def _in_temp_directory(f):
return any(x.startswith("tmp") for x in f.split("/"))
def _get_batches(data):
batches = dd.get_batch(data) or dd.get_sample_name(data)
if not isinstance(batches, (list, tuple)):
batches = [batches]
return batches
def _group_by_sample_and_batch(samples):
"""Group samples split by QC method back one per sample-batch.
"""
out = collections.defaultdict(list)
for data in samples:
out[(dd.get_sample_name(data), dd.get_align_bam(data), tuple(_get_batches(data)))].append(data)
return [xs[0] for xs in out.values()]
def _create_list_file(paths, out_file):
with open(out_file, "w") as f:
for path in paths:
f.write(path + '\n')
return out_file
def _create_config_file(out_dir, samples):
"""Provide configuration file for multiqc report."""
out_file = os.path.join(out_dir, "multiqc_config.yaml")
out = {"table_columns_visible": dict()}
extra_fn_clean_trim = []
extra_fn_clean_trim.extend(["coverage.mosdepth.region.dist", "coverage.mosdepth.global.dist"])
out["extra_fn_clean_trim"] = extra_fn_clean_trim
# Avoid duplicated bcbio columns with qualimap
if any(("qualimap" in dd.get_tools_on(d) or "qualimap_full" in dd.get_tools_on(d)) for d in samples):
# Hiding metrics duplicated by Qualimap
out["table_columns_visible"]["bcbio"] = {"Average_insert_size": False}
out["table_columns_visible"]["FastQC"] = {"percent_gc": False}
# Setting up thresholds for Qualimap depth cutoff calculations, based on sample avg depths
avg_depths = [tz.get_in(["summary", "metrics", "Avg_coverage"], s) for s in samples]
avg_depths = [x for x in avg_depths if x]
# Picking all thresholds up to the highest sample average depth
thresholds = [t for t in coverage.DEPTH_THRESHOLDS if not avg_depths or t <= max(avg_depths)]
# ...plus one more
if len(thresholds) < len(coverage.DEPTH_THRESHOLDS):
thresholds.append(coverage.DEPTH_THRESHOLDS[len(thresholds)])
# Showing only thresholds surrounding any of average depths
thresholds_hidden = []
for i, t in enumerate(thresholds):
if t > 20: # Not hiding anything below 20x
if any(thresholds[i-1] <= c < thresholds[i] for c in avg_depths if c and i-1 >= 0) or \
any(thresholds[i] <= c < thresholds[i+1] for c in avg_depths if c and i+1 < len(thresholds)):
pass
else:
thresholds_hidden.append(t)
# Hide coverage unless running full qualimap, downsampled inputs are confusing
if not any(("qualimap_full" in dd.get_tools_on(d)) for d in samples):
thresholds_hidden = thresholds + thresholds_hidden
thresholds_hidden.sort()
thresholds = []
out['qualimap_config'] = {
'general_stats_coverage': [str(t) for t in thresholds],
'general_stats_coverage_hidden': [str(t) for t in thresholds_hidden]}
# Avoid confusing peddy outputs, sticking to ancestry and sex prediction
out["table_columns_visible"]["Peddy"] = {"family_id": False, "sex_het_ratio": False,
"error_sex_check": False}
# Setting the module order
module_order = []
module_order.extend([
"bcbio",
"samtools",
"goleft_indexcov",
"peddy"
])
out['bcftools'] = {'write_separate_table': True}
# if germline calling was performed:
if any("germline" in (get_active_vcinfo(s) or {}) or # tumor-only somatic with germline extraction
dd.get_phenotype(s) == "germline" or # or paired somatic with germline calling for normal
_has_bcftools_germline_stats(s) # CWL organized statistics
for s in samples):
# Split somatic and germline variant stats into separate multiqc submodules,
# with somatic going into General Stats, and germline going into a separate table:
module_order.extend([{
'bcftools': {
'name': 'Bcftools (somatic)',
'info': 'Bcftools stats for somatic variant calls only.',
'path_filters': ['*_bcftools_stats.txt'],
'custom_config': {'write_general_stats': True},
}},
{'bcftools': {
'name': 'Bcftools (germline)',
'info': 'Bcftools stats for germline variant calls only.',
'path_filters': ['*_bcftools_stats_germline.txt'],
'custom_config': {'write_general_stats': False},
}},
])
else:
module_order.append("bcftools")
module_order.extend([
"salmon",
"star",
"picard",
"qualimap",
"snpeff",
"fastqc",
"preseq",
"bismark"
])
out["module_order"] = module_order
preseq_samples = [s for s in samples if tz.get_in(["config", "algorithm", "preseq"], s)]
if preseq_samples:
out["preseq"] = _make_preseq_multiqc_config(preseq_samples)
with open(out_file, "w") as out_handle:
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
return out_file
def _has_bcftools_germline_stats(data):
"""Check for the presence of a germline stats file, CWL compatible.
"""
stats_file = tz.get_in(["summary", "qc"], data)
if isinstance(stats_file, dict):
stats_file = tz.get_in(["variants", "base"], stats_file)
if not stats_file:
stats_file = ""
return stats_file.find("bcftools_stats_germline") > 0
def _check_multiqc_input(path):
"""Check if file exists, and return empty if it doesn't"""
if utils.file_exists(path):
return path
# ## report and coverage
def _is_good_file_for_multiqc(fpath):
"""Returns False if the file is binary or image."""
# Use mimetypes to exclude binary files where possible
(ftype, encoding) = mimetypes.guess_type(fpath)
if encoding is not None:
return False
if ftype is not None and ftype.startswith('image'):
return False
return True
def _parse_disambiguate(disambiguatestatsfilename):
"""Parse disambiguation stats from given file.
"""
disambig_stats = [0, 0, 0]
with open(disambiguatestatsfilename, "r") as in_handle:
for i, line in enumerate(in_handle):
fields = line.strip().split("\t")
if i == 0:
assert fields == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs']
else:
disambig_stats = [x + int(y) for x, y in zip(disambig_stats, fields[1:])]
return disambig_stats
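# The disambiguation summary parsed above is tab-separated, e.g. (numbers
# invented for illustration):
#   sample  unique species A pairs  unique species B pairs  ambiguous pairs
#   S1      1200                    300                     45
# which would return [1200, 300, 45]; counts are summed over all data rows.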
def _add_disambiguate(sample):
# check if disambiguation was run
if "disambiguate" in sample:
if utils.file_exists(sample["disambiguate"]["summary"]):
disambigStats = _parse_disambiguate(sample["disambiguate"]["summary"])
sample["summary"]["metrics"]["Disambiguated %s reads" % str(sample["genome_build"])] = disambigStats[0]
disambigGenome = (sample["config"]["algorithm"]["disambiguate"][0]
if isinstance(sample["config"]["algorithm"]["disambiguate"], (list, tuple))
else sample["config"]["algorithm"]["disambiguate"])
sample["summary"]["metrics"]["Disambiguated %s reads" % disambigGenome] = disambigStats[1]
sample["summary"]["metrics"]["Disambiguated ambiguous reads"] = disambigStats[2]
return sample
def _add_atac(sample):
atac_metrics = atac.calculate_encode_complexity_metrics(sample)
if not atac_metrics:
return sample
sample["summary"]["metrics"] = tz.merge(atac_metrics, sample["summary"]["metrics"])
return sample
def _fix_duplicated_rate(dt):
"""Get RNA duplicated rate if exists and replace by samtools metric"""
if "Duplication_Rate_of_Mapped" in dt:
dt["Duplicates_pct"] = 100.0 * dt["Duplication_Rate_of_Mapped"]
return dt
def _merge_metrics(samples, out_dir):
"""Merge metrics from multiple QC steps
"""
logger.info("summarize metrics")
out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics"))
sample_metrics = collections.defaultdict(dict)
for s in samples:
s = _add_disambiguate(s)
s = _add_atac(s)
m = tz.get_in(['summary', 'metrics'], s)
if isinstance(m, six.string_types):
m = json.loads(m)
if m:
for me in list(m.keys()):
if isinstance(m[me], list) or isinstance(m[me], dict) or isinstance(m[me], tuple):
m.pop(me, None)
sample_metrics[dd.get_sample_name(s)].update(m)
out = []
for sample_name, m in sample_metrics.items():
sample_file = os.path.join(out_dir, "%s_bcbio.txt" % sample_name)
with file_transaction(samples[0], sample_file) as tx_out_file:
dt = pd.DataFrame(m, index=['1'])
dt.columns = [k.replace(" ", "_").replace("(", "").replace(")", "") for k in dt.columns]
dt['sample'] = sample_name
if m.get('rRNA_rate'):
dt['rRNA_rate'] = m.get('rRNA_rate')
if m.get("RiP"):
dt['RiP_pct'] = "%.3f" % (int(m.get("RiP")) / float(m.get("Total_reads", 1)) * 100)
dt = _fix_duplicated_rate(dt)
dt.transpose().to_csv(tx_out_file, sep="\t", header=False)
out.append(sample_file)
return out
def _merge_fastqc(samples):
"""
merge all fastqc samples into one by module
"""
fastqc_list = collections.defaultdict(list)
seen = set()
for data in samples:
name = dd.get_sample_name(data)
if name in seen:
continue
seen.add(name)
fns = glob.glob(os.path.join(dd.get_work_dir(data), "qc", dd.get_sample_name(data), "fastqc") + "/*")
for fn in fns:
if fn.endswith("tsv"):
metric = os.path.basename(fn)
fastqc_list[metric].append([name, fn])
for metric in fastqc_list:
dt_by_sample = []
for fn in fastqc_list[metric]:
dt = pd.read_csv(fn[1], sep="\t")
dt['sample'] = fn[0]
dt_by_sample.append(dt)
dt = utils.rbind(dt_by_sample)
        dt.to_csv(metric, sep="\t", index=False, mode='w')
return samples
def _merge_preseq(samples):
metrics = [utils.get_in(s, ("summary", "metrics")) for s in samples]
real_counts_file = os.path.abspath(os.path.join("preseq_real_counts.txt"))
with file_transaction(samples[0], real_counts_file) as tx_out_file:
with open(tx_out_file, "w") as f:
for s, m in zip(samples, metrics):
line = dd.get_sample_name(s) + "\t" + str(m["Preseq_read_count"])
if m.get("Preseq_unique_count") is not None:
line += "\t" + str(m["Preseq_unique_count"])
line += "\n"
f.write(line)
samples[0]["summary"]["qc"]["preseq"]["secondary"] = [real_counts_file]
def _make_preseq_multiqc_config(samples):
metrics = [utils.get_in(s, ("summary", "metrics")) for s in samples]
out = {"read_length": float(np.median([m["Preseq_read_length"] for m in metrics]))}
genome_sizes = list(set(m["Preseq_genome_size"] for m in metrics))
if len(genome_sizes) == 1:
out["genome_size"] = genome_sizes[0]
return out
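# Example of the resulting MultiQC preseq configuration (values invented):
#   {"read_length": 100.0, "genome_size": 3099922541}
# "genome_size" is only set when every sample reports the same value.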
def _merge_target_information(samples, metrics_dir):
out_file = os.path.abspath(os.path.join(metrics_dir, "target_info.yaml"))
if utils.file_exists(out_file):
return samples
genomes = set(dd.get_genome_build(data) for data in samples)
coverage_beds = set(dd.get_coverage(data) for data in samples)
original_variant_regions = set(dd.get_variant_regions_orig(data) for data in samples)
data = samples[0]
info = {}
# Reporting in MultiQC only if the genome is the same across all samples
if len(genomes) == 1:
info["genome_info"] = {
"name": dd.get_genome_build(data),
"size": sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])]),
}
# Reporting in MultiQC only if the target is the same across all samples
vcr_orig = None
if len(original_variant_regions) == 1 and list(original_variant_regions)[0] is not None:
vcr_orig = list(original_variant_regions)[0]
vcr_clean = bedutils.clean_file(vcr_orig, data)
info["variants_regions_info"] = {
"bed": vcr_orig,
"size": sum(len(x) for x in pybedtools.BedTool(dd.get_variant_regions_merged(data))),
"regions": pybedtools.BedTool(vcr_clean).count(),
}
gene_num = annotate.count_genes(vcr_clean, data)
if gene_num is not None:
info["variants_regions_info"]["genes"] = gene_num
else:
info["variants_regions_info"] = {
"bed": "callable regions",
}
# Reporting in MultiQC only if the target is the same across samples
if len(coverage_beds) == 1:
cov_bed = list(coverage_beds)[0]
if cov_bed not in [None, "None"]:
if vcr_orig and vcr_orig == cov_bed:
info["coverage_bed_info"] = info["variants_regions_info"]
else:
clean_bed = bedutils.clean_file(cov_bed, data, prefix="cov-", simple=True)
info["coverage_bed_info"] = {
"bed": cov_bed,
"size": pybedtools.BedTool(cov_bed).total_coverage(),
"regions": pybedtools.BedTool(clean_bed).count(),
}
gene_num = annotate.count_genes(clean_bed, data)
if gene_num is not None:
info["coverage_bed_info"]["genes"] = gene_num
else:
info["coverage_bed_info"] = info["variants_regions_info"]
coverage_intervals = set(data["config"]["algorithm"]["coverage_interval"] for data in samples)
if len(coverage_intervals) == 1:
info["coverage_interval"] = list(coverage_intervals)[0]
if info:
with open(out_file, "w") as out_handle:
yaml.safe_dump(info, out_handle)
return samples
| mit |
GoogleCloudPlatform/ai-platform-samples | training/pytorch/structured/custom_containers/base/trainer/inputs.py | 1 | 5007 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from google.cloud import storage
import pandas as pd
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.utils.data import random_split
from torch.utils.data.sampler import SubsetRandomSampler
import metadata
class CSVDataset(Dataset):
def __init__(self, args, csv_files, transform=None):
"""
Args:
args: arguments passed to the python script
csv_files (list): Path to the list of csv files with annotations.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.dataframe = None
for csv_file in csv_files:
if self.dataframe is None:
self.dataframe = pd.read_csv(csv_file, header=0)
else:
self.dataframe = pd.concat(
[self.dataframe, pd.read_csv(csv_file, header=0)])
self.transform = transform
# Convert the categorical columns in place to a numerical category
# Example: Payment_Type =
# ['Credit Card' 'Cash' 'No Charge' 'Dispute' 'Unknown']
# Converted: Payment_Type = [0, 1, 2, 3, 4]
if args.embed_categorical_columns:
for category in metadata.CATEGORICAL_COLUMNS:
self.dataframe[category].replace(
{val: i for i, val in enumerate(
self.dataframe[category].unique())},
inplace=True)
def __len__(self):
return len(self.dataframe)
def __getitem__(self, idx):
# When retrieving an item from the dataset, get the features and the
# target. In this template, the target is 0th column and the features
# are all the other columns.
features = self.dataframe.iloc[idx, 1:].values
target = self.dataframe.iloc[idx, :1].values
# Load the data as a tensor
item = {'features': torch.from_numpy(features), 'target': target}
if self.transform:
item = self.transform(item)
return item
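# Standalone sketch of the dataset above (the csv file name and the args
# object are hypothetical; any object exposing embed_categorical_columns and
# the columns listed in metadata.py will do):
#
#   class _Args(object):
#       embed_categorical_columns = True
#   ds = CSVDataset(_Args(), ['train.csv'])
#   item = ds[0]   # {'features': torch tensor, 'target': numpy array}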
def load_data(args):
"""Loads the data into three different data loaders. (Train, Test, Evaluation)
Split the training dataset into a train / test dataset.
Args:
args: arguments passed to the python script
"""
train_dataset = CSVDataset(args, args.train_files)
eval_dataset = CSVDataset(args, args.eval_files)
# Determine the size of the dataset and the train/test sets
dataset_size = len(train_dataset)
test_size = int(args.test_split * dataset_size)
train_size = dataset_size - test_size
# Use random_split to get the split indices for the train/test set
train_dataset, test_dataset = random_split(train_dataset,
[train_size, test_size])
# Use the subset random sampler for the dataloader to know which
# parts of the dataset belong to the train/test set
# Note: use `tolist()` to convert the indices tensor to a list or
# enumerating over the DataLoader will fail.
train_sampler = SubsetRandomSampler(train_dataset.indices)
test_sampler = SubsetRandomSampler(test_dataset.indices)
# Create the data loaders with the train/test sets.
train_loader = DataLoader(
train_dataset.dataset,
batch_size=args.batch_size,
sampler=train_sampler)
test_loader = DataLoader(
test_dataset.dataset,
batch_size=args.batch_size,
sampler=test_sampler)
# Create data loader with the eval set
eval_loader = DataLoader(
eval_dataset,
batch_size=args.batch_size)
return train_loader, test_loader, eval_loader
def save_model(args):
"""Saves the model to Google Cloud Storage
Args:
args: contains name for saved model.
"""
scheme = 'gs://'
bucket_name = args.job_dir[len(scheme):].split('/')[0]
prefix = '{}{}/'.format(scheme, bucket_name)
bucket_path = args.job_dir[len(prefix):].rstrip('/')
datetime_ = datetime.datetime.now().strftime('model_%Y%m%d_%H%M%S')
if bucket_path:
model_path = '{}/{}/{}'.format(bucket_path, datetime_, args.model_name)
else:
model_path = '{}/{}'.format(datetime_, args.model_name)
bucket = storage.Client().bucket(bucket_name)
blob = bucket.blob(model_path)
blob.upload_from_filename(args.model_name)
| apache-2.0 |
dhruve/spark | python/pyspark/sql/context.py | 11 | 23848 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
if sys.version >= '3':
basestring = unicode = str
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import IntegerType, Row, StringType
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext", "UDFRegistration"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
A SQLContext can be used create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sparkSession: The :class:`SparkSession` around which this SQLContext wraps.
:param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
"""
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
"""Creates a new SQLContext.
>>> from datetime import datetime
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession.builder.getOrCreate()
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if SQLContext._instantiatedContext is None:
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@classmethod
@since(1.6)
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
@since(1.6)
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self.sparkSession.newSession())
@since(1.3)
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
@ignore_unicode_prefix
@since(1.3)
def getConf(self, key, defaultValue=None):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is not None, return
defaultValue. If the key is not set and defaultValue is None, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
@since("1.3.1")
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
return UDFRegistration(self)
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
@ignore_unicode_prefix
@since(1.2)
def registerFunction(self, name, f, returnType=StringType()):
"""Registers a python function (including lambda function) as a UDF
so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
When the return type is not given it default to a string and conversion will automatically
be done. For any other return type, the produced object must match the specified type.
:param name: name of the UDF
:param f: python function
:param returnType: a :class:`pyspark.sql.types.DataType` object
:return: a wrapped :class:`UserDefinedFunction`
>>> strlen = sqlContext.registerFunction("stringLengthString", lambda x: len(x))
>>> sqlContext.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)=u'4')]
>>> sqlContext.sql("SELECT 'foo' AS text").select(strlen("text")).collect()
[Row(stringLengthString(text)=u'3')]
>>> from pyspark.sql.types import IntegerType
>>> _ = sqlContext.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
>>> from pyspark.sql.types import IntegerType
>>> _ = sqlContext.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
"""
return self.sparkSession.catalog.registerFunction(name, f, returnType)
@ignore_unicode_prefix
@since(2.1)
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""Register a java UDF so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
When the return type is not specified we would infer it via reflection.
:param name: name of the UDF
:param javaClassName: fully qualified name of java class
:param returnType: a :class:`pyspark.sql.types.DataType` object
>>> sqlContext.registerJavaFunction("javaStringLength",
... "test.org.apache.spark.sql.JavaStringLength", IntegerType())
>>> sqlContext.sql("SELECT javaStringLength('test')").collect()
[Row(UDF(test)=4)]
>>> sqlContext.registerJavaFunction("javaStringLength2",
... "test.org.apache.spark.sql.JavaStringLength")
>>> sqlContext.sql("SELECT javaStringLength2('test')").collect()
[Row(UDF(test)=4)]
"""
jdt = None
if returnType is not None:
jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json())
self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
@since(1.3)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
If schema inference is needed, ``samplingRatio`` is used to determined the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
datatype string after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
@since(1.6)
def dropTempTable(self, tableName):
""" Remove the temp table from catalog.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
@since(1.3)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
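        # Usage sketch (not a doctest; the table name, path, and format below are
        # hypothetical, shown only to illustrate the call signature documented above):
        #   people = sqlContext.createExternalTable("people",
        #                                           path="/data/people.parquet",
        #                                           source="parquet")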
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return self.sparkSession.sql(sqlQuery)
@since(1.0)
def table(self, tableName):
"""Returns the specified table or view as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
        Among the returned DataFrame's columns are ``tableName`` and ``isTemporary``
        (a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(database=u'', tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
        :param dbName: string, name of the database to use. Defaults to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
@since(1.4)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Evolving.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
:param sparkContext: The SparkContext to wrap.
:param jhiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
.. note:: Deprecated in 2.0.0. Use SparkSession.builder.enableHiveSupport().getOrCreate().
"""
def __init__(self, sparkContext, jhiveContext=None):
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
DeprecationWarning)
if jhiveContext is None:
sparkSession = SparkSession.builder.enableHiveSupport().getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
        you may end up launching multiple derby instances and encounter incredibly
confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
class UDFRegistration(object):
"""Wrapper for user-defined function registration."""
def __init__(self, sqlContext):
self.sqlContext = sqlContext
def register(self, name, f, returnType=StringType()):
return self.sqlContext.registerFunction(name, f, returnType)
register.__doc__ = SQLContext.registerFunction.__doc__
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
'"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", '
'"field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
astocko/agpy | agpy/__init__.py | 5 | 1642 | """
====
agpy
====
The functions included below are the 'mature' codes from the agpy package.
.. moduleauthor:: Adam Ginsburg <[email protected]>
"""
##from luminosity import luminosity
#import readcol as readcol_mod
from readcol import readcol
##from UCHIIfitter import HIIregion
import gaussfitter
from gaussfitter import moments,twodgaussian,gaussfit,onedgaussian,onedgaussfit
#import kdist
from kdist import kdist,vector_kdist
#from plfit import plfit
from reg_gal2cel import gal2cel
from posang import posang
#import densitymap
#import downsample as downsample_mod
from AG_image_tools import downsample,downsample_cube
#from asinh_norm import AsinhNorm
#import showspec # imports matplotlib = BAD
from contributed import parallel_map
from timer import print_timing
from region_photometry import region_photometry
from region_photometry_files import region_photometry_files
from PCA_tools import efuncs,pca_subtract,unpca_subtract,smooth_waterfall
import constants
import blackbody
import AG_fft_tools
from AG_fft_tools import *
import AG_image_tools
from AG_image_tools import *
import cutout
import get_cutouts
import pymc_plotting
import imf
import montage_wrapper as montage
from __version__ import __version__
# import all of the functions but not the modules...
__all__ = ['readcol', 'gaussfitter', 'kdist', 'reg_gal2cel', 'posang',
'densitymap', 'downsample', 'correlate2d', 'psds', 'convolve', 'radialprofile',
'constants','gal2cel', 'convolve', 'smooth', 'azimuthalAverage',
'azimuthalAverageBins', 'kdist', 'vector_kdist', 'moments', 'twodgaussian',
'gaussfit', 'onedgaussian', 'onedgaussfit']
| mit |
reedessick/populations | plot.py | 1 | 23650 | #!/usr/bin/python
usage = """plots.py [--options] data.pkl"""
description = """ulot/analyze the results from main.py """
author = "R. Essick"
import os
import pickle
import numpy as np
import analytics
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot as plt
from optparse import OptionParser
#=================================================
figwidth = 15
figheight = 8
axpos = [0.15, 0.15, 0.7, 0.8]
axpos1 = [0.15, 0.15, 0.35, 0.8]
axpos2 = [0.50, 0.15, 0.35, 0.8]
#=================================================
parser=OptionParser(usage=usage, description=description)
parser.add_option("-v", "--verbose", default=False, action="store_true")
parser.add_option("-g", "--grid", default=False, action="store_true")
parser.add_option("", "--individual-tau", default=False, action="store_true")
parser.add_option("", "--max-tau", default=np.infty, type="float")
parser.add_option("-t", "--tag", default="", type="string")
parser.add_option("-o", "--output-dir", default="./", type="string")
parser.add_option("", "--continuity-correction", default=False, action="store_true", help="apply a continuity correction in Gaussian approximation for scatter plots.")
opts, args = parser.parse_args()
#========================
if len(args) != 1:
raise ValueError("please supply exactly 1 argument")
datafilename = args[0]
if opts.continuity_correction:
opts.tag = "cc_%s"%opts.tag
if opts.tag:
opts.tag = "_%s"%opts.tag
if not os.path.exists(opts.output_dir):
os.makedirs(opts.output_dir)
#=================================================
if opts.verbose:
print "===================================================="
print " loading data from %s"%(datafilename)
print "===================================================="
file_obj = open(datafilename, "r")
params = pickle.load(file_obj)
data = pickle.load(file_obj)
file_obj.close()
Ndata = len(data)
### read off taus
### assumes that we have at least one trial
taus = np.array( sorted([key for key in data[0].keys() if isinstance(key, (int,float)) and key <= opts.max_tau]) )
### compute expected rates
dur = params["dur"]
rateS = params["rateS"]
rateA = params["rateA"]
rateB = params["rateB"]
#rateC = {}
#rateCp = {}
#rateCm = {}
#for tau in taus:
# rateC[tau] = rateS + 2*tau*(rateA*rateB + rateS*(rateA+rateB))
# rateCp[tau] = 2*tau*(rateA+rateS)*(rateB+rateS)
# rateCm[tau] = 2*tau*rateA*rateB*(1 - (1-np.exp(-2*tau*rateA)) - (1-np.exp(-2*tau*rateS)))*(1 - (1-np.exp(-2*tau*rateB)) - (1-np.exp(-2*tau*rateS)))
### number of slides
Rs = dict( (tau, data[0][tau]["slideDur"]/dur) for tau in taus )
#=================================================
### plot stuff
if opts.verbose:
print "===================================================="
print " generating plots"
print "===================================================="
#=======================
# basic sanity plots
#=======================
if opts.verbose:
print "\tbasic sanity plots"
figS = plt.figure(figsize=(figwidth,figheight))
ax1S = figS.add_axes(axpos1)
ax2S = figS.add_axes(axpos2)
figA = plt.figure(figsize=(figwidth,figheight))
ax1A = figA.add_axes(axpos1)
ax2A = figA.add_axes(axpos2)
figB = plt.figure(figsize=(figwidth,figheight))
ax1B = figB.add_axes(axpos1)
ax2B = figB.add_axes(axpos2)
nbins = max(Ndata/10, 1)
### plot
ax1S.hist([len(datum["S"]) for datum in data], nbins, histtype="step")
ax2S.hist([(len(datum["S"]) - analytics.d(rateS, dur))/analytics.d(rateS, dur)**0.5 for datum in data], nbins, histtype="step")
ax1A.hist([len(datum["A"]) for datum in data], nbins, histtype="step")
ax2A.hist([(len(datum["A"]) - analytics.d(rateA, dur))/analytics.d(rateA, dur)**0.5 for datum in data], nbins, histtype="step")
ax1B.hist([len(datum["B"]) for datum in data], nbins, histtype="step")
ax2B.hist([(len(datum["B"]) - analytics.d(rateB, dur))/analytics.d(rateB, dur)**0.5 for datum in data], nbins, histtype="step")
### label
ax1S.set_xlabel("No. S")
ax2S.set_xlabel("$z_S$")
ax1A.set_xlabel("No. A")
ax2A.set_xlabel("$z_A$")
ax1B.set_xlabel("No. B")
ax2B.set_xlabel("$z_B$")
ax1S.set_ylabel("count")
ax2S.set_ylabel("count")
ax2S.yaxis.tick_right()
ax2S.yaxis.set_label_position("right")
ax1A.set_ylabel("count")
ax2A.set_ylabel("count")
ax2A.yaxis.tick_right()
ax2A.yaxis.set_label_position("right")
ax1B.set_ylabel("count")
ax2B.set_ylabel("count")
ax2B.yaxis.tick_right()
ax2B.yaxis.set_label_position("right")
### decorate
ax1S.grid(opts.grid)
ax2S.grid(opts.grid)
ax1A.grid(opts.grid)
ax2A.grid(opts.grid)
ax1B.grid(opts.grid)
ax2B.grid(opts.grid)
### save
figname = "%s/S%s.png"%(opts.output_dir, opts.tag)
if opts.verbose:
print "\t\t", figname
figS.savefig(figname)
plt.close(figS)
figname = "%s/A%s.png"%(opts.output_dir, opts.tag)
if opts.verbose:
print "\t\t", figname
figA.savefig(figname)
plt.close(figA)
figname = "%s/B%s.png"%(opts.output_dir, opts.tag)
if opts.verbose:
print "\t\t", figname
figB.savefig(figname)
plt.close(figB)
#========================
# counts as a function of tau
#========================
if opts.verbose:
print "\tcounts as a function of tau"
figC = plt.figure() ### rate of zero-lag coincs
axC = figC.add_axes(axpos)
figCp = plt.figure() ### rate of slide-coincs with zero-lag
axCp = figCp.add_axes(axpos)
figCm = plt.figure() ### rate of slide-coincs wiout zero-lag
axCm = figCm.add_axes(axpos)
### plot observed data
for datum in data:
axC.plot( taus, [datum[tau]["num_C"]/dur for tau in taus], '.-')
axCp.plot( taus, [datum[tau]["num_Cp"]/datum[tau]["slideDur"] for tau in taus], '.-' )
axCm.plot( taus, [datum[tau]["num_Cm"]/datum[tau]["slideDur"] for tau in taus], '.-' )
### plot expected values as dashed line
fine_taus = np.linspace(taus[0], taus[-1], 1001)
#axC.plot( fine_taus, rateS + 2*fine_taus*rateA*rateB, color='k', linestyle=":")
#axC.plot( fine_taus, rateS + 2*fine_taus*(rateA*rateB + rateS*rateB + rateA*rateS), color='k', linestyle="--")
axC.plot( fine_taus, analytics.Nc(rateA, rateB, rateS, fine_taus), color='k', linestyle="--")
#axCp.plot( fine_taus, 2*fine_taus*(rateA+rateS)*(rateB+rateS), color='k', linestyle="--")
axCp.plot( fine_taus, analytics.Np(rateA, rateB, rateS, fine_taus), color='k', linestyle="--")
#rate_accident = 2*fine_taus*rateA*rateB
#axCm.plot( fine_taus, 2*fine_taus*(rateA-rate_accident)*(rateB-rate_accident), color='k', linestyle="--")
axCm.plot( fine_taus, analytics.Nm(rateA, rateB, rateS, fine_taus), color='k', linestyle="--")
### label
axC.set_xlabel("$\\tau$")
axC.set_ylabel("rate of zero-lag coincidences")
axCp.set_xlabel("$\\tau$")
axCp.set_ylabel("rate of coincs in slides with zero-lag coincs")
axCm.set_xlabel("$\\tau$")
axCm.set_ylabel("rate of coincs in slides without zero-lag coincs")
### decorate
axC.grid(opts.grid)
axCp.grid(opts.grid)
axCm.grid(opts.grid)
### save
figname = "%s/C-tau%s.png"%(opts.output_dir, opts.tag)
if opts.verbose:
print "\t\t", figname
figC.savefig(figname)
plt.close(figC)
figname = "%s/Cp-tau%s.png"%(opts.output_dir, opts.tag)
if opts.verbose:
print "\t\t", figname
figCp.savefig(figname)
plt.close(figCp)
figname = "%s/Cm-tau%s.png"%(opts.output_dir, opts.tag)
if opts.verbose:
print "\t\t", figname
figCm.savefig(figname)
plt.close(figCm)
#========================
# averages
#========================
if opts.verbose:
print "\taverage counts as a function of tau"
figC = plt.figure() ### rate of zero-lag coincs
#axC = figC.add_axes(axpos1)
#axCv = figC.add_axes(axpos2)
axC = figC.add_axes(axpos)
axCv = axC.twinx()
figCp = plt.figure() ### rate of slide-coincs with zero-lag
#axCp = figCp.add_axes(axpos1)
#axCpv = figCp.add_axes(axpos2)
axCp = figCp.add_axes(axpos)
axCpv = axCp.twinx()
figCm = plt.figure() ### rate of slide-coincs wiout zero-lag
#axCm = figCm.add_axes(axpos1)
#axCmv = figCm.add_axes(axpos2)
axCm = figCm.add_axes(axpos)
axCmv = axCm.twinx()
### plot observed data
means = np.array( [np.mean([datum[tau]["num_C"]/dur for datum in data]) for tau in taus] )
#print np.polyfit(taus, means, 0)[-1]
stdvs = np.array( [np.std([datum[tau]["num_C"]/dur for datum in data]) for tau in taus] )
#print np.polyfit(taus, stdvs/taus, 3)
axC.plot( taus, means, ".-", color="b")
axC.fill_between( taus, means-stdvs, means+stdvs, color="b", alpha=0.25)
axCv.plot( taus, stdvs, ".-", color="r")
means = np.array( [np.mean([datum[tau]["num_Cp"]/datum[tau]["slideDur"] for datum in data]) for tau in taus] )
#print np.polyfit(taus, means/taus, 0)[-1]
stdvs = np.array( [np.std([datum[tau]["num_Cp"]/datum[tau]["slideDur"] for datum in data]) for tau in taus] )
#print np.polyfit(taus, stdvs/taus, 3)
axCp.plot( taus, means, ".-", color="b")
axCp.fill_between( taus, means-stdvs, means+stdvs, color="b", alpha=0.25)
axCpv.plot( taus, stdvs, ".-", color="r")
means = np.array( [np.mean([datum[tau]["num_Cm"]/datum[tau]["slideDur"] for datum in data]) for tau in taus] )
#print np.polyfit(taus, means/taus, 3)[-1]
stdvs = np.array( [np.std([datum[tau]["num_Cm"]/datum[tau]["slideDur"] for datum in data]) for tau in taus] )
#print np.polyfit(taus, stdvs/taus, 3)
axCm.plot( taus, means, ".-", color="b")
axCm.fill_between( taus, means-stdvs, means+stdvs, color="b", alpha=0.25)
axCmv.plot( taus, stdvs, ".-", color="r")
### plot expected values as dashed line
fine_taus = np.linspace(taus[0], taus[-1], 1001)
ylim = axC.get_ylim()
axC.plot( fine_taus, analytics.Nc(rateA, rateB, rateS, fine_taus), color='k', linestyle="--")
axC.plot( fine_taus, analytics.Nc(rateA, rateB, 0.0, fine_taus), color='grey', linestyle=':')
axC.set_ylim(ylim)
ylim = axCv.get_ylim()
var = analytics.Vcc(rateA, rateB, rateS, fine_taus)/dur
axCv.plot( fine_taus, var**0.5, color='k', linestyle='-.')
axCv.set_ylim(ylim)
ylim = axCp.get_ylim()
axCp.plot( fine_taus, analytics.Np(rateA, rateB, rateS, fine_taus), color='k', linestyle="--")
axCp.plot( fine_taus, analytics.Np(rateA, rateB, 0.0, fine_taus), color='grey', linestyle=':')
axCp.set_ylim(ylim)
ylim = axCpv.get_ylim()
var = np.array( [analytics.Vpp(rateA, rateB, rateS, tau, T=1.0, R=data[0][tau]["slideDur"]/dur)/(dur*(data[0][tau]["slideDur"]/dur)**2) for tau in taus] )
axCpv.plot( taus, var**0.5, color='k', linestyle="-.")
axCpv.set_ylim(ylim)
ylim = axCm.get_ylim()
axCm.plot( fine_taus, analytics.Nm(rateA, rateB, rateS, fine_taus), color='k', linestyle='--')
axCm.plot( fine_taus, analytics.Nm(rateA, rateB, 0.0, fine_taus), color='grey', linestyle=':')
axCm.set_ylim(ylim)
ylim = axCm.get_ylim()
var = np.array( [analytics.Vmm(rateA, rateB, rateS, tau, T=1.0, R=data[0][tau]["slideDur"]/dur)/(dur*(data[0][tau]["slideDur"]/dur)**2) for tau in taus] )
axCmv.plot( taus, var**0.5, color='k', linestyle="-.")
axCm.set_ylim(ylim)
### label
axC.set_xlabel("$\\tau$")
axC.set_ylabel("$\lambda_C$", color='b')
#axCv.set_xlabel("$\\tau$")
axCv.set_ylabel("$\sigma_{\lambda_C}$", color='r')
axCv.yaxis.tick_right()
axCv.yaxis.set_label_position("right")
axCp.set_xlabel("$\\tau$")
axCp.set_ylabel("$\lambda_+$", color='b')
#axCpv.set_xlabel("$\\tau$")
axCpv.set_ylabel("$\sigma_{\lambda_+}$", color='r')
axCpv.yaxis.tick_right()
axCpv.yaxis.set_label_position("right")
axCm.set_xlabel("$\\tau$")
axCm.set_ylabel("$\lambda_-$", color='b')
#axCmv.set_xlabel("$\\tau$")
axCmv.set_ylabel("$\sigma_{\lambda_-}$", color='r')
axCmv.yaxis.tick_right()
axCmv.yaxis.set_label_position("right")
### decorate
axC.grid(opts.grid)
axCp.grid(opts.grid)
axCm.grid(opts.grid)
axCv.grid(opts.grid)
axCpv.grid(opts.grid)
axCmv.grid(opts.grid)
#axC.set_yscale('log')
#axC.set_xscale('log')
#axCv.set_yscale('log')
#axCv.set_xscale('log')
#axCp.set_yscale('log')
#axCp.set_xscale('log')
#axCpv.set_yscale('log')
#axCpv.set_xscale('log')
#axCm.set_yscale('log')
#axCm.set_ylim(ymin=2*1e-3*0.1*0.1, ymax=0.020)
#axCm.set_xscale('log')
#axCmv.set_yscale('log')
#axCmv.set_xscale('log')
### save
figname = "%s/aveC-tau%s.png"%(opts.output_dir, opts.tag)
if opts.verbose:
print "\t\t", figname
figC.savefig(figname)
plt.close(figC)
figname = "%s/aveCp-tau%s.png"%(opts.output_dir, opts.tag)
if opts.verbose:
print "\t\t", figname
figCp.savefig(figname)
plt.close(figCp)
figname = "%s/aveCm-tau%s.png"%(opts.output_dir, opts.tag)
if opts.verbose:
print "\t\t", figname
figCm.savefig(figname)
plt.close(figCm)
#========================
# scatter plots
#========================
if opts.verbose:
print "\tscatter plots"
extractors = [ ("d_A", lambda x, tau: len(x["A"])+len(x["S"])),\
("d_B", lambda x, tau: len(x["B"])+len(x["S"])),\
("N_c",lambda x, tau: x[tau]["num_C"]),\
("N_+",lambda x, tau: x[tau]["num_Cp"]),\
("N_-",lambda x, tau: x[tau]["num_Cm"])\
]
means = { \
"d_A": lambda rA, rB, rS, t, T, R: analytics.dA(rA, rS, T=T),\
"d_B": lambda rA, rB, rS, t, T, R: analytics.dB(rB, rS, T=T),\
"N_c":lambda rA, rB, rS, t, T, R: analytics.Nc(rA, rB, rS, t, T=T),\
"N_+":lambda rA, rB, rS, t, T, R: analytics.Np(rA, rB, rS, t, T=T, R=R),\
"N_-":lambda rA, rB, rS, t, T, R: analytics.Nm(rA, rB, rS, t, T=T, R=R)\
}
variances = { \
"d_A": lambda rA, rB, rS, t, T, R: analytics.Vaa(rA, rS, T=T),\
"d_B": lambda rA, rB, rS, t, T, R: analytics.Vbb(rB, rS, T=T),\
"N_c":lambda rA, rB, rS, t, T, R: analytics.Vcc(rA, rB, rS, t, T=T),\
"N_+":lambda rA, rB, rS, t, T, R: analytics.Vpp(rA, rB, rS, t, T=T, R=R),\
"N_-":lambda rA, rB, rS, t, T, R: analytics.Vmm(rA, rB, rS, t, T=T, R=R)\
}
covariances = { \
("d_A","d_B") : lambda rA, rB, rS, t, T, R: analytics.Vab(rS, T=T),\
("d_A","N_c") : lambda rA, rB, rS, t, T, R: analytics.Vac(rA, rB, rS, t, T=T),\
("d_A","N_+") : lambda rA, rB, rS, t, T, R: analytics.Vap(rA, rB, rS, t, T=T, R=R),\
("d_A","N_-") : lambda rA, rB, rS, t, T, R: analytics.Vam(rA, rB, rS, t, T=T, R=R),\
("d_B","N_c") : lambda rA, rB, rS, t, T, R: analytics.Vbc(rA, rB, rS, t, T=T),\
("d_B","N_+") : lambda rA, rB, rS, t, T, R: analytics.Vbp(rA, rB, rS, t, T=T, R=R),\
("d_B","N_-") : lambda rA, rB, rS, t, T, R: analytics.Vbm(rA, rB, rS, t, T=T, R=R),\
("N_c","N_+"): lambda rA, rB, rS, t, T, R: analytics.Vcp(rA, rB, rS, t, T=T, R=R),\
("N_c","N_-"): lambda rA, rB, rS, t, T, R: analytics.Vcm(rA, rB, rS, t, T=T, R=R),\
("N_+","N_-"): lambda rA, rB, rS, t, T, R: analytics.Vpm(rA, rB, rS, t, T=T, R=R) \
}
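# For each pair of the statistics defined in `extractors` (d_A, d_B, N_c, N_+, N_-),
# the loop below overlays the analytic bivariate-Gaussian prediction -- built from
# the means, variances, and covariances defined above -- on a scatter of the
# simulated values, once per coincidence window tau.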
for tau in taus:
R = Rs[tau] ### number of slides
fig = plt.figure(figsize=(10, 10))
for i, (l, e) in enumerate(extractors[:-1]):
d = np.array([e(datum, tau) for datum in data])
# compute expected means and correlations
m = means[l](rateA, rateB, rateS, tau, dur, R)
v = variances[l](rateA, rateB, rateS, tau, dur, R)
set_label = True
for I, (L, E) in enumerate(extractors[i+1:]):
D = np.array([E(datum, tau) for datum in data])
# compute expected means and correlations
M = means[L](rateA, rateB, rateS, tau, dur, R)
V = variances[L](rateA, rateB, rateS, tau, dur, R)
c = covariances[(l,L)](rateA, rateB, rateS, tau, dur, R)
p = c/(V*v)**0.5 ### correlation coefficient
#===========================================================================
### generate single figure with projected histograms
#===========================================================================
fig1 = plt.figure()
axa = fig1.add_axes([0.12, 0.1, 0.6, 0.6])
axD = fig1.add_axes([0.12, 0.7, 0.6, 0.25])
axd = fig1.add_axes([0.72, 0.1, 0.25, 0.6])
axa.plot(D, d, marker='o', markeredgecolor='none', markerfacecolor='k', markersize=2, linestyle='none', alpha=0.5)
### add contours
npts = 1001
xlim = axa.get_xlim()
xlim = np.max(np.abs(np.array(xlim)-M))
xlim = (M-xlim, 0.999*(M+xlim))
ylim = axa.get_ylim()
ylim = np.max(np.abs(np.array(ylim)-m))
ylim = (m-ylim, 0.999*(m+ylim))
X, Y = np.meshgrid( np.linspace(xlim[0], xlim[1], npts), np.linspace(ylim[0], ylim[1], npts) )
if opts.continuity_correction:
                Z = ((2*np.pi)**2*(V*v-c**2))**-0.5 * np.exp( -0.5*( (X-M+0.5)**2/V + (Y-m+0.5)**2/v - 2*c*(X-M+0.5)*(Y-m+0.5)/(V*v) )/(1-p**2) )
else:
                Z = ((2*np.pi)**2*(V*v-c**2))**-0.5 * np.exp( -0.5*( (X-M)**2/V + (Y-m)**2/v - 2*c*(X-M)*(Y-m)/(V*v) )/(1-p**2) )
axa.contour(X, Y, Z)
### plot projected histograms
nbins = min(max(len(D)/10, 5), xlim[1]-xlim[0])
N, _, _ = axD.hist( D, nbins, histtype="step", color='k', normed=True)
x = np.linspace(xlim[0], xlim[1], npts)
if opts.continuity_correction:
z = (2*np.pi*V)**-0.5 * np.exp( -0.5*(x-M+0.5)**2/V )
else:
z = (2*np.pi*V)**-0.5 * np.exp( -0.5*(x-M)**2/V )
axD.plot( x, z, color='b' )
nbins = min(max(len(d)/10, 5), ylim[1]-ylim[0])
n, _, _ = axd.hist( d, nbins, histtype="step", color='k', normed=True, orientation='horizontal')
y = np.linspace(ylim[0], ylim[1], npts)
if opts.continuity_correction:
z = (2*np.pi*v)**-0.5 * np.exp( -0.5*(y-m+0.5)**2/v )
else:
z = (2*np.pi*v)**-0.5 * np.exp( -0.5*(y-m)**2/v )
axd.plot( z, y, color='b' )
axa.set_xlabel("$%s$"%L)
axa.set_ylabel("$%s$"%l)
axD.set_ylabel("$p(%s)$"%L)
plt.setp(axD.get_xticklabels(), visible=False)
axd.set_xlabel("$p(%s)$"%l)
plt.setp(axd.get_yticklabels(), visible=False)
axa.set_xlim(xlim)
axa.set_ylim(ylim)
axD.set_xlim(xlim)
axD.set_ylim(ymin=0, ymax=1.1*max(N))
axd.set_ylim(ylim)
axd.set_xlim(xmin=0, xmax=1.1*max(n))
axa.grid(opts.grid)
axD.grid(opts.grid)
axd.grid(opts.grid)
figname = "%s/scatter_%s-%s-tau=%.5f%s.png"%(opts.output_dir, l, L, tau, opts.tag)
if opts.verbose:
print "\t\t", figname
fig1.savefig(figname)
plt.close(fig1)
#===========================================================================
### add to "corner figure
#===========================================================================
ax = plt.subplot(4, 4, 5*i+I+1)
# ax.plot(D-M, d-m, marker='o', markeredgecolor='k', markerfacecolor='none', markersize=2, linestyle='none')
ax.plot(D-M, d-m, marker='o', markeredgecolor='none', markerfacecolor='k', markersize=2, linestyle='none', alpha=0.5)
### set up contour sample points
npts = 1001
xlim = np.max(np.abs(ax.get_xlim()))
ylim = np.max(np.abs(ax.get_ylim()))
X, Y = np.meshgrid( np.linspace(-xlim, xlim, npts), np.linspace(-ylim, ylim, npts) )
### compute gaussian distribution
if opts.continuity_correction: ### apply a continuity correction derived for binomial distributions to the Gaussian approximations. Z(X,Y) -> Z(X+0.5,Y+0.5)
                Z = ((2*np.pi)**2*(V*v-c**2))**-0.5 * np.exp( -0.5*( (X+0.5)**2/V + (Y+0.5)**2/v - 2*c*(X+0.5)*(Y+0.5)/(V*v) )/(1-p**2) )
else:
                Z = ((2*np.pi)**2*(V*v-c**2))**-0.5 * np.exp( -0.5*( X**2/V + Y**2/v - 2*c*X*Y/(V*v) )/(1-p**2) )
### plot contours for gaussian distribution
ax.contour(X, Y, Z)
### decorate
if set_label:
ax.set_xlabel("$%s - \left<%s\\right>$"%(L,L))
else:
plt.setp(ax.xaxis.get_ticklabels(), visible=False)
plt.setp(ax.yaxis.get_ticklabels(), visible=False)
set_label = False
ax.grid(opts.grid)
ax.set_xlim(-xlim, xlim)
ax.set_ylim(-ylim, ylim)
ax.set_ylabel("$%s - \left<%s\\right>$"%(l,l))
ax.yaxis.set_label_position("right")
ax.yaxis.tick_right()
plt.subplots_adjust(left=0.1, right=0.9, bottom=0.1, top=0.9, wspace=0.05, hspace=0.05)
figname = "%s/scatter-tau=%.5f%s.png"%(opts.output_dir, tau, opts.tag)
if opts.verbose:
print "\n\t\t", figname, "\n"
fig.savefig(figname)
plt.close(fig)
#========================
# plots separately for each tau
#========================
if opts.individual_tau:
if opts.verbose:
print "\tplots for each tau separately"
### iterating over tau
for tauNo, tau in enumerate(taus):
if opts.verbose:
print "\t============================================"
print "\t tau %d / %d -> %.3f"%(tauNo+1, len(taus), tau)
print "\t============================================"
### pull out expected rates
        # the rateC/rateCp/rateCm dicts above are commented out, so use the
        # analytic expected rates, matching the dashed curves plotted earlier
        rC = analytics.Nc(rateA, rateB, rateS, tau)
        rCp = analytics.Np(rateA, rateB, rateS, tau)
        rCm = analytics.Nm(rateA, rateB, rateS, tau)
### HISTOGRAMS
if opts.verbose:
print "\tHistograms"
nbins = max(Ndata/10, 1)
### define axes
figC = plt.figure(figsize=(figwidth,figheight))
ax1C = figC.add_axes(axpos1)
ax2C = figC.add_axes(axpos2)
figCp = plt.figure(figsize=(figwidth,figheight))
ax1Cp = figCp.add_axes(axpos1)
ax2Cp = figCp.add_axes(axpos2)
figCm = plt.figure(figsize=(figwidth,figheight))
ax1Cm = figCm.add_axes(axpos1)
ax2Cm = figCm.add_axes(axpos2)
### histogram
ax1C.hist([datum[tau]["num_C"] for datum in data], nbins, histtype="step")
ax2C.hist([(datum[tau]["num_C"] - rC*dur)/(rC*dur)**0.5 for datum in data], nbins, histtype="step")
ax1Cp.hist([datum[tau]["num_Cp"] for datum in data], nbins, histtype="step")
ax2Cp.hist([(datum[tau]["num_Cp"] - rCp*datum[tau]["slideDur"])/(rCp*datum[tau]["slideDur"])**0.5 for datum in data], nbins, histtype="step")
ax1Cm.hist([datum[tau]["num_Cm"] for datum in data], nbins, histtype="step")
ax2Cm.hist([(datum[tau]["num_Cm"] - rCm*datum[tau]["slideDur"])/(rCm*datum[tau]["slideDur"])**0.5 for datum in data], nbins, histtype="step")
### label
ax1C.set_xlabel("No. C")
ax2C.set_xlabel("$z_C$")
ax1Cp.set_xlabel("No. C$_+$")
ax2Cp.set_xlabel("$z_{C_+}$")
ax1Cm.set_xlabel("No. C$_-$")
ax2Cm.set_xlabel("$z_{C_-}$")
ax1C.set_ylabel("count")
ax2C.set_ylabel("count")
ax2C.yaxis.tick_right()
ax2C.yaxis.set_label_position("right")
ax1Cp.set_ylabel("count")
ax2Cp.set_ylabel("count")
ax2Cp.yaxis.tick_right()
ax2Cp.yaxis.set_label_position("right")
ax1Cm.set_ylabel("count")
ax2Cm.set_ylabel("count")
ax2Cm.yaxis.tick_right()
ax2Cm.yaxis.set_label_position("right")
### decorate
ax1C.grid(opts.grid)
ax2C.grid(opts.grid)
ax1Cp.grid(opts.grid)
ax2Cp.grid(opts.grid)
ax1Cm.grid(opts.grid)
ax2Cm.grid(opts.grid)
### save
figname = "%s/C-tau_%.5f%s.png"%(opts.output_dir, tau, opts.tag)
if opts.verbose:
print "\t\t", figname
figC.savefig(figname)
plt.close(figC)
figname = "%s/Cp-tau_%.5f%s.png"%(opts.output_dir, tau, opts.tag)
if opts.verbose:
print "\t\t", figname
figCp.savefig(figname)
plt.close(figCp)
figname = "%s/Cm-tau_%.5f%s.png"%(opts.output_dir, tau, opts.tag)
if opts.verbose:
print "\t\t", figname
figCm.savefig(figname)
plt.close(figCm)
### SCATTER PLOTS
if opts.verbose:
print "\tScatter Plots"
keys = "num_C num_Cp num_Cm".split()
### against individual streams
for key1 in "S A B".split():
val1 = [len(datum[key1]) for datum in data]
for key2 in keys:
val2 = [datum[tau][key2] for datum in data]
fig = plt.figure()
ax = fig.add_axes(axpos)
### plot
ax.plot(val1, val2, marker="o", linestyle="none")
### label
ax.set_xlabel(key1)
ax.set_ylabel("%s tau=%.5f"%(key2, tau))
### decorate
ax.grid(opts.grid)
### save
figname = "%s/%s-%s_tau-%.5f%s.png"%(opts.output_dir, key1, key2, tau, opts.tag)
if opts.verbose:
print "\t\t", figname
fig.savefig(figname)
plt.close(fig)
### pairs of coincs
for ind1, key1 in enumerate(keys[:-1]):
val1 = [datum[tau][key1] for datum in data]
for key2 in keys[ind1+1:]:
val2 = [datum[tau][key2] for datum in data]
fig = plt.figure()
ax = fig.add_axes(axpos)
### plot
ax.plot(val1, val2, marker="o", markerfacecolor="none", linestyle="none")
### label
ax.set_xlabel("%s tau=%.5f"%(key1, tau))
ax.set_ylabel("%s tau=%.5f"%(key2, tau))
### decorate
ax.grid(opts.grid)
### save
figname = "%s/%s_tau-%.5f-%s_tau-%.5f%s.png"%(opts.output_dir, key1, tau, key2, tau, opts.tag)
if opts.verbose:
print "\t\t", figname
fig.savefig(figname)
| gpl-2.0 |
erramuzpe/C-PAC | CPAC/utils/create_fsl_model.py | 3 | 62821 | import os
import sys
import numpy as np
import csv
import yaml
def create_pheno_dict(gpa_fsl_yml):
def read_phenotypic(pheno_file, ev_selections, subject_id_label):
import csv
import numpy as np
ph = pheno_file
# Read in the phenotypic CSV file into a dictionary named pheno_dict
# while preserving the header fields as they correspond to the data
p_reader = csv.DictReader(open(os.path.abspath(ph), 'rU'), skipinitialspace=True)
#pheno_dict_list = []
# dictionary to store the data in a format Patsy can use
# i.e. a dictionary where each header is a key, and the value is a
# list of all of that header's values
pheno_data_dict = {}
for line in p_reader:
# here, each instance of 'line' is really a dictionary where the
# keys are the pheno headers, and their values are the values of
# each EV for that one subject - each iteration of this loop is
# one subject
for val in line.values():
# if there are any blank values in the pheno row, skip this
# row. if not, continue on with the "else" clause
if val == "":
break
else:
for key in line.keys():
# if there are blank entries because of an empty row in
# the CSV (such as ",,,,,"), move on to the next entry
if len(line[key]) == 0:
continue
if key not in pheno_data_dict.keys():
pheno_data_dict[key] = []
# create a list within one of the dictionary values for that
# EV if it is categorical; formats this list into a form
# Patsy can understand regarding categoricals:
# example: { ADHD: ['adhd1', 'adhd1', 'adhd0', 'adhd1'] }
# instead of just [1, 1, 0, 1], etc.
if 'categorical' in ev_selections.keys():
if key in ev_selections['categorical']:
pheno_data_dict[key].append(key + str(line[key]))
elif key == subject_id_label:
pheno_data_dict[key].append(line[key])
else:
pheno_data_dict[key].append(float(line[key]))
elif key == subject_id_label:
pheno_data_dict[key].append(line[key])
else:
pheno_data_dict[key].append(float(line[key]))
# this needs to run after each list in each key has been fully
# populated above
for key in pheno_data_dict.keys():
# demean the EVs marked for demeaning
if 'demean' in ev_selections.keys():
if key in ev_selections['demean']:
new_demeaned_evs = []
mean_evs = 0.0
# populate a dictionary, a key for each demeanable EV, with
# the value being the sum of all the values (which need to be
# converted to float first)
for val in pheno_data_dict[key]:
mean_evs += float(val)
# calculate the mean of the current EV in this loop
mean_evs = mean_evs / len(pheno_data_dict[key])
# remove the EV's mean from each value of this EV
# (demean it!)
for val in pheno_data_dict[key]:
new_demeaned_evs.append(float(val) - mean_evs)
# replace
pheno_data_dict[key] = new_demeaned_evs
# converts non-categorical EV lists into NumPy arrays
# so that Patsy may read them in properly
if 'categorical' in ev_selections.keys():
if key not in ev_selections['categorical']:
pheno_data_dict[key] = np.array(pheno_data_dict[key])
return pheno_data_dict
# pheno_data_dict gets loaded with the phenotypic data, in a dictionary
# formatted for proper use with Patsy
pheno_data_dict = read_phenotypic(gpa_fsl_yml.pheno_file, gpa_fsl_yml.ev_selections, gpa_fsl_yml.subject_id_label)
return pheno_data_dict
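# Illustrative sketch of what create_pheno_dict() returns (the subject IDs and EV
# names below are hypothetical): each phenotype header becomes a key whose value
# lists that EV across subjects -- categorical EVs as string-coded levels, other
# EVs as (optionally demeaned) floats converted to NumPy arrays, with the subject
# ID column stored as an array as the rest of this module expects.
#
#   pheno_data_dict = {
#       'subject_id': np.array(['sub001', 'sub002', 'sub003']),
#       'Diagnosis':  ['Diagnosis0', 'Diagnosis1', 'Diagnosis0'],   # categorical
#       'Age':        np.array([-2.0, 0.5, 1.5]),                   # numeric, demeaned
#   }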
def check_multicollinearity(matrix):
U, s, V = np.linalg.svd(matrix)
max_singular = np.max(s)
min_singular = np.min(s)
print "Max singular: ", max_singular
print "Min singular: ", min_singular
print "Rank: ", np.linalg.matrix_rank(matrix), "\n"
if min_singular == 0:
return 1
else:
condition_number = float(max_singular)/float(min_singular)
print "Condition number: %f\n\n" % condition_number
if condition_number > 30:
return 1
return 0
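# Minimal usage sketch for check_multicollinearity(); the 4x3 design matrix below
# is hypothetical, and its third column is an exact multiple of the second, so the
# smallest singular value (nearly) vanishes and the function flags collinearity by
# returning 1. This helper is purely illustrative and not used elsewhere.
def _example_check_multicollinearity():
    example_design = np.array([[1.0, 2.0, 4.0],
                               [1.0, 3.0, 6.0],
                               [1.0, 4.0, 8.0],
                               [1.0, 5.0, 10.0]])
    # prints the singular values / condition number and returns 1 (collinear)
    return check_multicollinearity(example_design)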
def create_mat_file(data, col_names, model_name, current_output, output_dir):
"""
create the .mat file
inputs:
data = NumPy matrix of the design matrix
col_names = a list of strings with the design matrix header labels
model_name = a string containing the name of the group analysis model,
this is entered by the user either in the GUI or the
group analysis yaml config file
output_dir = output directory for group analysis outputs, also set by the
user
output:
writes the .mat file to disk for FLAMEO's use later
"""
dimx = None
dimy = None
if len(data.shape) == 1:
dimy = 1
dimx = data.shape[0]
else:
dimx, dimy = data.shape
ppstring = '/PPheights'
for i in range(0, dimy):
ppstring += '\t' + '%1.5e' %(1.0)
ppstring += '\n'
f = open(os.path.join(output_dir, "model_files", current_output, model_name + '.mat'), 'w')
print >>f, '/NumWaves\t%d' %dimy
print >>f, '/NumPoints\t%d' %dimx
print >>f, ppstring
# print labels for the columns - mainly for double-checking your model
col_string = '\n'
for col in col_names:
col_string = col_string + col + '\t'
print >>f, col_string, '\n'
print >>f, '/Matrix'
np.savetxt(f, data, fmt='%1.5e', delimiter='\t')
f.close()
def test_create_mat_file():
"""
unit test for create_mat_file()
"""
"""
TO-DO:
- find a better way to handle picking the output directory
- decide if all of the inputs should be hand-specified like below, or
if they should be taken from the outputs of other unit tests?
- can unit tests have multiple asserts? if so, run an assert for
whether or not the output file successfully opens ("open_mat" below)
"""
import os
import numpy as np
# set test inputs
sub1 = [1.001, 2.001, 3.001, 4.001]
sub2 = [1.002, 2.002, 3.002, 4.002]
sub3 = [1.003, 2.003, 3.003, 4.003]
sub4 = [1.004, 2.004, 3.004, 4.004]
data = [sub1,sub2,sub3,sub4]
data = np.array(data, dtype=np.float16)
col_names = ["EV_1","EV_2","EV_3","EV_4"]
current_dir = os.getcwd()
current_output = "Test_Output"
model_name = "Test_Model"
output_dir = os.path.join(current_dir, "test_model_output")
    # create_mat_file() writes into <output_dir>/model_files/<current_output>,
    # so build that full path up front (os.makedirs creates intermediate dirs)
    model_files_dir = os.path.join(output_dir, "model_files", current_output)
    if not os.path.isdir(model_files_dir):
        os.makedirs(model_files_dir)
# set test output
correct_output_lines = ['/NumWaves\t4\n', '/NumPoints\t4\n', \
'/PPheights\t1.00000e+00\t1.00000e+00\t1.00000e+00\t1.00000e+00\n', \
'\n', '\n', 'EV_1\tEV_2\tEV_3\tEV_4\n', '\n', '/Matrix\n', \
'1.00100e+00\t2.00100e+00\t3.00100e+00\t4.00100e+00\n',
'1.00200e+00\t2.00200e+00\t3.00200e+00\t4.00200e+00\n',
'1.00300e+00\t2.00300e+00\t3.00300e+00\t4.00300e+00\n',
'1.00400e+00\t2.00400e+00\t3.00400e+00\t4.00400e+00\n']
# run the function
create_mat_file(data, col_names, model_name, current_output, output_dir)
# open the file
open_mat = open(os.path.join(output_dir, "model_files", current_output, model_name + ".mat"),"rb")
output_mat = open_mat.readlines()
err_count = 0
for line, correct_line in zip(output_mat, correct_output_lines):
if line != correct_line:
err_count += 1
assert err_count == 0
def create_grp_file(data, model_name, gp_var, current_output, output_dir):
"""
    create the .grp file (FSL group/variance-group membership file); all subjects
    share a single group unless gp_var assigns them to separate groups
"""
dimx = None
dimy = None
if len(data.shape) == 1:
dimy = 1
dimx = data.shape[0]
else:
dimx, dimy = data.shape
data = np.ones(dimx)
if not (gp_var == None):
i = 1
for key in sorted(gp_var.keys()):
for index in gp_var[key]:
data[index] = i
i += 1
f = open(os.path.join(output_dir, "model_files", current_output, model_name + '.grp'), 'w')
print >>f, '/NumWaves\t1'
print >>f, '/NumPoints\t%d\n' %dimx
print >>f, '/Matrix'
np.savetxt(f, data, fmt='%d', delimiter='\t')
f.close()
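# Illustrative sketch of how gp_var drives the matrix written above (group labels
# and indices are hypothetical): with four subjects and
#   gp_var = {'gp_0': [0, 1], 'gp_1': [2, 3]}
# the keys are visited in sorted order, so subjects 0 and 1 fall in variance group 1
# and subjects 2 and 3 in variance group 2; with gp_var=None every subject stays in
# group 1 (an all-ones column).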
def test_create_grp_file():
"""
unit test for create_grp_file()
"""
"""
TO-DO:
- find a better way to handle picking the output directory
- decide if all of the inputs should be hand-specified like below, or
if they should be taken from the outputs of other unit tests?
- can unit tests have multiple asserts? if so, run an assert for
whether or not the output file successfully opens ("open_grp" below)
"""
import os
import numpy as np
# set test inputs
sub1 = [1.001, 2.001, 3.001, 4.001]
sub2 = [1.002, 2.002, 3.002, 4.002]
sub3 = [1.003, 2.003, 3.003, 4.003]
sub4 = [1.004, 2.004, 3.004, 4.004]
data = [sub1,sub2,sub3,sub4]
data = np.array(data, dtype=np.float16)
current_dir = os.getcwd()
model_name = "Test_Model"
current_output = "Test_Output"
output_dir = os.path.join(current_dir, "test_model_output")
    # create_grp_file() writes into <output_dir>/model_files/<current_output>,
    # so build that full path up front (os.makedirs creates intermediate dirs)
    model_files_dir = os.path.join(output_dir, "model_files", current_output)
    if not os.path.isdir(model_files_dir):
        os.makedirs(model_files_dir)
# set test output
correct_output_lines = ['/NumWaves\t1\n', '/NumPoints\t4\n', '\n', \
'/Matrix\n', '1\n', '1\n', '1\n', '1']
# run the function
    # gp_var is None here, i.e. a single variance group, which is what the
    # all-ones matrix in correct_output_lines expects
    create_grp_file(data, model_name, None, current_output, output_dir)
# open the file and check it
open_grp = open(os.path.join(output_dir, "model_files", current_output, model_name + ".grp"),"rb")
output_grp = open_grp.readlines()
err_count = 0
for line, correct_line in zip(output_grp, correct_output_lines):
if line != correct_line:
err_count += 1
assert err_count == 0
# OLDER CODE to facilitate creation of .con and .fts file via user-provided
# contrasts matrix
def create_con_ftst_file(con_file, model_name, current_output, outputModelFilesDirectory, column_names, coding_scheme, group_sep):
"""
Create the contrasts and fts file
"""
evs = open(con_file, 'r').readline()
evs = evs.rstrip('\r\n').split(',')
count_ftests = 0
# remove "Contrasts" label and replace it with "Intercept"
evs[0] = "Intercept"
fTest = False
for ev in evs:
if "f_test" in ev:
count_ftests += 1
if count_ftests > 0:
fTest = True
try:
data = np.genfromtxt(con_file, names=True, delimiter=',', dtype=None)
except:
print "Error: Could not successfully read in contrast file: ", con_file
raise Exception
lst = data.tolist()
ftst = []
fts_columns = []
contrasts = []
contrast_names = []
length = None
length = len(list(lst[0]))
# lst = list of tuples, "tp"
# tp = tuple in the format (contrast_name, 0, 0, 0, 0, ...)
# with the zeroes being the vector of contrasts for that contrast
for tp in lst:
contrast_names.append(tp[0])
# create a list of integers that is the vector for the contrast
# ex. [0, 1, 1, 0, ..]
con_vector = list(tp)[1:(length-count_ftests)]
fts_vector = list(tp)[(length-count_ftests):length]
fts_columns.append(fts_vector)
# add Intercept column
if group_sep == False:
if coding_scheme == "Treatment":
con_vector.insert(0, 0)
elif coding_scheme == "Sum":
con_vector.insert(0, 1)
contrasts.append(con_vector)
# contrast_names = list of the names of the contrasts (not regressors)
# contrasts = list of lists with the contrast vectors
num_EVs_in_con_file = len(contrasts[0])
contrasts = np.array(contrasts, dtype=np.float16)
fts_columns = np.array(fts_columns)
# if there are f-tests, create the array for them
if fTest:
if len(contrast_names) < 2:
errmsg = "\n\n[!] CPAC says: Not enough contrasts for running " \
"f-tests.\nTip: Do you have only one contrast in your " \
"contrasts file? f-tests require more than one contrast.\n" \
"Either remove the f-tests or include more contrasts.\n\n"
raise Exception(errmsg)
'''
# process each f-test
for ftest_string in ftest_list:
ftest_vector = []
cons_in_ftest = ftest_string.split(",")
for con in contrast_names:
if con in cons_in_ftest:
ftest_vector.append(1)
else:
ftest_vector.append(0)
ftst.append(ftest_vector)
fts_n = np.array(ftst)
'''
fts_n = fts_columns.T
if len(column_names) != (num_EVs_in_con_file):
err_string = "\n\n[!] CPAC says: The number of EVs in your model " \
"design matrix (found in the %s.mat file) does not " \
"match the number of EVs (columns) in your custom " \
"contrasts matrix CSV file.\n\nCustom contrasts matrix " \
"file: %s\n\nNumber of EVs in design matrix: %d\n" \
"Number of EVs in contrasts file: %d\n\nThe column " \
"labels in the design matrix should match those in " \
"your contrasts .CSV file.\nColumn labels in design " \
"matrix:\n%s" % (model_name, con_file, \
len(column_names), num_EVs_in_con_file, str(column_names))
raise Exception(err_string)
for design_mat_col, con_csv_col in zip(column_names, evs):
if design_mat_col != con_csv_col:
errmsg = "\n\n[!] CPAC says: The names of the EVs in your " \
"custom contrasts .csv file do not match the names or " \
"order of the EVs in the design matrix. Please make " \
"sure these are consistent.\nDesign matrix EV columns: " \
"%s\nYour contrasts matrix columns: %s\n\n" \
% (column_names, evs)
raise Exception(errmsg)
try:
f = open(os.path.join(outputModelFilesDirectory, "model_files", current_output, model_name + '.con'), 'w')
idx = 1
pp_str = '/PPheights'
re_str = '/RequiredEffect'
for name in contrast_names:
print >>f, '/ContrastName%d' %idx, '\t', name
pp_str += '\t%1.5e' %(1)
re_str += '\t%1.5e' %(1)
idx += 1
print >>f, '/NumWaves\t', (contrasts.shape)[1]
print >>f, '/NumContrasts\t', (contrasts.shape)[0]
print >>f, pp_str
print >>f, re_str + '\n'
# print labels for the columns - mainly for double-checking your model
col_string = '\n'
for ev in evs:
col_string = col_string + ev + '\t'
print >>f, col_string, '\n'
print >>f, '/Matrix'
np.savetxt(f, contrasts, fmt='%1.5e', delimiter='\t')
f.close()
except Exception as e:
filepath = os.path.join(outputModelFilesDirectory, "model_files", current_output, model_name + '.con')
errmsg = "\n\n[!] CPAC says: Could not create the .con file for " \
"FLAMEO or write it to disk.\nAttempted filepath: %s\n" \
"Error details: %s\n\n" % (filepath, e)
raise Exception(errmsg)
if fTest:
try:
print "\nFound f-tests in your model, writing f-tests file " \
"(.fts)..\n"
f = open(os.path.join(outputModelFilesDirectory, "model_files", current_output, model_name + '.fts'), 'w')
print >>f, '/NumWaves\t', (contrasts.shape)[0]
print >>f, '/NumContrasts\t', count_ftests
# print labels for the columns - mainly for double-checking your
# model
col_string = '\n'
for con in contrast_names:
col_string = col_string + con + '\t'
print >>f, col_string, '\n'
print >>f, '/Matrix'
for i in range(fts_n.shape[0]):
print >>f, ' '.join(fts_n[i].astype('str'))
#np.savetxt(f, fts_n, fmt='%1.5e', delimiter=' ')
f.close()
except Exception as e:
filepath = os.path.join(outputModelFilesDirectory, "model_files", current_output, model_name + '.fts')
errmsg = "\n\n[!] CPAC says: Could not create .fts file for " \
"FLAMEO or write it to disk.\nAttempted filepath: %s\n" \
"Error details: %s\n\n" % (filepath, e)
raise Exception(errmsg)
"""
Class to set dictionary keys as map attributes
"""
class Configuration(object):
def __init__(self, config_map):
for key in config_map:
if config_map[key] == 'None':
config_map[key] = None
setattr(self, key, config_map[key])
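# Usage sketch (the keys below are hypothetical): Configuration({'pheno_file':
# '/path/pheno.csv', 'grouping_var': 'None'}) yields an object whose .pheno_file
# is the path string and whose .grouping_var is None, because the literal string
# 'None' is mapped to the Python None in __init__ above.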
def pandas_alternate_organize_data(data, c):
import pandas as ps
import csv
df = ps.DataFrame(data)
categorical = []
directional = []
for i in range(0, len(c.columnsInModel)):
if c.deMean[i]:
col = c.columnsInModel[i]
df[col] = df[col].astype('float32') - df[col].astype('float32').mean()
if c.categoricalVsDirectional[i]:
categorical.append(c.columnsInModel[i])
else:
directional.append(c.columnsInModel[i])
#split on the grouping variable
for name, group in df.groupby(c.groupingVariable):
group[c.groupingVariable] = 1
group[c.groupingVariable]= group[c.groupingVariable]
df[c.groupingVariable + '__'+ name] = group[c.groupingVariable]
df[c.groupingVariable + '__'+ name] = df[c.groupingVariable + '__'+ name].fillna(0)
for col in directional:
group[col] = group[col]
df[col + '__'+ name] = group[col]
df[col + '__'+ name] = df[col + '__'+ name].fillna(0)
#split on (grouping variable and each of the (categoricals- grouping variable) )
for col in categorical:
if not (col == c.groupingVariable):
for name, group in df.groupby([c.groupingVariable, col]):
group[col] = 1
df[col+'__'+'_'.join([str(e) for e in name])] = group[col]
df[col+'__'+'_'.join([str(e) for e in name])] = df[col+'__'+'_'.join([str(e) for e in name])].fillna(0)
for col in c.columnsInModel:
del df[col]
df.to_csv('./tempfile.csv', sep=',', index=False)
    print 'saved to ./tempfile.csv'
sys.exit()
def split_directionals(gp, directional, data, c):
for key in gp.keys():
indices = gp[key]
for col in directional:
new_col = col + '__' + key
for idx in range(0, len(data)):
if idx in indices:
data[idx][new_col] = data[idx][col]
else:
data[idx][new_col] = 0
def split_gp_var(gp, data, c):
for key in gp.keys():
indices = gp[key]
new_col = c.groupingVariable + '__' + key
for idx in range(0, len(data)):
if idx in indices:
data[idx][new_col] = 1
else:
data[idx][new_col] = 0
def group_by_gp_categorical(categorical, data, c):
gp_cat_dict = {}
for cat_col in categorical:
if not (cat_col == c.groupingVariable):
for idx in range(0, len(data)):
new_col = '__'.join([str(cat_col), str(data[idx][cat_col]), str(c.groupingVariable), str(data[idx][c.groupingVariable])])
if new_col in gp_cat_dict:
gp_cat_dict[new_col].append(idx)
else:
gp_cat_dict[new_col] = [idx]
for col in gp_cat_dict.keys():
indices = gp_cat_dict[col]
for idx in range(0, len(data)):
if idx in indices:
data[idx][col] = 1
else:
data[idx][col] = 0
def alternate_organize_data(data, c):
categorical = []
directional = []
mean_cols = []
groups_grouping_var = {}
for i in range(0, len(c.columnsInModel)):
if c.deMean[i]:
col = c.columnsInModel[i]
mean_cols.append(col)
if c.categoricalVsDirectional[i]:
categorical.append(c.columnsInModel[i])
else:
directional.append(c.columnsInModel[i])
sum_ = {}
idx = 0
#take sum of each of columns to be demeaned
for row in data:
#on the side create groups for groups_grouping_var
try:
groups_grouping_var[str(row[c.groupingVariable])].append(idx)
except:
groups_grouping_var[str(row[c.groupingVariable])] = [idx]
for col in mean_cols:
try:
sum_[col] += float(row[col])
except:
sum_[col] = float(row[col])
idx += 1
#take the mean
for row in data:
for col in mean_cols:
row[col] = float(row[col]) - float(sum_[col])/float(len(data))
#split the directonal columns according to the groupingVariable
split_directionals(groups_grouping_var, directional, data, c)
#split the groupingVariable col
split_gp_var(groups_grouping_var, data, c)
#split categorical cols according to groupingVariable
group_by_gp_categorical(categorical, data, c)
#delete all original categorical and directional columns
for idx in range(0, len(data)):
#del data[idx]['subject_id']
for col in directional:
del data[idx][col]
for col in categorical:
del data[idx][col]
print '\t'.join(key for key in sorted(data[0].keys()) )
for idx in range(0, len(data)):
print '\t'.join(str(data[idx][key]) for key in sorted(data[idx].keys()))
return data, data[0].keys(), groups_grouping_var
def run(config, fTest, param_file, derivative_means_dict, pipeline_path, current_output, model_out_dir, roi_means_dict=None, CPAC_run=False):
# create_fsl_model.run()
# this is called from cpac_group_analysis_pipeline.py
# it collects the information the user provided for the FSL gpa model
# which was saved in the group analysis FSL config .yaml file, and then
# puts it into Patsy to create a design matrix
# it then also generates the contrast file from the contrasts the user
# provided
# ultimately this produces the .mat, .con and .grp files needed
# for FLAMEO for group analysis
# see more info on Patsy:
# http://patsy.readthedocs.org/en/latest/overview.html
print "\nBuilding the FSL group analysis model for %s..\n" % current_output
# open the GROUP ANALYSIS FSL .YML CONFIG FILE, not the main pipeline
# config .yml file!
if CPAC_run:
c = config
else:
try:
c = Configuration(yaml.load(open(os.path.realpath(config), 'r')))
except:
raise Exception("Error in reading %s configuration file" % config)
import csv
import numpy as np
# return the data from the phenotype file processed properly for Patsy
# and load it into 'pheno_data_dict'
# format: dictionary, each key is the name of an EV, and its value is
# a LIST of values in order of the subjects
# - categorical EVs are already renamed from '0,1,..' to
# 'EV0,EV1,..' with EV being the EV name
# - EVs to be demeaned are already demeaned
# - numerical EVs (non-categorical) are in a list which
# have been converted into a NumPy array
pheno_data_dict = create_pheno_dict(c)
formula = c.design_formula
# get number of subjects that have the derivative for this current model
# (basically, the amount of time points, which must be greater than the
# number of EVs)
num_subjects = len(derivative_means_dict)
if param_file != None:
''' extract motion measures for insertion as EVs if selected '''
# insert MeanFD or other measures into pheno_data_dict
# first, pull the measure values from the all_params .csv file
# written to the individual-level analysis output directory
# then, ensure the values are in the same order as the subject ids
measures = ['MeanFD', 'MeanFD_Jenkinson', 'MeanDVARS']
try:
measure_dict = {}
f = csv.DictReader(open(param_file,'rU'))
for line in f:
measure_map = {}
for m in measures:
if line.get(m):
measure_map[m] = line[m]
measure_dict[line['Subject']] = measure_map
except:
print '\n\n[!] CPAC says: Could not extract required ' \
'information from the parameters file.\n'
print 'Path: ', param_file, '\n\n'
raise Exception
# function to demean measures the user included in the design formula
# and then insert them in the right location in the pheno_data_dict
def add_measure_to_pheno(measure_name):
measure_list = []
# create a blank list that is the proper length
for sub in pheno_data_dict[c.subject_id_label]:
measure_list.append(0)
for subID in measure_dict.keys():
# find matching subject IDs between the measure_dict and the
# pheno_data_dict so we can insert measure values into the
# pheno_data_dict
for subject in pheno_data_dict[c.subject_id_label]:
if subject == subID:
# return the index (just an integer) of where in the
# pheno_data_dict list structure a subject ID is
idx = np.where(pheno_data_dict[c.subject_id_label]==subID)[0][0]
# insert Mean FD value in the proper point
measure_list[idx] = float(measure_dict[subID][measure_name])
# time to demean the MeanFD values
measure_sum = 0.0
for measure in measure_list:
measure_sum = measure_sum + measure
measure_mean = measure_sum / len(measure_list)
idx = 0
for measure in measure_list:
measure_list[idx] = measure - measure_mean
idx += 1
# add this new list to the pheno_data_dict
pheno_data_dict[measure_name] = np.array(measure_list)
''' insert measures into pheno data '''
# add measures selected in the design formula into pheno_data_dict
# they are also demeaned prior
for measure in measures:
if measure in c.design_formula:
add_measure_to_pheno(measure)
def insert_means_into_model(output_means_dict):
means_list = []
# create a blank list that is the proper length (num of subjects)
for sub in pheno_data_dict[c.subject_id_label]:
means_list.append(0)
for subID in output_means_dict.keys():
# find matching subject IDs between the output_means_dict and
# the pheno_data_dict so we can insert mean values into the
# pheno_data_dict
for subject in pheno_data_dict[c.subject_id_label]:
if subject == subID:
# return the index (just an integer) of where in the
# pheno_data_dict list structure a subject ID is
idx = np.where(pheno_data_dict[c.subject_id_label]==subID)[0][0]
# insert Mean FD value in the proper point
means_list[idx] = float(output_means_dict[subID])
# time to demean the means!
measure_mean = sum(means_list) / len(means_list)
idx = 0
for mean in means_list:
means_list[idx] = mean - measure_mean
idx += 1
return means_list
if 'Measure_Mean' in c.design_formula:
''' extract the mean of derivative for each subject if selected '''
# if the user has selected it to be part of their model, insert the mean
# of the outputs included in group analysis (i.e. if running ReHo in
# group-level analysis, have the mean of each subject's ReHo output
# included as an EV in the phenotype - regress out the mean of measure
output_means_dict = derivative_means_dict
# by the end of this for loop above, output_means_dict should look
# something like this:
# {sub1: mean_val, sub2: mean_val, ..}
# as this code runs once per output, this dictionary contains
# the mean values of the one current output, right now
''' insert means into pheno data if selected '''
measure_means_list = insert_means_into_model(output_means_dict)
# add this new list to the pheno_data_dict
pheno_data_dict['Measure_Mean'] = np.array(measure_means_list)
if "Custom_ROI_Mean" in c.design_formula:
''' include the means of the specified ROIs as regressors '''
# check
if roi_means_dict == None:
err_string = "\n\n[!] CPAC says: The custom ROI means were not " \
"calculated properly during the group analysis " \
"model generation.\n\n"
raise Exception(err_string)
for val in roi_means_dict.values():
roi_num = len(val)
# this will be a dictionary matching ROI regressor header labels with
# the actual ROI dictionaries
roi_dict_dict = {}
# split the roi_means_dict from { subID: [mean1,mean2,mean3,..], ..}
# to three dictionaries of { subID: mean1, .. }, { subID: mean2, .. },
# and so on
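    # e.g. (hypothetical values) {'sub001': [0.12, 0.30], 'sub002': [0.10, 0.28]}
    # becomes Custom_ROI_Mean_1 -> {'sub001': 0.12, 'sub002': 0.10} and
    # Custom_ROI_Mean_2 -> {'sub001': 0.30, 'sub002': 0.28}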
for num in range(0,roi_num):
label = "Custom_ROI_Mean_%d" % int(num+1)
temp_roi_dict = {}
for key in roi_means_dict.keys():
            temp_roi_dict[key] = roi_means_dict[key][num]
roi_dict_dict[label] = temp_roi_dict
add_formula_string = ""
for roi_column in roi_dict_dict.keys():
roi_means_list = insert_means_into_model(roi_dict_dict[roi_column])
# add this new list to the pheno_data_dict
pheno_data_dict[roi_column] = np.array(roi_means_list)
# create a string of all the new custom ROI regressor column names
# to be inserted into the design formula, so that Patsy will accept
# the phenotypic data dictionary that now has these columns
if add_formula_string == "":
add_formula_string = add_formula_string + roi_column
else:
add_formula_string = add_formula_string + " + " + roi_column
# a regressor column of ROI means for each custom-specified ROI has now
# been added to the model with appropriate column labels
formula = formula.replace("Custom_ROI_Mean",add_formula_string)
''' Modeling Group Variances Separately '''
if c.group_sep == True:
if c.grouping_var == None or c.grouping_var not in c.design_formula:
print '\n\n[!] CPAC says: Model group variances separately is ' \
'enabled, but the grouping variable set is either set to ' \
'None, or was not included in the model as one of the ' \
'EVs.\n'
print 'Design formula: ', c.design_formula
print 'Grouping variable: ', c.grouping_var, '\n\n'
raise Exception
coding_scheme = c.coding_scheme[0]
# do this a little early for the grouping variable so that it doesn't
# get in the way of doing this for the other EVs once they have the
# grouping variable in their names
if 'categorical' in c.ev_selections.keys():
for EV_name in c.ev_selections['categorical']:
if EV_name == c.grouping_var:
if coding_scheme == 'Treatment':
formula = formula.replace(EV_name, 'C(' + EV_name + ')')
elif coding_scheme == 'Sum':
formula = formula.replace(EV_name, 'C(' + EV_name + ', Sum)')
groupvar_levels = []
grouping_var_id_dict = {}
idx = 0
for cat_ev_value in pheno_data_dict[c.grouping_var]:
# here, each "cat_ev_value" will be one of the Patsy-format values
# of the categorical EV that the user has selected as the grouping
# variable, i.e. "sex1, sex1, sex0, sex1", etc..
# cat_ev_level is the level digit or label without the EV name
# ex. sex1 becomes 1
cat_ev_level = str(cat_ev_value).replace(str(c.grouping_var), "")
if cat_ev_level not in groupvar_levels:
groupvar_levels.append(cat_ev_level)
# groupvar_levels only keeps track of how many levels there are in
# the grouping variable
# populate this dict for create_grp_file():
try:
grouping_var_id_dict[cat_ev_level].append(idx)
except:
grouping_var_id_dict[cat_ev_level] = [idx]
idx += 1
split_EVs = {}
for key in pheno_data_dict.keys():
# here, "key" is the name of each EV from the phenotype file, as
# they are labeled in the phenotype file (not Patsy format)
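            # e.g. (hypothetical) with grouping variable 'sex' (levels 0 and 1), an EV
            # 'age' is split into 'age__sex0' and 'age__sex1'; each new column keeps a
            # subject's value where that subject belongs to the group and 0 elsewhere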
if (key in formula) and (key != c.grouping_var):
# for the formula edit
new_key_string = ""
for level in groupvar_levels:
# for the new split EV label
groupvar_with_level = str(c.grouping_var) + str(level)
new_key = key + "__" + groupvar_with_level
# for the formula edit
if new_key_string == "":
new_key_string = new_key
else:
new_key_string = new_key_string + " + " + new_key
split_EVs[new_key] = []
# for the formula as well
if key in c.ev_selections["categorical"]:
c.ev_selections["categorical"].append(new_key)
for val, groupvar_val in zip(pheno_data_dict[key], pheno_data_dict[c.grouping_var]):
if groupvar_with_level == groupvar_val:
split_EVs[new_key].append(val)
else:
split_EVs[new_key].append(0)
del pheno_data_dict[key]
if key in c.ev_selections["categorical"]:
c.ev_selections["categorical"].remove(key)
# formula edit
formula = formula.replace(key, new_key_string)
# put split EVs into pheno data dict
pheno_data_dict.update(split_EVs)
# parse through ev_selections, find the categorical names within the
# design formula and insert C(<name>, Sum) into the design formula
# this is required for Patsy to process the categorical EVs
# properly when generating the design matrix (this goes into the
# .mat file)
if 'categorical' in c.ev_selections.keys():
for EV_name in c.ev_selections['categorical']:
if EV_name != c.grouping_var:
if coding_scheme == 'Treatment':
formula = formula.replace(EV_name, 'C(' + EV_name + ')')
elif coding_scheme == 'Sum':
formula = formula.replace(EV_name, 'C(' + EV_name + ', Sum)')
# if group_sep = Off
else:
grouping_var_id_dict = None
# parse through ev_selections, find the categorical names within the
# design formula and insert C(<name>, Sum) into the design formula
# this is required for Patsy to process the categorical EVs
# properly when generating the design matrix (this goes into the
# .mat file)
coding_scheme = c.coding_scheme[0]
if 'categorical' in c.ev_selections.keys():
for EV_name in c.ev_selections['categorical']:
if coding_scheme == 'Treatment':
formula = formula.replace(EV_name, 'C(' + EV_name + ')')
elif coding_scheme == 'Sum':
formula = formula.replace(EV_name, 'C(' + EV_name + ', Sum)')
# make sure the group analysis output directory exists
try:
if not os.path.exists(c.output_dir):
os.makedirs(c.output_dir)
except:
print '\n\n[!] CPAC says: Could not successfully create the group ' \
'analysis output directory:\n', c.output_dir, '\n\nMake sure ' \
'you have write access in this file structure.\n\n\n'
raise Exception
''' create the Patsy design matrix '''
import patsy
# drop pickles of the inputs meant for Patsy so you can manually test it
# later if needed
#import pickle
#pickle.dump(formula, open(c.output_dir + '/' + "formula.p", "wb" ) )
#pickle.dump(pheno_data_dict, open(c.output_dir + '/' + "data_dict.p", "wb" ) )
#print pheno_data_dict
try:
if c.group_sep == True:
dmatrix = patsy.dmatrix(formula + " - 1", pheno_data_dict, NA_action='raise')
else:
dmatrix = patsy.dmatrix(formula, pheno_data_dict, NA_action='raise')
except:
print '\n\n[!] CPAC says: Design matrix creation wasn\'t ' \
'successful - do the terms in your formula correctly ' \
'correspond to the EVs listed in your phenotype file?\n'
print 'Phenotype file provided: '
print c.pheno_file, '\n\n'
raise Exception
''' CONTRAST FILE PREP FUNCTIONS '''
# parse in user-input contrast strings that were selected, and generate
# the contrast file (.con)
def greater_than(dmat, a, b, coding, group_sep, grouping_var):
c1 = positive(dmat, a, coding, group_sep, grouping_var)
c2 = positive(dmat, b, coding, group_sep, grouping_var)
return c1-c2
def positive(dmat, a, coding, group_sep, grouping_var):
# this is also where the "Intercept" column gets introduced into
# the contrasts columns, for when the user uses the model builder's
# contrast builder
evs = dmat.design_info.column_name_indexes
con = np.zeros(dmat.shape[1])
if group_sep == True:
if "__" in a and grouping_var in a:
ev_desc = a.split("__")
for ev in evs:
count = 0
for desc in ev_desc:
if desc in ev:
count += 1
if count == len(ev_desc):
con[evs[ev]] = 1
break
else:
# it is a dropped term so make all other terms in that category
# at -1
term = a.split('[')[0]
for ev in evs:
if ev.startswith(term):
con[evs[ev]]= -1
elif len(a.split(grouping_var)) > 2:
# this is if the current parsed contrast is the actual
# grouping variable, as the Patsified name will have the
# variable's name string in it twice
for ev in evs:
if a.split(".")[1] in ev:
con[evs[ev]] = 1
break
else:
# it is a dropped term so make all other terms in that category
# at -1
term = a.split('[')[0]
for ev in evs:
if ev.startswith(term):
con[evs[ev]]= -1
# else not modeling group variances separately
else:
if a in evs:
con[evs[a]] = 1
else:
# it is a dropped term so make all other terms in that category
# at -1
term = a.split('[')[0]
for ev in evs:
if ev.startswith(term):
con[evs[ev]]= -1
if coding == "Treatment":
# make Intercept 0
con[0] = 0
elif coding == "Sum":
# make Intercept 1
con[1] = 1
return con
def negative(dmat, a, coding, group_sep, grouping_var):
con = 0-positive(dmat, a, coding, group_sep, grouping_var)
return con
def create_dummy_string(length):
ppstring = ""
for i in range(0, length):
ppstring += '\t' + '%1.5e' %(1.0)
ppstring += '\n'
return ppstring
def create_con_file(con_dict, col_names, file_name, current_output, out_dir):
with open(os.path.join(out_dir, "model_files", current_output, file_name)+".con",'w+') as f:
# write header
num = 1
for key in con_dict:
f.write("/ContrastName%s\t%s\n" %(num,key))
num += 1
f.write("/NumWaves\t%d\n" %len(con_dict[key]))
f.write("/NumContrasts\t%d\n" %len(con_dict))
f.write("/PPheights%s" %create_dummy_string(len(con_dict[key])))
f.write("/RequiredEffect%s" %create_dummy_string(len(con_dict[key])))
f.write("\n\n")
# print labels for the columns - mainly for double-checking your
# model
col_string = '\n'
for col in col_names:
col_string = col_string + col + '\t'
print >>f, col_string, '\n'
# write data
f.write("/Matrix\n")
for key in con_dict:
for v in con_dict[key]:
f.write("%1.5e\t" %v)
f.write("\n")
def create_fts_file(ftest_list, con_dict, model_name, current_output, out_dir):
try:
print "\nFound f-tests in your model, writing f-tests file " \
"(.fts)..\n"
f = open(os.path.join(out_dir, "model_files", current_output, model_name + '.fts'), 'w')
print >>f, '/NumWaves\t', len(con_dict)
print >>f, '/NumContrasts\t', len(ftest_list)
# process each f-test
ftst = []
for ftest_string in ftest_list:
ftest_vector = []
cons_in_ftest = ftest_string.split(",")
for con in con_dict.keys():
if con in cons_in_ftest:
ftest_vector.append(1)
else:
ftest_vector.append(0)
ftst.append(ftest_vector)
fts_n = np.array(ftst)
# print labels for the columns - mainly for double-checking your
# model
col_string = '\n'
for con in con_dict.keys():
col_string = col_string + con + '\t'
print >>f, col_string, '\n'
print >>f, '/Matrix'
for i in range(fts_n.shape[0]):
print >>f, ' '.join(fts_n[i].astype('str'))
f.close()
except Exception as e:
filepath = os.path.join(out_dir, "model_files", current_output, model_name + '.fts')
errmsg = "\n\n[!] CPAC says: Could not create .fts file for " \
"FLAMEO or write it to disk.\nAttempted filepath: %s\n" \
"Error details: %s\n\n" % (filepath, e)
raise Exception(errmsg)
''' Create contrasts_dict dictionary for the .con file generation later '''
contrasts_list = c.contrasts
contrasts_dict = {}
# take the contrast strings and process them appropriately
# extract the two separate contrasts (if there are two), and then
# identify which are categorical - adapting the string if so
def process_contrast(operator):
parsed_EVs_in_contrast = []
EVs_in_contrast = parsed_contrast.split(operator)
if '' in EVs_in_contrast:
EVs_in_contrast.remove('')
for EV in EVs_in_contrast:
skip = 0
# they need to be put back into Patsy formatted header titles
# because the dmatrix gets passed into the function that writes
# out the contrast matrix
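            # e.g. (hypothetical EV) a categorical EV 'dx' with level 'dx1' becomes
            # 'C(dx)[T.dx1]' under Treatment coding, or 'C(dx, Sum)[S.dx1]' under Sum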
if 'categorical' in c.ev_selections.keys():
for cat_EV in c.ev_selections['categorical']:
# second half of this if clause is in case group variances
# are being modeled separately, and we don't want the EV
# that is the grouping variable (which is now present in
# other EV names) to confound this operation
if c.group_sep == True:
gpvar = c.grouping_var
else:
gpvar = "..."
if (cat_EV in EV) and not (gpvar in EV and \
"__" in EV):
# handle interactions
if ":" in EV:
temp_split_EV = EV.split(":")
for interaction_EV in temp_split_EV:
if cat_EV in interaction_EV:
current_EV = interaction_EV
else:
current_EV = EV
if coding_scheme == 'Treatment':
cat_EV_contrast = EV.replace(EV, 'C(' + cat_EV + ')[T.' + current_EV + ']')
elif coding_scheme == 'Sum':
cat_EV_contrast = EV.replace(EV, 'C(' + cat_EV + ', Sum)[S.' + current_EV + ']')
parsed_EVs_in_contrast.append(cat_EV_contrast)
skip = 1
if skip == 0:
parsed_EVs_in_contrast.append(EV)
# handle interactions
if ":" in EV and len(parsed_EVs_in_contrast) == 2:
parsed_EVs_in_contrast = [parsed_EVs_in_contrast[0] + ":" + parsed_EVs_in_contrast[1]]
if ":" in EV and len(parsed_EVs_in_contrast) == 3:
parsed_EVs_in_contrast = [parsed_EVs_in_contrast[0], parsed_EVs_in_contrast[1] + ":" + parsed_EVs_in_contrast[2]]
return parsed_EVs_in_contrast
# parse the user-input contrast strings
for contrast in contrasts_list:
# each 'contrast' is a string the user input of the desired contrast
# remove all spaces
parsed_contrast = contrast.replace(' ', '')
EVs_in_contrast = []
parsed_EVs_in_contrast = []
if '>' in parsed_contrast:
parsed_EVs_in_contrast = process_contrast('>')
contrasts_dict[parsed_contrast] = greater_than(dmatrix, parsed_EVs_in_contrast[0], parsed_EVs_in_contrast[1], coding_scheme, c.group_sep, c.grouping_var)
elif '<' in parsed_contrast:
parsed_EVs_in_contrast = process_contrast('<')
contrasts_dict[parsed_contrast] = greater_than(dmatrix, parsed_EVs_in_contrast[1], parsed_EVs_in_contrast[0], coding_scheme, c.group_sep, c.grouping_var)
else:
contrast_string = parsed_contrast.replace('+',',+,')
contrast_string = contrast_string.replace('-',',-,')
contrast_items = contrast_string.split(',')
if '' in contrast_items:
contrast_items.remove('')
if '+' in contrast_items and len(contrast_items) == 2:
parsed_EVs_in_contrast = process_contrast('+')
contrasts_dict[parsed_contrast] = positive(dmatrix, parsed_EVs_in_contrast[0], coding_scheme, c.group_sep, c.grouping_var)
elif '-' in contrast_items and len(contrast_items) == 2:
parsed_EVs_in_contrast = process_contrast('-')
contrasts_dict[parsed_contrast] = negative(dmatrix, parsed_EVs_in_contrast[0], coding_scheme, c.group_sep, c.grouping_var)
if len(contrast_items) > 2:
idx = 0
for item in contrast_items:
# they need to be put back into Patsy formatted header titles
# because the dmatrix gets passed into the function that writes
# out the contrast matrix
if 'categorical' in c.ev_selections.keys():
for cat_EV in c.ev_selections['categorical']:
if cat_EV in item:
if coding_scheme == 'Treatment':
item = item.replace(item, 'C(' + cat_EV + ')[T.' + item + ']')
elif coding_scheme == 'Sum':
item = item.replace(item, 'C(' + cat_EV + ', Sum)[S.' + item + ']')
if idx == 0:
if item != '+' and item != '-':
                    contrast_vector = positive(dmatrix, item, coding_scheme, c.group_sep, c.grouping_var)
if parsed_contrast not in contrasts_dict.keys():
contrasts_dict[parsed_contrast] = contrast_vector
else:
contrasts_dict[parsed_contrast] += contrast_vector
elif idx != 0:
if item != '+' and item != '-':
if contrast_items[idx-1] == '+':
                        contrast_vector = positive(dmatrix, item, coding_scheme, c.group_sep, c.grouping_var)
if parsed_contrast not in contrasts_dict.keys():
contrasts_dict[parsed_contrast] = contrast_vector
else:
contrasts_dict[parsed_contrast] += contrast_vector
if contrast_items[idx-1] == '-':
                        contrast_vector = negative(dmatrix, item, coding_scheme, c.group_sep, c.grouping_var)
if parsed_contrast not in contrasts_dict.keys():
contrasts_dict[parsed_contrast] = contrast_vector
else:
contrasts_dict[parsed_contrast] += contrast_vector
idx += 1
# convert the Patsy-generated design matrix into a NumPy array
data = np.asarray((dmatrix))
''' check the model for multicollinearity '''
print "\nChecking for multicollinearity in the model for %s.." \
% current_output
if check_multicollinearity(np.array(data)) == 1:
print '[!] CPAC warns: Detected multicollinearity in the ' \
'computed group-level analysis model for %s. Please double-' \
'check your model design.\n\n' % current_output
'''
# prep data and column names if user decides to model group variances
# separately
if c.group_sep == True:
EV_options = []
grouping_options = []
new_options = []
idx = 0
# take in what the grouping variable is. get the names of the options.
for col_name in dmatrix.design_info.column_names:
# first, link what the user entered as the grouping variable to
# what Patsy has renamed it..
if c.grouping_var in col_name:
grouping_var_idx = idx
if col_name != 'Intercept':
skip = 0
if 'categorical' in c.ev_selections.keys():
for cat_EV in c.ev_selections['categorical']:
if cat_EV in col_name:
if coding_scheme == 'Treatment':
cat_EV_stripped = col_name.replace('C(' + cat_EV + ')[T.', '')
elif coding_scheme == 'Sum':
cat_EV_stripped = col_name.replace('C(' + cat_EV + ', Sum)[S.', '')
cat_EV_stripped = cat_EV_stripped.replace(']', '')
EV_options.append(cat_EV_stripped)
skip = 1
if skip == 0:
EV_options.append(col_name)
idx += 1
idx = 1
for ev in EV_options:
if c.grouping_var in ev:
grouping_variable_info = []
grouping_variable_info.append(ev)
grouping_variable_info.append(idx)
grouping_options.append(grouping_variable_info)
# grouping_var_idx is the column numbers in the design matrix
# which holds the grouping variable (and its possible levels)
idx += 1
# all the categorical values/levels of the grouping variable
grouping_var_levels = []
for gv_idx in grouping_options:
for subject in dmatrix:
if c.grouping_var in c.ev_selections["categorical"]:
level_num = str(int(subject[gv_idx[1]]))
else:
level_num = str(subject[gv_idx[1]])
level_label = '__' + c.grouping_var + level_num
if level_label not in grouping_var_levels:
grouping_var_levels.append(level_label)
# make the new header for the reorganized data
for ev in EV_options:
if c.grouping_var not in ev:
for level in grouping_var_levels:
new_options.append(ev + level)
elif c.grouping_var in ev:
new_options.append(ev)
grouped_data = []
# this is a dict that will be something like this:
# grouping variable = Sex, M or F
# { "M": [1,3,7], "F": [2,4,5,6] } with the digits being row numbers
# from the design matrix
grouping_var_id_dict = {}
idx = 0
for subject in dmatrix:
# populate this dict for create_grp_file():
try:
grouping_var_id_dict[str(subject[int(grouping_var_idx)])].append(idx)
except:
grouping_var_id_dict[str(subject[int(grouping_var_idx)])] = [idx]
new_row = []
# put in the Intercept first
new_row.append(subject[0])
for option in grouping_options:
grouping_var_id = option[1]
gp_var_value = subject[grouping_var_id]
gp_var_label = '_' + str(gp_var_value)
for orig_idx in range(1,len(subject)):
# if the current ev_value in the current subject line is the
# grouping variable
if orig_idx == grouping_var_id:
new_row.append(subject[orig_idx])
else:
for new_header in new_options:
if EV_options[orig_idx-1] in new_header:
if gp_var_label in new_header:
new_row.append(subject[orig_idx])
else:
new_row.append(0)
# kill the intercept (not needed in modeling group variances
# separately)
del new_row[0]
grouped_data.append(new_row)
idx += 1
data = np.array(grouped_data, dtype=np.float16)
column_names = new_options
else:
'''
data = np.array(data, dtype=np.float16)
column_names = dmatrix.design_info.column_names
# check to make sure there are more time points than EVs!
if len(column_names) >= num_subjects:
err = "\n\n[!] CPAC says: There are more EVs than there are " \
"subjects currently included in the model for %s. There must " \
"be more subjects than EVs in the design.\n\nNumber of " \
"subjects: %d\nNumber of EVs: %d\n\nNote: An 'Intercept' " \
"column gets added to the design as an EV, so there will be " \
"one more EV than you may have specified in your design. In " \
"addition, if you specified to model group variances " \
"separately, an Intercept column will not be included, but " \
"the amount of EVs can nearly double once they are split " \
"along the grouping variable.\n\n" \
"If the number of subjects is lower than the number of " \
"subjects in your group analysis subject list, this may be " \
"because not every subject in the subject list has an output " \
"for %s in the individual-level analysis output directory.\n\n"\
% (current_output, num_subjects, len(column_names), \
current_output)
raise Exception(err)
# remove the header formatting Patsy creates for categorical variables
# because we are going to use depatsified_EV_names to test user-made
# custom contrast files
depatsified_EV_names = []
for column in column_names:
# if using Sum encoding, a column name may look like this:
# C(adhd, Sum)[S.adhd0]
# this loop leaves it with only "adhd0" in this case, for the
# contrasts list for the next GUI page
column_string = column
string_for_removal = ''
for char in column_string:
string_for_removal = string_for_removal + char
if char == '.':
column_string = column_string.replace(string_for_removal, '')
string_for_removal = ''
column_string = column_string.replace(']', '')
depatsified_EV_names.append(column_string)
''' FLAMEO model input files generation '''
if not os.path.isdir(os.path.join(model_out_dir, "model_files", current_output)):
os.makedirs(os.path.join(model_out_dir, "model_files", current_output))
try:
create_mat_file(data, column_names, c.model_name, current_output, model_out_dir)
except Exception as e:
print '\n\n[!] CPAC says: Could not create .mat file during ' \
'group-level analysis model file generation.\n'
print 'Attempted output directory: ', model_out_dir, '\n'
print "Error details: %s\n\n" % e
raise Exception
try:
create_grp_file(data, c.model_name, grouping_var_id_dict, current_output, model_out_dir)
except Exception as e:
print '\n\n[!] CPAC says: Could not create .grp file during ' \
'group-level analysis model file generation.\n'
print 'Attempted output directory: ', model_out_dir, '\n'
print "Error details: %s\n\n" % e
raise Exception
if (c.custom_contrasts == None) or (c.custom_contrasts == '') or \
("None" in c.custom_contrasts):
print "Writing contrasts file (.con) based on contrasts provided " \
"using the group analysis model builder's contrasts editor.."
try:
create_con_file(contrasts_dict, column_names, c.model_name, current_output, model_out_dir)
except Exception as e:
print '\n\n[!] CPAC says: Could not create .con file during ' \
'group-level analysis model file generation.\n'
print 'Attempted output directory: ', model_out_dir, '\n'
print "Error details: %s\n\n" % e
raise Exception
try:
create_fts_file(c.f_tests, contrasts_dict, c.model_name, current_output, model_out_dir)
except Exception as e:
print '\n\n[!] CPAC says: Could not create .fts file during ' \
'group-level analysis model file generation.\n'
print 'Attempted output directory: ', model_out_dir, '\n'
print "Error details: %s\n\n" % e
raise Exception
else:
print "\nWriting contrasts file (.con) based on contrasts provided " \
"with a custom contrasts matrix CSV file..\n"
create_con_ftst_file(c.custom_contrasts, c.model_name, current_output, model_out_dir, depatsified_EV_names, coding_scheme, c.group_sep)
| bsd-3-clause |
tdsmith/labmisc | parse_eve.py | 1 | 1782 | #!/usr/bin/env python3
import sys
import numpy as np
import openpyxl as xl
import pandas as pd
ROW_DATA_STARTS = 41
ROW_ANALYTE_NAME = 39
def main(filename):
workbook = xl.load_workbook(filename)
ws = workbook.active
invisible_rows = [key for key in
sorted(ws.row_dimensions.keys()) if
not ws.row_dimensions[key].visible]
data_columns = np.where([cell.value for cell in ws.rows[39]])[0] + 1
data_rows = np.where([cell.value for cell in ws.columns[1]])[0] + 1
data_rows = [row for row in data_rows if row >= ROW_DATA_STARTS and row not in invisible_rows]
col_names = data_columns[0]
col_dilution = data_columns[1]
col_last = max(data_columns)
col = data_columns[2]
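    # each analyte occupies three consecutive columns -- intensity, concentration
    # and expected value -- hence the col+1 / col+2 reads and the step of 3 below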
data = []
while col <= col_last:
analyte = ws.cell(row=ROW_ANALYTE_NAME, column=col).value
for row in data_rows:
datum = {"Analyte": analyte, "Error": None}
datum["Sample"] = ws.cell(row=row, column=col_names).value
datum["Dilution"] = ws.cell(row=row, column=col_dilution).value
intensity_cell = ws.cell(row=row, column=col)
datum["Intensity"] = intensity_cell.value if intensity_cell.data_type == "n" else None
conc_cell = ws.cell(row=row, column=col+1)
if conc_cell.data_type == "n":
datum["Concentration"] = conc_cell.value
if conc_cell.style.font.color.theme != 1:
datum["Error"] = "OOR"
else:
datum["Error"] = conc_cell.value
datum["Expected"] = ws.cell(row=row, column=col+2).value
data.append(datum)
col += 3
pd.DataFrame(data).to_csv(sys.stdout, index=False)
if __name__ == "__main__":
main(sys.argv[1])
| mit |
nanophotonics/nplab | nplab/modelling/mie.py | 1 | 14557 | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from builtins import range
import numpy as np
import requests
import matplotlib.pyplot as plt
from scipy.special import riccati_jn,riccati_yn
from nplab.utils.refractive_index_db import RefractiveIndexInfoDatabase
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
#Initialized after first request
WAVELENGTHS = None
REFRACTIVE_INDEX = None
print("starting")
'''
Adapted from: https://github.com/scottprahl/miepython
'''
rfdb = RefractiveIndexInfoDatabase()
water = "main/H2O/Hale.yml"
gold = "main/Au/Yakubovsky-25nm.yml"
water_refractive_index = rfdb.refractive_index_generator(label=water)
gold_refractive_index = rfdb.refractive_index_generator(label=gold)
def Lentz_Dn(z, N):
""" Compute the logarithmic derivative of the Ricatti-Bessel function
This returns the Ricatti-Bessel function of order N with argument z
using the continued fraction technique of Lentz, Appl. Opt., 15,
668-671, (1976).
"""
zinv = 2.0/z
alpha = (N+0.5) * zinv
aj = -(N+1.5) * zinv
alpha_j1 = aj+1/alpha
alpha_j2 = aj
ratio = alpha_j1/alpha_j2
runratio = alpha*ratio
while abs(abs(ratio)-1.0) > 1e-12:
aj = zinv - aj
alpha_j1 = 1.0/alpha_j1 + aj
alpha_j2 = 1.0/alpha_j2 + aj
ratio = alpha_j1/alpha_j2
zinv *= -1
runratio = ratio*runratio
return -N/z+runratio
def D_downwards(z, N):
""" Compute the logarithmic derivative of all Ricatti-Bessel functions
This returns the Ricatti-Bessel function of orders 0 to N for an
argument z using the downwards recurrence relations.
"""
D = np.zeros(N, dtype=complex)
last_D = Lentz_Dn(z, N)
for n in range(N, 0, -1):
last_D = n/z - 1.0/(last_D+n/z)
D[n-1] = last_D
return D
def D_upwards(z, N):
""" Compute the logarithmic derivative of all Ricatti-Bessel functions
This returns the Ricatti-Bessel function of orders 0 to N for an
argument z using the upwards recurrence relations.
"""
D = np.zeros(N, dtype=complex)
exp = np.exp(-2j*z)
D[1] = -1/z + (1-exp)/((1-exp)/z-1j*(1+exp))
for n in range(2, N):
D[n] = 1/(n/z-D[n-1])-n/z
return D
def D_calc(m, x, N):
""" Compute the logarithmic derivative of the Ricatti-Bessel function at all
orders (from 0 to N) with argument z
"""
z = m * x
if abs(z.imag) > 13.78*m.real**2 - 10.8*m.real + 3.9:
return D_upwards(z, N)
else:
return D_downwards(z, N)
def calculate_a_b_coefficients(m,x,n_max):
n = 2*n_max
def psi(rho):
outp, _ =riccati_jn(n,rho)
return outp
def dpsi(rho):
_,outp = riccati_jn(n,rho)
return outp
#Definition:
#Hankel function of first kind:
# Hn = Jn + iYn
# Jn - bessel function of first kind
# Yn - bessel function of second kind
def eps(rho):
jn = riccati_jn(n,rho)[0]
yn = riccati_yn(n,rho)[0]
hn = jn + 1j*yn
return hn
def deps(rho):
d_jn = riccati_jn(n,rho)[1]
d_yn = riccati_yn(n,rho)[1]
d_hn = d_jn + 1j*d_yn
return d_hn
rho = m*x
def a():
num = m*psi(m*x)*dpsi(x) - psi(x)*dpsi(m*x)
denom = m*psi(m*x)*deps(x) - eps(x)*dpsi(m*x)
return (num/denom)[0:n]
def b():
num = psi(m*x)*dpsi(x) - m*psi(x)*dpsi(m*x)
denom = psi(m*x)*deps(x) - m*eps(x)*dpsi(m*x)
return (num/denom)[0:n]
return a(), b()
from scipy.special import jv, yv
def Mie_ab(m,x,n_max):
# http://pymiescatt.readthedocs.io/en/latest/forward.html#Mie_ab
mx = m*x
nmax = np.real(np.round(2+x+4*(x**(1/3))))
nmx = np.round(max(nmax,np.abs(mx))+16)
# print "NMAX:", nmax
n = np.arange(1,np.real(nmax)+1)
nu = n + 0.5
sx = np.sqrt(0.5*np.pi*x)
px = sx*jv(nu,x)
p1x = np.append(np.sin(x), px[0:int(nmax)-1])
chx = -sx*yv(nu,x)
ch1x = np.append(np.cos(x), chx[0:int(nmax)-1])
gsx = px-(0+1j)*chx
gs1x = p1x-(0+1j)*ch1x
# B&H Equation 4.89
Dn = np.zeros(int(nmx),dtype=complex)
for i in range(int(nmx)-1,1,-1):
Dn[i-1] = (i/mx)-(1/(Dn[i]+i/mx))
D = Dn[1:int(nmax)+1] # Dn(mx), drop terms beyond nMax
da = D/m+n/x
db = m*D+n/x
an = (da*px-p1x)/(da*gsx-gs1x)
bn = (db*px-p1x)/(db*gsx-gs1x)
return an, bn
def make_rescaled_parameters(n_med,n_particle,r,wavelength):
# :param r: radius of the sphere
# :param wavelength: wavelength of illumination
# :param n_sph: complex refractive index of the sphere
# :param n_med: real refractive index of the dielectric medium
x= n_med * (2*np.pi/wavelength) * r
m = n_particle/n_med
return x,m
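# illustrative usage (hypothetical, approximate values for a ~40 nm gold sphere in
# water at 633 nm; the complex index is only indicative):
#   x, m = make_rescaled_parameters(n_med=1.33, n_particle=0.18+3.1j,
#                                   r=40e-9, wavelength=633e-9)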
def calculate_pi_tau(mu,n_max):
#calculates angle-dependent functions
#see Absorption and scattering by small particles, Bohren & Huffman, page 94
pi_n = np.zeros(n_max+1)
tau_n = np.zeros(n_max+1)
pi_n[1] = 1.0
tau_n[1] = mu*pi_n[1]
for n in range(2,n_max):
pi_n[n] = ((2.0*n-1)/(n-1))*mu*pi_n[n-1] - ((n)/(n-1))*pi_n[n-2]
tau_n[n] = n*mu*pi_n[n] - (n+1)*pi_n[n-1]
pi_n = np.asarray(pi_n[1:])
tau_n = np.asarray(tau_n[1:])
return pi_n, tau_n
def mie_S1_S2(m,x,mus,n_max):
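    # scattering amplitude functions (standard Mie series, cf. Bohren & Huffman):
    #   S1(mu) = sum_n (2n+1)/(n(n+1)) * (a_n*pi_n + b_n*tau_n)
    #   S2(mu) = sum_n (2n+1)/(n(n+1)) * (a_n*tau_n + b_n*pi_n)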
a,b = Mie_ab(m, x,n_max)
S1s = []
S2s = []
for mu in mus:
S1 = 0.0
S2 = 0.0
pi,tau = calculate_pi_tau(mu,n_max)
for n in range(n_max):
N = n+1
# print len(a),len(pi), len(b), len(tau)
S1 = S1+(float(2.0*N+1)/(N**2+N))*(a[n]*pi[n] + b[n]*tau[n])
S2 = S2+(float(2.0*N+1)/(N**2+N))*(b[n]*pi[n] + a[n]*tau[n])
S1s.append(S1)
S2s.append(S2)
return [S1s,S2s]
def get_refractive_index(target_wavelength, url):
#pull in globals
global WAVELENGTHS
global REFRACTIVE_INDEX
    if WAVELENGTHS is None or REFRACTIVE_INDEX is None:
        response = requests.get(url)
        # normalise the downloaded CSV text: unify line endings and use commas
        # as the only separator before splitting into rows
        content = response.text.replace('\r', '\n').replace('\n\n\n', '\n').replace('\t', ',').replace('\n\n', '\n')
        R = [v.split(",") for v in content.split("\n")]
R=R[1:]
wavelengths = []
ns = []
for row in R:
try:
w = float(row[0])
n = float(row[1])+1j*float(row[2])
wavelengths.append(w)
ns.append(n)
except:
pass
WAVELENGTHS = wavelengths
REFRACTIVE_INDEX = ns
print(target_wavelength)
print(WAVELENGTHS)
print(REFRACTIVE_INDEX)
return np.interp(target_wavelength,WAVELENGTHS,REFRACTIVE_INDEX)
def get_refractive_index_Au(target_wavelength):
url = "https://refractiveindex.info/data_csv.php?datafile=data/main/Au/Johnson.yml"
return get_refractive_index(target_wavelength,url=url)
def get_refractive_index_Ag(target_wavelength):
url = "https://refractiveindex.info/data_csv.php?datafile=data/main/Ag/Johnson.yml"
return get_refractive_index(target_wavelength,url=url)
def get_refractive_index_water(target_wavelength):
url = "https://refractiveindex.info/data_csv.php?datafile=data/main/H2O/Hale.yml"
return get_refractive_index(target_wavelength,url=url)
def calculate_scattering_cross_section(m,x,r,n_max):
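    # standard Mie result: C_sca = (2*pi/k^2) * sum_n (2n+1)*(|a_n|^2 + |b_n|^2),
    # with k = x/r the wavenumber in the medium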
k = x/r
a,b = Mie_ab(m, x,n_max)
a2 = np.abs(a)**2
b2 = np.abs(b)**2
# n = np.arange(1,n_max+1)
# 2n1 = 2.0*n+1.0
total = 0.0
for n in range(len(a2)):
N = n+1
total = total + (2*N+1)*(a2[n]+b2[n])
return ((2*np.pi)/k**2)*total
def calculate_extinction_cross_section(m,x,r,n_max):
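    # standard Mie result: C_ext = (2*pi/k^2) * sum_n (2n+1)*Re(a_n + b_n)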
k = x/r
a,b = Mie_ab(m, x, n_max)
a2,b2 = np.absolute(a)**2,np.absolute(b)**2
total = 0.0
for n in range(len(a)):
N = n+1
real_ab = a[n].real+b[n].real
# if real_ab < 0:
# print "a",a
# print "b",b
total = total + (2*N+1)*real_ab
return ((2*np.pi)/k**2)*total
def main3():
n_medium = 1.3325
theta = np.pi/2.0
rs = np.linspace(1e-9,500e-9,50)
rs = [20e-9,40e-9,]
wavelengths = np.linspace(400e-9,1000e-9,600)
for r in rs:
print("r",r)
Xs_sca = []
Xs_ext = []
for wavelength in wavelengths:
n_particle = get_refractive_index_Au(wavelength/1e-9)
print("N:",n_particle)
x,m = make_rescaled_parameters(n_med=n_medium,n_particle=n_particle,r=r,wavelength=wavelength)
print("X:{0},M:{1}".format(x,m))
n_max = 20
scatteringXc = calculate_scattering_cross_section(m,x,r,n_max)
extinctionXc = calculate_extinction_cross_section(m,x,r,n_max)
# [scatteringXc,extinctionXc,_,_] = small_mie(m,x)
Xs_sca.append(scatteringXc)
Xs_ext.append(extinctionXc)
fig, ax1 = plt.subplots(1,figsize=(8,8))
Xs_sca = np.asarray(Xs_sca)/(np.pi*r**2)
Xs_ext = np.asarray(Xs_ext)/(np.pi*r**2)
Xs_abs = Xs_ext - Xs_sca
# wavelengths = wavelengths/1e-9
ax1.plot(wavelengths/1e-9,Xs_sca, label="Scattering efficiency $Q_{sca} = \sigma_{sca}/\pi r^2$")
ax1.plot(wavelengths/1e-9,Xs_ext, label="Extinction efficiency $Q_{ext} = \sigma_{ext}/\pi r^2$")
ax1.plot(wavelengths/1e-9,Xs_abs, label="Absorption efficiency $Q_{abs} = \sigma_{abs}/\pi r^2$")
ext_max =np.max(Xs_ext)
lambda_max = wavelengths[np.where(np.abs(Xs_ext==ext_max))][0]
ax1.set_xlabel("Size[nm]")
ax1.set_ylabel("Amplitude")
ax1.legend()
plt.title("Radius [nm]: {0}, $\lambda$: {1}".format(r/1e-9, lambda_max/1e-9))
# plt.savefig("C:\Users\im354\Pictures\Mie\SEA\particle_{0}.png".format(r/1e-9))
plt.show()
def main2():
wavelength = 633.0e-9
n_particle = get_refractive_index_Au(wavelength/1e-9)
n_medium = 1.3325
theta = np.pi/2.0
rs = np.linspace(1e-9,5e-7,1000)
Is = []
Xs_sca = []
Xs_ext = []
Qext = []
Qsca = []
Qback = []
G = []
for r in rs:
x,m = make_rescaled_parameters(n_med=n_medium,n_particle=n_particle,r=r,wavelength=wavelength)
n_max = 100 #int(x + 4.05 * x**0.33333 + 2.0)+1
scatteringXc = calculate_scattering_cross_section(m,x,r,n_max)
extinctionXc = calculate_extinction_cross_section(m,x,r,n_max)
i = I(r,np.asarray([theta]),n_medium,n_particle,wavelength)
Xs_sca.append(scatteringXc)
Xs_ext.append(extinctionXc)
Is.append(i)
[qext, qsca, qback, g] = mie_scalar(m, x,n_max)
Qext.append(qext)
Qsca.append(qsca)
Qback.append(qback)
G.append(g)
fig, [ax1,ax2] = plt.subplots(2)
ax1.plot(rs/1e-9,Xs_sca/(np.pi*rs**2), label="Scattering efficiency $Q_{sca} = \sigma_{sca}/\pi r^2$")
ax1.plot(rs/1e-9,Xs_ext/(np.pi*rs**2), label="Extinction efficiency $Q_{ext} = \sigma_{ext}/\pi r^2$")
ax1.set_xlabel("Size[nm]")
ax1.set_ylabel("Amplitude")
ax2.plot(rs/1e-9,Qsca,label="Scattering")
ax2.plot(rs/1e-9,Qext,label="Extinction")
# ax2.plot(rs/1e-9,Is, label="Scattering intensity")
ax2.set_xlabel("Size[nm]")
ax2.set_ylabel("Amplitude")
ax1.legend()
ax2.legend()
plt.show()
def main():
wavelength = 633.0e-9
n_particle = get_refractive_index_Au(wavelength/1e-9)
n_medium = 1.3325
for r in np.linspace(1e-9,4e-7,50):
fig = plt.figure(figsize=(16,8))
ax1 = fig.add_subplot(121,projection="polar")
ax2 = fig.add_subplot(122)
x,m = make_rescaled_parameters(n_med=n_medium,n_particle=n_particle,r=r,wavelength=wavelength)
n_max = int(x + 4.05 * x**0.33333 + 2.0)+1
print("x:",x)
print("m:",m)
print("n_max", n_max)
theta = np.linspace(-np.pi,np.pi, 360)
mu = np.cos(theta)
[S1,S2] = mie_S1_S2(m,x,mu,n_max)
S11 = np.abs(S1)**2 + np.abs(S2)**2
S12 = np.abs(S1)**2 - np.abs(S2)**2
i_para = S11+S12
i_perp = S11-S12
i_total = i_para + i_perp
ax1.plot(theta,i_para,label="parallel")
ax1.plot(theta,i_perp,label="perp")
ax1.plot(theta,i_total,label="total")
ax2.plot(theta,i_para,label="parallel")
ax2.plot(theta,i_perp,label="perp")
ax2.plot(theta,i_total, label="total")
ax1.set_xlabel("Angle $\\theta$ [rad]")
ax1.set_ylabel("Amplitude")
ax2.set_xlabel("Angle $\\theta$ [rad]")
ax2.set_ylabel("Amplitude")
ax1.legend()
ax2.legend()
plt.title("Particle radius [nm]:{0},x:{1},\nm:{2}, n_max:{3}".format(r/1e-9,x,m,n_max))
# plt.show()
plt.savefig("C:\\Users\im354\Pictures\Mie\particle_{}.png".format(r/1e-9))
def scattering_cross_section(radius,wavelength):
n_particle = gold_refractive_index(required_wavelength=wavelength)
n_med = water_refractive_index(required_wavelength=wavelength)
x,m = make_rescaled_parameters(n_med=n_med,n_particle=n_particle,r=radius,wavelength=wavelength)
output = calculate_scattering_cross_section(m=np.asarray([m]),x=np.asarray([x]),r=np.asarray([radius]),n_max=40)
return output
def main4():
wavelength_range = np.asarray([1e-9*wl for wl in np.linspace(450,1000,550)])
radius_range = np.asarray([r*1e-9 for r in np.linspace(50,250,200)])
x,y= np.meshgrid(radius_range,wavelength_range,indexing="xy")
f = np.vectorize(scattering_cross_section)
z = f(x,y)
fig = plt.figure()
ax = fig.gca(projection='3d')
zmin = np.min(z)
surf = ax.plot_surface(x/1e-9, y/1e-9, z/zmin,cmap=cm.coolwarm)
plt.xlabel("Radius [nm]")
plt.ylabel("Wavelength [nm]")
plt.title("Normalized scattering cross section\n Normalization: z/z_min, z_min = {}".format(zmin))
ratio_low = scattering_cross_section(radius = 120e-9,wavelength=450e-9)/scattering_cross_section(radius = 100e-9,wavelength=450e-9)
ratio_high = scattering_cross_section(radius = 120e-9,wavelength=580e-9)/scattering_cross_section(radius = 100e-9,wavelength=580e-9)
print("RATIO LOW: ", ratio_low)
print("RATIO High: ", ratio_high)
plt.show()
if __name__ == "__main__":
    main4()
| gpl-3.0 |
mortcanty/SARDocker | src/enlml.py | 1 | 8467 | #!/usr/bin/env python
#******************************************************************************
# Name: enlml.py
# Purpose:
# Estimation of ENL for polSAR covariance images
# using ML method with full covariance matrix (quad, dual or single)
# Anfinsen et al. (2009) IEEE TGARS 47(11), 3795-3809
# Takes input from covariance matrix format images generated
# from polsaringest.py
# Usage:
# python enl.py [OPTIONS] filename
#
# MIT License
#
# Copyright (c) 2016 Mort Canty
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import auxil.auxil as auxil
import auxil.lookup as lookup
import os, sys, getopt, time
import numpy as np
import matplotlib.pyplot as plt
from osgeo import gdal
from osgeo.gdalconst import GA_ReadOnly, GDT_Float32
def get_windex(j,cols):
# first window for row j
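    # e.g. j=3, cols=10 gives the flattened indices of the 7x7 neighbourhood
    # centred on pixel (3,3): [0..6, 10..16, 20..26, ..., 60..66]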
windex = np.zeros(49,dtype=int)
six = np.array([0,1,2,3,4,5,6])
windex[0:7] = (j-3)*cols + six
windex[7:14] = (j-2)*cols + six
windex[14:21] = (j-1)*cols + six
windex[21:28] = (j)*cols + six
windex[28:35] = (j+1)*cols + six
windex[35:42] = (j+2)*cols + six
windex[42:49] = (j+3)*cols + six
return windex
def main():
usage = '''
Usage:
------------------------------------------------
Calculate the equivalent number of looks for
a polarimetric matrix image
python %s [OPTIONS] filename
Options:
-h this help
-n suppress graphics output
-d spatial subset list e.g. -d [0,0,400,400]
An ENL image will be written to the same directory with '_enl' appended.
------------------------------------------------''' %sys.argv[0]
options,args = getopt.getopt(sys.argv[1:],'hnd:')
dims = None
graphics = True
for option, value in options:
if option == '-h':
print usage
return
elif option == '-d':
dims = eval(value)
elif option == '-n':
graphics = False
if len(args) != 1:
print 'Incorrect number of arguments'
print usage
sys.exit(1)
infile = args[0]
path = os.path.abspath(infile)
dirn = os.path.dirname(path)
basename = os.path.basename(infile)
root, ext = os.path.splitext(basename)
outfile = dirn + '/' + root + '_enl' + ext
gdal.AllRegister()
inDataset = gdal.Open(infile,GA_ReadOnly)
cols = inDataset.RasterXSize
rows = inDataset.RasterYSize
bands = inDataset.RasterCount
if dims == None:
dims = [0,0,cols,rows]
x0,y0,cols,rows = dims
print '========================='
print ' ENL Estimation'
print '========================='
print time.asctime()
print 'infile: %s'%infile
if bands == 9:
print 'Quad polarimetry'
# T11 (k)
band = inDataset.GetRasterBand(1)
k = np.nan_to_num(band.ReadAsArray(x0,y0,cols,rows)).ravel()
# T12 (a)
band = inDataset.GetRasterBand(2)
a = np.nan_to_num(band.ReadAsArray(x0,y0,cols,rows)).ravel()
band = inDataset.GetRasterBand(3)
im = np.nan_to_num(band.ReadAsArray(x0,y0,cols,rows)).ravel()
a = a + 1j*im
# T13 (rho)
band = inDataset.GetRasterBand(4)
rho = np.nan_to_num(band.ReadAsArray(x0,y0,cols,rows)).ravel()
band = inDataset.GetRasterBand(5)
im = np.nan_to_num(band.ReadAsArray(x0,y0,cols,rows)).ravel()
rho = rho + 1j*im
# T22 (xsi)
band = inDataset.GetRasterBand(6)
xsi = np.nan_to_num(band.ReadAsArray(x0,y0,cols,rows)).ravel()
# T23 (b)
band = inDataset.GetRasterBand(7)
b = np.nan_to_num(band.ReadAsArray(x0,y0,cols,rows)).ravel()
band = inDataset.GetRasterBand(8)
im = np.nan_to_num(band.ReadAsArray(x0,y0,cols,rows)).ravel()
b = b + 1j*im
# T33 (zeta)
band = inDataset.GetRasterBand(9)
zeta = np.nan_to_num(band.ReadAsArray(x0,y0,cols,rows)).ravel()
det = k*xsi*zeta + 2*np.real(a*b*np.conj(rho)) - xsi*(abs(rho)**2) - k*(abs(b)**2) - zeta*(abs(a)**2)
d = 2
elif bands == 4:
print 'Dual polarimetry'
# C11 (k)
band = inDataset.GetRasterBand(1)
k = np.nan_to_num(band.ReadAsArray(x0,y0,cols,rows)).ravel()
# C12 (a)
band = inDataset.GetRasterBand(2)
a = np.nan_to_num(band.ReadAsArray(x0,y0,cols,rows)).ravel()
band = inDataset.GetRasterBand(3)
im = np.nan_to_num(band.ReadAsArray(x0,y0,cols,rows)).ravel()
a = a + 1j*im
# C22 (xsi)
band = inDataset.GetRasterBand(4)
xsi = np.nan_to_num(band.ReadAsArray(x0,y0,cols,rows)).ravel()
det = k*xsi - abs(a)**2
d = 1
elif bands == 1:
print 'Single polarimetry'
# C11 (k)
band = inDataset.GetRasterBand(1)
k = band.ReadAsArray(x0,y0,cols,rows).ravel()
det = k
d = 0
enl_ml = np.zeros((rows,cols), dtype= np.float32)
lu = lookup.table()
print 'filtering...'
print 'row: ',
sys.stdout.flush()
start = time.time()
for i in range(3,rows-3):
if i%100 == 0:
print '%i '%i,
sys.stdout.flush()
windex = get_windex(i,cols)
for j in range(3,cols-3):
detC = det[windex]
if np.min(detC) > 0.0:
avlogdetC = np.sum(np.log(detC))/49
if bands == 9:
k1 = np.sum(k[windex])/49
a1 = np.sum(a[windex])/49
rho1 = np.sum(rho[windex])/49
xsi1 = np.sum(xsi[windex])/49
b1 = np.sum(b[windex])/49
zeta1 = np.sum(zeta[windex])/49
detavC = k1*xsi1*zeta1 + 2*np.real(a1*b1*np.conj(rho1)) - xsi1*(np.abs(rho1)**2) - k1*(np.abs(b1)**2) - zeta1*(np.abs(a1)**2)
elif bands == 4:
k1 = np.sum(k[windex])/49
xsi1 = np.sum(xsi[windex])/49
a1 = np.sum(a[windex])/49
detavC = k1*xsi1 - np.abs(a1)**2
else:
detavC = np.sum(k[windex])/49
logdetavC = np.log(detavC)
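                # ML estimation (Anfinsen et al. 2009): 'arr' is the ML equation
                # <log|C|> - log|<C>| + (L-dependent term tabulated in lu[:,d]),
                # evaluated over a grid of candidate L values; the sign change
                # locates its root, and index/10 recovers L (the table is assumed
                # to be sampled in steps of 0.1 in L)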
arr = avlogdetC - logdetavC + lu[:,d]
ell = np.where(arr*np.roll(arr,1)<0)[0]
                if ell.size > 0:
enl_ml[i,j] = float(ell[-1])/10.0
windex += 1
driver = inDataset.GetDriver()
outDataset = driver.Create(outfile,cols,rows,1,GDT_Float32)
projection = inDataset.GetProjection()
geotransform = inDataset.GetGeoTransform()
if geotransform is not None:
gt = list(geotransform)
gt[0] = gt[0] + x0*gt[1]
gt[3] = gt[3] + y0*gt[5]
outDataset.SetGeoTransform(tuple(gt))
if projection is not None:
outDataset.SetProjection(projection)
outBand = outDataset.GetRasterBand(1)
outBand.WriteArray(enl_ml,0,0)
outBand.FlushCache()
outDataset = None
ya,xa = np.histogram(enl_ml,bins=500)
ya[0] = 0
if graphics:
plt.plot(xa[0:-1],ya)
plt.title('Histogram ENL for %s'%infile)
plt.show()
print 'ENL image written to: %s'%outfile
print 'elapsed time: '+str(time.time()-start)
if __name__ == '__main__':
main()
| mit |
themrmax/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 5 | 12516 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
However, note that this transformer will only do a binary one-hot encoding
when feature values are of type string. If categorical features are
represented as numeric values such as int, the DictVectorizer can be
followed by OneHotEncoder to complete binary one-hot encoding.
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator : string, optional
Separator string used when constructing new features for one-hot
coding.
sparse : boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort : boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = np.frombuffer(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
| bsd-3-clause |
mwest1066/PrairieLearn | exampleCourse/questions/demoRandomPlot/server.py | 5 | 1490 | import matplotlib.pyplot as plt
import io
import random
import numpy
def file(data):
if data['filename']=='figure.png':
# Create the figure
x = numpy.linspace(-5,5)
f = data['params']['m']*x+data['params']['b']
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.plot(x,f)
plt.xticks([x for x in range(-5,6,1)], fontsize=14)
fmin = int(numpy.floor(min(f))-1)
fmax = int(numpy.ceil(max(f))+1)
if fmax-fmin>12:
plt.yticks([y for y in range(fmin,fmax+4,4)], fontsize=14)
            ax.set_yticks([y for y in range(fmin,fmax+1,1)], minor=True)
            ax.yaxis.grid(True, 'minor')
else:
plt.yticks([y for y in range(fmin,fmax+1,1)], fontsize=14)
plt.grid()
plt.xlabel('$x$', fontsize=18)
plt.ylabel('$f(x)$', fontsize=18)
plt.autoscale(enable=True, tight=True)
fig.set_tight_layout(True)
# Save the figure and return it as a buffer
buf = io.BytesIO()
plt.savefig(buf,format='png')
return buf
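# generate() builds one random question variant: it stores the randomised slope,
# intercept and evaluation point in data['params'] (which file() above reads to
# draw the plot) and the expected value f(x) in data['correct_answers']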
def generate(data):
# Pick a non-zero slope
while(True):
m = random.randint(-2,2)
        if m != 0:
break
# Pick a y-intercept
b = random.randint(-3,3)
# Pick x
x = random.randint(-5,5)
# Find f(x)
f = m*x+b
data['params']['m'] = m
data['params']['b'] = b
data['params']['x'] = x
data['correct_answers']['f'] = f
| agpl-3.0 |
mtasende/Machine-Learning-Nanodegree-Capstone | recommender/agent_predictor.py | 1 | 9382 | import numpy as np
import random
import pandas as pd
import predictor.feature_extraction as fe
import utils.preprocessing as pp
BASE_DAYS = 112
class AgentPredictor(object):
""" Reinforcement learner. Will use Q learning, dyna Q, and some custom additions.
(Initially based on the template for the Machine Learning for Trading course, by Tucker Balch)"""
def __init__(self,
num_states,
num_actions,
alpha=0.2,
gamma=0.9,
random_actions_rate=0.9,
random_actions_decrease=0.999,
dyna_iterations=0,
verbose=False,
name='NN',
estimator_close=None,
estimator_volume=None,
env=None,
prediction_window=BASE_DAYS):
self.verbose = verbose
# Dimensions of the problem
self.num_states = num_states
self.num_actions = num_actions
# Parameters
self.alpha = alpha
self.gamma = gamma
self.random_actions_rate = random_actions_rate
self.random_actions_decrease = random_actions_decrease
self.dyna_iterations = dyna_iterations
# Initialization
self.s = 0
self.a = 0
self.Q = 1.0 - 2*np.random.rand(num_states, num_actions)
# QExplore keeps track of how many times the (s,a) pair was visited (with Q update)
self.QExplore = np.ones((num_states, num_actions))
# T and R for the hallucination models
# Probabilities of transition
self.T = 0.00001*np.ones((num_states, num_actions, num_states))
# Expected immediate reward
self.R = 1.0 - 2*np.random.rand(num_states, num_actions)
self.name = name
# For the dyna predictor
self.estimator_close = estimator_close
self.estimator_volume = estimator_volume
self.history_df = None
self.env = env
self.prediction_window = prediction_window
def random_action(self, s, actions=None):
"""
This function chooses a random action, but not uniformly.
It addresses the problem that a totally random exploration is very slow.
So it keeps track of the explored (state,action) pairs and looks for new things to do.
:param s: the current state
:param actions: A list of possible actions
:return: action
"""
if actions is None:
actions = range(self.num_actions)
probas = 1/self.QExplore[s, actions]
# Normalize
probas /= np.sum(probas)
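        # e.g. visit counts [1, 4] -> raw weights [1.0, 0.25] -> probas [0.8, 0.2],
        # so less-explored actions are sampled more often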
action = np.random.choice(actions, p=probas)
# action = random.randint(0, self.num_actions-1)
return action
def choose_action(self, s):
"""
Chooses an action. With "random_actions_rate" probability it returns a random action.
If it doesn't, then it returns the best option from the Q table.
        It doesn't update the Q table or the random_actions_rate variable.
:param s: is the current state
:return: action
"""
do_explore = (random.random() < self.random_actions_rate)
if do_explore:
action = self.random_action(s)
else:
actions = range(self.num_actions)
max_q = np.max(self.Q[s])
# Now, get all the actions that have Q == maxQ
optimal_actions = []
for action_temp in actions:
if self.Q[s, action_temp] == max_q:
optimal_actions.append(action_temp)
# Choose one of the optimal choices, at random
# (I could use the QExplore to choose also...)
action = random.choice(optimal_actions)
return action
def hallucinate(self, s):
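        """Dyna-style planning: roll the close/volume predictors forward to extend
        the real price history, run `dyna_iterations` simulated steps in a cloned
        environment, and apply the usual Q update to each simulated transition."""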
# Initialize the hallucinating states and actions (the real ones shouldn't change)
# Should hallucinations be more random?? To test later...
        # h_random_actions_rate = self.random_actions_rate
h_s = s
# if self.history_df is not None:
h_history_df = self.history_df.copy() # Initially, it is filled with the real values
h_history_df = h_history_df.append(self.predict_steps(h_history_df, self.dyna_iterations))
stacked_h_history_df = pd.DataFrame(h_history_df.stack(), columns=[self.env.symbol])
internal_env = self.env.clone_with_new_data(stacked_h_history_df)
for i in range(self.dyna_iterations):
# Get new action
h_a = self.choose_action(h_s)
# Simulate transitions and rewards
'''print(i)
print(internal_env.portfolio.current_date)
print(h_history_df.shape)
print(internal_env.data_df.shape)
print(internal_env.data_df)
print('-'*120)'''
h_r, h_s_prime = internal_env.get_consequences_from_fraction_index(h_a)
# Update Q
# Get the best Q for h_s'
max_q_prime = np.max(self.Q[h_s_prime])
# Now use the formula to update Q
self.Q[h_s, h_a] = (1-self.alpha)*self.Q[h_s, h_a] + \
self.alpha*(h_r + self.gamma * max_q_prime)
# Update the state
h_s = h_s_prime
def play_learned_response(self, new_state):
"""
This function does the same as "play", but without updating the Q table. Given a new state, it chooses an action
according to the best learned policy, so far.
It does update the state.
:param new_state: The resulting state for the previous action, or the state that was externally set.
:returns: The chosen action
"""
# Choose an action
action = self.choose_action(new_state)
# Update the state and action
self.s = new_state
self.a = action
if self.verbose:
print("s =", new_state, "a =", action)
return action
def play(self, reward, new_state):
"""
Given a new state, and a reward for the previous action,
chooses an action, updating the Q table in the process.
:param new_state: The resulting state for the previous action.
:param reward: The reward for the previous action.
:returns: The chosen action.
"""
# Update Q ------------------------------------------
# Get the best Q for s'
maxQprime = np.max(self.Q[new_state])
# Now use the formula to update Q
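        # Q(s, a) <- (1 - alpha) * Q(s, a) + alpha * (reward + gamma * max_a' Q(s', a'))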
self.Q[self.s, self.a] = (1-self.alpha)*self.Q[self.s, self.a] + \
self.alpha*(reward + self.gamma * maxQprime)
# Hallucinate some experience...
# Update T
self.T[self.s, self.a, new_state] += 1
# Update R
self.R[self.s, self.a] = (1-self.alpha)*self.R[self.s, self.a] + self.alpha * reward
# Update the historical data
self.set_history()
# Hallucinate!
self.hallucinate(new_state)
# End of Update Q -----------------------------------
# Choose an action and then update random_action_rate (!)
action = self.choose_action(new_state)
self.random_actions_rate *= self.random_actions_decrease
# Update the state and action
self.s = new_state
self.a = action
# Update QExplore
self.QExplore[new_state, action] += 1.0
# Print some debugging messages
if self.verbose:
print("s = {} a = {} reward = {}".format(new_state, action, reward))
return action
@staticmethod
def generate_samples(data_df):
start_date = data_df.index[0]
close_sample = pd.DataFrame(data_df['Close'].values, columns=[start_date]).T
close_sample = close_sample / close_sample.iloc[0, 0]
volume_sample = pd.DataFrame(data_df['Volume'].values, columns=[start_date]).T
volume_sample = volume_sample / volume_sample.mean(axis=1)[0]
return close_sample, volume_sample
def predict_one_step(self, h_history_df):
close_sample, volume_sample = self.generate_samples(h_history_df)
estimated_close = self.estimator_close.predict(close_sample).iloc[0, 0] * h_history_df['Close'].iloc[0]
estimated_volume = self.estimator_volume.predict(volume_sample).iloc[0, 0] * \
h_history_df['Volume'].mean()
predicted_date = fe.add_market_days(h_history_df.index[-1], 1)
h_history_df = h_history_df.drop(h_history_df.index[0])
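        # slide the window: the oldest row was dropped above, the predicted row is appended below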
h_history_df.loc[predicted_date, :] = {'Close': estimated_close, 'Volume': estimated_volume}
return h_history_df
def predict_steps(self, h_history_df, n_steps):
predicted_df = pd.DataFrame()
for i in range(n_steps):
h_history_df = self.predict_one_step(h_history_df.copy())
predicted_df = predicted_df.append(h_history_df.iloc[-1])
return predicted_df
def set_history(self):
data_in_df = self.env.data_df[self.env.symbol].unstack()
current_index = data_in_df.index.get_loc(self.env.portfolio.current_date)
window = self.prediction_window
self.history_df = pp.fill_missing(data_in_df[['Close', 'Volume']].
iloc[current_index-window+1:current_index+1])
def __str__(self):
return self.name
__repr__ = __str__
| mit |
EmreAtes/spack | var/spack/repos/builtin/packages/pandaseq/package.py | 3 | 2109 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Pandaseq(AutotoolsPackage):
"""PANDASEQ is a program to align Illumina reads, optionally with PCR
primers embedded in the sequence, and reconstruct an overlapping
sequence."""
homepage = "https://github.com/neufeld/pandaseq"
url = "https://github.com/neufeld/pandaseq/archive/v2.11.tar.gz"
version('2.11', 'a8ae0e938bac592fc07dfa668147d80b')
version('2.10', '5b5b04c9b693a999f10a9c9bd643f068')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type=('build', 'link'))
depends_on('m4', type='build')
depends_on('zlib', type='build')
depends_on('pkg-config', type='build')
depends_on('bzip2', type='link')
def autoreconf(self, spec, prefix):
bash = which('bash')
bash('./autogen.sh')
| lgpl-2.1 |
rjw57/openni-skeleton-export | examples/normalshade.py | 1 | 3466 | #!/usr/bin/env python
#
# An example script for shading user depth silhouettes with some simple "fake"
# shading.
"""
Usage:
normalshade.py (-h | --help)
normalshade.py [--verbose] <logfile> <frame-prefix>
Options:
-h, --help Show a brief usage summary.
-v, --verbose Increase verbosity of output.
"""
import logging
import docopt
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import scipy.ndimage as ndi
import tables
LOG = logging.getLogger()
def main():
"""Main entry point."""
opts = docopt.docopt(__doc__)
logging.basicConfig(
level=logging.INFO if opts['--verbose'] else logging.WARN
)
LOG.info('Opening log file {0}'.format(opts['<logfile>']))
log_root = tables.open_file(opts['<logfile>']).root
for frame in log_root.frames:
frame_idx = frame._v_attrs.idx
if frame_idx % 30 == 0:
LOG.info('Processing frame {0}...'.format(frame_idx))
# Copy depth and label image to numpy array
depth, label = frame.depth[:], frame.label[:]
# Copy points to numpy array
points = frame.points[:]
# Create NxMx3 "point map"
point_map = np.zeros(depth.shape + (3,))
point_map[depth != 0, :] = points
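        # assumes frame.points lists the valid 3-D points in the same order as
        # the pixels where depth != 0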
# Overall normal image
normals = np.zeros(depth.shape + (3,))
# Extract each user from the depth image
for user in frame.users:
user_mask = label == user._v_attrs.idx
user_points = np.where(np.dstack((user_mask,)*3), point_map, 0)
user_normals = compute_normals(user_points, user_mask)
normals = np.where(np.dstack((user_mask,) * 3), user_normals, normals)
# Compute a normal image
light_im = 0.5 + 0.5 * normals
Image.fromarray(np.clip(255*light_im, 0, 255).astype(np.uint8)).save(
'{0}-{1:05d}.png'.format(opts['<frame-prefix>'], frame_idx))
def compute_normals(points, mask):
"""
    Given an NxMx3 array of points, compute an NxMx3 array of surface normals.
mask should be a NxM array of booleans which are true iff the corresponding
pixels in points are valid.
"""
# Mask invalid points
points = np.copy(points)
for i in range(3):
points[np.logical_not(mask), i] = points[..., i].min()
# Use a dilation filter to "grow" each point
grow_points = np.copy(points)
for i in range(4):
grow_points = np.where(np.dstack((mask,)*3),
grow_points,
ndi.filters.maximum_filter(grow_points, size=(3,3,1))
)
# Blur point image
for i in range(3):
grow_points[...,i] = ndi.filters.gaussian_filter(grow_points[...,i], 1)
# Compute row-wise and col-wise gradients
dxdu, dxdv = np.gradient(grow_points[...,0])
dydu, dydv = np.gradient(grow_points[...,1])
dzdu, dzdv = np.gradient(grow_points[...,2])
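    # np.gradient returns the derivative along axis 0 (image rows, u) first,
    # then along axis 1 (image columns, v)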
# Compute 3d tangent vectors
row_tangents = np.dstack((dxdu, dydu, dzdu))
col_tangents = -np.dstack((dxdv, dydv, dzdv)) # NB: -ve since v points along -ve y
# Take cross product to compute normal
normals = np.cross(row_tangents, col_tangents)
# Normalise normals
norm_lens = np.sqrt(np.sum(normals ** 2, axis=-1))
norm_lens[norm_lens == 0] = 1 # Don't touch zero-length normals
for i in range(3):
normals[...,i] /= norm_lens
normals[...,i] *= mask
return normals
if __name__ == '__main__':
main()
| apache-2.0 |
pablocarderam/genetargeter | gRNAScores/Rule_Set_2_scoring_v1/analysis/models/baselines.py | 1 | 3429 | from __future__ import print_function
from builtins import range
import numpy as np
import sklearn
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
import sklearn.linear_model
import sklearn.preprocessing
import sklearn.cross_validation
import sklearn.metrics
def mean_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options):
return np.ones((test.sum(), 1))*y[train].mean(), None
def random_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options):
return np.random.randn(test.sum(), 1), None
def doench_on_fold(feature_sets, train, test, y, y_all, X, dim, dimsum, learn_options):
auto_class_weight = None # 'auto'/None
verbose = False
penalty = [0.005*pow(1.15, x) for x in range(0, 45)] # ian's code: tvals = [0.005*pow(1.15,x) for x in range(0,45)]
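    # Per inner CV split: an L1-penalized LinearSVC selects features, an
    # L2-penalized LogisticRegression is refit on the surviving features, and
    # held-out AUC picks the best L1 penalty C.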
y_bin = y_all[learn_options['binary target name']].values[:, None]
label_encoder = sklearn.preprocessing.LabelEncoder()
label_encoder.fit(y_all['Target gene'].values[train])
gene_classes = label_encoder.transform(y_all['Target gene'].values[train])
cv = sklearn.cross_validation.StratifiedKFold(gene_classes, n_folds=10, shuffle=True)
best_penalty = None
cv_results = np.zeros((10, len(penalty)))
for j, split in enumerate(cv):
train_inner, test_inner = split
for i, c in enumerate(penalty):
# fit an L1-penalized SVM classifier
clf = LinearSVC(penalty='l1', C=c, dual=False, class_weight=auto_class_weight)
clf.fit(X[train][train_inner], y_bin[train][train_inner].flatten())
# pass features with non-zero coeff to Logistic with l2 penalty (original code?)
non_zero_coeff = (clf.coef_ != 0.0)
            if not np.any(non_zero_coeff):
                # if all coefficients are zero, turn one on so the logistic step can run
                non_zero_coeff[0, 0] = True
clf = LogisticRegression(penalty='l2', class_weight=auto_class_weight)
clf.fit(X[train][train_inner][:, non_zero_coeff.flatten()], y[train][train_inner].flatten())
y_test = clf.predict_proba(X[train][test_inner][:, non_zero_coeff.flatten()])[:, 1]
fpr, tpr, _ = sklearn.metrics.roc_curve(y_bin[train][test_inner], y_test)
            assert not np.any(np.isnan(fpr)), "found nan fpr"
            assert not np.any(np.isnan(tpr)), "found nan tpr"
roc_auc = sklearn.metrics.auc(fpr, tpr)
if verbose:
print(j, i, roc_auc)
cv_results[j][i] = roc_auc
best_penalty = penalty[np.argmax(np.mean(cv_results, axis=0))]
print("best AUC for penalty: ", np.median(cv_results, axis=0))
clf = LinearSVC(penalty='l1', C=best_penalty, dual=False, class_weight=auto_class_weight)
clf.fit(X[train], y_bin[train].flatten())
non_zero_coeff = (clf.coef_ != 0.0)
clf = LogisticRegression(penalty='l2', class_weight=auto_class_weight)
clf.fit(X[train][:, non_zero_coeff.flatten()], y[train].flatten())
y_pred = clf.predict_proba(X[test][:, non_zero_coeff.flatten()])[:, 1:2]
return y_pred, clf
def sgrna_from_doench_on_fold(feature_sets, train, test, y, y_all, X, dim, dimsum, learn_options):
assert len(list(feature_sets.keys())) == 1, "should only use sgRNA Score here"
assert list(feature_sets.keys())[0] == "sgRNA Score"
y_pred = X[test][:, 0]
return y_pred, None
| mit |
sealhuang/brainCodingToolbox | braincode/vim2/rfencoding.py | 3 | 27156 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import numpy as np
import tables
from scipy import ndimage
from scipy.misc import imsave
import scipy.optimize as opt
from sklearn.cross_decomposition import PLSCanonical
from sklearn.linear_model import LassoCV
from braincode.util import configParser
from braincode.math import parallel_corr2_coef, corr2_coef, ridge
from braincode.math import get_pls_components, rcca
from braincode.math import LinearRegression
from braincode.math.norm import zero_one_norm, zscore
from braincode.pipeline import retinotopy
from braincode.pipeline.base import random_cross_modal_corr
from braincode.vim2 import util as vutil
def check_path(dir_path):
"""Check whether the directory does exist, if not, create it."""
if not os.path.exists(dir_path):
os.mkdir(dir_path, 0755)
def retinotopic_mapping(corr_file, data_dir, vxl_idx=None, figout=False):
"""Make the retinotopic mapping using activation map from CNN."""
if figout:
fig_dir = os.path.join(data_dir, 'fig')
check_path(fig_dir)
# load the cross-correlation matrix from file
corr_mtx = np.load(corr_file, mmap_mode='r')
# set voxel index
if not isinstance(vxl_idx, np.ndarray):
vxl_idx = np.arange(corr_mtx.shape[0])
elif len(vxl_idx) != corr_mtx.shape[0]:
print 'mismatch on voxel number!'
return
else:
print 'voxel index loaded.'
img_size = 55.0
pos_mtx = np.zeros((73728, 2))
pos_mtx[:] = np.nan
for i in range(len(vxl_idx)):
print 'Iter %s of %s' %(i+1, len(vxl_idx)),
tmp = corr_mtx[i, :]
tmp = np.nan_to_num(np.array(tmp))
# significant threshold for one-tail test
tmp[tmp <= 0.019257] = 0
if np.sum(tmp):
mmtx = tmp.reshape(55, 55)
#tmp = tmp.reshape(96, 27, 27)
#mmtx = np.max(tmp, axis=0)
print mmtx.min(), mmtx.max()
if figout:
fig_file = os.path.join(fig_dir, 'v'+str(vxl_idx[i])+'.png')
imsave(fig_file, mmtx)
# get indices of n maximum values
max_n = 20
row_idx, col_idx = np.unravel_index(
np.argsort(mmtx.ravel())[-1*max_n:],
mmtx.shape)
nmtx = np.zeros(mmtx.shape)
nmtx[row_idx, col_idx] = mmtx[row_idx, col_idx]
# center of mass
x, y = ndimage.measurements.center_of_mass(nmtx)
pos_mtx[vxl_idx[i], :] = [x, y]
else:
print ' '
#receptive_field_file = os.path.join(data_dir, 'receptive_field_pos.npy')
#np.save(receptive_field_file, pos_mtx)
#pos_mtx = np.load(receptive_field_file)
# generate retinotopic mapping
base_name = 'train_max' + str(max_n)
prf2visual_angle(pos_mtx, img_size, data_dir, base_name)
def prf2visual_angle(prf_mtx, img_size, out_dir, base_name):
"""Generate retinotopic mapping based on voxels' pRF parameters.
    `prf_mtx` is a #voxel x pRF-feature matrix. The pRF features can be 2 columns
    (row, col) in image coordinates, or 3 columns with the third giving the pRF size.
"""
feature_size = prf_mtx.shape[1]
pos_mtx = prf_mtx[:, :2]
# eccentricity
ecc = retinotopy.coord2ecc(pos_mtx, img_size, 20)
vol = ecc.reshape(18, 64, 64)
vutil.save2nifti(vol, os.path.join(out_dir, base_name+'_ecc.nii.gz'))
# angle
angle = retinotopy.coord2angle(pos_mtx, img_size)
vol = angle.reshape(18, 64, 64)
vutil.save2nifti(vol, os.path.join(out_dir, base_name+'_angle.nii.gz'))
# pRF size
if feature_size > 2:
size_angle = retinotopy.get_prf_size(prf_mtx, 55, 20)
vol = size_angle.reshape(18, 64, 64)
vutil.save2nifti(vol, os.path.join(out_dir, base_name+'_size.nii.gz'))
def visual_prf(corr_mtx, vxl_idx, prf_dir):
"""pRF visualization."""
check_path(prf_dir)
prf = np.zeros_like(corr_mtx)
for i in range(len(vxl_idx)):
orig_mtx = corr_mtx[i, :].reshape(55, 55)
orig_file = os.path.join(prf_dir, 'v'+str(vxl_idx[i])+'_orig.png')
imsave(orig_file, orig_mtx)
prf_mtx = orig_mtx.copy()
prf_mtx[prf_mtx<prf_mtx.max()*0.8] = 0
prf_file = os.path.join(prf_dir, 'v'+str(vxl_idx[i])+'_prf.png')
imsave(prf_file, prf_mtx)
prf[i, :] = prf_mtx.flatten()
np.save(os.path.join(prf_dir, 'prf.npy'), prf)
def get_roi_idx(fmri_table, vxl_idx):
"""Get ROI label for each voxel."""
rois = ['v1lh', 'v1rh', 'v2lh', 'v2rh', 'v3lh', 'v3rh', 'v3alh', 'v3arh',
'v3blh', 'v3brh', 'v4lh', 'v4rh', 'MTlh', 'MTrh']
roi_dict = {}
for roi in rois:
roi_mask = fmri_table.get_node('/roi/%s'%(roi))[:].flatten()
roi_idx = np.nonzero(roi_mask==1)[0]
roi_idx = np.intersect1d(roi_idx, vxl_idx)
if roi_idx.sum():
roi_ptr = np.array([np.where(vxl_idx==roi_idx[i])[0][0]
for i in range(len(roi_idx))])
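            # roi_ptr holds positions within vxl_idx (row numbers of the masked
            # data), not raw whole-brain voxel indices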
roi_dict[roi] = roi_ptr
return roi_dict
def roi_info(corr_mtx, wt_mtx, fmri_table, mask_idx, out_dir):
"""Get ROI info."""
roi_list = ['v1lh', 'v1rh', 'v2lh', 'v2rh', 'v3lh', 'v3rh',
'v3alh', 'v3arh', 'v3blh', 'v3brh', 'v4lh', 'v4rh',
'MTlh', 'MTrh', 'MTplh', 'MTprh']
fingerprints = np.zeros((wt_mtx.shape[2], len(roi_list)))
for ridx in range(len(roi_list)):
roi_mask = fmri_table.get_node('/roi/%s'%(roi_list[ridx]))[:].flatten()
roi_idx = np.nonzero(roi_mask==1)[0]
roi_idx = np.intersect1d(roi_idx, mask_idx)
roi_ptr = np.array([np.where(mask_idx==roi_idx[i])[0][0]
for i in range(len(roi_idx))])
#-- plot pRF for each voxel
roi_dir = os.path.join(out_dir, roi_list[ridx])
os.system('mkdir %s'%(roi_dir))
for idx in roi_ptr:
tmp = corr_mtx[:, idx]
if np.sum(tmp):
tmp = tmp.reshape(13, 13)
vutil.save_imshow(tmp, os.path.join(roi_dir,
'%s.png'%(mask_idx[idx])))
else:
print 'Drop %s'%(idx)
#-- get feature response figure print
ele_num = 0
fp = np.zeros((fingerprints.shape[0]))
for idx in roi_ptr:
tmp = corr_mtx[:, idx]
# conv1+optical : 0.17419
# norm1 : 0.15906
# norm2 : 0.14636
# conv3 : 0.14502
f = tmp>=0.14502
if f.sum():
ele_num += f.sum()
fp += np.sum(wt_mtx[f, idx, :], axis=0)
fp /= ele_num
fingerprints[:, ridx] = fp
#-- plot fingerprint for each roi
#for i in range(len(roi_list)):
# plt.bar(np.arange(96), fingerprints[:96, i], 0.35)
# plt.savefig('%s.png'%(roi_list[i]))
# plt.close()
np.save(os.path.join(out_dir, 'roi_fingerprints.npy'), fingerprints)
if __name__ == '__main__':
"""Main function."""
# config parser
cf = configParser.Config('config')
root_dir = cf.get('base', 'path')
feat_dir = os.path.join(root_dir, 'sfeatures')
db_dir = os.path.join(root_dir, 'subjects')
# phrase 'test': analyses were only conducted within lV1 for code test
# phrase 'work': for real analyses
phrase = 'test'
# subj config
subj_id = 1
subj_dir = os.path.join(db_dir, 'vS%s'%(subj_id))
#-- load fmri data
fmri_file = os.path.join(subj_dir, 'VoxelResponses.mat')
tf = tables.open_file(fmri_file)
#tf.list_nodes
#-- roi mat to nii
#roi_file = os.path.join(subj_dir, 'S%s_small_roi.nii.gz'%(subj_id))
#vutil.roi2nifti(tf, roi_file, mode='small')
#-- get mean fmri responses
#dataset = 'rt'
#mean_file = os.path.join(subj_dir, 'S%s_mean_%s.nii.gz'%(subj_id, dataset))
#vutil.gen_mean_vol(tf, dataset, mean_file)
#-- create mask
train_fmri_ts = tf.get_node('/rt')[:]
# data.shape = (73728, 7200)
# get non-nan voxel indexs
fmri_s = train_fmri_ts.sum(axis=1)
non_nan_idx = np.nonzero(np.logical_not(np.isnan(fmri_s)))[0]
if phrase=='test':
lv1_mask = tf.get_node('/roi/v1lh')[:].flatten()
vxl_idx = np.nonzero(lv1_mask==1)[0]
# for vS1, lV1 contains 490 non-NaN voxels
vxl_idx = np.intersect1d(vxl_idx, non_nan_idx)
else:
full_mask_file = os.path.join(subj_dir, 'S%s_mask.nii.gz'%(subj_id))
full_mask = vutil.data_swap(full_mask_file).flatten()
full_vxl_idx = np.nonzero(full_mask==1)[0]
vxl_idx = np.intersect1d(full_vxl_idx, non_nan_idx)
#np.save(os.path.join(subj_dir, 'full_vxl_idx.npy'), vxl_idx)
roi_dict = get_roi_idx(tf, vxl_idx)
#np.save(os.path.join(subj_dir, 'roi_idx_pointer.npy'), roi_dict)
#roi_dict = np.load(os.path.join(subj_dir, 'roi_idx_pointer.npy')).item()
#-- load fmri response
# data shape: (#voxel, 7200/540)
train_fmri_ts = tf.get_node('/rt')[:]
train_fmri_ts = np.nan_to_num(train_fmri_ts[vxl_idx])
val_fmri_ts = tf.get_node('/rv')[:]
val_fmri_ts = np.nan_to_num(val_fmri_ts[vxl_idx])
#-- save masked data as npy file
#train_file = os.path.join(subj_dir, 'S%s_train_fmri_lV1.npy'%(subj_id))
#val_file = os.path.join(subj_dir, 'S%s_val_fmri_lV1.npy'%(subj_id))
#np.save(train_file, train_fmri_ts)
#np.save(val_file, val_fmri_ts)
#-- load cnn activation data
# data.shape = (feature_size, x, y, 7200/540)
#train_feat_file = os.path.join(feat_dir, 'conv1_train_trs.npy')
#train_feat_ts = np.load(train_feat_file, mmap_mode='r')
#val_feat_file = os.path.join(feat_dir, 'conv1_val_trs.npy')
#val_feat_ts = np.load(val_feat_file, mmap_mode='r')
#-- 2d gaussian kernel based pRF estimate
prf_dir = os.path.join(subj_dir, 'prf')
check_path(prf_dir)
# parameter config
fwhms = np.arange(1, 11)
# lasso linear regression
    vxl_idx = vxl_idx[:10]
    file_idx = -1
    # output vars (allocated once so results accumulate across all kernels)
    paras = np.zeros((96, 30250, len(vxl_idx)))
    val_corr = np.zeros((30250, len(vxl_idx)))
    alphas = np.zeros((30250, len(vxl_idx)))
for i in range(30250):
print '--------------------------'
print 'Kernel %s'%(i+1)
# load CNN features modulated by Gaussian kernels
if i/550 > file_idx:
train_feat_file = os.path.join(feat_dir, 'gaussian_kernels',
'gaussian_conv1_train_trs_%s.npy'%(i/550))
train_feat_ts = np.load(train_feat_file)
val_feat_file = os.path.join(feat_dir, 'gaussian_kernels',
'gaussian_conv1_val_trs_%s.npy'%(i/550))
val_feat_ts = np.load(val_feat_file)
file_idx = i/550
train_x = train_feat_ts[..., i%550]
val_x = val_feat_ts[..., i%550]
# shape of x : (96, 7200/540)
train_x = zscore(train_x).T
val_x = zscore(val_x).T
for j in range(len(vxl_idx)):
print 'Voxel %s'%(j+1)
train_y = train_fmri_ts[j]
val_y = val_fmri_ts[j]
lasso_cv = LassoCV(cv=10, n_jobs=4)
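            # 10-fold CV over the regularization path chooses alpha per voxel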
lasso_cv.fit(train_x, train_y)
alphas[i, j] = lasso_cv.alpha_
paras[:, i, j] = lasso_cv.coef_
pred_y = lasso_cv.predict(val_x)
val_corr[i, j] = np.corrcoef(val_y, pred_y)[0][1]
print 'Alpha %s, prediction score %s'%(alphas[i, j], val_corr[i, j])
np.save(os.path.join(prf_dir, 'lassoreg_paras.npy'), paras)
np.save(os.path.join(prf_dir, 'lassoreg_pred_corr.npy'), val_corr)
np.save(os.path.join(prf_dir, 'lassoreg_alphas.npy'), alphas)
#-- pRF to retinotopy
#prf_mtx = np.load(os.path.join(prf_dir, 'vxl_prf.npy'))
## generate full voxel feature matrix
#full_prf_mtx = np.zeros((73728, 3))
#full_prf_mtx[:] = np.nan
#for i in range(len(vxl_idx)):
# full_prf_mtx[vxl_idx[i], :] = prf_mtx[i, :]
#prf2visual_angle(full_prf_mtx, 55, prf_dir, 'retinotopy')
#-- feature temporal z-score
#print 'CNN features temporal z-score ...'
## summary features across channels
#train_feat_ts = train_feat_ts.mean(axis=0)
#train_feat_m = train_feat_ts.mean(axis=2, keepdims=True)
#train_feat_s = train_feat_ts.std(axis=2, keepdims=True)
#train_feat_ts = (train_feat_ts-train_feat_m)/(1e-10+train_feat_s)
#val_feat_ts = val_feat_ts.mean(axis=0)
#val_feat_m = val_feat_ts.mean(axis=2, keepdims=True)
#val_feat_s = val_feat_ts.std(axis=2, keepdims=True)
#val_feat_ts = (val_feat_ts-val_feat_m)/(1e-10+val_feat_s)
#print 'Salience features temporal z-score ...'
#train_sal_m = train_sal_ts.mean(axis=2, keepdims=True)
#train_sal_s = train_sal_ts.std(axis=2, keepdims=True)
#train_sal_ts = (train_sal_ts-train_sal_m)/(1e-10+train_sal_s)
#val_sal_m = val_sal_ts.mean(axis=2, keepdims=True)
#val_sal_s = val_sal_ts.std(axis=2, keepdims=True)
#val_sal_ts = (val_sal_ts-val_sal_m)/(1e-10+val_sal_s)
#print 'Salience modulated features temporal z-score ...'
#train_salfeat_ts = train_salfeat_ts.mean(axis=0)
#train_salfeat_m = train_salfeat_ts.mean(axis=2, keepdims=True)
#train_salfeat_s = train_salfeat_ts.std(axis=2, keepdims=True)
#train_salfeat_ts=(train_salfeat_ts-train_salfeat_m)/(1e-10+train_salfeat_s)
#val_salfeat_ts = val_salfeat_ts.mean(axis=0)
#val_salfeat_m = val_salfeat_ts.mean(axis=2, keepdims=True)
#val_salfeat_s = val_salfeat_ts.std(axis=2, keepdims=True)
#val_salfeat_ts = (val_salfeat_ts-val_salfeat_m)/(1e-10+val_salfeat_s)
#-- voxel-wise linear regression
#cross_corr_dir = os.path.join(subj_dir, 'spatial_cross_corr', 'lv1')
#reg_dir = os.path.join(cross_corr_dir, 'linreg_l1')
#check_path(reg_dir)
#corr_mtx = np.load(os.path.join(cross_corr_dir, 'train_conv1_corr.npy'))
#corr_mtx = corr_mtx.reshape(470, 55, 55)
## voxel-wise linear regression
#wts = np.zeros((470, 55, 55, 3))
#train_corr = np.zeros((470, 55, 55))
#val_corr = np.zeros((470, 55, 55))
#wts_mask = np.zeros((470, 3))
#statsp_mask = np.zeros((470, 3))
#train_corr_mask = np.zeros(470,)
#val_corr_mask = np.zeros(470, )
#for i in range(len(vxl_idx)):
# print 'Voxel %s of %s ...'%(i+1, len(vxl_idx))
# prf = corr_mtx[i, ...].copy()
# prf = prf > prf.max()*0.8
# print '%s voxels selected'%(prf.sum())
# if not prf.sum():
# continue
# pos = np.nonzero(prf)
# wts_tmp = np.zeros((pos[0].shape[0], 3))
# statsp_tmp = np.zeros((pos[0].shape[0], 3))
# train_corr_tmp = np.zeros(pos[0].shape[0],)
# val_corr_tmp = np.zeros(pos[0].shape[0],)
# for j in range(pos[0].shape[0]):
# train_Y = train_fmri_ts[i, :]
# val_Y = val_fmri_ts[i, :]
# train_X = np.zeros((7200, 3))
# train_X[:, 0] = train_feat_ts[pos[0][j], pos[1][j], :]
# train_X[:, 1] = train_sal_ts[pos[0][j], pos[1][j], :]
# train_X[:, 2] = train_salfeat_ts[pos[0][j], pos[1][j], :]
# val_X = np.zeros((540, 3))
# val_X[:, 0] = val_feat_ts[pos[0][j], pos[1][j], :]
# val_X[:, 1] = val_sal_ts[pos[0][j], pos[1][j], :]
# val_X[:, 2] = val_salfeat_ts[pos[0][j], pos[1][j], :]
# model = LinearRegression(fit_intercept=False)
# model.fit(train_X, train_Y)
# wts[i, pos[0][j], pos[1][j], :] = model.coef_
# ptrain_Y = model.predict(train_X)
# tcorr = np.corrcoef(ptrain_Y, train_Y)[0][1]
# train_corr[i, pos[0][j], pos[1][j]] = tcorr
# pval_Y = model.predict(val_X)
# vcorr = np.corrcoef(pval_Y, val_Y)[0][1]
# val_corr[i, pos[0][j], pos[1][j]] = vcorr
# wts_tmp[j, :] = model.coef_
# statsp_tmp[j, :] = model.p
# train_corr_tmp[j] = tcorr
# val_corr_tmp[j] = vcorr
# wts_mask[i, :] = wts_tmp.mean(axis=0)
# statsp_mask[i, :] = statsp_tmp.mean(axis=0)
# train_corr_mask[i] = train_corr_tmp.mean()
# val_corr_mask[i] = val_corr_tmp.mean()
#np.save(os.path.join(reg_dir, 'wts.npy'), wts)
#np.save(os.path.join(reg_dir, 'train_corr.npy'), train_corr)
#np.save(os.path.join(reg_dir, 'val_corr.npy'), val_corr)
#np.save(os.path.join(reg_dir, 'wts_mask.npy'), wts_mask)
#np.save(os.path.join(reg_dir, 'stats_p_mask.npy'), statsp_mask)
#np.save(os.path.join(reg_dir, 'train_corr_mask.npy'), train_corr_mask)
#np.save(os.path.join(reg_dir, 'val_corr_mask.npy'), val_corr_mask)
#-- Cross-modality mapping: voxel~CNN feature position correlation
#cross_corr_dir = os.path.join(subj_dir, 'spatial_cross_corr')
#check_path(cross_corr_dir)
#-- features from CNN
#corr_file = os.path.join(cross_corr_dir, 'train_conv1_corr.npy')
#feat_ts = train_feat_ts.sum(axis=0).reshape(3025, 7200)
#parallel_corr2_coef(train_fmri_ts, feat_ts, corr_file, block_size=55)
#-- visual-pRF: select pixels which corr-coef greater than 1/2 maximum
#corr_mtx = np.load(corr_file)
#prf_dir = os.path.join(cross_corr_dir, 'prf')
#visual_prf(corr_mtx, vxl_idx, prf_dir)
#-- categorize voxels based on pRF types
#corr_file = os.path.join(cross_corr_dir, 'train_conv1_corr.npy')
#corr_mtx = np.load(corr_file)
## get pRF by remove non-significant pixels
## two-tailed p < 0.01: r > 0.0302 and r < -0.0302
#ncorr_mtx = corr_mtx.copy()
#ncorr_mtx[(corr_mtx<=0.0302)&(corr_mtx>=-0.0302)] = 0
#prf_max = ncorr_mtx.max(axis=1)
#prf_min = ncorr_mtx.min(axis=1)
#prf_type = np.zeros(corr_mtx.shape[0])
#prf_type[(prf_max>0)&(prf_min>0)] = 1
#prf_type[(prf_max>0)&(prf_min==0)] = 2
#prf_type[(prf_max>0)&(prf_min<0)] = 3
#prf_type[(prf_max==0)&(prf_min<0)] = 4
#prf_type[(prf_max<0)&(prf_min<0)] = 5
#np.save(os.path.join(cross_corr_dir, 'prf_type.npy'), prf_type)
#nii_file = os.path.join(cross_corr_dir, 'prf_type.nii.gz')
#vutil.vxl_data2nifti(prf_type, vxl_idx, nii_file)
#-- pRF stats and visualization for each ROI
#prf_dir = os.path.join(cross_corr_dir, 'prf_figs')
#check_path(prf_dir)
#for roi in roi_dict:
# print '------%s------'%(roi)
# roi_idx = roi_dict[roi]
# # pRF type stats in each ROI
# roi_prf_type = prf_type[roi_idx]
# print 'Voxel number: %s'%(roi_prf_type.shape[0])
# for i in range(5):
# vxl_num = np.sum(roi_prf_type==(i+1))
# vxl_ratio = vxl_num * 100.0 / roi_prf_type.shape[0]
# print '%s, %0.2f'%(vxl_num, vxl_ratio)
# # save pRF as figs
# roi_dir = os.path.join(prf_dir, roi)
# check_path(roi_dir)
# roi_corr_mtx = corr_mtx[roi_idx, :]
# roi_min = roi_corr_mtx.min()
# roi_max = roi_corr_mtx.max()
# for i in roi_idx:
# vxl_prf = corr_mtx[i, :].reshape(55, 55)
# filename = 'v'+str(vxl_idx[i])+'_'+str(int(prf_type[i]))+'.png'
# out_file = os.path.join(roi_dir, filename)
# vutil.save_imshow(vxl_prf, out_file, val_range=(roi_min, roi_max))
#-- get pRF parameters based on 2D Gaussian curve using model fitting
#corr_mtx = np.load(os.path.join(cross_corr_dir, 'train_conv1_corr.npy'))
## last column is curve fitting error based on squared-differnece
#paras = np.zeros((corr_mtx.shape[0], 6))
#for i in range(corr_mtx.shape[0]):
# print i,
# y = corr_mtx[i, :]
# if y.max() >= abs(y.min()):
# x0, y0 = np.unravel_index(np.argmax(y.reshape(55, 55)), (55, 55))
# else:
# x0, y0 = np.unravel_index(np.argmin(y.reshape(55, 55)), (55, 55))
# initial_guess = (x0, y0, 3, 0, 2)
# try:
# popt, pcov = opt.curve_fit(vutil.sugar_gaussian_f, 55, y,
# p0=initial_guess)
# #print popt
# paras[i, :5] = popt
# pred_y = vutil.sugar_gaussian_f(55, *popt)
# paras[i, 5] = np.square(y-pred_y).sum()
# except RuntimeError:
# print 'Error - curve_fit failed'
# paras[i, :] = np.nan
#np.save(os.path.join(cross_corr_dir, 'curve_fit_paras.npy'), paras)
#-- curve-fit pRF visualization for each ROI
#prf_dir = os.path.join(cross_corr_dir, 'fit_prf_figs')
#check_path(prf_dir)
#paras = np.load(os.path.join(cross_corr_dir, 'curve_fit_paras.npy'))
#corr_mtx = np.load(os.path.join(cross_corr_dir, 'train_conv1_corr.npy'))
#prf_type = np.load(os.path.join(cross_corr_dir, 'prf_type.npy'))
#for roi in roi_dict:
# print '------%s------'%(roi)
# roi_idx = roi_dict[roi]
# # save pRF as figs
# roi_dir = os.path.join(prf_dir, roi)
# check_path(roi_dir)
# roi_corr_mtx = corr_mtx[roi_idx, :]
# roi_min = roi_corr_mtx.min()
# roi_max = roi_corr_mtx.max()
# for i in roi_idx:
# if np.isnan(paras[i, 0]):
# continue
# p = paras[i, :]
# vxl_prf = vutil.sugar_gaussian_f(55, *p).reshape(55, 55)
# filename = 'v'+str(vxl_idx[i])+'_'+str(int(prf_type[i]))+'.png'
# out_file = os.path.join(roi_dir, filename)
# vutil.save_imshow(vxl_prf, out_file, val_range=(roi_min, roi_max))
#-- show pRF parameters on cortical surface
#paras = np.load(os.path.join(cross_corr_dir, 'curve_fit_paras.npy'))
#full_prf_mtx = np.zeros((73728, 3))
#full_prf_mtx[:] = np.nan
#for i in range(len(vxl_idx)):
# full_prf_mtx[vxl_idx[i], :] = paras[i, :3]
#prf2visual_angle(full_prf_mtx, 55, cross_corr_dir, 'curve_fit')
#err_file = os.path.join(cross_corr_dir, 'curve_fit_err.nii.gz')
#vutil.vxl_data2nifti(paras[:, 5], vxl_idx, err_file)
#-- Cross-modality mapping: voxel~CNN unit correlation
#cross_corr_dir = os.path.join(subj_dir, 'cross_corr')
#check_path(cross_corr_dir)
# features from CNN
#corr_file = os.path.join(cross_corr_dir, 'train_norm1_corr.npy')
#feat_ts = train_feat_ts.reshape(69984, 7200)
#parallel_corr2_coef(train_fmri_ts, feat_ts, corr_file, block_size=96)
# features from optical flow
#corr_file = os.path.join(cross_corr_dir, 'train_optic_mag_corr.npy')
#feat_ts = tr_mag_ts.reshape(16384, 7200)
#parallel_corr2_coef(train_fmri_ts, feat_ts, corr_file, block_size=55)
#-- random cross-modal correlation
#rand_corr_file = os.path.join(cross_corr_dir, 'rand_train_conv1_corr.npy')
#feat_ts = tr_mag_ts.reshape(16384, 7200)
#random_cross_modal_corr(train_fmri_ts, feat_ts, 1000, 1000, rand_corr_file)
#permutation_stats(np.load(rand_corr_file))
#-- retinotopic mapping based on cross-correlation with norm1
#cross_corr_dir = os.path.join(subj_dir, 'cross_corr')
#retino_dir = os.path.join(cross_corr_dir, 'retinotopic')
#check_path(retino_dir)
#corr_file = os.path.join(cross_corr_dir, 'train_norm1_corr.npy')
#retinotopic_mapping(corr_file, retino_dir, vxl_idx, figout=False)
#-- feature temporal z-score
#print 'CNN features temporal z-score ...'
#train_feat_m = train_feat_ts.mean(axis=3, keepdims=True)
#train_feat_s = train_feat_ts.std(axis=3, keepdims=True)
#train_feat_ts = (train_feat_ts-train_feat_m)/(1e-10+train_feat_s)
#val_feat_ts = (val_feat_ts-train_feat_m)/(1e-10+train_feat_s)
#tmp_train_file = os.path.join(feat_dir, 'train_conv1_trs_z.npy')
#np.save(tmp_train_file, train_feat_ts)
#del train_feat_ts
#tmp_val_file = os.path.join(feat_dir, 'val_norm1_trs_z.npy')
#np.save(tmp_val_file, val_feat_ts)
#del val_feat_ts
#train_feat_ts = np.load(tmp_train_file, mmap_mode='r')
#train_feat_ts = train_feat_ts.reshape(69984, 7200)
#val_feat_ts = np.load(tmp_val_file, mmap_mode='r')
#val_feat_ts = val_feat_ts.reshape(69984, 540)
#-- fmri data z-score
#print 'fmri data temporal z-score'
#m = np.mean(train_fmri_ts, axis=1, keepdims=True)
#s = np.std(train_fmri_ts, axis=1, keepdims=True)
#train_fmri_ts = (train_fmri_ts - m) / (1e-10 + s)
#m = np.mean(val_fmri_ts, axis=1, keepdims=True)
#s = np.std(val_fmri_ts, axis=1, keepdims=True)
#val_fmri_ts = (val_fmri_ts - m) / (1e-10 + s)
#-- Encoding: ridge regression
#ridge_dir = os.path.join(subj_dir, 'ridge')
#check_path(ridge_dir)
#-- layer-wise ridge regression: select cnn units whose correlation with
#-- the given voxel exceeded the half of the maximal correlation within
#-- the layer.
#cross_corr_dir = os.path.join(subj_dir, 'cross_corr')
#cross_corr_file = os.path.join(cross_corr_dir, 'train_norm1_corr.npy')
#cross_corr = np.load(cross_corr_file, mmap_mode='r')
## output config
#ALPHA_NUM = 20
#BOOTS_NUM = 15
#full_vxl_num, feat_num = cross_corr.shape
#vxl_num = len(vxl_idx)
#wt_mtx = np.zeros((vxl_num, feat_num))
#alpha_mtx = np.zeros(vxl_num)
#val_corr_mtx = np.zeros(vxl_num)
##bootstrap_corr_mtx = np.zeros((vxl_num, ALPHA_NUM, BOOTS_NUM))
#bootstrap_corr_mtx = np.zeros((vxl_num, BOOTS_NUM))
## voxel-wise regression
#for i in range(vxl_num):
# print 'Voxel %s in %s'%(i+1, vxl_num)
# v_corr = cross_corr[np.where(full_vxl_idx==vxl_idx[i])[0][0], :]
# feat_idx = v_corr > (v_corr.max()/2)
# print 'Select %s features'%(feat_idx.sum())
# vtrain_feat = train_feat_ts[feat_idx, :]
# vval_feat = val_feat_ts[feat_idx, :]
# vtrain_fmri = np.expand_dims(train_fmri_ts[i, :], axis=0)
# vval_fmri = np.expand_dims(val_fmri_ts[i, :], axis=0)
# wt, val_corr, alpha, bscores, valinds = ridge.bootstrap_ridge(
# vtrain_feat.T, vtrain_fmri.T,
# vval_feat.T, vval_fmri.T,
# alphas=np.arange(100, 2001, 2001/ALPHA_NUM),
# #alphas=np.logspace(-2, 3, ALPHA_NUM),
# nboots=BOOTS_NUM, chunklen=72, nchunks=20,
# single_alpha=False, use_corr=True)
# print 'Alpha: %s'%(alpha)
# print 'Val Corr: %s'%(val_corr)
# wt_mtx[i, feat_idx] = wt.T
# val_corr_mtx[i] = val_corr
# alpha_mtx[i] = alpha
# alpha_idx = np.where(np.arange(100, 2001, 2001/ALPHA_NUM)==alpha)[0][0]
# #alpha_idx = np.where(np.logspace(-2, 3, ALPHA_NUM)==alpha)[0][0]
# bootstrap_corr_mtx[i, :] = bscores[alpha_idx, 0, :]
# #bootstrap_corr_mtx[i, ...] = bscores[:, 0, :]
## save output
#wt_file = os.path.join(ridge_dir, 'norm1_wt.npy')
#alpha_file = os.path.join(ridge_dir, 'norm1_alpha.npy')
#val_corr_file = os.path.join(ridge_dir, 'norm1_val_corr.npy')
#bootstrap_corr_file = os.path.join(ridge_dir, 'norm1_bootstrap_corr.npy')
#np.save(wt_file, wt_mtx)
#np.save(alpha_file, alpha_mtx)
#np.save(val_corr_file, val_corr_mtx)
#np.save(bootstrap_corr_file, bootstrap_corr_mtx)
| bsd-3-clause |
namccart/gnuradio | gr-digital/examples/example_timing.py | 49 | 9180 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
from scipy import fftpack
class example_timing(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise,
foffset, toffset, poffset, mode=0):
gr.top_block.__init__(self)
rrc_taps = filter.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
gain = bw
nfilts = 32
rrc_taps_rx = filter.firdes.root_raised_cosine(
nfilts, sps*nfilts, 1.0, rolloff, ntaps*nfilts)
data = 2.0*scipy.random.randint(0, 2, N) - 1.0
data = scipy.exp(1j*poffset) * data
self.src = blocks.vector_source_c(data.tolist(), False)
self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = channels.channel_model(noise, foffset, toffset)
self.off = filter.fractional_resampler_cc(0.20, 1.0)
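        # introduce a fixed fractional timing offset of 0.20 samples (unity
        # resampling rate) for the timing recovery block to estimate and remove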
if mode == 0:
self.clk = digital.pfb_clock_sync_ccf(sps, gain, rrc_taps_rx,
nfilts, nfilts//2, 1)
self.taps = self.clk.taps()
self.dtaps = self.clk.diff_taps()
self.delay = int(scipy.ceil(((len(rrc_taps)-1)/2 +
(len(self.taps[0])-1)/2)/float(sps))) + 1
self.vsnk_err = blocks.vector_sink_f()
self.vsnk_rat = blocks.vector_sink_f()
self.vsnk_phs = blocks.vector_sink_f()
self.connect((self.clk,1), self.vsnk_err)
self.connect((self.clk,2), self.vsnk_rat)
self.connect((self.clk,3), self.vsnk_phs)
else: # mode == 1
mu = 0.5
gain_mu = bw
gain_omega = 0.25*gain_mu*gain_mu
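            # common rule of thumb for M&M recovery: derive the omega (period)
            # loop gain from the mu (phase) loop gain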
omega_rel_lim = 0.02
self.clk = digital.clock_recovery_mm_cc(sps, gain_omega,
mu, gain_mu,
omega_rel_lim)
self.vsnk_err = blocks.vector_sink_f()
self.connect((self.clk,1), self.vsnk_err)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_clk = blocks.vector_sink_c()
self.connect(self.src, self.rrc, self.chn, self.off, self.clk, self.vsnk_clk)
self.connect(self.src, self.vsnk_src)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=2000,
help="Set the number of samples to process [default=%default]")
parser.add_option("-S", "--sps", type="int", default=4,
help="Set the samples per symbol [default=%default]")
parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
help="Set the rolloff factor [default=%default]")
parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
help="Set the loop bandwidth (PFB) or gain (M&M) [default=%default]")
parser.add_option("-n", "--ntaps", type="int", default=45,
help="Set the number of taps in the filters [default=%default]")
parser.add_option("", "--noise", type="eng_float", default=0.0,
help="Set the simulation noise voltage [default=%default]")
parser.add_option("-f", "--foffset", type="eng_float", default=0.0,
help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
help="Set the simulation's timing offset [default=%default]")
parser.add_option("-p", "--poffset", type="eng_float", default=0.0,
help="Set the simulation's phase offset [default=%default]")
parser.add_option("-M", "--mode", type="int", default=0,
help="Set the recovery mode (0: polyphase, 1: M&M) [default=%default]")
(options, args) = parser.parse_args ()
# Adjust N for the interpolation by sps
options.nsamples = options.nsamples // options.sps
# Set up the program-under-test
put = example_timing(options.nsamples, options.sps, options.rolloff,
options.ntaps, options.bandwidth, options.noise,
options.foffset, options.toffset, options.poffset,
options.mode)
put.run()
if options.mode == 0:
data_src = scipy.array(put.vsnk_src.data()[20:])
data_clk = scipy.array(put.vsnk_clk.data()[20:])
data_err = scipy.array(put.vsnk_err.data()[20:])
data_rat = scipy.array(put.vsnk_rat.data()[20:])
data_phs = scipy.array(put.vsnk_phs.data()[20:])
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
# Plot the IQ symbols
s1 = f1.add_subplot(2,2,1)
s1.plot(data_src.real, data_src.imag, "bo")
s1.plot(data_clk.real, data_clk.imag, "ro")
s1.set_title("IQ")
s1.set_xlabel("Real part")
s1.set_ylabel("Imag part")
s1.set_xlim([-2, 2])
s1.set_ylim([-2, 2])
# Plot the symbols in time
delay = put.delay
m = len(data_clk.real)
s2 = f1.add_subplot(2,2,2)
s2.plot(data_src.real, "bs", markersize=10, label="Input")
s2.plot(data_clk.real[delay:], "ro", label="Recovered")
s2.set_title("Symbols")
s2.set_xlabel("Samples")
s2.set_ylabel("Real Part of Signals")
s2.legend()
# Plot the clock recovery loop's error
s3 = f1.add_subplot(2,2,3)
s3.plot(data_err, label="Error")
s3.plot(data_rat, 'r', label="Update rate")
s3.set_title("Clock Recovery Loop Error")
s3.set_xlabel("Samples")
s3.set_ylabel("Error")
s3.set_ylim([-0.5, 0.5])
s3.legend()
# Plot the clock recovery loop's error
s4 = f1.add_subplot(2,2,4)
s4.plot(data_phs)
s4.set_title("Clock Recovery Loop Filter Phase")
s4.set_xlabel("Samples")
s4.set_ylabel("Filter Phase")
diff_taps = put.dtaps
ntaps = len(diff_taps[0])
nfilts = len(diff_taps)
t = scipy.arange(0, ntaps*nfilts)
f3 = pylab.figure(3, figsize=(12,10), facecolor='w')
s31 = f3.add_subplot(2,1,1)
s32 = f3.add_subplot(2,1,2)
s31.set_title("Differential Filters")
s32.set_title("FFT of Differential Filters")
for i,d in enumerate(diff_taps):
D = 20.0*scipy.log10(1e-20+abs(fftpack.fftshift(fftpack.fft(d, 10000))))
s31.plot(t[i::nfilts].real, d, "-o")
s32.plot(D)
s32.set_ylim([-120, 10])
# If testing the M&M clock recovery loop
else:
data_src = scipy.array(put.vsnk_src.data()[20:])
data_clk = scipy.array(put.vsnk_clk.data()[20:])
data_err = scipy.array(put.vsnk_err.data()[20:])
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
# Plot the IQ symbols
s1 = f1.add_subplot(2,2,1)
s1.plot(data_src.real, data_src.imag, "o")
s1.plot(data_clk.real, data_clk.imag, "ro")
s1.set_title("IQ")
s1.set_xlabel("Real part")
s1.set_ylabel("Imag part")
s1.set_xlim([-2, 2])
s1.set_ylim([-2, 2])
# Plot the symbols in time
s2 = f1.add_subplot(2,2,2)
s2.plot(data_src.real, "bs", markersize=10, label="Input")
s2.plot(data_clk.real, "ro", label="Recovered")
s2.set_title("Symbols")
s2.set_xlabel("Samples")
s2.set_ylabel("Real Part of Signals")
s2.legend()
# Plot the clock recovery loop's error
s3 = f1.add_subplot(2,2,3)
s3.plot(data_err)
s3.set_title("Clock Recovery Loop Error")
s3.set_xlabel("Samples")
s3.set_ylabel("Error")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
PredictiveScienceLab/cluster-opt-bgo | pydes/_core.py | 2 | 10321 | """
Global Optimization of Expensive Functions.
Author:
Ilias Bilionis
Date:
10/15/2014
01/29/2015
"""
__all__ = ['expected_improvement',
'fb_expected_improvement',
'expected_information_gain',
'minimize', 'maximize',
'plot_summary', 'plot_summary_2d']
import GPy
import GPy.inference.mcmc
from GPy.inference.mcmc import HMC
import numpy as np
import math
import scipy
import scipy.stats as stats
from scipy.integrate import quad
from choldate import choldowndate, cholupdate  # used by expected_information_gain below
from statsmodels.sandbox.distributions.multivariate import mvnormcdf
import math
def remove(mu, S, i):
"""
Remove i element from mu and S.
"""
mu_ni = np.hstack([mu[:i], mu[i+1:]])
S_nini = np.array(np.bmat([[S[:i, :i], S[:i, i+1:]],
[S[i+1:, :i], S[i+1:, i+1:]]]))
return mu_ni, S_nini
def maxpdf(x, mu, S):
s = np.zeros(x.shape[0])
d = mu.shape[0]
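    # density of the maximum of a correlated Gaussian vector:
    # p_max(x) = sum_i phi_i(x) * P(X_j <= x for all j != i | X_i = x)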
for i in xrange(d):
mu_i = mu[i]
S_ii = S[i, i]
mu_ni, S_nini = remove(mu, S, i)
S_ini = np.array(np.bmat([[S[:i, i], S[i+1:, i]]]))
mu_nii = mu_ni[:, None] + np.dot(S_ini.T, x[None, :] - mu_i) / S_ii
S_ninii = S_nini - np.dot(S_ini, S_ini.T) / S_ii
        phi_i = stats.norm.pdf(x, loc=mu_i, scale=np.sqrt(S_ii))
Phi_i = np.array([mvnormcdf(x[j], mu_nii[:, j], S_ninii)
for j in xrange(x.shape[0])])
s += phi_i * Phi_i
return s
def expected_improvement(X_design, model, mode='min'):
"""
Compute the Expected Improvement criterion at ``x``.
"""
y = model.Y.flatten()
m_s, v_s = model.predict(X_design)[:2]
m_s = m_s.flatten()
v_s = v_s.flatten()
s_s = np.sqrt(v_s)
if mode == 'min':
m_n = np.min(y)
u = (m_n - m_s) / s_s
elif mode == 'max':
m_n = np.max(y)
u = (m_s - m_n) / s_s
else:
raise NotImplementedError('I do not know what to do with mode %s' %mode)
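    # closed-form expected improvement: EI(x) = sigma(x) * (u * Phi(u) + phi(u)),
    # with u as computed above and Phi/phi the standard normal CDF/PDF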
ei = s_s * (u * stats.norm.cdf(u) + stats.norm.pdf(u))
return ei
def fb_expected_improvement(X_design, model, mode='min', stepsize=1e-2,
num_samples=100):
"""
Compute the fully Bayesian expected improvement criterion.
"""
model.rbf.variance.set_prior(GPy.priors.LogGaussian(0., 1.))
model.rbf.lengthscale.set_prior(GPy.priors.LogGaussian(0., 0.1))
mcmc = HMC(model, stepsize=stepsize)
params = mcmc.sample(num_samples=num_samples)
ei_all = []
for i in xrange(params.shape[0]):
model.rbf.variance = params[i, 0]
model.rbf.lengthscale = params[i, 1]
ei = expected_improvement(X_design, model, mode=mode)
ei_all.append(ei)
ei_all = np.array(ei_all)
ei_fb = ei_all.mean(axis=0)
return ei_fb
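# Usage sketch (hypothetical data; the model must use a GPy RBF kernel so the
# rbf.variance / rbf.lengthscale priors set above apply):
#   model = GPy.models.GPRegression(X, Y, GPy.kern.RBF(X.shape[1]))
#   ei = fb_expected_improvement(X_design, model, num_samples=50)
#   x_next = X_design[np.argmax(ei)]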
def min_qoi(X_design, f):
"""
A QoI that corresponds to the min of the function.
"""
return np.argmin(f, axis=0)
def kl_divergence(g1, g2):
"""
Compute the KL divergence.
"""
f = lambda(x): g1.evaluate([[x]]) * np.log(g1.evaluate([[x]]) / g2.evaluate([[x]]))
return quad(f, 0, 6)
def expected_information_gain(X_design, model, num_Omegas=1000,
num_y=100,
qoi=min_qoi,
qoi_bins=None,
qoi_num_bins=20):
"""
Compute the expected information gain criterion at ``x``.
"""
import matplotlib.pyplot as plt
m_d, K_d = model.predict(X_design, full_cov=True)[:2]
U_d = scipy.linalg.cholesky(K_d, lower=False)
Omegas = np.random.randn(X_design.shape[0], num_Omegas)
delta_y_i = np.random.randn(num_y)
# Find the histogram of Q the current data
S_d = m_d + np.dot(U_d.T, Omegas)
Q_d = qoi(X_design, S_d)
tmp = stats.itemfreq(Q_d)
yy = model.posterior_samples(X_design, 10)
plt.plot(X_design, yy, 'm', linewidth=2)
plt.savefig('examples/samples.png')
plt.clf()
p_d = np.zeros((X_design.shape[0],))
p_d[np.array(tmp[:, 0], dtype='int')] = tmp[:, 1] / np.sum(tmp[:, 1])
if qoi_bins is None and qoi is min_qoi:
#qoi_bins = np.linspace(np.min(Q_d), np.max(Q_d), qoi_num_bins)[None, :]
qoi_bins = np.linspace(X_design[0, 0], X_design[-1, 0], qoi_num_bins)[None, :]
H_d, e_d = np.histogramdd(Q_d, normed=True, bins=qoi_bins)
delta_e_d = e_d[0][1] - e_d[0][0]
#p_d = H_d * delta_e_d
plt.plot(X_design, p_d)
plt.plot(X_design, m_d)
plt.plot(model.X, model.Y, 'ro', markersize=10)
plt.hist(X_design[Q_d, 0], normed=True, alpha=0.5)
plt.savefig('examples/kde_Q.png')
plt.clf()
print 'Entropy:', stats.entropy(p_d)
G = np.zeros((X_design.shape[0],))
p_d += 1e-16
for i in xrange(X_design.shape[0]):
u_di = K_d[:, i] / math.sqrt(K_d[i, i])
u_di = u_di[:, None]
#K_dd = K_d - np.dot(u_di, u_di.T)
#K_dd += np.eye(K_d.shape[0]) * 1e-6
choldowndate(U_d, u_di.flatten().copy())
#U_d = scipy.linalg.cholesky(K_dd, lower=False)
# Pick a value for y:
Omegas = np.random.randn(X_design.shape[0], num_Omegas)
delta_y_i = np.random.randn(num_y)
m_dgi = m_d + delta_y_i * u_di
S_dgi = m_dgi[:, :, None] + np.dot(U_d.T, Omegas)[:, None, :]
#for j in xrange(num_y):
# print S_dgi[:, j, :]
# plt.plot(X_design, S_dgi[:, j, :], 'm', linewidth=0.5)
# plt.plot(model.X, model.likelihood.Y, 'ro', markersize=10)
# plt.savefig('examples/ig_S_' + str(i).zfill(2) + '_' + str(j).zfill(2) + '.png')
# plt.clf()
Q_dgi = qoi(X_design, S_dgi)
#print Q_dgi
#quit()
p_d_i = np.zeros((num_y, X_design.shape[0]))
for j in xrange(num_y):
tmp = stats.itemfreq(Q_dgi[j, :])
p_d_i[j, np.array(tmp[:, 0], dtype='int')] = tmp[:, 1] / np.sum(tmp[:, 1])
p_d_i += 1e-16
G[i] = np.mean([stats.entropy(p_d_i[j, :], p_d) for j in xrange(num_y)])
#G[i] = np.mean([-stats.entropy(p_d_i[j, :]) for j in xrange(num_y)])
#plt.plot(X_design, S_dgi[:, :, 0], 'm', linewidth=0.5)
#plt.plot(X_design, m_d, 'r', linewidth=2)
plt.plot(model.X, np.zeros((model.X.shape[0], 1)), 'ro', markersize=10)
plt.plot(X_design, np.mean(p_d_i, axis=0), 'g', linewidth=2)
plt.savefig('examples/ig_S_' + str(i).zfill(2) + '.png')
plt.clf()
print X_design[i, 0], G[i]
cholupdate(U_d, u_di.flatten().copy())
plt.plot(X_design, G)
plt.savefig('examples/ig_KL.png')
plt.clf()
return G
def plot_summary(f, X_design, model, prefix, G, Gamma_name):
"""
Plot a summary of the current iteration.
"""
import matplotlib.pyplot as plt
X = model.X
y = model.Y
m_s, k_s = model.predict(X_design, full_cov=True)
m_05, m_95 = model.predict_quantiles(X_design)
fig, ax1 = plt.subplots()
ax1.plot(X_design, f(X_design), 'b', linewidth=2)
ax1.plot(X, y, 'go', linewidth=2, markersize=10, markeredgewidth=2)
ax1.plot(X_design, m_s, 'r--', linewidth=2)
ax1.fill_between(X_design.flatten(), m_05.flatten(), m_95.flatten(),
color='grey', alpha=0.5)
ax1.set_ylabel('$f(x)$', fontsize=16)
ax2 = ax1.twinx()
ax2.plot(X_design, G, 'g', linewidth=2)
ax2.set_ylabel('$%s(x)$' % Gamma_name, fontsize=16, color='g')
#ax2.set_ylim([0., 3.])
plt.setp(ax2.get_yticklabels(), color='g')
png_file = prefix + '.png'
print 'Writing:', png_file
plt.savefig(png_file)
plt.clf()
def plot_summary_2d(f, X_design, model, prefix, G, Gamma_name):
"""
Plot a summary of the current iteration.
"""
import matplotlib.pyplot as plt
n = np.sqrt(X_design.shape[0])
X1, X2 = (X_design[:, i].reshape((n, n)) for i in range(2))
GG = G.reshape((n, n))
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.contourf(X1, X2, GG)
fig.colorbar(cax)
ax.set_xlabel('$x_1$', fontsize=16)
ax.set_ylabel('$x_2$', fontsize=16)
plt.savefig(prefix + '_' + Gamma_name + '.png')
plt.clf()
X = model.X
m_s, k_s = model.predict(X_design)
M_s = m_s.reshape((n, n))
S_s = np.sqrt(k_s.reshape((n, n)))
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.contourf(X1, X2, M_s)
fig.colorbar(cax)
ax.plot(X[:, 0], X[:, 1], 'k.', markersize=10)
ax.set_xlabel('$x_1$', fontsize=16)
ax.set_ylabel('$x_2$', fontsize=16)
plt.savefig(prefix + '_mean.png')
plt.clf()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.contourf(X1, X2, S_s)
fig.colorbar(cax)
ax.plot(X[:, 0], X[:, 1], 'k.', markersize=10)
ax.set_xlabel('$x_1$', fontsize=16)
ax.set_ylabel('$x_2$', fontsize=16)
plt.savefig(prefix + '_std.png')
plt.clf()
def minimize(f, X_init, X_design, prefix="minimize", Gamma=expected_improvement,
Gamma_name='EI', max_it=10, tol=1e-1, callback=None):
"""
Optimize f using a limited number of evaluations.
"""
X = X_init
y = np.array([f(X[i, :]) for i in xrange(X.shape[0])])
k = GPy.kern.RBF(X.shape[1], ARD=True)
for count in xrange(max_it):
model = GPy.models.GPRegression(X, y, k)
model.Gaussian_noise.variance.constrain_fixed(1e-6)
model.optimize()
print str(model)
G = Gamma(X_design, model)
if callback is not None:
callback(f, X_design, model,
prefix + '_' + str(count).zfill(2), G, Gamma_name)
i = np.argmax(G)
if G[i] < tol:
print '*** converged'
break
print 'I am adding:', X_design[i:(i+1), :]
print 'which has a G of', G[i]
X = np.vstack([X, X_design[i:(i+1), :]])
y = np.vstack([y, f(X_design[i, :])])
print 'it =', count+1, ', min =', np.min(y), ' arg min =', X[np.argmin(y), :]
return X, y
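# Usage sketch (hypothetical 1-D objective; f should return a length-1 array so
# that y stacks into an (n, 1) matrix for GPy):
#   f = lambda x: (x - 0.3) ** 2
#   X_init = np.random.rand(3, 1)
#   X_design = np.linspace(0, 1, 100)[:, None]
#   X_opt, y_opt = minimize(f, X_init, X_design, max_it=5)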
def maximize(f, X_init, X_design, prefix='maximize', Gamma=expected_improvement,
Gamma_name='EI', max_it=10, tol=1e-1, callback=None):
"""
Maximize the function ``f``.
"""
f_minus = lambda(x) : -f(x)
return minimize(f_minus, X_init, X_design, prefix=prefix, Gamma=Gamma,
Gamma_name=Gamma_name, max_it=max_it, tol=tol)
| mit |
saquiba2/numpy2 | numpy/core/code_generators/ufunc_docstrings.py | 14 | 90528 | """
Docstrings for generated ufuncs
The syntax is designed to look like the function add_newdoc is being
called from numpy.lib, but in this file add_newdoc puts the docstrings
in a dictionary. This dictionary is used in
numpy/core/code_generators/generate_umath.py to generate the docstrings
for the ufuncs in numpy.core at the C level when the ufuncs are created
at compile time.
"""
from __future__ import division, absolute_import, print_function
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(start=-10, stop=10, num=101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10])
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
add : ndarray or scalar
The sum of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
out : ndarray, optional
Array of the same shape as `a`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi]. If `x` is a scalar then a
scalar is returned, otherwise an array of the same shape as `x`
is returned.
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array of the same shape as `x`, to store results in.
See `doc.ufuncs` (Section "Output arguments") for details.
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
out : ndarray, optional
Array of the same shape as `x`, in which to store the results.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``. If `x` is a scalar, a scalar
is returned, otherwise an array.
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : ndarray
    Array of the same shape as `x`.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
    For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
Input values. `arctan` is applied to each element of `x`.
Returns
-------
out : ndarray
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
It is a scalar if `x` is a scalar.
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Array of the same shape as `x`.
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return
the `z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function
that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
    therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
Result.
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
    The number 13 has the binary representation ``00001101``. Likewise,
    16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
    then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32),
... np.array([4, 4, 4, 2147483647L], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
    The complex conjugate of `x`, with the same dtype as `x`.
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine element-wise.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding cosine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
    >>> # Example of providing the optional output parameter
    >>> out1 = np.empty(1)
    >>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Output array of same shape as `x`.
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as x.
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
    >>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The corresponding angle in degrees.
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray or scalar
The quotient ``x1/x2``, element-wise. Returns a scalar if
both ``x1`` and ``x2`` are scalars.
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and
division by zero.
Notes
-----
Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting.
Behavior on division by zero can be changed using ``seterr``.
In Python 2, when both ``x1`` and ``x2`` are of an integer type,
``divide`` will behave like ``floor_divide``. In Python 3, it behaves
like ``true_divide``.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types (Python 2 only):
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic (again,
Python 2 only), and does not raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using ``seterr``:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False], dtype=bool)
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Output array, element-wise exponential of `x`.
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
    then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with
magnitude 1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
http://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array to insert results into.
Returns
-------
out : ndarray
Element-wise 2 to the power `x`.
See Also
--------
power
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Element-wise exponential minus one: ``out = exp(x) - 1``.
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray or scalar
The absolute values of `x`, the returned values are always floats.
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The floor of each element in `x`.
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy instead uses the definition of
`floor` where `floor(-2.5) == -3`.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
    Return the largest integer smaller than or equal to the division of the
    inputs.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
See Also
--------
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function fmod, the
remainder has the same sign as the dividend `x1`. It is equivalent to
the Matlab(TM) ``rem`` function and should not be confused with the
Python modulus operator ``x1 % x2``.
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
See Also
--------
remainder : Equivalent to the Python ``%`` operator.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors
is bound by conventions. For `fmod`, the sign of result is the sign of
the dividend, while for `remainder` the sign of the result is the sign
of the divisor. The `fmod` function is equivalent to the Matlab(TM)
``rem`` function.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False], dtype=bool)
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned. In a
two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. A N-bit
two's-complement system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x1 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
    >>> np.invert(np.array([13], dtype=np.uint8))
array([242], dtype=uint8)
    >>> np.binary_repr(13, width=8)
'00001101'
>>> np.binary_repr(242, width=8)
'11110010'
The result depends on the bit-width:
    >>> np.invert(np.array([13], dtype=np.uint16))
array([65522], dtype=uint16)
    >>> np.binary_repr(13, width=16)
'0000000000001101'
>>> np.binary_repr(65522, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
    >>> np.invert(np.array([13], dtype=np.int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
    >>> np.invert(np.array([True, False]))
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
y : ndarray, bool
For scalar input, the result is a new boolean with value True
if the input is finite; otherwise the value is False (input is
either positive infinity, negative infinity or Not a Number).
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the
corresponding element of the input is finite; otherwise the values
are False (element is either positive infinity, negative infinity
or Not a Number).
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity. Errors result if the
second argument is also supplied when `x` is a scalar input, or if
first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Returns a boolean array of the same shape as `x`, True where ``x ==
+/-inf``, otherwise False.
Parameters
----------
x : array_like
Input values
out : array_like, optional
An array with the same shape as `x` to store the result.
Returns
-------
y : bool (scalar) or boolean ndarray
For scalar input, the result is a new boolean with value True if
the input is positive or negative infinity; otherwise the value is
False.
For array input, the result is a boolean array with the same shape
as the input and the values are True where the corresponding
element of the input is positive or negative infinity; elsewhere
the values are False. If a second argument was supplied the result
is stored there. If the type of that array is a numeric type the
result is represented as zeros and ones, if the type is boolean
then as False and True, respectively. The return value `y` is then
a reference to that array.
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray or bool
For scalar input, the result is a new boolean with value True if
the input is NaN; otherwise the value is False.
For array input, the result is a boolean array of the same
dimensions as the input and the values are True if the
corresponding element of the input is NaN; otherwise the values are
False.
See Also
--------
isinf, isneginf, isposinf, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return the truth value of (x1 =< x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it.
`log10` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small as
to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
out : ndarray, optional
Array to store results in.
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it.
`log1p` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. `x1` and `x2` must be of the same shape.
Returns
-------
y : ndarray or bool
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : ndarray or bool
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`. They must
be broadcastable to the same shape.
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by whether or not
broadcasting of one or both arrays was required.
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
minimum :
Element-wise minimum of two arrays, propagates NaNs.
fmax :
Element-wise maximum of two arrays, ignores NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
fmin, amin, nanmin
Notes
-----
The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when
neither x1 nor x2 are nans, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
maximum :
Element-wise maximum of two arrays, propagates NaNs.
fmin :
Element-wise minimum of two arrays, ignores NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
fmax, amax, nanmax
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when
neither x1 nor x2 are NaNs, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.minimum(-np.Inf, 1)
-inf
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmin :
Element-wise minimum of two arrays, ignores NaNs.
maximum :
Element-wise maximum of two arrays, propagates NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
minimum, amin, nanmin
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([ 2., 5., 4.])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmax :
Element-wise maximum of two arrays, ignores NaNs.
minimum :
Element-wise minimum of two arrays, propagates NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
maximum, amax, nanmax
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
    >>> np.fmin([2, 3, 4], [1, 5, 2])
    array([1, 3, 2])
    >>> np.fmin(np.eye(2), [0.5, 2])
    array([[ 0.5,  0. ],
           [ 0. ,  1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
Returns
-------
y1 : ndarray
Fractional part of `x`.
y2 : ndarray
Integral part of `x`.
Notes
-----
For integer input the return values are floats.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Numerical negative, element-wise.
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
out : ndarray, optional
A placeholder the same shape as `x1` to store the result.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
not_equal : ndarray bool, scalar bool
For each element in `x1, x2`, return True if `x1` is not equal
to `x2` and False otherwise.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True], dtype=bool)
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', '_ones_like',
"""
This function used to be the numpy.ones_like, but now a specific
function for that has been written for consistency with the other
*_like functions. It is only used internally in a limited fashion now.
See Also
--------
ones_like
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding radian values.
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
Returns
-------
y : ndarray
The corresponding angle in radians.
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray
Return array.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes ``x1 - floor(x1 / x2) * x2``, the result has the same sign as
the divisor `x2`. It is equivalent to the Python modulus operator
``x1 % x2`` and should not be confused with the Matlab(TM) ``rem``
function.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The remainder of the quotient ``x1/x2``, element-wise. Returns a
scalar if both `x1` and `x2` are scalars.
See Also
--------
fmod : Equivalent of the Matlab(TM) ``rem`` function.
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of)
integers.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right by `x2` places. Because the internal
representation of numbers is in binary format, this operation is
equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray or scalar
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. nan
is returned for nan inputs.
For complex inputs, the `sign` function returns
``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``.
complex(nan, 0) is returned for complex nan inputs.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The sign of `x`.
Notes
-----
There is more than one definition of sign in common use for complex
numbers. The definition used here is equivalent to :math:`x/\\sqrt{x*x}`
which is different from a common alternative, :math:`x/|x|`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
>>> np.sign(5-2j)
(1+0j)
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If both arguments are arrays or sequences, they have to be of the same
length. If `x2` is a scalar, its sign will be copied to all elements of
`x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
The values of `x1` with the sign of `x2`.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next floating-point value after x1 towards x2, element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : array_like
The next representable values of `x1` in the direction of `x2`.
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x1 : array_like
Values to find the spacing of.
Returns
-------
out : array_like
The spacing of values of `x1`.
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and NaN is NaN.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
Returns
-------
y : array_like
The sine of each element of x.
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry (the
mathematical study of triangles). Consider a circle of radius 1
centered on the origin. A ray comes in from the :math:`+x` axis, makes
an angle at the origin (measured counter-clockwise from that axis), and
departs from the origin. The :math:`y` coordinate of the outgoing
ray's intersection with the unit circle is the sine of that angle. It
ranges from -1 for :math:`x=3\\pi / 2` to +1 for :math:`\\pi / 2.` The
function has zeroes where the angle is a multiple of :math:`\\pi`.
Sines of angles between :math:`\\pi` and :math:`2\\pi` are negative.
The numerous properties of the sine and related functions are included
in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the positive square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
>>> np.sqrt([4, -1, numpy.inf])
array([ 2., NaN, Inf])
""")
add_newdoc('numpy.core.umath', 'cbrt',
"""
Return the cube-root of an array, element-wise.
.. versionadded:: 1.10.0
Parameters
----------
x : array_like
The values whose cube-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the
cube-root of each element in `x`.
If `out` was provided, `y` is a reference to it.
Examples
--------
>>> np.cbrt([1,8,27])
array([ 1., 2., 3.])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray
Element-wise `x*x`, of the same shape and dtype as `x`.
Returns scalar if `x` is a scalar.
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.tan([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tan(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
http://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
Returns
-------
out : ndarray
Result is scalar if both inputs are scalar, ndarray otherwise.
Notes
-----
The floor division operator ``//`` was added in Python 2.2 making
``//`` and ``/`` equivalent operators. The default floor division
operation of ``/`` can be replaced by true division with ``from
__future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
add_newdoc('numpy.core.umath', 'frexp',
"""
Decompose the elements of x into mantissa and twos exponent.
Returns (`mantissa`, `exponent`), where ``x = mantissa * 2**exponent``.
The mantissa lies in the open interval (-1, 1), while the twos
exponent is a signed integer.
Parameters
----------
x : array_like
Array of numbers to be decomposed.
out1 : ndarray, optional
Output array for the mantissa. Must have the same shape as `x`.
out2 : ndarray, optional
Output array for the exponent. Must have the same shape as `x`.
Returns
-------
(mantissa, exponent) : tuple of ndarrays, (float, int)
`mantissa` is a float array with values between -1 and 1.
`exponent` is an int array which represents the exponent of 2.
See Also
--------
ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
Examples
--------
>>> x = np.arange(9)
>>> y1, y2 = np.frexp(x)
>>> y1
array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,
0.5 ])
>>> y2
array([0, 1, 2, 2, 3, 3, 3, 3, 4])
>>> y1 * 2**y2
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])
""")
add_newdoc('numpy.core.umath', 'ldexp',
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : array_like
Array of multipliers.
x2 : array_like, int
Array of twos exponents.
out : ndarray, optional
Output array for the result.
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
See Also
--------
frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
`ldexp` is useful as the inverse of `frexp`; if used by itself it is
clearer to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.], dtype=float32)
>>> x = np.arange(6)
>>> np.ldexp(*np.frexp(x))
array([ 0., 1., 2., 3., 4., 5.])
""")
| bsd-3-clause |
kastman/lyman | lyman/visualizations.py | 1 | 27609 | from __future__ import division
from six import string_types
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy import ndimage, signal
import nibabel as nib
class Mosaic(object):
def __init__(self, anat, stat=None, mask=None, n_col=9, step=2,
tight=True, show_mask=True, slice_dir="axial",
anat_lims=None):
"""Plot a mosaic of axial slices through an MRI volume.
Parameters
----------
anat : filename, nibabel image, or array
The anatomical image that will form the background of the mosaic.
If only an array is passed, an identity matrix will be used as
the affine and orientation could be incorrect.
stat : filename, nibabel image, or array
A statistical map to plot as an overlay (which happens by calling
one of the methods). If only an array is passed, it is assumed
to have the same orientation as the anatomy.
mask : filename, nibabel image, or array
A binary image where voxels included in the statistical analysis
are True. This will be used to gray-out voxels in the anatomical
image that are outside the field of view. If you want to overlay
the mask itself, pass it to ``stat``.
n_col : int
Number of columns in the mosaic. This will also determine the size
of the figure (1 inch per column).
step : int
Show every ``step`` slice along the slice_dir in the mosaic.
tight : bool
If True, try to crop panes to focus on the brain volume.
show_mask : bool
If True, gray-out voxels in the anat image that are outside
of the mask image.
slice_dir : axial | coronal | sagittal
Direction to slice the mosaic on.
anat_lims : pair of floats
Limits for the anatomical (background) image colormap
"""
# -- Load and reorient the anatomical image
if isinstance(anat, string_types):
anat_img = nib.load(anat)
have_orientation = True
elif isinstance(anat, nib.spatialimages.SpatialImage):
anat_img = anat
have_orientation = True
elif isinstance(anat, np.ndarray):
anat_img = nib.Nifti1Image(anat, np.eye(4))
have_orientation = False
else:
raise TypeError("anat type {} not understood".format(type(anat)))
self.anat_img = nib.as_closest_canonical(anat_img)
self.anat_data = self.anat_img.get_data()
# -- Load and reorient the statistical image
if isinstance(stat, string_types):
stat_img = nib.load(stat)
elif isinstance(stat, nib.spatialimages.SpatialImage):
stat_img = stat
elif isinstance(stat, np.ndarray):
if stat.dtype is np.dtype("bool"):
stat = stat.astype(np.int)
stat_img = nib.Nifti1Image(stat, anat_img.affine, anat_img.header)
elif stat is not None:
raise TypeError("stat type {} not understood".format(type(stat)))
else:
stat_img = None
if stat_img is not None:
self.stat_img = nib.as_closest_canonical(stat_img)
# -- Load and reorient the mask image
if isinstance(mask, string_types):
mask_img = nib.load(mask)
elif isinstance(mask, nib.spatialimages.SpatialImage):
mask_img = mask
elif isinstance(mask, np.ndarray):
if mask.dtype is np.dtype("bool"):
mask = mask.astype(np.int)
mask_img = nib.Nifti1Image(mask, anat_img.affine, anat_img.header)
elif mask is not None:
raise TypeError("mask type {} not understood".format(type(mask)))
else:
mask_img = None
mask_data = None
if mask is not None:
self.mask_img = nib.as_closest_canonical(mask_img)
mask_data = self.mask_img.get_data().astype(bool)
if slice_dir[0] not in "sca":
err = "Slice direction {} not understood".format(slice_dir)
raise ValueError(err)
# Find a field of view that tries to eliminate empty voxels
anat_fov = self.anat_img.get_data() > 1e-5
if tight:
self.fov = anat_fov
if mask is not None:
self.fov &= mask_data
else:
self.fov = np.ones_like(anat_fov, np.bool)
# Save the mosaic parameters
self.n_col = n_col
self.step = step
self.slice_dir = slice_dir
# Define slice objects to crop to the volume
slices, = ndimage.find_objects(self.fov)
self.x_slice, self.y_slice, self.z_slice = slices
# Update the slice on the mosaic axis with steps
slice_ax = dict(s="x", c="y", a="z")[slice_dir[0]]
ms = getattr(self, slice_ax + "_slice")
mosaic_slice = slice(ms.start, ms.stop, step)
setattr(self, slice_ax + "_slice", mosaic_slice)
self.n_slices = (ms.stop - ms.start) // step
# Initialize the figure and plot the constant info
self._setup_figure()
self._plot_anat(anat_lims)
if mask is not None and show_mask:
self._plot_inverse_mask()
# Label the anatomy
if have_orientation:
l_label, r_label = dict(s="PA", c="LR", a="LR")[self.slice_dir[0]]
self.fig.text(.01, .03, l_label, size=14, color="w",
ha="left", va="center")
self.fig.text(.99, .03, r_label, size=14, color="w",
ha="right", va="center")
def _setup_figure(self):
"""Initialize the figure and axes."""
n_row = np.ceil(self.n_slices / self.n_col)
if self.slice_dir.startswith("s"):
slc_i, slc_j = self.y_slice, self.z_slice
elif self.slice_dir.startswith("c"):
slc_i, slc_j = self.x_slice, self.z_slice
elif self.slice_dir.startswith("a"):
slc_i, slc_j = self.x_slice, self.y_slice
nx, ny, _ = self.anat_data[slc_i, slc_j].shape
figsize = self.n_col, (ny / nx) * n_row
plot_kws = dict(nrows=int(n_row), ncols=int(self.n_col),
figsize=figsize, facecolor="0",
subplot_kw=dict(xticks=[], yticks=[]))
self.fig, self.axes = plt.subplots(**plot_kws)
[ax.set_axis_off() for ax in self.axes.flat]
self.fig.subplots_adjust(0, 0, 1, 1, 0, 0)
def _plot_anat(self, lims=None):
"""Plot the anatomy in grayscale."""
anat_data = self.anat_img.get_data()
if lims is None:
vmin, vmax = 0, np.percentile(anat_data[self.fov], 99)
else:
vmin, vmax = lims
anat_fov = anat_data[self.x_slice, self.y_slice, self.z_slice]
self._map("imshow", anat_fov, cmap="gray", vmin=vmin, vmax=vmax)
empty_slices = len(self.axes.flat) - anat_fov.shape[2]
if empty_slices > 0:
i, j, _ = anat_fov.shape
for ax in self.axes.flat[-empty_slices:]:
ax.imshow(np.zeros((i, j)), cmap="gray", vmin=0, vmax=10)
def _plot_inverse_mask(self):
"""Dim the voxels outside of the statistical analysis FOV."""
mask_data = self.mask_img.get_data().astype(np.bool)
anat_data = self.anat_img.get_data()
mask_data = np.where(mask_data | (anat_data < 1e-5), np.nan, 1)
mask_fov = mask_data[self.x_slice, self.y_slice, self.z_slice]
self._map("imshow", mask_fov, cmap="bone", vmin=0, vmax=3,
interpolation="nearest", alpha=.5)
def _map(self, func_name, data, ignore_value_error=False, **kwargs):
"""Apply a named function to a 3D volume of data on each axes."""
transpose_orders = dict(s=(0, 1, 2), c=(1, 0, 2), a=(2, 0, 1))
slice_key = self.slice_dir[0]
slices = data.transpose(*transpose_orders[slice_key])
for slice, ax in zip(slices, self.axes.flat):
func = getattr(ax, func_name)
try:
func(np.rot90(slice), **kwargs)
except ValueError:
if ignore_value_error:
pass
else:
raise
def plot_activation(self, thresh=2, vmin=None, vmax=None, vmax_perc=99,
vfloor=None, pos_cmap="Reds_r", neg_cmap=None,
alpha=1, fmt=".2g"):
"""Plot the stat image as uni- or bi-polar activation with a threshold.
Parameters
----------
thresh : float
Threshold value for the statistic; overlay will not be visible
between -thresh and thresh.
vmin, vmax : floats
The anchor values for the colormap. The same values will be used
for the positive and negative overlay.
vmax_perc : int
The percentile of the data (within the fov and above the threshold)
at which to saturate the colormap by default. Overridden if a
specific value is passed for vmax.
vfloor : float or None
If not None, this sets the vmax value, if the value at the provided
vmax_perc does not exceed it.
pos_cmap, neg_cmap : names of colormaps or colormap objects
The colormapping for the positive and negative overlays.
alpha : float
The transparency of the overlay.
fmt : {}-style format string
Format of the colormap annotation.
"""
stat_data = self.stat_img.get_data()[self.x_slice,
self.y_slice,
self.z_slice]
pos_data = stat_data.copy()
pos_data[pos_data < thresh] = np.nan
if vmin is None:
vmin = thresh
if vmax is None:
calc_data = stat_data[np.abs(stat_data) > thresh]
if calc_data.any():
vmax = np.percentile(np.abs(calc_data), vmax_perc)
else:
vmax = vmin * 2
pos_cmap = self._get_cmap(pos_cmap)
self._map("imshow", pos_data, cmap=pos_cmap,
vmin=vmin, vmax=vmax, alpha=alpha)
if neg_cmap is not None:
thresh, nvmin, nvmax = -thresh, -vmax, -vmin
neg_data = stat_data.copy()
neg_data[neg_data > thresh] = np.nan
neg_cmap = self._get_cmap(neg_cmap)
self._map("imshow", neg_data, cmap=neg_cmap,
vmin=nvmin, vmax=nvmax, alpha=alpha)
self._add_double_colorbar(vmin, vmax, pos_cmap, neg_cmap, fmt)
else:
self._add_single_colorbar(vmin, vmax, pos_cmap, fmt)
def plot_overlay(self, cmap, vmin=None, vmax=None, center=False,
vmin_perc=1, vmax_perc=99, thresh=None,
alpha=1, fmt=".2g", colorbar=True):
"""Plot the stat image as a single overlay with a threshold.
Parameters
----------
cmap : name of colormap or colormap object
The colormapping for the overlay.
vmin, vmax : floats
The anchor values for the colormap. The same values will be used
for the positive and negative overlay.
center : bool
If true, center the colormap. This respects the larger absolute
value from the other (vmin, vmax) arguments, but overrides the
smaller one.
vmin_perc, vmax_perc : ints
The percentiles of the data (within fov and above threshold)
that will be anchor points for the colormap by default. Overridden
if specific values are passed for vmin or vmax.
thresh : float
Threshold value for the statistic; overlay will not be visible
between -thresh and thresh.
alpha : float
The transparency of the overlay.
fmt : {}-style format string
Format of the colormap annotation.
colorbar : bool
If true, add a colorbar.
"""
stat_data = self.stat_img.get_data()[self.x_slice,
self.y_slice,
self.z_slice]
if hasattr(self, "mask_img"):
fov = self.mask_img.get_data()[self.x_slice,
self.y_slice,
self.z_slice].astype(bool)
else:
fov = np.ones_like(stat_data).astype(bool)
if vmin is None:
vmin = np.percentile(stat_data[fov], vmin_perc)
if vmax is None:
if stat_data.any():
vmax = np.percentile(stat_data[fov], vmax_perc)
else:
vmax = vmin * 2
if center:
vabs = max(np.abs(vmin), vmax)
vmin, vmax = -vabs, vabs
if thresh is not None:
stat_data[stat_data < thresh] = np.nan
stat_data[~fov] = np.nan
cmap = self._get_cmap(cmap)
self._map("imshow", stat_data, cmap=cmap,
vmin=vmin, vmax=vmax, alpha=alpha)
if colorbar:
self._add_single_colorbar(vmin, vmax, cmap, fmt)
def plot_mask(self, color="#dd2222", alpha=.66):
"""Plot the statistical volume as a binary mask."""
mask_data = self.stat_img.get_data()[self.x_slice,
self.y_slice,
self.z_slice]
bool_mask = mask_data.astype(bool)
mask_data = bool_mask.astype(np.float)
mask_data[~bool_mask] = np.nan
cmap = mpl.colors.ListedColormap([color])
self._map("imshow", mask_data, cmap=cmap, vmin=.5, vmax=1.5,
interpolation="nearest", alpha=alpha)
def plot_mask_edges(self, color="#dd2222", linewidth=1):
"""Plot the edges of possibly multiple masks to show overlap."""
cmap = mpl.colors.ListedColormap([color])
slices = self.stat_img.get_data()[self.x_slice,
self.y_slice,
self.z_slice]
self._map("contour", slices, ignore_value_error=True,
levels=[0, 1], cmap=cmap, vmin=0, vmax=1,
linewidths=linewidth)
def map(self, func_name, data, thresh=None, **kwargs):
"""Map a dataset across the mosaic of axes.
Parameters
----------
func_name : str
Name of a pyplot function.
data : filename, nibabel image, or array
Dataset to plot.
thresh : float
Don't map voxels in ``data`` below this threshold.
kwargs : key, value mappings
Other keyword arguments are passed to the plotting function.
"""
if isinstance(data, string_types):
data_img = nib.load(data)
elif isinstance(data, np.ndarray):
data_img = nib.Nifti1Image(data, np.eye(4))
else:
data_img = data
data_img = nib.as_closest_canonical(data_img)
data = data_img.get_data()
data = data.astype(np.float)
if thresh is not None:
data[data < thresh] = np.nan
data = data[self.x_slice, self.y_slice, self.z_slice]
self._map(func_name, data, **kwargs)
def _pad_for_cbar(self):
"""Add extra space to the bottom of the figure for the colorbars."""
w, h = self.fig.get_size_inches()
cbar_inches = .3
self.fig.set_size_inches(w, h + cbar_inches)
cbar_height = cbar_inches / (h + cbar_inches)
self.fig.subplots_adjust(0, cbar_height, 1, 1)
# Needed so things look nice in the notebook
bg_ax = self.fig.add_axes([0, 0, 1, cbar_height])
bg_ax.set_axis_off()
bg_ax.pcolormesh(np.array([[1]]), cmap="Greys", vmin=0, vmax=1)
return cbar_height
def _add_single_colorbar(self, vmin, vmax, cmap, fmt):
"""Add colorbars for a single overlay."""
cbar_height = self._pad_for_cbar()
cbar_ax = self.fig.add_axes([.3, .01, .4, cbar_height - .01])
cbar_ax.set_axis_off()
bar_data = np.linspace(0, 1, 256).reshape(1, 256)
cbar_ax.pcolormesh(bar_data, cmap=cmap)
if fmt is not None:
fmt = "{:" + fmt + "}"
kws = dict(y=.005 + cbar_height * .5,
color="white", size=14, va="center")
self.fig.text(.29, s=fmt.format(vmin), ha="right", **kws)
self.fig.text(.71, s=fmt.format(vmax), ha="left", **kws)
def _add_double_colorbar(self, vmin, vmax, pos_cmap, neg_cmap, fmt):
"""Add colorbars for a positive and a negative overlay."""
cbar_height = self._pad_for_cbar()
bar_data = np.linspace(0, 1, 256).reshape(1, 256)
pos_ax = self.fig.add_axes([.55, .01, .3, cbar_height - .01])
pos_ax.set_axis_off()
pos_ax.pcolormesh(bar_data, cmap=pos_cmap)
neg_ax = self.fig.add_axes([.15, .01, .3, cbar_height - .01])
neg_ax.set_axis_off()
neg_ax.pcolormesh(bar_data, cmap=neg_cmap)
if fmt is not None:
fmt = "{:" + fmt + "}"
kws = dict(y=.005 + cbar_height * .5,
color="white", size=14, va="center")
self.fig.text(.54, s=fmt.format(vmin), ha="right", **kws)
self.fig.text(.86, s=fmt.format(vmax), ha="left", **kws)
self.fig.text(.14, s=fmt.format(-vmax), ha="right", **kws)
self.fig.text(.46, s=fmt.format(-vmin), ha="left", **kws)
def _get_cmap(self, cmap):
"""Parse a string spec of a cubehelix palette."""
if isinstance(cmap, string_types):
if cmap.startswith("cube"):
if cmap.endswith("_r"):
reverse = False
cmap = cmap[:-2]
else:
reverse = True
_, start, rot = cmap.split(":")
cube_rgb = mpl._cm.cubehelix(s=float(start), r=float(rot))
cube_cmap = mpl.colors.LinearSegmentedColormap(cmap, cube_rgb)
lut = cube_cmap(np.linspace(.95, 0, 256))
if reverse:
lut = lut[::-1]
cmap = mpl.colors.ListedColormap(lut)
return cmap
def savefig(self, fname, close=False, **kwargs):
"""Save the figure; optionally close it."""
self.fig.savefig(fname, facecolor="0", edgecolor="0", **kwargs)
if close:
self.close()
def close(self):
"""Close the figure."""
plt.close(self.fig)
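# A minimal usage sketch for Mosaic (not part of the module; the file names
# below are hypothetical placeholders for co-registered images):
#
#     mosaic = Mosaic("anat.nii.gz", stat="zstat.nii.gz", mask="mask.nii.gz",
#                     n_col=8, step=2, slice_dir="axial")
#     mosaic.plot_activation(thresh=2.3, pos_cmap="Reds_r", neg_cmap="Blues_r")
#     mosaic.savefig("zstat_mosaic.png", close=True)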
class CarpetPlot(object):
components = [
"cortex", "subgm", "brainstem", "cerebellum",
"cortwm", "deepwm", "cerebwm", "csf"
]
def __init__(self, data, seg, mc_params=None, smooth_fwhm=5,
vlim=None, title=None):
"""Heatmap rendering of an fMRI timeseries for quality control.
The Freesurfer segmentation is used to organize data by different
components of the brain.
Instantiating the class will load, preprocess, and plot the data.
Parameters
----------
data : filename or nibabel image
4D time series data to plot.
seg : filename or nibabel image
Freesurfer segmentation (wmparc) image in functional space.
mc_params : filename or DataFrame, optional
Text file or array of realignment parameters. If present, the time
series of framewise displacements will be shown at the top of the
figure.
smooth_fwhm : float or None, optional
Size of the smoothing kernel, in mm, to apply. Smoothing is
restricted within the mask for each component (cortex, cerebellum,
etc.). Smoothing reduces white noise and makes global image
artifacts much more apparent. Set to None to skip smoothing.
vlim : None or int, optional
Colormap limits (will be symmetric) in percent signal change units.
title : string
Title to show at the top of the plot.
Attributes
----------
fig : matplotlib Figure
axes : dict of matplotlib Axes
segdata : dict of arrays with data in the main plot
fd : 1d array of framewise displacements
"""
# TODO Accept mean_img and use that to convert to pct change if present
# TODO accept a lut? (Also make the anat segmentation generate one)
# Load the timeseries data
if isinstance(data, str):
img = nib.load(data)
else:
img = data
data = img.get_data().astype(np.float)
# Load the Freesurfer parcellation
if isinstance(seg, str):
seg = nib.load(seg).get_data()
else:
seg = seg.get_data()
# Use header geometry to convert smoothing sigma from mm to voxels
sx, sy, sz, _ = img.header.get_zooms()
voxel_sizes = sx, sy, sz
if smooth_fwhm is not None and smooth_fwhm > 0:
    smooth_sigma = np.divide(smooth_fwhm / 2.355, voxel_sizes)
else:
    smooth_sigma = None
# Preprocess and segment the data
masks, brain = self.define_masks(seg)
data[brain] = self.percent_change(data[brain])
data[brain] = signal.detrend(data[brain])
data = self.smooth_data(data, masks, smooth_sigma)
segdata = self.segment_data(data, masks)
fd = self.framewise_displacement(mc_params)
# Get a default limit for the colormap
if vlim is None:
sd = np.percentile(segdata["cortex"].std(axis=1), 95)
vlim = int(np.round(sd))
# Make the plot
fig, axes = self.setup_figure()
self.fig, self.axes = fig, axes
self.plot_fd(axes["motion"], fd)
self.plot_data(axes, segdata, vlim)
if title is not None:
fig.suptitle(title)
# Store useful attributes
self.segdata = segdata
self.fd = fd
def savefig(self, fname, close=True, **kwargs):
self.fig.savefig(fname, **kwargs)
if close:
self.close()
def close(self):
plt.close(self.fig)
def percent_change(self, data):
"""Convert to percent signal change over the mean for each voxel."""
null = data.mean(axis=-1) == 0
with np.errstate(all="ignore"):
data /= data.mean(axis=-1, keepdims=True)
data -= 1
data *= 100
data[null] = 0
return data
def define_masks(self, seg):
"""Create masks for anatomical components using Freesurfer labeling."""
masks = {c: seg == i for i, c in enumerate(self.components, 1)}
brain = seg > 0
return masks, brain
def smooth_data(self, data, masks, sigma):
"""Smooth the 4D image separately within each component."""
if sigma is None:
return data
for comp, mask in masks.items():
data[mask] = self._smooth_within_mask(data, mask, sigma)
return data
def _smooth_within_mask(self, data, mask, sigmas):
"""Smooth each with a Gaussian kernel, restricted to a mask."""
# TODO move this to a central lyman function?
comp_data = data * np.expand_dims(mask, -1)
for f in range(comp_data.shape[-1]):
comp_data[..., f] = ndimage.gaussian_filter(comp_data[..., f],
sigmas)
smooth_mask = ndimage.gaussian_filter(mask.astype(float), sigmas)
with np.errstate(all="ignore"):
comp_data = comp_data / np.expand_dims(smooth_mask, -1)
return comp_data[mask]
def segment_data(self, data, masks):
"""Convert the 4D data image into a set of 2D matrices."""
segdata = {comp: data[mask] for comp, mask in masks.items()}
return segdata
def framewise_displacement(self, realign_params):
"""Compute the time series of framewise displacements."""
if isinstance(realign_params, str):
rp = pd.read_csv(realign_params)
elif isinstance(realign_params, pd.DataFrame):
rp = realign_params
else:
return None
r = rp.filter(regex="rot").values
t = rp.filter(regex="trans").values
s = r * 50
ad = np.hstack([s, t])
rd = np.abs(np.diff(ad, axis=0))
fd = np.sum(rd, axis=1)
return fd
def setup_figure(self):
"""Initialize and organize the matplotlib objects."""
width, height = 8, 10
f = plt.figure(figsize=(width, height), facecolor="0")
gs = plt.GridSpec(nrows=2, ncols=2,
left=.07, right=.98,
bottom=.05, top=.96,
wspace=0, hspace=.02,
height_ratios=[.1, .9],
width_ratios=[.01, .99])
ax_i = f.add_subplot(gs[1, 1])
ax_m = f.add_subplot(gs[0, 1], sharex=ax_i)
ax_c = f.add_subplot(gs[1, 0], sharey=ax_i)
ax_b = f.add_axes([.035, .35, .0125, .2])
ax_i.set(xlabel="Volume", yticks=[])
ax_m.set(ylabel="FD (mm)")
ax_c.set(xticks=[])
axes = dict(image=ax_i, motion=ax_m, comp=ax_c, cbar=ax_b)
return f, axes
def plot_fd(self, ax, fd):
"""Show a line plot of the framewise displacement data."""
if fd is None:
fd = []
ax.set(ylim=(0, .5))
ax.plot(np.arange(1, len(fd) + 1), fd, lw=1.5, color=".15")
ax.set(ylabel="FD (mm)", ylim=(0, None))
for label in ax.get_xticklabels():
label.set_visible(False)
def plot_data(self, axes, segdata, vlim):
"""Draw the elements corresponding to the image data."""
# Concatenate and plot the time series data
plot_data = np.vstack([segdata[comp] for comp in self.components])
axes["image"].imshow(plot_data, cmap="gray", vmin=-vlim, vmax=vlim,
aspect="auto", rasterized=True)
# Separate the anatomical components
sizes = [len(segdata[comp]) for comp in self.components]
for y in np.cumsum(sizes)[:-1]:
axes["image"].axhline(y, c="w", lw=1)
# Add colors to identify the anatomical components
comp_ids = np.vstack([
np.full((len(segdata[comp]), 1), i, dtype=np.int)
for i, comp in enumerate(self.components, 1)
])
comp_colors = ['#3b5f8a', '#5b81b1', '#7ea3d1', '#a8c5e9',
'#ce8186', '#b8676d', '#9b4e53', '#fbdd7a']
comp_cmap = mpl.colors.ListedColormap(comp_colors)
axes["comp"].imshow(comp_ids,
vmin=1, vmax=len(self.components),
aspect="auto", rasterized=True,
cmap=comp_cmap)
# Add the colorbar
xx = np.expand_dims(np.linspace(1, 0, 100), -1)
ax = axes["cbar"]
ax.imshow(xx, aspect="auto", cmap="gray")
ax.set(xticks=[], yticks=[], ylabel="Percent signal change")
ax.text(0, -2, "$+${}".format(vlim),
ha="center", va="bottom", clip_on=False)
ax.text(0, 103, "$-${}".format(vlim),
ha="center", va="top", clip_on=False)
| bsd-3-clause |
kmather73/zipline | zipline/examples/pairtrade.py | 16 | 5197 | #!/usr/bin/env python
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logbook
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
from datetime import datetime
import pytz
from zipline.algorithm import TradingAlgorithm
from zipline.transforms import batch_transform
from zipline.utils.factory import load_from_yahoo
@batch_transform
def ols_transform(data, sid1, sid2):
"""Computes regression coefficient (slope and intercept)
via Ordinary Least Squares between two SIDs.
"""
p0 = data.price[sid1]
p1 = sm.add_constant(data.price[sid2], prepend=True)
# With prepend=True the constant column comes first, so the fitted
# params are ordered (intercept, slope).
intercept, slope = sm.OLS(p0, p1).fit().params
return intercept, slope
class Pairtrade(TradingAlgorithm):
"""Pairtrading relies on cointegration of two stocks.
The expectation is that once the two stocks drifted apart
(i.e. there is spread), they will eventually revert again. Thus,
if we short the upward drifting stock and long the downward
drifting stock (in short, we buy the spread) once the spread
widened we can sell the spread with profit once they converged
again. A nice property of this algorithm is that we enter the
market in a neutral position.
This specific algorithm tries to exploit the cointegration of
Pepsi and Coca Cola by estimating the correlation between the
two. Divergence of the spread is evaluated by z-scoring.
"""
def initialize(self, window_length=100):
self.spreads = []
self.invested = 0
self.window_length = window_length
self.ols_transform = ols_transform(refresh_period=self.window_length,
window_length=self.window_length)
self.PEP = self.symbol('PEP')
self.KO = self.symbol('KO')
def handle_data(self, data):
######################################################
# 1. Compute regression coefficients between PEP and KO
params = self.ols_transform.handle_data(data, self.PEP, self.KO)
if params is None:
return
intercept, slope = params
######################################################
# 2. Compute spread and zscore
zscore = self.compute_zscore(data, slope, intercept)
self.record(zscores=zscore)
######################################################
# 3. Place orders
self.place_orders(data, zscore)
def compute_zscore(self, data, slope, intercept):
"""1. Compute the spread given slope and intercept.
2. zscore the spread.
"""
spread = (data[self.PEP].price -
(slope * data[self.KO].price + intercept))
self.spreads.append(spread)
spread_wind = self.spreads[-self.window_length:]
zscore = (spread - np.mean(spread_wind)) / np.std(spread_wind)
return zscore
def place_orders(self, data, zscore):
"""Buy spread if zscore is > 2, sell if zscore < .5.
"""
if zscore >= 2.0 and not self.invested:
self.order(self.PEP, int(100 / data[self.PEP].price))
self.order(self.KO, -int(100 / data[self.KO].price))
self.invested = True
elif zscore <= -2.0 and not self.invested:
self.order(self.PEP, -int(100 / data[self.PEP].price))
self.order(self.KO, int(100 / data[self.KO].price))
self.invested = True
elif abs(zscore) < .5 and self.invested:
self.sell_spread()
self.invested = False
def sell_spread(self):
"""
Decrease exposure, regardless of position long/short:
buy for a short position, sell for a long.
"""
ko_amount = self.portfolio.positions[self.KO].amount
self.order(self.KO, -1 * ko_amount)
pep_amount = self.portfolio.positions[self.PEP].amount
self.order(self.PEP, -1 * pep_amount)
if __name__ == '__main__':
logbook.StderrHandler().push_application()
start = datetime(2000, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_from_yahoo(stocks=['PEP', 'KO'], indexes={},
start=start, end=end)
pairtrade = Pairtrade()
results = pairtrade.run(data)
data['spreads'] = np.nan
ax1 = plt.subplot(211)
# TODO Bugged - indices are out of bounds
# data[[pairtrade.PEPsid, pairtrade.KOsid]].plot(ax=ax1)
plt.ylabel('price')
plt.setp(ax1.get_xticklabels(), visible=False)
ax2 = plt.subplot(212, sharex=ax1)
results.zscores.plot(ax=ax2, color='r')
plt.ylabel('zscored spread')
plt.gcf().set_size_inches(18, 8)
| apache-2.0 |
jmd-dk/concept | concept/tests/multigrid/analyze.py | 1 | 7114 | # This file has to be run in pure Python mode!
# Imports from the CO𝘕CEPT code
from commons import *
plt = get_matplotlib().pyplot
# Absolute path and name of the directory of this file
this_dir = os.path.dirname(os.path.realpath(__file__))
this_test = os.path.basename(this_dir)
masterprint(f'Analysing {this_test} data ...')
############################################
# Upstream/global (power spectrum) subtest #
############################################
subtest = 'upstream_global'
subtest_dir = f'{this_dir}/{subtest}'
masterprint(f'Analysing {subtest} data ...')
# Read in power spectra
powerspecs = {}
for f in glob(f'{subtest_dir}/powerspec*'):
if f.endswith('.png'):
continue
gridsize = int(os.path.basename(f).split('_')[1])
powerspecs[gridsize] = data = {}
data['k'], data['modes'], data['P'] = np.loadtxt(f, unpack=True)
powerspecs = {gridsize: powerspecs[gridsize] for gridsize in sorted(powerspecs.keys())}
# Two power spectra should be identical up until the Nyquist frequency
# of the spectrum with lower grid size.
# Check that this is so for the power spectra with the
# small and the medium grid sizes, and for the power spectra
# with the medium and the large grid sizes.
def check(data_small, data_large, gridsize_small):
nyquist_small = gridsize_small//2
k_nyquist_small = 2*π/boxsize*nyquist_small
n = sum(data_small['k'] < k_nyquist_small) - 1
for quantity in ('k', 'modes', 'P'):
if any(data_large[quantity][:n] != data_small[quantity][:n]):
abort(
f'Found different {quantity} below Nyquist frequency '
f'of the power spectra in "{subtest_dir}". '
f'See the plots in "{subtest_dir}" for a visualization.'
)
gen = iter(powerspecs.items())
gridsize_small, data_small = next(gen)
gridsize_middl, data_middl = next(gen)
gridsize_large, data_large = next(gen)
check(data_small, data_middl, gridsize_small)
check(data_middl, data_large, gridsize_middl)
# Done analysing this subtest
masterprint('done')
###################################
# Phase shift (2D render) subtest #
###################################
subtest = 'phaseshift'
subtest_dir = f'{this_dir}/{subtest}'
masterprint(f'Analyzing {subtest} data ...')
# Read in 2D renders
render2Ds = {}
for f in glob(f'{subtest_dir}/render2D*'):
if f.endswith('.png'):
continue
gridsize = int(os.path.basename(f).split('_')[1].removesuffix('.hdf5'))
with open_hdf5(f, mode='r') as hdf5_file:
render2Ds[gridsize] = hdf5_file['data'][...]
# Convert from image to data coordinates
render2Ds[gridsize] = render2Ds[gridsize][::-1, :].transpose()
# Convert from mass to density
render2Ds[gridsize] /= (boxsize/gridsize)**2*boxsize
render2Ds = {gridsize: render2Ds[gridsize] for gridsize in sorted(render2Ds.keys())}
# Check that the 2D renders are homogeneous along the y direction
for gridsize, render2D in render2Ds.items():
for i in range(render2D.shape[0]):
if len(set(render2D[i, :])) != 1:
abort(
f'2D render with grid size {gridsize} is '
f'inhomogeneous along the y direction. '
f'See the 2D renders in "{subtest_dir}" for a visualization.'
)
# Plot the 1D cross sections of the 2D renders
# along the x direction, revealing the sines.
plot_file = f'{subtest_dir}/plot.png'
sines = {}
for (gridsize, render2D), linestyle in zip(render2Ds.items(), ('-', '--', ':')):
x = (0.5 + arange(gridsize))*boxsize/gridsize
y = render2D[:, 0]
sines[gridsize] = (x, y)
plt.plot(x, y, linestyle, label=f'gridsize {gridsize}')
plt.xlabel(rf'$x\, [\mathrm{{{unit_length}}}]$')
plt.ylabel(
r'$\rho$ $\mathrm{{[{}\, m_{{\odot}}\, {}^{{-3}}]}}$'
.format(
significant_figures(
1/units.m_sun,
3,
fmt='tex',
incl_zeros=False,
scientific=False,
),
unit_length,
)
)
plt.legend()
plt.savefig(plot_file)
# Check whether the sines are in phase
rel_tol = 1e-9
extrema = {}
for gridsize, (x, y) in sines.items():
# Find index of first trough
safety = 1e-6
miny = min(y)
height = max(y) - miny
for index in range(gridsize):
if y[index] <= miny*(1 + safety):
break
# Store height of troughs and peaks
troughs = y[index::gridsize//2]
peaks = y[index+gridsize//4::gridsize//2]
extrema[gridsize] = (troughs, peaks)
for troughs1, peaks1 in extrema.values():
break
for gridsize, (troughs, peaks) in extrema.items():
if (
not np.allclose(troughs, troughs1, rel_tol, 0)
or not np.allclose(peaks , peaks1 , rel_tol, 0)
):
abort(
f'Erroneous phase shift obtained through grid scaling. '
f'See the plot "{plot_file}" along with the 2D renders '
f'in "{subtest_dir}" for a visualization.'
)
# Done analysing this subtest
masterprint('done')
###################################################
# Upstream/global/downstream (simulation) subtest #
###################################################
subtest = 'upstream_global_downstream'
subtest_dir = f'{this_dir}/{subtest}'
masterprint(f'Analyzing {subtest} data ...')
# Read in power spectra
powerspecs = {}
for f in glob(f'{subtest_dir}/powerspec*'):
if f.endswith('.png'):
continue
gridsize = int(os.path.basename(f).split('_')[1])
powerspecs[gridsize] = np.loadtxt(f)
powerspecs = {gridsize: powerspecs[gridsize] for gridsize in sorted(powerspecs.keys())}
# Check that the power spectra are identical
data0, data1 = powerspecs.values()
data0[np.isnan(data0)] = -1
data1[np.isnan(data1)] = -1
if np.any(data0 != data1):
abort(
f'The two power spectra within "{subtest_dir}" '
f'should be identical but are not. '
f'See the plots in "{subtest_dir}" for a visualization.'
)
# Done analyzing this subtest
masterprint('done')
############################################
# Number of processes (simulation) subtest #
############################################
subtest = 'nprocs'
subtest_dir = f'{this_dir}/{subtest}'
masterprint(f'Analyzing {subtest} data ...')
# Read in power spectra
powerspecs = {}
for f in glob(f'{subtest_dir}/*/powerspec*'):
if f.endswith('.png'):
continue
n = int(os.path.basename(os.path.dirname(f)))
powerspecs[n] = np.loadtxt(f)
powerspecs = {n: powerspecs[n] for n in sorted(powerspecs.keys())}
# Check that the power spectra are identical
for powerspec1 in powerspecs.values():
break
powerspec1[np.isnan(powerspec1)] = -1
for powerspec in powerspecs.values():
powerspec[np.isnan(powerspec)] = -1
if np.any(powerspec != powerspec1):
abort(
f'The power spectra of the different subdirectories '
f'within "{subtest_dir}" should all be identical but are not. '
f'See the plots in the subdirectories of "{subtest_dir}" '
f'for a visualization.'
)
# Done analyzing this subtest
masterprint('done')
# Done analyzing
masterprint('done')
| gpl-3.0 |
pb-pravin/data-science-from-scratch | code/linear_algebra.py | 49 | 3637 | # -*- coding: iso-8859-15 -*-
from __future__ import division # want 3 / 2 == 1.5
import re, math, random # regexes, math functions, random numbers
import matplotlib.pyplot as plt # pyplot
from collections import defaultdict, Counter
from functools import partial
#
# functions for working with vectors
#
def vector_add(v, w):
"""adds two vectors componentwise"""
return [v_i + w_i for v_i, w_i in zip(v,w)]
def vector_subtract(v, w):
"""subtracts two vectors componentwise"""
return [v_i - w_i for v_i, w_i in zip(v,w)]
def vector_sum(vectors):
return reduce(vector_add, vectors)
def scalar_multiply(c, v):
return [c * v_i for v_i in v]
def vector_mean(vectors):
"""compute the vector whose i-th element is the mean of the
i-th elements of the input vectors"""
n = len(vectors)
return scalar_multiply(1/n, vector_sum(vectors))
def dot(v, w):
"""v_1 * w_1 + ... + v_n * w_n"""
return sum(v_i * w_i for v_i, w_i in zip(v, w))
def sum_of_squares(v):
"""v_1 * v_1 + ... + v_n * v_n"""
return dot(v, v)
def magnitude(v):
return math.sqrt(sum_of_squares(v))
def squared_distance(v, w):
return sum_of_squares(vector_subtract(v, w))
def distance(v, w):
return math.sqrt(squared_distance(v, w))
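# A few quick sanity checks (not from the book text; easy to verify by hand):
#
#     vector_add([1, 2], [3, 4]) # [4, 6]
#     dot([1, 2], [3, 4]) # 11
#     distance([0, 0], [3, 4]) # 5.0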
#
# functions for working with matrices
#
def shape(A):
num_rows = len(A)
num_cols = len(A[0]) if A else 0
return num_rows, num_cols
def get_row(A, i):
return A[i]
def get_column(A, j):
return [A_i[j] for A_i in A]
def make_matrix(num_rows, num_cols, entry_fn):
"""returns a num_rows x num_cols matrix
whose (i,j)-th entry is entry_fn(i, j)"""
return [[entry_fn(i, j) for j in range(num_cols)]
for i in range(num_rows)]
def is_diagonal(i, j):
"""1's on the 'diagonal', 0's everywhere else"""
return 1 if i == j else 0
identity_matrix = make_matrix(5, 5, is_diagonal)
# user 0 1 2 3 4 5 6 7 8 9
#
friendships = [[0, 1, 1, 0, 0, 0, 0, 0, 0, 0], # user 0
[1, 0, 1, 1, 0, 0, 0, 0, 0, 0], # user 1
[1, 1, 0, 1, 0, 0, 0, 0, 0, 0], # user 2
[0, 1, 1, 0, 1, 0, 0, 0, 0, 0], # user 3
[0, 0, 0, 1, 0, 1, 0, 0, 0, 0], # user 4
[0, 0, 0, 0, 1, 0, 1, 1, 0, 0], # user 5
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # user 6
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # user 7
[0, 0, 0, 0, 0, 0, 1, 1, 0, 1], # user 8
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]] # user 9
#####
# DELETE DOWN
#
def matrix_add(A, B):
if shape(A) != shape(B):
raise ArithmeticError("cannot add matrices with different shapes")
num_rows, num_cols = shape(A)
def entry_fn(i, j): return A[i][j] + B[i][j]
return make_matrix(num_rows, num_cols, entry_fn)
def make_graph_dot_product_as_vector_projection(plt):
v = [2, 1]
w = [math.sqrt(.25), math.sqrt(.75)]
c = dot(v, w)
vonw = scalar_multiply(c, w)
o = [0,0]
plt.arrow(0, 0, v[0], v[1],
width=0.002, head_width=.1, length_includes_head=True)
plt.annotate("v", v, xytext=[v[0] + 0.1, v[1]])
plt.arrow(0 ,0, w[0], w[1],
width=0.002, head_width=.1, length_includes_head=True)
plt.annotate("w", w, xytext=[w[0] - 0.1, w[1]])
plt.arrow(0, 0, vonw[0], vonw[1], length_includes_head=True)
plt.annotate(u"(v•w)w", vonw, xytext=[vonw[0] - 0.1, vonw[1] + 0.1])
plt.arrow(v[0], v[1], vonw[0] - v[0], vonw[1] - v[1],
linestyle='dotted', length_includes_head=True)
plt.scatter(*zip(v,w,o),marker='.')
plt.axis('equal')
plt.show()
| unlicense |
zhuolinho/linphone | submodules/mswebrtc/webrtc/tools/cpu/cpu_mon.py | 6 | 2057 | #!/usr/bin/env python
#
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import psutil
import sys
import numpy
from matplotlib import pyplot
class CpuSnapshot(object):
def __init__(self, label):
self.label = label
self.samples = []
def Capture(self, sample_count):
print ('Capturing %d CPU samples for %s...' %
((sample_count - len(self.samples)), self.label))
while len(self.samples) < sample_count:
self.samples.append(psutil.cpu_percent(1.0, False))
def Text(self):
return ('%s: avg=%s, median=%s, min=%s, max=%s' %
(self.label, numpy.average(self.samples),
numpy.median(self.samples),
numpy.min(self.samples), numpy.max(self.samples)))
def Max(self):
return numpy.max(self.samples)
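# Illustrative non-interactive use of CpuSnapshot (label and count are arbitrary):
#
#     snap = CpuSnapshot('baseline')
#     snap.Capture(10) # ten 1-second CPU-percent samples via psutil
#     print snap.Text() # avg/median/min/max summary line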
def GrabCpuSamples(sample_count):
print 'Label for snapshot (enter to quit): '
label = raw_input().strip()
if len(label) == 0:
return None
snapshot = CpuSnapshot(label)
snapshot.Capture(sample_count)
return snapshot
def main():
print 'How many seconds to capture per snapshot (enter for 60)?'
sample_count = raw_input().strip()
if len(sample_count) > 0 and int(sample_count) > 0:
sample_count = int(sample_count)
else:
print 'Defaulting to 60 samples.'
sample_count = 60
snapshots = []
while True:
snapshot = GrabCpuSamples(sample_count)
if snapshot is None:
break
snapshots.append(snapshot)
if len(snapshots) == 0:
print 'no samples captured'
return -1
pyplot.title('CPU usage')
for s in snapshots:
pyplot.plot(s.samples, label=s.Text(), linewidth=2)
pyplot.legend()
pyplot.show()
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-2.0 |
theodoregoetz/colormap | cmap-generator.py | 1 | 19669 | import matplotlib as mpl
mpl.use('wxAgg')
import numpy as np
import warnings
import wx
from copy import copy
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as Canvas
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.figure import Figure, SubplotParams
from numpy import random as rand
from scipy import interpolate
from skimage import color
rand.seed(1)
class OrderedBoundedPoints(object):
def __init__(self, x, y, extent):
self.xmin, self.xmax = extent[:2]
self.ymin, self.ymax = extent[2:]
self.x = self._clip_x(np.asarray(x))
self.y = self._clip_y(np.asarray(y))
self._reorder()
self._cache = None
def save_cache(self):
self._cache = (self.x.copy(), self.y.copy())
def load_cache(self):
if self._cache is None:
return False
else:
self.x, self.y = self._cache
self._cache = None
return True
def pick(self, x, y):
i = ((self.x - x)**2 + (self.y - y)**2).argmin()
return i, self.x[i], self.y[i]
def move(self, i, x=None, y=None):
if x is not None and i not in (0, len(self.x) - 1):
self.x[i] = self._clip_x(x)
if y is not None:
self.y[i] = self._clip_y(y)
self._redistribute()
self._reorder()
return self._moved_index(i)
def add(self, x, y=None):
self.x = np.concatenate([[self._clip_x(x)], self.x])
if y is None:
y = 0.5 * (self.ymin + self.ymax)
self.y = np.concatenate([[self._clip_y(y)], self.y])
self._redistribute()
self._reorder()
return self._moved_index(0)
def delete(self, i):
if i in (0, len(self.x) - 1):
return False
else:
self.x = np.delete(self.x, i)
self.y = np.delete(self.y, i)
return True
def _clip_x(self, x):
return np.clip(x, self.xmin, self.xmax)
def _clip_y(self, y):
return np.clip(y, self.ymin, self.ymax)
def _reorder(self):
self._order = None
if len(self.x) > 2:
if any(self.x[:-1] > self.x[1:]):
self._order = np.argsort(self.x)
self.x = self.x[self._order]
self.y = self.y[self._order]
def _redistribute(self):
if len(self.x) > 2:
tol = 0.001
dmin = 0.01 * (self.xmax - self.xmin)
close = np.isclose(self.x[:-1], self.x[1:])
if close[-1]:
dx = tol * abs(self.x[-1] - self.x[-3])
self.x[-2] = self.x[-1] - max(dx, dmin)
close = np.argwhere(close[:-1]).flatten()
if len(close):
dx = tol * np.abs(self.x[close + 2] - self.x[close])
self.x[close + 1] = self.x[close] + np.maximum(dx, dmin)
def _moved_index(self, i):
if self._order is None:
return i
else:
inew = np.argwhere(self._order == i).flat
return i if i in inew else inew[0]
class BoundedSpline(OrderedBoundedPoints):
def __init__(self, spline_npoints, *args, **kwargs):
super().__init__(*args, **kwargs)
self.spline_x = np.linspace(self.xmin, self.xmax, spline_npoints)
self.update_spline()
def update_spline(self):
self.spline = interpolate.PchipInterpolator(self.x, self.y)
#self.spline = interpolate.CubicSpline(self.x, self.y, bc_type='natural')
@property
def spline_y(self):
return np.clip(self.spline(self.spline_x), self.ymin, self.ymax)
def load_cache(self):
ret = super().load_cache()
if ret:
self.update_spline()
return ret
def move(self, *args, **kwargs):
ret = super().move(*args, **kwargs)
self.update_spline()
return ret
def add(self, *args, **kwargs):
ret = super().add(*args, **kwargs)
self.update_spline()
return ret
def delete(self, *args, **kwargs):
ret = super().delete(*args, **kwargs)
if ret:
self.update_spline()
return ret
def __call__(self, x):
return self.spline(x)
class ColorMap(object):
def __init__(self, x, colors,
delta_e=color.deltaE_ciede94,
smoothing=0,
npoints=256,
imdim=(40, 64)):
self.x = x
self.colors = np.asarray(colors).reshape(3, -1)
self.delta_e = delta_e
self.smoothing = smoothing
self.npoints = npoints
self.imdim = imdim
self.splines = [
BoundedSpline(npoints, self.x, self.colors[0], (0, 1, 0, 100)),
BoundedSpline(npoints, self.x, self.colors[1], (0, 1, -128, 128)),
BoundedSpline(npoints, self.x, self.colors[2], (0, 1, -128, 128))]
ncorrpoints = 100
self._xx = np.linspace(0, 1, ncorrpoints)
self._colors_uncorr = np.empty((ncorrpoints, 3))
for i in (0, 1, 2):
self._update_uncorrected_colors(i)
self._update_x_spline()
self._cmap_x = np.linspace(0, 1, self.npoints)
self._cmap_colors = np.empty((npoints, 1, 3))
self._ll = np.linspace( 0, 100, self.imdim[1]).reshape((-1,1))
self._aa = np.linspace(-128, 128, self.imdim[1]).reshape((-1,1))
self._bb = np.linspace(-128, 128, self.imdim[1]).reshape((-1,1))
self._imx = np.linspace(0, 1, self.imdim[0])
self._labdata = np.empty((3, self.imdim[1], self.imdim[0], 3))
self._rgbdata = np.empty((3, self.imdim[1], self.imdim[0], 3), dtype=np.uint8)
self._labdata[0, :, :, 0] = self._ll
self._labdata[1, :, :, 1] = self._aa
self._labdata[2, :, :, 2] = self._bb
def _update_uncorrected_colors(self, i):
self._colors_uncorr[..., i] = self.splines[i](self._xx)
def _update_x_spline(self):
if self.delta_e is None:
self._x_spline = lambda x: x
else:
de = self.delta_e(self._colors_uncorr[:-1], self._colors_uncorr[1:])
de[de < 0.001] = 0.001
de_csum = de.cumsum() - de[0]
de_csum /= de_csum[-1]
self._x_spline = interpolate.UnivariateSpline(de_csum, self._xx[1:],
s=self.smoothing)
def update(self):
for i in (0,1,2):
self._update_uncorrected_colors(i)
self._update_x_spline()
def rgb(self):
for i, s in enumerate(self.splines):
self._cmap_colors[..., 0, i] = s(self._x_spline(self._cmap_x))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return color.lab2rgb(self._cmap_colors).squeeze()
def imdata(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x = self._imx
l = self.splines[0](x)
a = self.splines[1](x)
b = self.splines[2](x)
self._labdata[0, :, :, 1] = a
self._labdata[0, :, :, 2] = b
self._rgbdata[0, :, :, :] = (color.lab2rgb(self._labdata[0]) * 0xff).astype(np.uint8)
self._labdata[1, :, :, 0] = l
self._labdata[1, :, :, 2] = b
self._rgbdata[1, :, :, :] = (color.lab2rgb(self._labdata[1]) * 0xff).astype(np.uint8)
self._labdata[2, :, :, 0] = l
self._labdata[2, :, :, 1] = a
self._rgbdata[2, :, :, :] = (color.lab2rgb(self._labdata[2]) * 0xff).astype(np.uint8)
return self._rgbdata
class SplinePlot(wx.Panel):
def __init__(self, parent, spline, xlabel=None, ylabel=None):
super().__init__(parent)
self.parent = parent
self.spline = spline
self.init_canvas(xlabel, ylabel)
self.layout()
self.connect()
def init_canvas(self, xlabel, ylabel):
xdpi, ydpi = wx.ScreenDC().GetPPI()
self.figure = Figure(figsize=(1, 1), dpi=xdpi, tight_layout=True,
subplotpars=SubplotParams(left=0.01, right=0.99,
bottom=0.01, top=0.99))
self.axes = self.figure.add_subplot(1,1,1)
self.axes.set_xlabel(xlabel)
self.axes.set_ylabel(ylabel)
tol = 0.02
dx = tol * (self.spline.xmax - self.spline.xmin)
dy = tol * (self.spline.ymax - self.spline.ymin)
self.axes.set_xlim(self.spline.xmin - dx, self.spline.xmax + dx)
self.axes.set_ylim(self.spline.ymin - dy, self.spline.ymax + dy)
self.axes.autoscale(False)
self.axes.xaxis.set_visible(False)
self.axes.yaxis.set_ticks([])
self.axes.yaxis.set_ticklabels([])
self.plt, = self.axes.plot(self.spline.x, self.spline.y,
linestyle='none', marker='o', color='black')
self.spline_plt, = self.axes.plot(self.spline.spline_x,
self.spline.spline_y, color='black')
self.canvas = Canvas(self, wx.ID_ANY, self.figure)
def layout(self):
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(self.canvas, 1, wx.ALL | wx.EXPAND, 0)
self.SetSizerAndFit(vbox)
def connect(self):
self._dragging = False
self._index = None
self.canvas.mpl_connect('button_press_event',
lambda e: self.on_button_press(e))
self.canvas.mpl_connect('motion_notify_event',
lambda e: self.on_motion_notify(e))
self.canvas.mpl_connect('button_release_event',
lambda e: self.on_button_release(e))
self.canvas.mpl_connect('figure_leave_event',
lambda e: self.on_figure_leave(e))
def on_button_press(self, event):
if event.inaxes:
if event.button == 1:
self.spline.save_cache()
self._dragging = True
self._index = self.pick(event)
if self._index is None:
self.add(event.xdata, event.ydata)
else:
self.on_motion_notify(event)
elif event.button == 3:
idx = self.pick(event)
if idx is not None:
self.delete(idx)
def add(self, x, y=None):
i = self.spline.add(x, y)
if self._dragging:
self._index = i
self.update()
def delete(self, idx):
if self.spline.delete(idx):
self.update()
def on_motion_notify(self, event):
if self._dragging:
if self._index is not None:
if event.inaxes:
x, y = event.xdata, event.ydata
else:
x, y = self.axes.transData.inverted().transform((event.x, event.y))
ret = self._index, x, y
self._index = self.spline.move(self._index, x, y)
self.update()
return ret
def on_button_release(self, event):
if self._dragging:
self._dragging = False
self._index = None
def on_figure_leave(self, event):
if self._dragging:
self._dragging = False
self._index = None
if self.spline.load_cache():
self.update()
def pick(self, event):
ipick, xpick, ypick = self.spline.pick(event.xdata, event.ydata)
xpx, ypx = self.axes.transData.transform((xpick, ypick))
distsq = (xpx - event.x)**2 + (ypx - event.y)**2
if distsq < 60**2:
return ipick
def update(self):
self.plt.set_data(self.spline.x, self.spline.y)
self.spline_plt.set_data(self.spline.spline_x, self.spline.spline_y)
class ColorMapSplinePlot(SplinePlot):
def on_button_press(self, event):
if event.inaxes:
if event.button == 1:
for p in self.parent.plots:
if self.GetId() != p.GetId():
p.spline.save_cache()
ret = super().on_button_press(event)
if event.button == 3:
ret = super().on_button_press(event)
self.parent.update()
def add(self, x, y):
super().add(x, y)
for p in self.parent.plots:
if self.GetId() != p.GetId():
SplinePlot.add(p, x)
self.parent.update()
def delete(self, idx):
super().delete(idx)
for p in self.parent.plots:
if self.GetId() != p.GetId():
SplinePlot.delete(p, idx)
self.parent.update()
def on_motion_notify(self, event):
res = super().on_motion_notify(event)
if res is not None:
idx, x, y = res
for p in self.parent.plots:
if self.GetId() != p.GetId():
p.spline.move(idx, x)
p.update()
self.parent.update()
def on_figure_leave(self, event):
if self._dragging:
for p in self.parent.plots:
if self.GetId() != p.GetId():
if p.spline.load_cache():
p.update()
super().on_figure_leave(event)
self.parent.update()
class DECorrPlot(wx.Panel):
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.init_canvas()
self.layout()
def init_canvas(self):
xdpi, ydpi = wx.ScreenDC().GetPPI()
self.figure = Figure(figsize=(1, 1), dpi=xdpi, tight_layout=True,
subplotpars=SubplotParams(left=0.01, right=0.99,
bottom=0.01, top=0.99))
self.axes = self.figure.add_subplot(1,1,1)
self.axes.set_xlabel(r'uncorrected')
self.axes.set_ylabel(r'corrected')
self.axes.set_xlim(0, 1)
self.axes.set_ylim(-0.1, 1.1)
self.axes.autoscale(False)
self.axes.xaxis.set_ticks([0,0.5,1])
self.axes.yaxis.set_ticks([0,0.5,1])
self._x = np.linspace(0, 1, 300)
self.plot, = self.axes.plot((0,1), (0,1), color='black')
self.canvas = Canvas(self, wx.ID_ANY, self.figure)
self.update(lambda x: x)
def layout(self):
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(self.canvas, 1, wx.ALL | wx.EXPAND, 0)
self.SetSizerAndFit(vbox)
self.Layout()
def update(self, spline):
self.plot.set_data(self._x, spline(self._x))
self.canvas.draw()
class CombPlot(wx.Panel):
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.init_canvas()
self.layout()
def init_canvas(self):
xdpi, ydpi = wx.ScreenDC().GetPPI()
self.figure = Figure(figsize=(1, 1), dpi=xdpi, tight_layout=True,
subplotpars=SubplotParams(left=0.01, right=0.99,
bottom=0.01, top=0.99))
self.axes = self.figure.add_subplot(1,1,1)
nperiods, npoints, amplitude = 80, 50, 70
x = np.linspace(0, nperiods * 2 * np.pi, npoints * nperiods)
y = np.linspace(0, amplitude, npoints * 10)
X, Y = np.meshgrid(x, y)
img = X + Y * np.sin(X) * (Y**2 / Y.max()**2)
self.plot = self.axes.imshow(img, origin='lower', aspect='auto',
vmin=x[0], vmax=x[-1])
self.axes.set_axis_off()
self.canvas = Canvas(self, wx.ID_ANY, self.figure)
def layout(self):
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(self.canvas, 1, wx.ALL | wx.EXPAND, 0)
self.SetSizerAndFit(vbox)
self.Layout()
def update(self, cmap):
self.plot.set_cmap(LinearSegmentedColormap.from_list('_', cmap.rgb()))
self.canvas.draw()
class ColorMapControlPlots(wx.Panel):
def __init__(self, parent):
super().__init__(parent)
# Macaw
colors = np.array(
[[ 12, 0, 0],
[ 57, -37, -2],
[ 46, -32, 48],
[ 65, 25, 70],
[ 72, 18, 76],
[ 81, 0, 0],
[ 99, 0, 0]]).transpose()
# Test
#colors = np.array(
# [[ 50, 0, 0],
# [ 50, 0, 0],
# [ 50, 0, 0],
# [ 50, 0, 0],
# [ 0, 0, 0],
# [ 50, 0, 0],
# [ 50, 0, 0],
# [ 50, 0, 0],
# [ 50, 0, 0],
# [100, 0, 0],
# [ 50, 0, 0],
# [ 50, 0, 0],
# [ 50, 0, 0],
# [ 50, 0, 0]]).transpose()
## Test
#colors = np.array(
# [[ 0, 50, 0],
# [ 50, 50, 0],
# [100, 0, 50],
# [ 50, -50, 50],
# [ 0, -50, 50]]).transpose()
x = np.linspace(0, 1, len(colors[0]))
self.cmap = ColorMap(x, colors)
self.plots = [
ColorMapSplinePlot(self, self.cmap.splines[0], ylabel='lightness'),
ColorMapSplinePlot(self, self.cmap.splines[1], ylabel='green-red'),
ColorMapSplinePlot(self, self.cmap.splines[2], ylabel='blue-yellow')]
self.implots = []
for imdata, plot in zip(self.cmap.imdata(), self.plots):
extent = list(plot.axes.get_xlim()) + list(plot.axes.get_ylim())
self.implots.append(plot.axes.imshow(imdata, extent=extent,
origin='lower', aspect='auto',
zorder=-1,
interpolation='gaussian'))
self.de_corr_plot = DECorrPlot(self)
self.de_corr_plot.update(self.cmap._x_spline)
self.comb_plot = CombPlot(self)
self.comb_plot.update(self.cmap)
self.layout()
def layout(self):
vbox0 = wx.BoxSizer(wx.VERTICAL)
hbox0 = wx.BoxSizer(wx.HORIZONTAL)
vbox1 = wx.BoxSizer(wx.VERTICAL)
vbox1.Add(self.plots[0], 1, wx.ALL | wx.EXPAND)
vbox1.Add(self.plots[1], 1, wx.ALL | wx.EXPAND)
vbox1.Add(self.plots[2], 1, wx.ALL | wx.EXPAND)
hbox0.Add(vbox1, 2, wx.ALL | wx.EXPAND)
vbox2 = wx.BoxSizer(wx.VERTICAL)
vbox2.Add(self.de_corr_plot, 1, wx.ALL | wx.EXPAND)
vbox2.Add(wx.Panel(self), 2, wx.ALL | wx.EXPAND)
hbox0.Add(vbox2, 1, wx.ALL | wx.EXPAND)
vbox0.Add(hbox0, 3, wx.ALL | wx.EXPAND)
vbox0.Add(self.comb_plot, 1, wx.ALL | wx.EXPAND)
self.SetSizerAndFit(vbox0)
self.Layout()
def update(self):
for im, data in zip(self.implots, self.cmap.imdata()):
im.set_array(data)
im.figure.canvas.draw()
self.cmap.update()
self.de_corr_plot.update(self.cmap._x_spline)
self.comb_plot.update(self.cmap)
class MainFrame(wx.Frame):
def __init__(self):
super().__init__(None, wx.ID_ANY, 'Main Window', size=(750, 600))
self.cmapctl = ColorMapControlPlots(self)
self.layout()
self.cmapctl.update()
def layout(self):
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add(self.cmapctl, 1, wx.ALL | wx.EXPAND)
self.SetSizer(hbox)
self.Layout()
class Application(wx.App):
def OnInit(self):
mainFrame = MainFrame()
mainFrame.Show(True)
return True
if __name__ == '__main__':
app = Application(False)
app.MainLoop()
| gpl-3.0 |
rbalda/neural_ocr | env/lib/python2.7/site-packages/mpl_toolkits/mplot3d/art3d.py | 7 | 25559 | #!/usr/bin/python
# art3d.py, original mplot3d version by John Porter
# Parts rewritten by Reinier Heeres <[email protected]>
# Minor additions by Ben Axelrod <[email protected]>
'''
Module containing 3D artist code and functions to convert 2D
artists into 3D versions which can be added to an Axes3D.
'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import zip
from matplotlib import lines, text as mtext, path as mpath, colors as mcolors
from matplotlib import artist
from matplotlib.collections import Collection, LineCollection, \
PolyCollection, PatchCollection, PathCollection
from matplotlib.cm import ScalarMappable
from matplotlib.patches import Patch
from matplotlib.colors import Normalize
from matplotlib.cbook import iterable
import warnings
import numpy as np
import math
from . import proj3d
def norm_angle(a):
"""Return angle between -180 and +180"""
a = (a + 360) % 360
if a > 180:
a = a - 360
return a
def norm_text_angle(a):
"""Return angle between -90 and +90"""
a = (a + 180) % 180
if a > 90:
a = a - 180
return a
def get_dir_vector(zdir):
if zdir == 'x':
return np.array((1, 0, 0))
elif zdir == 'y':
return np.array((0, 1, 0))
elif zdir == 'z':
return np.array((0, 0, 1))
elif zdir is None:
return np.array((0, 0, 0))
elif iterable(zdir) and len(zdir) == 3:
return zdir
else:
raise ValueError("'x', 'y', 'z', None or vector of length 3 expected")
class Text3D(mtext.Text):
'''
Text object with 3D position and (in the future) direction.
'''
def __init__(self, x=0, y=0, z=0, text='', zdir='z', **kwargs):
'''
*x*, *y*, *z* Position of text
*text* Text string to display
*zdir* Direction of text
Keyword arguments are passed onto :func:`~matplotlib.text.Text`.
'''
mtext.Text.__init__(self, x, y, text, **kwargs)
self.set_3d_properties(z, zdir)
def set_3d_properties(self, z=0, zdir='z'):
x, y = self.get_position()
self._position3d = np.array((x, y, z))
self._dir_vec = get_dir_vector(zdir)
self.stale = True
def draw(self, renderer):
proj = proj3d.proj_trans_points([self._position3d, \
self._position3d + self._dir_vec], renderer.M)
dx = proj[0][1] - proj[0][0]
dy = proj[1][1] - proj[1][0]
if dx==0. and dy==0.:
# atan2 raises ValueError: math domain error on 0,0
angle = 0.
else:
angle = math.degrees(math.atan2(dy, dx))
self.set_position((proj[0][0], proj[1][0]))
self.set_rotation(norm_text_angle(angle))
mtext.Text.draw(self, renderer)
self.stale = False
def text_2d_to_3d(obj, z=0, zdir='z'):
"""Convert a Text to a Text3D object."""
obj.__class__ = Text3D
obj.set_3d_properties(z, zdir)
class Line3D(lines.Line2D):
'''
3D line object.
'''
def __init__(self, xs, ys, zs, *args, **kwargs):
'''
Keyword arguments are passed onto :func:`~matplotlib.lines.Line2D`.
'''
lines.Line2D.__init__(self, [], [], *args, **kwargs)
self._verts3d = xs, ys, zs
def set_3d_properties(self, zs=0, zdir='z'):
xs = self.get_xdata()
ys = self.get_ydata()
try:
# If *zs* is a list or array, then this will fail and
# just proceed to juggle_axes().
zs = float(zs)
zs = [zs for x in xs]
except TypeError:
pass
self._verts3d = juggle_axes(xs, ys, zs, zdir)
self.stale = True
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_data(xs, ys)
lines.Line2D.draw(self, renderer)
self.stale = False
def line_2d_to_3d(line, zs=0, zdir='z'):
'''
Convert a 2D line to 3D.
'''
line.__class__ = Line3D
line.set_3d_properties(zs, zdir)
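# Illustrative usage sketch (hypothetical objects, not part of this module):
# lift an ordinary 2D line onto a 3D axes at a fixed depth along *zdir*.
#   line = lines.Line2D(xs, ys)          # a plain 2D line
#   line_2d_to_3d(line, zs=0, zdir='y')  # place it at depth 0 along the y axis
#   ax3d.add_line(line)                  # ax3d assumed to be an Axes3D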
def path_to_3d_segment(path, zs=0, zdir='z'):
'''Convert a path to a 3D segment.'''
if not iterable(zs):
zs = np.ones(len(path)) * zs
seg = []
pathsegs = path.iter_segments(simplify=False, curves=False)
for (((x, y), code), z) in zip(pathsegs, zs):
seg.append((x, y, z))
seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
return seg3d
def paths_to_3d_segments(paths, zs=0, zdir='z'):
'''
Convert paths from a collection object to 3D segments.
'''
if not iterable(zs):
zs = np.ones(len(paths)) * zs
segments = []
for path, pathz in zip(paths, zs):
segments.append(path_to_3d_segment(path, pathz, zdir))
return segments
def path_to_3d_segment_with_codes(path, zs=0, zdir='z'):
'''Convert a path to a 3D segment with path codes.'''
if not iterable(zs):
zs = np.ones(len(path)) * zs
seg = []
codes = []
pathsegs = path.iter_segments(simplify=False, curves=False)
for (((x, y), code), z) in zip(pathsegs, zs):
seg.append((x, y, z))
codes.append(code)
seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
return seg3d, codes
def paths_to_3d_segments_with_codes(paths, zs=0, zdir='z'):
'''
Convert paths from a collection object to 3D segments with path codes.
'''
if not iterable(zs):
zs = np.ones(len(paths)) * zs
segments = []
codes_list = []
for path, pathz in zip(paths, zs):
segs, codes = path_to_3d_segment_with_codes(path, pathz, zdir)
segments.append(segs)
codes_list.append(codes)
return segments, codes_list
class Line3DCollection(LineCollection):
'''
A collection of 3D lines.
'''
def __init__(self, segments, *args, **kwargs):
'''
Keyword arguments are passed onto :func:`~matplotlib.collections.LineCollection`.
'''
LineCollection.__init__(self, segments, *args, **kwargs)
def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def set_segments(self, segments):
'''
Set 3D segments
'''
self._segments3d = np.asanyarray(segments)
LineCollection.set_segments(self, [])
def do_3d_projection(self, renderer):
'''
Project the points according to renderer matrix.
'''
xyslist = [
proj3d.proj_trans_points(points, renderer.M) for points in
self._segments3d]
segments_2d = [list(zip(xs, ys)) for (xs, ys, zs) in xyslist]
LineCollection.set_segments(self, segments_2d)
# FIXME
minz = 1e9
for (xs, ys, zs) in xyslist:
minz = min(minz, min(zs))
return minz
def draw(self, renderer, project=False):
if project:
self.do_3d_projection(renderer)
LineCollection.draw(self, renderer)
def line_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a LineCollection to a Line3DCollection object."""
segments3d = paths_to_3d_segments(col.get_paths(), zs, zdir)
col.__class__ = Line3DCollection
col.set_segments(segments3d)
class Patch3D(Patch):
'''
3D patch object.
'''
def __init__(self, *args, **kwargs):
zs = kwargs.pop('zs', [])
zdir = kwargs.pop('zdir', 'z')
Patch.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_3d_properties(self, verts, zs=0, zdir='z'):
if not iterable(zs):
zs = np.ones(len(verts)) * zs
self._segment3d = [juggle_axes(x, y, z, zdir) \
for ((x, y), z) in zip(verts, zs)]
self._facecolor3d = Patch.get_facecolor(self)
def get_path(self):
return self._path2d
def get_facecolor(self):
return self._facecolor2d
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = list(zip(*s))
vxs, vys,vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(list(zip(vxs, vys)))
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
def draw(self, renderer):
Patch.draw(self, renderer)
class PathPatch3D(Patch3D):
'''
3D PathPatch object.
'''
def __init__(self, path, **kwargs):
zs = kwargs.pop('zs', [])
zdir = kwargs.pop('zdir', 'z')
Patch.__init__(self, **kwargs)
self.set_3d_properties(path, zs, zdir)
def set_3d_properties(self, path, zs=0, zdir='z'):
Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir)
self._code3d = path.codes
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = list(zip(*s))
vxs, vys,vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(list(zip(vxs, vys)), self._code3d)
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
def get_patch_verts(patch):
"""Return a list of vertices for the path of a patch."""
trans = patch.get_patch_transform()
path = patch.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
else:
return []
def patch_2d_to_3d(patch, z=0, zdir='z'):
"""Convert a Patch to a Patch3D object."""
verts = get_patch_verts(patch)
patch.__class__ = Patch3D
patch.set_3d_properties(verts, z, zdir)
def pathpatch_2d_to_3d(pathpatch, z=0, zdir='z'):
"""Convert a PathPatch to a PathPatch3D object."""
path = pathpatch.get_path()
trans = pathpatch.get_patch_transform()
mpath = trans.transform_path(path)
pathpatch.__class__ = PathPatch3D
pathpatch.set_3d_properties(mpath, z, zdir)
class Patch3DCollection(PatchCollection):
'''
A collection of 3D patches.
'''
def __init__(self, *args, **kwargs):
"""
Create a collection of flat 3D patches with its normal vector
pointed in *zdir* direction, and located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of patches in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PatchCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument "depthshade" is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
"""
zs = kwargs.pop('zs', 0)
zdir = kwargs.pop('zdir', 'z')
self._depthshade = kwargs.pop('depthshade', True)
PatchCollection.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = list(zip(*offsets))
else:
xs = []
ys = []
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
self.stale = True
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
self._facecolor3d)
fcs = mcolors.colorConverter.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
self._edgecolor3d)
ecs = mcolors.colorConverter.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
PatchCollection.set_offsets(self, list(zip(vxs, vys)))
if vzs.size > 0:
return min(vzs)
else:
return np.nan
class Path3DCollection(PathCollection):
'''
A collection of 3D paths.
'''
def __init__(self, *args, **kwargs):
"""
Create a collection of flat 3D paths with its normal vector
pointed in *zdir* direction, and located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of paths in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PathCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument "depthshade" is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
"""
zs = kwargs.pop('zs', 0)
zdir = kwargs.pop('zdir', 'z')
self._depthshade = kwargs.pop('depthshade', True)
PathCollection.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = list(zip(*offsets))
else:
xs = []
ys = []
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
self.stale = True
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
self._facecolor3d)
fcs = mcolors.colorConverter.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
self._edgecolor3d)
ecs = mcolors.colorConverter.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
PathCollection.set_offsets(self, list(zip(vxs, vys)))
if vzs.size > 0 :
return min(vzs)
else :
return np.nan
def patch_collection_2d_to_3d(col, zs=0, zdir='z', depthshade=True):
"""
Convert a :class:`~matplotlib.collections.PatchCollection` into a
:class:`Patch3DCollection` object
(or a :class:`~matplotlib.collections.PathCollection` into a
:class:`Path3DCollection` object).
Keywords:
    *zs*             The location or locations to place the patches in the
collection along the *zdir* axis. Defaults to 0.
*zdir* The axis in which to place the patches. Default is "z".
*depthshade* Whether to shade the patches to give a sense of depth.
Defaults to *True*.
"""
if isinstance(col, PathCollection):
col.__class__ = Path3DCollection
elif isinstance(col, PatchCollection):
col.__class__ = Patch3DCollection
col._depthshade = depthshade
col.set_3d_properties(zs, zdir)
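# Illustrative usage sketch (hypothetical objects, not part of this module):
# lift a 2D collection onto an Axes3D at a fixed offset along *zdir*.
#   col = PatchCollection(some_2d_patches)
#   patch_collection_2d_to_3d(col, zs=0, zdir='y', depthshade=True)
#   ax3d.add_collection3d(col)           # ax3d assumed to be an Axes3D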
class Poly3DCollection(PolyCollection):
'''
A collection of 3D polygons.
'''
def __init__(self, verts, *args, **kwargs):
'''
Create a Poly3DCollection.
*verts* should contain 3D coordinates.
Keyword arguments:
zsort, see set_zsort for options.
Note that this class does a bit of magic with the _facecolors
and _edgecolors properties.
'''
zsort = kwargs.pop('zsort', True)
PolyCollection.__init__(self, verts, *args, **kwargs)
self.set_zsort(zsort)
self._codes3d = None
_zsort_functions = {
'average': np.average,
'min': np.min,
'max': np.max,
}
def set_zsort(self, zsort):
'''
Set z-sorting behaviour:
boolean: if True use default 'average'
string: 'average', 'min' or 'max'
'''
if zsort is True:
zsort = 'average'
if zsort is not False:
if zsort in self._zsort_functions:
zsortfunc = self._zsort_functions[zsort]
else:
return False
else:
zsortfunc = None
self._zsort = zsort
self._sort_zpos = None
self._zsortfunc = zsortfunc
self.stale = True
def get_vector(self, segments3d):
"""Optimize points for projection"""
si = 0
ei = 0
segis = []
points = []
for p in segments3d:
points.extend(p)
ei = si+len(p)
segis.append((si, ei))
si = ei
if len(segments3d) > 0 :
xs, ys, zs = list(zip(*points))
else :
# We need this so that we can skip the bad unpacking from zip()
xs, ys, zs = [], [], []
ones = np.ones(len(xs))
self._vec = np.array([xs, ys, zs, ones])
self._segis = segis
def set_verts(self, verts, closed=True):
'''Set 3D vertices.'''
self.get_vector(verts)
# 2D verts will be updated at draw time
PolyCollection.set_verts(self, [], closed)
def set_verts_and_codes(self, verts, codes):
'''Sets 3D vertices with path codes'''
# set vertices with closed=False to prevent PolyCollection from
# setting path codes
self.set_verts(verts, closed=False)
# and set our own codes instead.
self._codes3d = codes
def set_3d_properties(self):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
self._sort_zpos = None
self.set_zsort(True)
self._facecolors3d = PolyCollection.get_facecolors(self)
self._edgecolors3d = PolyCollection.get_edgecolors(self)
self._alpha3d = PolyCollection.get_alpha(self)
self.stale = True
def set_sort_zpos(self,val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def do_3d_projection(self, renderer):
'''
Perform the 3D projection for this object.
'''
# FIXME: This may no longer be needed?
if self._A is not None:
self.update_scalarmappable()
self._facecolors3d = self._facecolors
txs, tys, tzs = proj3d.proj_transform_vec(self._vec, renderer.M)
xyzlist = [(txs[si:ei], tys[si:ei], tzs[si:ei])
for si, ei in self._segis]
# This extra fuss is to re-order face / edge colors
cface = self._facecolors3d
cedge = self._edgecolors3d
if len(cface) != len(xyzlist):
cface = cface.repeat(len(xyzlist), axis=0)
if len(cedge) != len(xyzlist):
if len(cedge) == 0:
cedge = cface
cedge = cedge.repeat(len(xyzlist), axis=0)
# if required sort by depth (furthest drawn first)
if self._zsort:
indices = range(len(xyzlist))
z_segments_2d = [(self._zsortfunc(zs), list(zip(xs, ys)), fc, ec,
idx) for (xs, ys, zs), fc, ec, idx in
zip(xyzlist, cface, cedge, indices)]
z_segments_2d.sort(key=lambda x: x[0], reverse=True)
else:
raise ValueError("whoops")
segments_2d = [s for z, s, fc, ec, idx in z_segments_2d]
if self._codes3d is not None:
codes = [self._codes3d[idx] for z, s, fc, ec, idx in z_segments_2d]
PolyCollection.set_verts_and_codes(self, segments_2d, codes)
else:
PolyCollection.set_verts(self, segments_2d)
self._facecolors2d = [fc for z, s, fc, ec, idx in z_segments_2d]
if len(self._edgecolors3d) == len(cface):
self._edgecolors2d = [ec for z, s, fc, ec, idx in z_segments_2d]
else:
self._edgecolors2d = self._edgecolors3d
# Return zorder value
if self._sort_zpos is not None:
zvec = np.array([[0], [0], [self._sort_zpos], [1]])
ztrans = proj3d.proj_transform_vec(zvec, renderer.M)
return ztrans[2][0]
elif tzs.size > 0 :
# FIXME: Some results still don't look quite right.
# In particular, examine contourf3d_demo2.py
# with az = -54 and elev = -45.
return np.min(tzs)
else :
return np.nan
def set_facecolor(self, colors):
PolyCollection.set_facecolor(self, colors)
self._facecolors3d = PolyCollection.get_facecolor(self)
set_facecolors = set_facecolor
def set_edgecolor(self, colors):
PolyCollection.set_edgecolor(self, colors)
self._edgecolors3d = PolyCollection.get_edgecolor(self)
set_edgecolors = set_edgecolor
def set_alpha(self, alpha):
"""
        Set the alpha transparencies of the collection. *alpha* must be
a float or *None*.
ACCEPTS: float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
try:
self._facecolors = mcolors.colorConverter.to_rgba_array(
self._facecolors3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
self._edgecolors = mcolors.colorConverter.to_rgba_array(
self._edgecolors3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
self.stale = True
def get_facecolors(self):
return self._facecolors2d
get_facecolor = get_facecolors
def get_edgecolors(self):
return self._edgecolors2d
get_edgecolor = get_edgecolors
def draw(self, renderer):
return Collection.draw(self, renderer)
def poly_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a PolyCollection to a Poly3DCollection object."""
segments_3d, codes = paths_to_3d_segments_with_codes(col.get_paths(),
zs, zdir)
col.__class__ = Poly3DCollection
col.set_verts_and_codes(segments_3d, codes)
col.set_3d_properties()
def juggle_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that 2D xs, ys can be plotted in the plane
orthogonal to zdir. zdir is normally x, y or z. However, if zdir
starts with a '-' it is interpreted as a compensation for rotate_axes.
"""
if zdir == 'x':
return zs, xs, ys
elif zdir == 'y':
return xs, zs, ys
elif zdir[0] == '-':
return rotate_axes(xs, ys, zs, zdir)
else:
return xs, ys, zs
def rotate_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that the axes are rotated with zdir along
the original z axis. Prepending the axis with a '-' does the
inverse transform, so zdir can be x, -x, y, -y, z or -z
"""
if zdir == 'x':
return ys, zs, xs
elif zdir == '-x':
return zs, xs, ys
elif zdir == 'y':
return zs, xs, ys
elif zdir == '-y':
return ys, zs, xs
else:
return xs, ys, zs
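# Worked example of the reordering above, read directly from the branches
# (single-point lists shown for brevity):
#   juggle_axes([1], [2], [3], 'z')  -> ([1], [2], [3])   # unchanged
#   juggle_axes([1], [2], [3], 'x')  -> ([3], [1], [2])   # zs takes the x slot
#   rotate_axes([1], [2], [3], 'y')  -> ([3], [1], [2])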
def iscolor(c):
try:
if len(c) == 4 or len(c) == 3:
if iterable(c[0]):
return False
if hasattr(c[0], '__float__'):
return True
except:
return False
return False
def get_colors(c, num):
"""Stretch the color argument to provide the required number num"""
if type(c) == type("string"):
c = mcolors.colorConverter.to_rgba(c)
if iscolor(c):
return [c] * num
if len(c) == num:
return c
elif iscolor(c):
return [c] * num
elif len(c) == 0: #if edgecolor or facecolor is specified as 'none'
return [[0,0,0,0]] * num
elif iscolor(c[0]):
return [c[0]] * num
else:
raise ValueError('unknown color format %s' % c)
def zalpha(colors, zs):
"""Modify the alphas of the color list according to depth"""
# FIXME: This only works well if the points for *zs* are well-spaced
# in all three dimensions. Otherwise, at certain orientations,
# the min and max zs are very close together.
# Should really normalize against the viewing depth.
colors = get_colors(colors, len(zs))
if zs.size > 0 :
norm = Normalize(min(zs), max(zs))
sats = 1 - norm(zs) * 0.7
colors = [(c[0], c[1], c[2], c[3] * s) for c, s in zip(colors, sats)]
return colors
| mit |
jwkanggist/EveryBodyTensorFlow | lab11_runTFcheckDropOut_spiraldata.py | 1 | 14842 | #-*- coding: utf-8 -*-
#! /usr/bin/env python
'''
#------------------------------------------------------------
filename: lab11_runTFcheckDropOut_spiraldata.py
This script wants to see how the Dropout technique can
mitigate Gradient Vanishing problem in
A Multi-Hidden Layers Fully Connected Neural Network.
Applying "dropout" to the network of lab10.
This example data set is using two class spiral data.
written by Jaewook Kang @ Jan 2018
#------------------------------------------------------------
'''
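# A minimal sketch of the dropout op used below (illustrative values): with
# rate=0.2 and training enabled, about 20% of the activations are zeroed at
# each step and the survivors are rescaled by 1/(1 - 0.2).
#   h = tf.layers.dropout(inputs=h, rate=0.2, training=True)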
from os import getcwd
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas import DataFrame
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_io
# reading data set from csv file ==========================
xsize = 2
ysize = 2
data = pd.read_csv('./data/twospirals_N5000.csv')
data.columns=['xdata1','xdata2','tdata']
permutation_index = np.random.permutation(data.index)
permutated_data = data.reindex(permutation_index)
permutated_data.columns=['xdata1','xdata2','tdata']
x_data = np.zeros([permutated_data.xdata1.size,xsize])
x_data[:,0] = permutated_data.xdata1.values
x_data[:,1] = permutated_data.xdata2.values
t_data = np.zeros([permutated_data.tdata.size,ysize])
t_data[:,0] = permutated_data.tdata.values
t_data[:,1] = np.invert(permutated_data.tdata.values) + 2
total_size = permutated_data.xdata1.size
training_size = int(np.floor(permutated_data.xdata1.size * 0.8))
validation_size = total_size - training_size
# data dividing
x_training_data = x_data[0:training_size,:]
t_training_data = t_data[0:training_size,:]
x_validation_data = x_data[training_size:-1,:]
t_validation_data = t_data[training_size:-1,:]
# configure training parameters =====================================
learning_rate = 5E-3
training_epochs = 5000
batch_size = 500
display_step = 1
total_batch = int(training_size / batch_size)
# dropout_rate = 0 --> no dropout
# dropout_rate = 1 --> no nodes to work
dropoutrate_in_training = 0.2
# computational TF graph construction ================================
# Network Parameters
n_hidden_1 = 10 # 1st layer number of neurons
n_hidden_2 = 7 # 2nd layer number of neurons
n_hidden_3 = 7 # 3rd layer number of neurons
n_hidden_4 = 4 # 4th layer number of neurons
n_hidden_5 = 4 # 5th layer number of neurons
num_input = xsize # two-dimensional input X = [1x2]
num_classes = ysize # 2 class
#-------------------------------
# tf Graph input
X = tf.placeholder(tf.float32, [None, num_input])
Y = tf.placeholder(tf.float32, [None, num_classes])
# droprate must be given by placeholder
# since the network in validation do not require dropout nodes in layers.
dropoutrate_io = tf.placeholder(tf.float32)
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
'h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4])),
'h5': tf.Variable(tf.random_normal([n_hidden_4, n_hidden_5])),
'out':tf.Variable(tf.random_normal([n_hidden_5, num_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'b3': tf.Variable(tf.random_normal([n_hidden_3])),
'b4': tf.Variable(tf.random_normal([n_hidden_4])),
'b5': tf.Variable(tf.random_normal([n_hidden_5])),
'out': tf.Variable(tf.random_normal([num_classes]))
}
# Create model
def neural_net(x,dropoutrate_io):
# Input fully connected layer with 10 neurons
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
# layer_1 = tf.nn.dropout(layer_1,keep_prob=dropoutrate_io)
# Hidden fully connected layer with 7 neurons
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
    # training=True is needed for tf.layers.dropout to apply the fed-in rate
    # (rate is 0.2 during training and 0.0 during evaluation below)
    layer_2 = tf.layers.dropout(inputs=layer_2, rate=dropoutrate_io, training=True)
# Hidden fully connected layer with 7 neurons
layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
layer_3 = tf.nn.relu(layer_3)
    layer_3 = tf.layers.dropout(inputs=layer_3, rate=dropoutrate_io, training=True)
# Hidden fully connected layer with 4 neurons
layer_4 = tf.add(tf.matmul(layer_3, weights['h4']), biases['b4'])
layer_4 = tf.nn.relu(layer_4)
    layer_4 = tf.layers.dropout(inputs=layer_4, rate=dropoutrate_io, training=True)
# Hidden fully connected layer with 4 neurons
layer_5 = tf.add(tf.matmul(layer_4, weights['h5']), biases['b5'])
layer_5 = tf.nn.relu(layer_5)
    layer_5 = tf.layers.dropout(inputs=layer_5, rate=dropoutrate_io, training=True)
# Output fully connected layer with a neuron for each class
out_layer = tf.matmul(layer_5, weights['out']) + biases['out']
return out_layer
# Construct model
logits = neural_net(x=X, \
dropoutrate_io= dropoutrate_io)
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
## when you use AdamOptimizer, instead of SGD, the error rate immediately becomes near zero.
# optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
errRatebyTrainingSet = np.zeros(training_epochs)
errRatebyValidationSet = np.zeros(training_epochs)
# for visualization of vanishing gradient problem
grad_wrt_weight_layer1_tensor = tf.gradients(cost,weights['h1'],\
name='grad_wrt_weight_layer1')
grad_wrt_weight_layer2_tensor = tf.gradients(cost,weights['h2'],\
name='grad_wrt_weight_layer2')
grad_wrt_weight_layer3_tensor = tf.gradients(cost,weights['h3'],\
name='grad_wrt_weight_layer3')
grad_wrt_weight_layer4_tensor = tf.gradients(cost,weights['h4'],\
name='grad_wrt_weight_layer4')
grad_wrt_weight_layer5_tensor = tf.gradients(cost,weights['h5'],\
name='grad_wrt_weight_layer5')
grad_wrt_weight_layer1_iter = np.zeros([total_batch,1])
grad_wrt_weight_layer2_iter = np.zeros([total_batch,1])
grad_wrt_weight_layer3_iter = np.zeros([total_batch,1])
grad_wrt_weight_layer4_iter = np.zeros([total_batch,1])
grad_wrt_weight_layer5_iter = np.zeros([total_batch,1])
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training ===============================================
with tf.Session() as sess:
# Run the initializer
sess.run(init)
print("--------------------------------------------")
for epoch in range(training_epochs):
avg_cost = 0.
weight_array = list()
for i in range(total_batch):
data_start_index = i * batch_size
data_end_index = (i + 1) * batch_size
# feed traing data --------------------------
batch_xs = x_training_data[data_start_index:data_end_index, :]
batch_ts = t_training_data[data_start_index:data_end_index, :]
#----------------------------------------------
# Run optimization op (backprop) and cost op (to get loss value)
# feedign training data
_, local_batch_cost = sess.run([optimizer,cost], \
feed_dict={X: batch_xs,\
Y: batch_ts,\
dropoutrate_io: dropoutrate_in_training})
if epoch == training_epochs - 1:
# print ('Gradient calculation to see gradient vanishing problem')
_, grad_wrt_weight_layer1 = sess.run([optimizer,grad_wrt_weight_layer1_tensor], \
feed_dict={X: batch_xs,\
Y: batch_ts, \
dropoutrate_io: dropoutrate_in_training})
_, grad_wrt_weight_layer2 = sess.run([optimizer,grad_wrt_weight_layer2_tensor], \
feed_dict={X: batch_xs,\
Y: batch_ts, \
dropoutrate_io: dropoutrate_in_training})
_, grad_wrt_weight_layer3 = sess.run([optimizer,grad_wrt_weight_layer3_tensor], \
feed_dict={X: batch_xs,\
Y: batch_ts, \
dropoutrate_io: dropoutrate_in_training})
_, grad_wrt_weight_layer4 = sess.run([optimizer,grad_wrt_weight_layer4_tensor], \
feed_dict={X: batch_xs,\
Y: batch_ts, \
dropoutrate_io: dropoutrate_in_training})
_, grad_wrt_weight_layer5 = sess.run([optimizer,grad_wrt_weight_layer5_tensor], \
feed_dict={X: batch_xs,\
Y: batch_ts, \
dropoutrate_io: dropoutrate_in_training})
grad_wrt_weight_layer1 = np.array(grad_wrt_weight_layer1)
grad_wrt_weight_layer2 = np.array(grad_wrt_weight_layer2)
grad_wrt_weight_layer3 = np.array(grad_wrt_weight_layer3)
grad_wrt_weight_layer4 = np.array(grad_wrt_weight_layer4)
grad_wrt_weight_layer5 = np.array(grad_wrt_weight_layer5)
grad_wrt_weight_layer1 = grad_wrt_weight_layer1.reshape(grad_wrt_weight_layer1.shape[1],
grad_wrt_weight_layer1.shape[2])
grad_wrt_weight_layer2 = grad_wrt_weight_layer2.reshape(grad_wrt_weight_layer2.shape[1],
grad_wrt_weight_layer2.shape[2])
grad_wrt_weight_layer3 = grad_wrt_weight_layer3.reshape(grad_wrt_weight_layer3.shape[1],
grad_wrt_weight_layer3.shape[2])
grad_wrt_weight_layer4 = grad_wrt_weight_layer4.reshape(grad_wrt_weight_layer4.shape[1],
grad_wrt_weight_layer4.shape[2])
grad_wrt_weight_layer5 = grad_wrt_weight_layer5.reshape(grad_wrt_weight_layer5.shape[1],
grad_wrt_weight_layer5.shape[2])
grad_wrt_weight_layer1_iter[i] = grad_wrt_weight_layer1.mean()
grad_wrt_weight_layer2_iter[i] = grad_wrt_weight_layer2.mean()
grad_wrt_weight_layer3_iter[i] = grad_wrt_weight_layer3.mean()
grad_wrt_weight_layer4_iter[i] = grad_wrt_weight_layer4.mean()
grad_wrt_weight_layer5_iter[i] = grad_wrt_weight_layer5.mean()
# Compute average loss
avg_cost += local_batch_cost / total_batch
# print ("At %d-th batch in %d-epoch, avg_cost = %f" % (i,epoch,avg_cost) )
# Display logs per epoch step
if display_step == 0:
continue
elif (epoch + 1) % display_step == 0:
# print("Iteration:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
batch_train_xs = x_training_data
batch_train_ys = t_training_data
batch_valid_xs = x_validation_data
batch_valid_ys = t_validation_data
# for error rate evaluation, the dropout rate must be 1.0
errRatebyTrainingSet[epoch] = 1.0 - accuracy.eval(feed_dict={X: batch_train_xs, \
Y: batch_train_ys,\
dropoutrate_io: 0.0}, \
session=sess)
errRatebyValidationSet[epoch] = 1.0 - accuracy.eval(feed_dict={X: batch_valid_xs, \
Y: batch_valid_ys,\
dropoutrate_io: 0.0},\
session=sess)
print("Training set Err rate: %s" % errRatebyTrainingSet[epoch])
print("Validation set Err rate: %s" % errRatebyValidationSet[epoch])
print("--------------------------------------------")
print("Optimization Finished!")
# Calculate accuracy for test images
##-------------------------------------------
# Training result visualization ===============================================
hfig1= plt.figure(1,figsize=[10,10])
plt.scatter(data.xdata1.values[0:int(data.xdata1.size/2)],\
data.xdata2.values[0:int(data.xdata1.size/2)], \
color='b',label='class0')
plt.scatter(data.xdata1.values[int(data.xdata1.size/2)+2:-1],\
data.xdata2.values[int(data.xdata1.size/2)+2:-1], \
color='r',label='class1')
plt.title('Two Spiral data Example')
plt.legend()
hfig2 = plt.figure(2,figsize=(10,10))
batch_index = np.array([elem for elem in range(total_batch)])
plt.plot(batch_index,grad_wrt_weight_layer1_iter,label='layer1',color='b',marker='o')
plt.plot(batch_index,grad_wrt_weight_layer4_iter,label='layer4',color='y',marker='o')
plt.plot(batch_index,grad_wrt_weight_layer5_iter,label='layer5',color='r',marker='o')
plt.legend()
plt.title('Dropout = (%s), Weight Gradient over minibatch iter' % dropoutrate_in_training)
plt.xlabel('minibatch iter')
plt.ylabel('Weight Gradient')
hfig3 = plt.figure(3,figsize=(10,10))
epoch_index = np.array([elem for elem in range(training_epochs)])
plt.plot(epoch_index,errRatebyTrainingSet,label='Training data',color='r',marker='o')
plt.plot(epoch_index,errRatebyValidationSet,label='Validation data',color='b',marker='x')
plt.legend()
plt.title('Dropout = (%s), Train/Valid Err' % dropoutrate_in_training)
plt.xlabel('Iteration epoch')
plt.ylabel('error Rate')
plt.show()
| unlicense |
daniaki/Enrich2 | enrich2/libraries/barcodevariant.py | 1 | 8178 | # Copyright 2016-2017 Alan F Rubin, Daniel C Esposito
#
# This file is part of Enrich2.
#
# Enrich2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Enrich2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Enrich2. If not, see <http://www.gnu.org/licenses/>.
"""
Enrich2 libraries barcodevariant module
=======================================
Contains the concrete class ``BcvSeqLib`` which represents a sequencing
library containing variants which are also barcode sequences.
"""
import logging
import pandas as pd
from ..base.utils import compute_md5, log_message
from ..libraries.barcodemap import BarcodeMap
from .barcode import BarcodeSeqLib
from .variant import VariantSeqLib
__all__ = [
"BcvSeqLib"
]
class BcvSeqLib(VariantSeqLib, BarcodeSeqLib):
"""
Class for counting variant data from barcoded sequencing libraries.
Creating a :py:class:`~enrich2.libraries.barcodevariant.BcvSeqLib` requires
    a valid *config* object with a ``'barcodes'`` entry and information
about the wild type sequence.
The ``barcode_map`` keyword argument can be used to pass an existing
:py:class:`~enrich2.libraries.barcodemap.BarcodeMap`. Ensuring this is the
right BarcodeMap is the responsibility of the caller.
Class Attributes
----------------
treeview_class_name : `str`
String used to render object in the GUI.
Attributes
----------
barcode_map : :py:class:`~enrich2.libraries.barcodemap.BarcodeMap`
Barcode map associated with the library.
Methods
-------
configure
Configures the object from an dictionary loaded from a configuration
file.
serialize
Returns a `dict` with all configurable attributes stored that can
be used to reconfigure a new instance.
calculate
Counts the barcodes and combines them into variant counts using a
:py:class:`~enrich2.libraries.barcodemap.BarcodeMap` object.
See Also
--------
:py:class:`~enrich2.libraries.variant.VariantSeqLib`
:py:class:`~enrich2.libraries.barcode.BarcodeSeqLib`
"""
treeview_class_name = "Barcoded Variant SeqLib"
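    # Rough usage sketch (hypothetical driver code; in practice the Enrich2
    # pipeline constructs the object, attaches the HDF5 store and runs it):
    #   seqlib = BcvSeqLib()
    #   seqlib.configure(cfg)    # cfg parsed from a .json configuration file
    #   seqlib.calculate()       # count barcodes, then map them to variants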
def __init__(self):
VariantSeqLib.__init__(self)
BarcodeSeqLib.__init__(self)
self.barcode_map = None
def configure(self, cfg, barcode_map=None):
"""
Set up the object using the config object *cfg*, usually derived from
a ``.json`` file.
Parameters
----------
cfg : `dict` or :py:class:`~enrich2.config.types.BcvSeqLibConfiguration`
The object to configure this instance with.
barcode_map : :py:class:`~enrich2.libraries.barcodemap.BarcodeMap`
An existing barcode map associated with the library.
"""
from ..config.types import BcvSeqLibConfiguration
if isinstance(cfg, dict):
init_fastq = bool(cfg.get('fastq', {}).get("reads", ""))
cfg = BcvSeqLibConfiguration(cfg, init_fastq)
elif not isinstance(cfg, BcvSeqLibConfiguration):
            raise TypeError("`cfg` was neither a "
                            "BcvSeqLibConfiguration nor a dict.")
VariantSeqLib.configure(self, cfg)
BarcodeSeqLib.configure(self, cfg)
if barcode_map is not None:
if barcode_map.filename == cfg.barcodes_cfg.map_file:
self.barcode_map = barcode_map
else:
raise ValueError("Attempted to assign non-matching "
"barcode map [{}]".format(self.name))
else:
self.barcode_map = BarcodeMap(
cfg.barcodes_cfg.map_file, is_variant=True
)
def serialize(self):
"""
Format this object (and its children) as a config object suitable for
dumping to a config file.
Returns
-------
`dict`
Attributes of this instance and that of inherited classes
in a dictionary.
"""
cfg = VariantSeqLib.serialize(self)
cfg.update(BarcodeSeqLib.serialize(self))
# required for creating new objects in GUI
if self.barcode_map is not None:
cfg['barcodes']['map file'] = self.barcode_map.filename
cfg['barcodes']['map file md5'] = compute_md5(
self.barcode_map.filename)
return cfg
def calculate(self):
"""
Counts the barcodes using :py:meth:`BarcodeSeqLib.count`
and combines them into variant counts using the
:py:class:`~enrich2.libraries.barcodemap.BarcodeMap`
"""
if not self.check_store("/main/variants/counts"):
BarcodeSeqLib.calculate(self) # count the barcodes
df_dict = dict()
barcode_variants = dict()
log_message(
logging_callback=logging.info,
msg="Converting barcodes to variants",
extra={'oname': self.name}
)
# store mapped barcodes
self.save_filtered_counts(
label='barcodes',
query="index in {} & count >= {}".format(
list(self.barcode_map.keys()), self.barcode_min_count
)
)
# count variants associated with the barcodes
max_mut_barcodes = 0
max_mut_variants = 0
for bc, count in self.store['/main/barcodes/counts'].iterrows():
count = count['count']
variant = self.barcode_map[bc]
mutations = self.count_variant(variant)
if mutations is None: # variant has too many mutations
max_mut_barcodes += 1
max_mut_variants += count
if self.report_filtered:
self.report_filtered_variant(variant, count)
else:
try:
df_dict[mutations] += count
except KeyError:
df_dict[mutations] = count
barcode_variants[bc] = mutations
# save counts, filtering based on the min count
counts = {
k: v for k, v in df_dict.items()
if v >= self.variant_min_count
}
self.save_counts('variants', counts, raw=False)
del df_dict
# write the active subset of the BarcodeMap to the store
barcodes = list(barcode_variants.keys())
data = {'value': [barcode_variants[bc] for bc in barcodes]}
barcode_variants = pd.DataFrame(data, index=barcodes)
del barcodes
barcode_variants.sort_values('value', inplace=True)
self.store.put(
key="/raw/barcodemap",
value=barcode_variants,
data_columns=barcode_variants.columns
)
del barcode_variants
if self.aligner is not None:
log_message(
logging_callback=logging.info,
msg="Aligned {} variants".format(self.aligner.calls),
extra={'oname': self.name}
)
self.aligner_cache = None
# self.report_filter_stats()
log_message(
logging_callback=logging.info,
msg="Removed {} unique barcodes ({} total variants) with "
"excess mutations".format(
max_mut_barcodes, max_mut_variants),
extra={'oname': self.name}
)
self.save_filter_stats()
self.count_synonymous()
| gpl-3.0 |
jereze/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with a multi-output estimators
==============================================
This example shows the use of multi-output estimator to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
| bsd-3-clause |
ibmets/toptrumps | training.py | 1 | 5552 | # external dependencies
from sklearn import tree
from timeit import default_timer as timer
from random import randint, sample, shuffle
from os.path import isfile
import os
import csv
import pylru
# local dependencies
import bots
import decks
# how many can we fit into memory?
ML_MODEL_CACHE_SIZE = 50
# cache of previously trained models in memory
models = pylru.lrucache(ML_MODEL_CACHE_SIZE)
#
# train a new model using the training data collected
#
def trainModel (botname, deckname):
global models
# start timing, so we know how long we're spending doing training
start = timer()
# get the path of the CSV with the training data
trainingdata = bots.getFileName(botname, deckname)
# no training data available
    if not isfile(trainingdata):
return None
# train a decision tree
data = []
target = []
headers = None
trainingrecords = 0
with open(trainingdata, "rb") as trainingfile:
reader = csv.reader(trainingfile, delimiter=",")
headers = reader.next()
for row in reader:
data.append(row[:-1])
target.append(row[-1])
trainingrecords += 1
dt = tree.DecisionTreeClassifier()
dt.fit(data, target)
# stop timing
end = timer()
# cache the decision tree so we can reuse it without training
models[trainingdata] = dt
return { "model" : dt, "training" : { "time" : (end - start), "hands" : len(data) } }
#
# gets the predictive model - fetching a previously trained model
# if one is available in the cache, or creating a new one otherwise
def getModel (botname, deckname):
global models
trainingdata = bots.getFileName(botname, deckname)
try:
return models[trainingdata]
except KeyError:
newtraining = trainModel(botname, deckname)
if newtraining is None:
return None
else:
return newtraining["model"]
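#
# Illustrative sketch of the intended flow (bot/deck names are invented and
# the feature rows must match the layout built in predict() below):
#   model = getModel("samplebot", "dinosaurs")   # trained once, then cached
#   if model is not None:
#       probabilities = model.predict_proba(rows)
#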
def compareNumbers (first, second):
if first > second:
return 1
elif first < second:
return -1
else:
return 0
def sortChoices (first, second):
if first["outcome"] == 1 and second["outcome"] == 1:
return compareNumbers(first["confidence"], second["confidence"])
elif first["outcome"] == 1:
return 1
elif second["outcome"] == 1:
return -1
elif first["outcome"] == 0 and second["outcome"] == 0:
return compareNumbers(second["confidence"], first["confidence"])
elif first["outcome"] == 0:
return 1
elif second["outcome"] == 0:
return -1
else:
return compareNumbers(second["confidence"], first["confidence"])
def randomChoice (rules):
choice = sample(rules.keys(), 1)[0]
print "Computer chose : %s" % (choice)
return choice
def predict (botname, deckname, card):
model = getModel(botname, deckname)
deck = decks.get(deckname)
rules = deck["rules"]
if model is None:
return { "confidence" : 0, "choice" : randomChoice(rules) }
data = []
optionidx = 0
for option in rules.keys():
optionData = []
for attribute in rules.keys():
optionData.append(card[attribute])
optionData.append(optionidx)
optionidx += 1
data.append(optionData)
predictions = model.predict_proba(data)
print predictions
if len(predictions) != len(rules.keys()):
print "Insufficient training. Resorting to random."
return { "confidence" : 0, "choice" : randomChoice(rules) }
choices = []
optionidx = 0
for option in rules.keys():
print option
print len(predictions[optionidx])
# we need predictions for win/loss/draw
# which only happen once all options have been
# represented in the training data
if len(predictions[optionidx]) != 3:
print "Insufficient training. Resorting to random."
return { "confidence" : 0, "choice" : randomChoice(rules) }
loss_prob = predictions[optionidx][0]
draw_prob = predictions[optionidx][1]
win_prob = predictions[optionidx][2]
print "probability of losing " + str(predictions[optionidx][0])
print "probability of drawing " + str(predictions[optionidx][1])
print "probability of winning " + str(predictions[optionidx][2])
if win_prob >= draw_prob and win_prob > loss_prob:
choices.append({
"choice" : option,
"outcome" : 1,
"confidence" : win_prob * 100
})
elif (draw_prob > win_prob and draw_prob > loss_prob) or (win_prob == loss_prob and win_prob > draw_prob):
choices.append({
"choice" : option,
"outcome" : 0,
"confidence" : draw_prob * 100
})
else:
choices.append({
"choice" : option,
"outcome" : -1,
"confidence" : loss_prob * 100
})
optionidx += 1
sortedChoices = sorted(choices, cmp=sortChoices, reverse=True)
choice = sortedChoices[0]["choice"]
print "Computer chose : %s" % (choice)
outcome = ""
if sortedChoices[0]["outcome"] == 1:
outcome = "win"
elif sortedChoices[0]["outcome"] == 0:
outcome = "at least draw"
else:
outcome = "draw"
message = "(expecting that it would %s with %d percent confidence)"
print message % (outcome, sortedChoices[0]["confidence"])
return sortedChoices[0]
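# Minimal self-contained sketch of the training step above (hypothetical data,
# not tied to any real deck or bot): each training row is the card's attribute
# values plus the index of the chosen attribute, and the label is the observed
# outcome. Labels 0/1/2 are used here so that predict_proba columns come out in
# the loss/draw/win order assumed by predict().
if __name__ == "__main__":
    demo_data = [
        [10, 3, 7, 0],   # attribute values + chosen attribute index
        [2, 9, 5, 1],
        [6, 6, 6, 2],
    ]
    demo_target = [0, 2, 1]  # loss, win, draw
    demo_tree = tree.DecisionTreeClassifier()
    demo_tree.fit(demo_data, demo_target)
    print demo_tree.predict_proba([[10, 3, 7, 0]])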
| mit |
dmnfarrell/mirnaseq | setup.py | 2 | 1729 | from setuptools import setup
import sys,os
inst_requires = ['numpy>=1.10',
'pandas>=0.20',
'seaborn>=0.7',
'scikit-learn>=0.23.2',
'pyfaidx>=0.5.4',
'pysam>=0.10.0',
'HTSeq>=0.6',
'bx-python>=0.5',
'forgi==1.1',
'logging_exceptions']
major, minor, micro = sys.version_info[:3]
if major == 2:
inst_requires.append('future')
setup(
name = 'smallrnaseq',
version = '0.6.0',
description = 'Package for short RNA-seq analysis',
long_description = 'smallrnaseq is a Python package for processing of small RNA seq data.',
url='https://github.com/dmnfarrell/smallrnaseq',
license='GPL v3',
author = 'Damien Farrell',
author_email = '[email protected]',
packages = ['smallrnaseq'],
package_data={'smallrnaseq': ['data/*.*','data/de_example/*','*.R']},
install_requires=inst_requires,
entry_points = {
'console_scripts': [
'smallrnaseq=smallrnaseq.app:main',
'mirdeep2=smallrnaseq.mirdeep2:main']
},
classifiers = ['Operating System :: OS Independent',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 2.7',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'License :: OSI Approved :: Apache Software License',
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research'],
keywords = ['rna','sequencing','mirdeep2','biology','scientific'],
)
| gpl-3.0 |
rmkoesterer/uga | uga/RunSnvplot.py | 1 | 32158 | ## Copyright (c) 2015 Ryan Koesterer GNU General Public License v3
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import pandas as pd
import numpy as np
import scipy.stats as scipy
from uga import Parse
import pysam
import math
from uga import Process
import readline
import rpy2.robjects as ro
from rpy2.robjects import pandas2ri
import logging
import re
pd.options.mode.chained_assignment = None
pandas2ri.activate()
logging.basicConfig(format='%(asctime)s - %(processName)s - %(name)s - %(message)s',level=logging.DEBUG)
logger = logging.getLogger("RunSnvplot")
def RunSnvplot(args):
cfg = Parse.generate_snvplot_cfg(args)
Parse.print_snvplot_options(cfg)
if not cfg['debug']:
logging.disable(logging.CRITICAL)
ro.r('suppressMessages(library(ggplot2))')
ro.r('suppressMessages(library(grid))')
ro.r('suppressMessages(library(RColorBrewer))')
handle=pysam.TabixFile(filename=cfg['file'],parser=pysam.asVCF())
header = [x for x in handle.header]
skip_rows = len(header)-1
cols = header[-1].split()
pcols = cfg['pcol'].split(',')
cols_extract = [cfg['chrcol'],cfg['bpcol']] + pcols
if cfg['qq_strat_freq']:
if cfg['freqcol'] not in cols:
print(Process.Error("frequency column " + cfg['freqcol'] + " not found, unable to proceed with frequency stratified plots").out)
return 1
else:
cols_extract = cols_extract + [cfg['freqcol']]
print("frequency column " + cfg['freqcol'] + " found")
if cfg['qq_strat_mac']:
if cfg['maccol'] not in cols:
print(Process.Error("minor allele count column " + cfg['maccol'] + " not found, unable to proceed with minor allele count stratified plots").out)
return 1
else:
cols_extract = cols_extract + [cfg['maccol']]
print("minor allele count column " + cfg['maccol'] + " found")
print("importing data")
r = pd.read_table(cfg['file'],sep='\t',skiprows=skip_rows,usecols=cols_extract,compression='gzip')
print(str(r.shape[0]) + " total variants found")
for pcol in pcols:
print("plotting p-values for column " + pcol + " ...")
extract_cols = [cfg['chrcol'],cfg['bpcol'],pcol]
if cfg['freqcol'] in r:
extract_cols = extract_cols + [cfg['freqcol']]
if cfg['maccol'] in r:
extract_cols = extract_cols + [cfg['maccol']]
results = r[extract_cols]
results.dropna(inplace=True)
results = results[(results[pcol] > 0) & (results[pcol] <= 1)].reset_index(drop=True)
print(" " + str(results.shape[0]) + " variants with plottable p-values")
results['logp'] = -1 * np.log10(results[pcol]) + 0.0
ro.globalenv['results'] = results
l = np.median(scipy.chi2.ppf([1-x for x in results[pcol].tolist()], df=1))/scipy.chi2.ppf(0.5,1)
# in R: median(qchisq(results$p, df=1, lower.tail=FALSE))/qchisq(0.5,1)
print(" genomic inflation (all variants) = " + str(l))
if cfg['qq']:
print(" generating standard qq plot")
print(" minimum p-value: " + str(np.min(results[pcol])))
a = -1 * np.log10(ro.r('ppoints(' + str(len(results.index)) + ')'))
a.sort()
results.sort_values(by=['logp'], inplace=True)
print(" maximum -1*log10(p-value): " + str(np.max(results['logp'])))
ci_upper = -1 * np.log10(scipy.beta.ppf(0.95, list(range(1,len(results[pcol]) + 1)), list(range(len(results[pcol]),0,-1))))
ci_upper.sort()
ci_lower = -1 * np.log10(scipy.beta.ppf(0.05, list(range(1,len(results[pcol]) + 1)), list(range(len(results[pcol]),0,-1))))
ci_lower.sort()
ro.globalenv['df'] = ro.DataFrame({'a': ro.FloatVector(a), 'b': ro.FloatVector(results['logp']), 'ci_lower': ro.FloatVector(ci_lower), 'ci_upper': ro.FloatVector(ci_upper)})
dftext_label = 'lambda %~~% ' + str(round(l,3))
ro.globalenv['dftext'] = ro.DataFrame({'x': ro.r('Inf'), 'y': 0.5, 'lab': dftext_label})
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq.tiff')
elif cfg['ext'] == 'png':
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,units="in",bg="white",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq.png')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq.eps')
else:
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq.pdf')
ro.r("""
gp<-ggplot(df)
pp<-gp +
aes_string(x='a',y='b') +
geom_ribbon(aes_string(x='a',ymin='ci_lower',ymax='ci_upper'), data=df, alpha=0.25, fill='black') +
geom_point(size=2) +
geom_abline(intercept=0, slope=1, alpha=0.5) +
scale_x_discrete(expression(Expected~~-log[10](italic(p)))) +
scale_y_discrete(expression(Observed~~-log[10](italic(p)))) +
coord_fixed() +
theme_bw(base_size = 12) +
geom_text(aes_string(x='x', y='y', label='lab'), data = dftext, colour="black", vjust=0, hjust=1, size = 4, parse=TRUE) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14), legend.position = 'none',
panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.text = element_text(size=12))
%s
""" % (ggsave))
if np.max(results['logp']) > cfg['crop']:
print(" generating cropped standard qq plot")
ro.r('df$b[df$b > ' + str(cfg['crop']) + ']<-' + str(cfg['crop']))
ro.r('df$shape<-0')
ro.r('df$shape[df$b == ' + str(cfg['crop']) + ']<-1')
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq.cropped.tiff')
elif cfg['ext'] == 'png':
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,units="in",bg="white",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq.cropped.png')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq.cropped.eps')
else:
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq.cropped.pdf')
ro.r("""
gp<-ggplot(df)
pp<-gp +
aes_string(x='a',y='b') +
geom_ribbon(aes_string(x='a',ymin='ci_lower',ymax='ci_upper'), data=df, alpha=0.25, fill='black') +
geom_point(aes(shape=factor(shape)),size=2) +
geom_abline(intercept=0, slope=1, alpha=0.5) +
scale_x_discrete(expression(Expected~~-log[10](italic(p)))) +
scale_y_discrete(expression(Observed~~-log[10](italic(p)))) +
coord_fixed() +
theme_bw(base_size = 12) +
geom_text(aes_string(x='x', y='y', label='lab'), data = dftext, colour="black", vjust=0, hjust=1, size = 4, parse=TRUE) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14), legend.position = 'none',
panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.text = element_text(size=12))
%s
""" % (ggsave))
def ppoints(n, a):
try:
n = np.float(len(n))
except TypeError:
n = np.float(n)
return (np.arange(n) + 1 - a)/(n + 1 - 2*a)
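		# worked example (a=0, as used below): ppoints(5, 0) gives
		# (np.arange(5) + 1) / 6.0, i.e. approximately
		# [0.167, 0.333, 0.5, 0.667, 0.833]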
if cfg['qq_strat_freq']:
print(" generating frequency stratified qq plot")
strat_ticks = np.sort([np.float(x) for x in cfg['freq_ticks'].split(',')])
results['UGA___QQ_BIN___'] = 0
for i in range(len(strat_ticks)):
results.loc[(results[cfg['freqcol']] >= strat_ticks[i]) & (results[cfg['freqcol']] <= 1-strat_ticks[i]),'UGA___QQ_BIN___'] = i+1
bin_values = results['UGA___QQ_BIN___'].value_counts()
for i in range(len(strat_ticks)+1):
if i not in bin_values.index:
bin_values[i] = 0
counts = pd.DataFrame(bin_values)
counts['lambda'] = np.nan
results['description'] = 'NA'
for i in range(len(strat_ticks)+1):
if counts.loc[i,'UGA___QQ_BIN___'] > 0:
counts.loc[i,'lambda'] = np.median(scipy.chi2.ppf([1-x for x in results[pcol][results['UGA___QQ_BIN___'] == i].tolist()], df=1))/scipy.chi2.ppf(0.5,1)
else:
counts.loc[i,'lambda'] = np.nan
if i == 0:
results.loc[results['UGA___QQ_BIN___'] == i,'description'] = "(0," + str(strat_ticks[i]) + ") ~" + str(round(counts.loc[i,'lambda'],3))
print(" MAF (0," + str(strat_ticks[i]) + "): n=" + str(np.int(counts.loc[i,'UGA___QQ_BIN___'])) + ", lambda=" + str(counts.loc[i,'lambda']))
elif i < len(strat_ticks):
results.loc[results['UGA___QQ_BIN___'] == i,'description'] = "[" + str(strat_ticks[i-1]) + "," + str(strat_ticks[i]) + ") ~" + str(round(counts.loc[i,'lambda'],3))
print(" MAF [" + str(strat_ticks[i-1]) + "," + str(strat_ticks[i]) + "): n=" + str(np.int(counts.loc[i,'UGA___QQ_BIN___'])) + ", lambda=" + str(counts.loc[i,'lambda']))
else:
results.loc[results['UGA___QQ_BIN___'] == i,'description'] = "[" + str(strat_ticks[i-1]) + ",0.5] ~" + str(round(counts.loc[i,'lambda'],3))
print(" MAF [" + str(strat_ticks[i-1]) + ",0.5]: n=" + str(np.int(counts.loc[i,'UGA___QQ_BIN___'])) + ", lambda=" + str(counts.loc[i,'lambda']))
results.sort_values(['UGA___QQ_BIN___','logp'],inplace=True)
results['expected'] = 0
for i in counts.index:
if counts.loc[i,'UGA___QQ_BIN___'] > 0:
results.loc[results['UGA___QQ_BIN___'] == i,'expected'] = np.sort(-1 * np.log10(ppoints(len(results.loc[results['UGA___QQ_BIN___'] == i,'expected']),0)))
ro.globalenv['df'] = ro.DataFrame({'expected': ro.FloatVector(results['expected']), 'logp': ro.FloatVector(results['logp']), 'UGA___QQ_BIN___': ro.IntVector(results['UGA___QQ_BIN___']), 'description': ro.StrVector(results['description'])})
ro.r("df<-df[order(df$UGA___QQ_BIN___),]")
ro.r("df$description<-ordered(df$description,levels=unique(df$description))")
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq_strat_freq.tiff')
elif cfg['ext'] == 'png':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,units="in",bg="white",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq_strat_freq.png')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq_strat_freq.eps')
else:
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq_strat_freq.pdf')
ro.r("""
gp<-ggplot(df, aes_string(x='expected',y='logp')) +
geom_point(aes_string(color='description'), size=2) +
scale_colour_manual(values=colorRampPalette(brewer.pal(9,"Blues"))(length(unique(df$description))+2)[3:(length(unique(df$description))+2)]) +
geom_abline(intercept=0, slope=1, alpha=0.5) +
scale_x_discrete(expression(Expected~~-log[10](italic(p)))) +
scale_y_discrete(expression(Observed~~-log[10](italic(p)))) +
coord_fixed() +
theme_bw(base_size = 12) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14), legend.title = element_blank(),
legend.key.height = unit(0.1,"in"), legend.text = element_text(size=6), legend.key = element_blank(), legend.justification = c(0,1),
legend.position = c(0,1), panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.text = element_text(size=12))
%s
""" % (ggsave))
if np.max(results['logp']) > cfg['crop']:
print(" generating cropped frequency stratified qq plot")
ro.r('df$logp[df$logp > ' + str(cfg['crop']) + ']<-' + str(cfg['crop']))
ro.r('df$shape<-0')
ro.r('df$shape[df$logp == ' + str(cfg['crop']) + ']<-1')
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq_strat_freq.cropped.tiff')
elif cfg['ext'] == 'png':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,units="in",bg="white",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq_strat_freq.cropped.png')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq_strat_freq.cropped.eps')
else:
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq_strat_freq.cropped.pdf')
ro.r("""
gp<-ggplot(df, aes_string(x='expected',y='logp')) +
geom_point(aes(shape=factor(shape), color=description), size=2) +
scale_colour_manual(values=colorRampPalette(brewer.pal(9,"Blues"))(length(unique(df$description))+2)[3:(length(unique(df$description))+2)]) +
geom_abline(intercept=0, slope=1, alpha=0.5) +
scale_x_discrete(expression(Expected~~-log[10](italic(p)))) +
scale_y_discrete(expression(Observed~~-log[10](italic(p)))) +
coord_fixed() +
theme_bw(base_size = 12) +
guides(shape=FALSE) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14), legend.title = element_blank(),
legend.key.height = unit(0.1,"in"), legend.text = element_text(size=6), legend.key = element_blank(), legend.justification = c(0,1),
legend.position = c(0,1), panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.text = element_text(size=12))
%s
""" % (ggsave))
if cfg['qq_strat_mac']:
print(" generating minor allele count stratified qq plot")
strat_ticks = np.sort([np.float(x) for x in cfg['mac_ticks'].split(',')])
results['UGA___QQ_BIN___'] = 0
for i in range(len(strat_ticks)):
results.loc[results[cfg['maccol']] >= strat_ticks[i],'UGA___QQ_BIN___'] = i+1
bin_values = results['UGA___QQ_BIN___'].value_counts()
for i in range(len(strat_ticks)+1):
if i not in bin_values.index:
bin_values[i] = 0
counts = pd.DataFrame(bin_values)
counts['lambda'] = 0
results['description'] = 'NA'
for i in np.sort(counts.index):
if counts.loc[i,'UGA___QQ_BIN___'] > 0:
counts.loc[i,'lambda'] = np.median(scipy.chi2.ppf([1-x for x in results[pcol][results['UGA___QQ_BIN___'] == i].tolist()], df=1))/scipy.chi2.ppf(0.5,1)
else:
counts.loc[i,'lambda'] = np.nan
if i == 0:
results.loc[results['UGA___QQ_BIN___'] == i,'description'] = "(0," + str(int(strat_ticks[i])) + ") ~" + str(round(counts.loc[i,'lambda'],3))
print(" MAC (0," + str(int(strat_ticks[i])) + "): n=" + str(np.int(counts.loc[i,'UGA___QQ_BIN___'])) + ", lambda=" + str(counts.loc[i,'lambda']))
elif i < len(strat_ticks):
results.loc[results['UGA___QQ_BIN___'] == i,'description'] = "[" + str(int(strat_ticks[i-1])) + "," + str(int(strat_ticks[i])) + ") ~" + str(round(counts.loc[i,'lambda'],3))
print(" MAC [" + str(int(strat_ticks[i-1])) + "," + str(int(strat_ticks[i])) + "): n=" + str(np.int(counts.loc[i,'UGA___QQ_BIN___'])) + ", lambda=" + str(counts.loc[i,'lambda']))
else:
results.loc[results['UGA___QQ_BIN___'] == i,'description'] = "[" + str(int(strat_ticks[i-1])) + ",...] ~" + str(round(counts.loc[i,'lambda'],3))
print(" MAC [" + str(int(strat_ticks[i-1])) + ",...]: n=" + str(np.int(counts.loc[i,'UGA___QQ_BIN___'])) + ", lambda=" + str(counts.loc[i,'lambda']))
results.sort_values(['UGA___QQ_BIN___','logp'],inplace=True)
results['expected'] = 0
for i in counts.index:
results.loc[results['UGA___QQ_BIN___'] == i,'expected'] = np.sort(-1 * np.log10(ppoints(len(results.loc[results['UGA___QQ_BIN___'] == i,'expected']),0)))
ro.globalenv['df'] = ro.DataFrame({'expected': ro.FloatVector(results['expected']), 'logp': ro.FloatVector(results['logp']), 'UGA___QQ_BIN___': ro.IntVector(results['UGA___QQ_BIN___']), 'description': ro.StrVector(results['description'])})
ro.r("df<-df[order(df$UGA___QQ_BIN___),]")
ro.r("df$description<-ordered(df$description,levels=unique(df$description))")
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq_strat_mac.tiff')
elif cfg['ext'] == 'png':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,units="in",bg="white",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq_strat_mac.png')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq_strat_mac.eps')
else:
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq_strat_mac.pdf')
ro.r("""
gp<-ggplot(df, aes_string(x='expected',y='logp')) +
geom_point(aes_string(color='description'), size=2) +
scale_colour_manual(values=colorRampPalette(brewer.pal(9,"Blues"))(length(unique(df$description))+2)[3:(length(unique(df$description))+2)]) +
geom_abline(intercept=0, slope=1, alpha=0.5) +
scale_x_discrete(expression(Expected~~-log[10](italic(p)))) +
scale_y_discrete(expression(Observed~~-log[10](italic(p)))) +
coord_fixed() +
theme_bw(base_size = 12) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14), legend.title = element_blank(),
legend.key.height = unit(0.1,"in"), legend.text = element_text(size=6), legend.key = element_blank(), legend.justification = c(0,1),
legend.position = c(0,1), panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.text = element_text(size=12))
%s
""" % (ggsave))
if np.max(results['logp']) > cfg['crop']:
print(" generating cropped frequency stratified qq plot")
ro.r('df$logp[df$logp > ' + str(cfg['crop']) + ']<-' + str(cfg['crop']))
ro.r('df$shape<-0')
ro.r('df$shape[df$logp == ' + str(cfg['crop']) + ']<-1')
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq_strat_mac.cropped.tiff')
elif cfg['ext'] == 'png':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,units="in",bg="white",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq_strat_mac.cropped.png')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq_strat_mac.cropped.eps')
else:
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq_strat_mac.cropped.pdf')
ro.r("""
gp<-ggplot(df, aes_string(x='expected',y='logp')) +
geom_point(aes(shape=factor(shape), color=description), size=2) +
scale_colour_manual(values=colorRampPalette(brewer.pal(9,"Blues"))(length(unique(df$description))+2)[3:(length(unique(df$description))+2)]) +
geom_abline(intercept=0, slope=1, alpha=0.5) +
scale_x_discrete(expression(Expected~~-log[10](italic(p)))) +
scale_y_discrete(expression(Observed~~-log[10](italic(p)))) +
coord_fixed() +
theme_bw(base_size = 12) +
guides(shape=FALSE) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14), legend.title = element_blank(),
legend.key.height = unit(0.1,"in"), legend.text = element_text(size=6), legend.key = element_blank(), legend.justification = c(0,1),
legend.position = c(0,1), panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.text = element_text(size=12))
%s
""" % (ggsave))
if cfg['mht']:
print(" generating standard manhattan plot")
print(" minimum p-value: " + str(np.min(results[pcol])))
print(" maximum -1*log10(p-value): " + str(np.max(results['logp'])))
if cfg['gc'] and l > 1:
print(" adjusting p-values for genomic inflation for p-value column " + pcol)
				results[pcol]=2 * scipy.norm.cdf(-1 * np.abs(scipy.norm.ppf(0.5*results[pcol]) / math.sqrt(l)))
				results['logp'] = -1 * np.log10(results[pcol])
				print("  minimum post-gc adjustment p-value: " + str(np.min(results[pcol])))
				print("  maximum post-gc adjustment -1*log10(p-value): " + str(np.max(results['logp'])))
else:
print(" skipping genomic inflation correction")
print(" calculating genomic positions")
results.sort_values(by=[cfg['chrcol'],cfg['bpcol']], inplace=True)
ticks = []
lastbase = 0
results['gpos'] = 0
nchr = len(list(np.unique(results[cfg['chrcol']].values)))
chrs = np.unique(results[cfg['chrcol']].values)
if cfg['color']:
colours = ["#08306B","#41AB5D","#000000","#F16913","#3F007D","#EF3B2C","#08519C","#238B45","#252525","#D94801","#54278F","#CB181D","#2171B5","#006D2C","#525252","#A63603","#6A51A3","#A50F15","#4292C6","#00441B","#737373","#7F2704","#807DBA","#67000D"]
else:
colours = ["#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3"]
if nchr == 1:
results['gpos'] = results[cfg['bpcol']]
results['colours'] = "#08589e"
				gpos_span = results['gpos'].max() - results['gpos'].min()
				# pick a tick spacing appropriate to the size of the plotted region
				if gpos_span <= 1000:
					tick_step = 100
				elif gpos_span <= 10000:
					tick_step = 1000
				elif gpos_span <= 100000:
					tick_step = 10000
				elif gpos_span <= 1000000:
					tick_step = 10000 * int(math.ceil(gpos_span / 100000.0))
				elif gpos_span <= 10000000:
					tick_step = 1000000
				elif gpos_span <= 100000000:
					tick_step = 10000000
				else:
					tick_step = 25000000
				ticks = [x for x in range(results['gpos'].min(),results['gpos'].max()) if x % tick_step == 0]
else:
results['colours'] = "#000000"
for i in range(len(chrs)):
print(" processed chromosome " + str(int(chrs[i])))
if i == 0:
results.loc[results[cfg['chrcol']] == chrs[i],'gpos'] = results.loc[results[cfg['chrcol']] == chrs[i],cfg['bpcol']]
else:
lastbase = lastbase + results.loc[results[cfg['chrcol']] == chrs[i-1],cfg['bpcol']].iloc[-1]
results.loc[results[cfg['chrcol']] == chrs[i],'gpos'] = (results.loc[results[cfg['chrcol']] == chrs[i],cfg['bpcol']]) + lastbase
if results.loc[results[cfg['chrcol']] == chrs[i]].shape[0] > 1:
ticks.append(results.loc[results[cfg['chrcol']] == chrs[i],'gpos'].iloc[0] + (results.loc[results[cfg['chrcol']] == chrs[i],'gpos'].iloc[-1] - results.loc[results[cfg['chrcol']] == chrs[i],'gpos'].iloc[0])/2)
else:
ticks.append(results.loc[results[cfg['chrcol']] == chrs[i],'gpos'].iloc[0])
results.loc[results[cfg['chrcol']] == chrs[i],'colours'] = colours[int(chrs[i])]
results['logp'] = -1 * np.log10(results[pcol])
if results.shape[0] >= 1000000:
sig = 5.4e-8
else:
sig = 0.05 / results.shape[0]
print(" significance level set to p-value = " + str(sig) + " (-1*log10(p-value) = " + str(-1 * np.log10(sig)) + ")")
print(" " + str(len(results[pcol][results[pcol] <= sig])) + " genome wide significant variants")
chr = results[cfg['chrcol']][0]
maxy=int(max(np.ceil(-1 * np.log10(sig)),np.ceil(results['logp'].max())))
if maxy > 20:
y_breaks = list(range(0,maxy,5))
y_labels = list(range(0,maxy,5))
else:
y_breaks = list(range(0,maxy))
y_labels = list(range(0,maxy))
ro.globalenv['df'] = ro.DataFrame({'gpos': ro.FloatVector(results['gpos']), 'logp': ro.FloatVector(results['logp']), 'colours': ro.FactorVector(results['colours'])})
ro.globalenv['ticks'] = ro.FloatVector(ticks)
ro.globalenv['labels'] = ro.Vector(["{:,}".format(x/1000) for x in ticks])
ro.globalenv['colours'] = ro.StrVector(colours)
ro.globalenv['chrs'] = ro.FloatVector(chrs)
ro.r('save.image("R.img")')
print(" generating manhattan plot")
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=gp,width=16,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.mht.tiff')
elif cfg['ext'] == 'png':
ggsave = 'ggsave(filename="%s",plot=gp,width=16,height=4,units="in",bg="white",dpi=300)' % (cfg['out'] + '.' + pcol + '.mht.png')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=gp,width=16,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.mht.eps')
else:
ggsave = 'ggsave(filename="%s",plot=gp,width=16,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.mht.pdf')
if nchr == 1:
ro.r("""
gp<-ggplot(df, aes_string(x='gpos',y='logp')) +
geom_hline(yintercept = -1 * log10(%g),colour="#B8860B", linetype=5, size = 0.25) +
geom_point(size=1.5) +
scale_x_continuous(expression(Chromosome~~%d~~(kb)),breaks=ticks,labels=labels) + \
scale_y_continuous(expression(-log[10](italic(p))),breaks=seq(0,%d,1),limits=c(0,%d)) + \
theme_bw(base_size = 8) + \
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14),
panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.title = element_text(size=10),
axis.text = element_text(size=12), legend.position = 'none')
%s
""" % (sig, chr, maxy, maxy, ggsave))
else:
ro.r("""
gp = ggplot(df, aes_string(x='gpos',y='logp',colour='colours')) +
geom_hline(yintercept = -1 * log10(%g),colour="#B8860B", linetype=5, size = 0.25) +
geom_point(size=1.5) +
scale_colour_manual(values=colours) +
scale_x_continuous(expression(Chromosome),breaks=ticks,labels=chrs) +
scale_y_continuous(expression(-log[10](italic(p))),breaks=seq(0,%d,1),limits=c(0,%d)) +
theme_bw(base_size = 8) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14),
panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.title = element_text(size=10),
axis.text = element_text(size=12), legend.position = 'none')
%s
""" % (sig, maxy, maxy, ggsave))
if maxy > cfg['crop']:
maxy = cfg['crop']
ro.r('df$logp[df$logp > ' + str(cfg['crop']) + ']<-' + str(cfg['crop']))
ro.r('df$shape<-0')
ro.r('df$shape[df$logp == ' + str(cfg['crop']) + ']<-1')
print(" generating cropped manhattan plot")
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=gp,width=16,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.mht.cropped.tiff')
elif cfg['ext'] == 'png':
ggsave = 'ggsave(filename="%s",plot=gp,width=16,height=4,units="in",bg="white",dpi=300)' % (cfg['out'] + '.' + pcol + '.mht.cropped.png')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=gp,width=16,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.mht.cropped.eps')
else:
ggsave = 'ggsave(filename="%s",plot=gp,width=16,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.mht.cropped.pdf')
if nchr == 1:
ro.r("""
gp<-ggplot(df, aes_string(x='gpos',y='logp')) +
geom_hline(yintercept = -1 * log10(%g),colour="#B8860B", linetype=5, size = 0.25) +
geom_point(aes(shape=factor(shape)),size=1.5) +
scale_x_continuous(expression(Chromosome~~%d~~(kb)),breaks=ticks,labels=labels) +
scale_y_continuous(expression(-log[10](italic(p))),breaks=seq(0,%d,1),limits=c(0,%d)) +
theme_bw(base_size = 8) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14),
panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.title = element_text(size=10),
axis.text = element_text(size=12), legend.position = 'none')
%s
""" % (sig, chr, maxy, maxy, ggsave))
else:
ro.r("""
gp = ggplot(df, aes_string(x='gpos',y='logp',colour='colours')) +
geom_hline(yintercept = -1 * log10(%g),colour="#B8860B", linetype=5, size = 0.25) +
geom_point(aes(shape=factor(shape)),size=1.5) +
scale_colour_manual(values=colours) +
scale_x_continuous(expression(Chromosome),breaks=ticks,labels=chrs) +
scale_y_continuous(expression(-log[10](italic(p))),breaks=seq(0,%d,1),limits=c(0,%d)) +
theme_bw(base_size = 8) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14),
panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.title = element_text(size=8),
axis.text = element_text(size=12), legend.position = 'none')
%s
""" % (sig, maxy, maxy, ggsave))
print("process complete")
return 0
| gpl-3.0 |
alekz112/statsmodels | statsmodels/examples/tsa/arma_plots.py | 33 | 2516 | '''Plot acf and pacf for some ARMA(1,1)
'''
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.tsa.arima_process as tsp
from statsmodels.sandbox.tsa.fftarma import ArmaFft as FftArmaProcess
import statsmodels.tsa.stattools as tss
from statsmodels.graphics.tsaplots import plotacf
np.set_printoptions(precision=2)
arcoefs = [0.9, 0., -0.5] #[0.9, 0.5, 0.1, 0., -0.5]
macoefs = [0.9, 0., -0.5] #[0.9, 0.5, 0.1, 0., -0.5]
nsample = 1000
nburnin = 1000
sig = 1
fig = plt.figure(figsize=(8, 13))
fig.suptitle('ARMA: Autocorrelation (left) and Partial Autocorrelation (right)')
subplotcount = 1
nrows = 4
for arcoef in arcoefs[:-1]:
for macoef in macoefs[:-1]:
ar = np.r_[1., -arcoef]
ma = np.r_[1., macoef]
#y = tsp.arma_generate_sample(ar,ma,nsample, sig, burnin)
#armaprocess = FftArmaProcess(ar, ma, nsample) #TODO: make n optional
#armaprocess.plot4()
armaprocess = tsp.ArmaProcess(ar, ma)
acf = armaprocess.acf(20)[:20]
pacf = armaprocess.pacf(20)[:20]
ax = fig.add_subplot(nrows, 2, subplotcount)
plotacf(acf, ax=ax)
## ax.set_title('Autocorrelation \nar=%s, ma=%rs' % (ar, ma),
## size='xx-small')
ax.text(0.7, 0.6, 'ar =%s \nma=%s' % (ar, ma),
transform=ax.transAxes,
horizontalalignment='left', #'right',
size='xx-small')
ax.set_xlim(-1,20)
subplotcount +=1
ax = fig.add_subplot(nrows, 2, subplotcount)
plotacf(pacf, ax=ax)
## ax.set_title('Partial Autocorrelation \nar=%s, ma=%rs' % (ar, ma),
## size='xx-small')
ax.text(0.7, 0.6, 'ar =%s \nma=%s' % (ar, ma),
transform=ax.transAxes,
horizontalalignment='left', #'right',
size='xx-small')
ax.set_xlim(-1,20)
subplotcount +=1
axs = fig.axes
### turn of the 2nd column y tick labels
##for ax in axs[1::2]:#[:,1].flat:
## for label in ax.get_yticklabels(): label.set_visible(False)
# turn off all but the bottom xtick labels
for ax in axs[:-2]:#[:-1,:].flat:
for label in ax.get_xticklabels(): label.set_visible(False)
# use a MaxNLocator on the first column y axis if you have a bunch of
# rows to avoid bunching; example below uses at most 3 ticks
import matplotlib.ticker as mticker
for ax in axs: #[::2]:#[:,1].flat:
ax.yaxis.set_major_locator( mticker.MaxNLocator(3 ))
plt.show()
| bsd-3-clause |
mojoboss/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
xiaohan2012/snpp | snpp/utils/matrix.py | 1 | 2902 | import numpy as np
from scipy.sparse import csr_matrix, issparse, dok_matrix, isspmatrix_dok
from sklearn.cross_validation import train_test_split
def indexed_entries(sparse_matrix):
"""
Args:
Return:
list of (row_id, col_id, value)
"""
if not isspmatrix_dok(sparse_matrix):
sparse_matrix = sparse_matrix.todok()
return ((i, j, sparse_matrix[i, j])
for i, j in zip(*sparse_matrix.nonzero()))
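# Illustrative example (order of the pairs follows the dok iteration and is
# not guaranteed to be sorted):
#   m = csr_matrix(np.array([[0, 3], [4, 0]]))
#   list(indexed_entries(m))  ->  [(0, 1, 3), (1, 0, 4)] (in some order)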
def zero(m):
"""return the zero-valued entries' indices
"""
return (m == 0).nonzero()
def difference_ratio(M1, M2):
assert M1.shape == M2.shape
_, idx = np.nonzero(M1 != M2)
return len(idx) / M1.size
def difference_ratio_sparse(M1, M2):
"""different ratio on nonzero elements
"""
assert issparse(M1)
assert issparse(M2)
assert M1.shape == M2.shape
assert M1.nnz == M2.nnz
s1 = set(indexed_entries(M1))
s2 = set(indexed_entries(M2))
return 1 - len(s1.intersection(s2)) / M1.nnz
def save_sparse_csr(filename, array):
np.savez(filename,
data=array.data,
indices=array.indices,
indptr=array.indptr,
shape=array.shape)
def load_sparse_csr(filename):
loader = np.load(filename)
return csr_matrix((loader['data'],
loader['indices'],
loader['indptr']),
shape=loader['shape'])
def _make_matrix(items, shape):
idx1, idx2, data = zip(*items)
return csr_matrix((data, (idx1, idx2)), shape=shape)
def split_train_dev_test(m, weights=[0.8, 0.1, 0.1]):
"""
Returns:
three matrices whose nnz satistify weights
"""
assert len(weights) == 3
assert abs(sum(weights) - 1.0) < 0.00001
assert issparse(m)
entries = indexed_entries(m)
remain_sum = np.sum(weights[1:])
train, dev_test = train_test_split(list(entries), train_size=weights[0], test_size=remain_sum)
dev, test = train_test_split(dev_test,
train_size=weights[1] / remain_sum,
test_size=weights[2] / remain_sum)
return (_make_matrix(train, m.shape),
_make_matrix(dev, m.shape),
_make_matrix(test, m.shape))
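# e.g. split_train_dev_test(m, weights=[0.8, 0.1, 0.1]) returns three matrices
# with the same shape as m whose nonzero entries are disjoint and hold roughly
# 80% / 10% / 10% of m.nnz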
def split_train_test(m, weights=[0.9, 0.1]):
assert len(weights) == 2
assert abs(sum(weights) - 1.0) < 0.00001
assert issparse(m)
entries = indexed_entries(m)
train, test = train_test_split(list(entries), train_size=weights[0], test_size=weights[1])
return (_make_matrix(train, m.shape),
_make_matrix(test, m.shape))
def delete_csr_entries(m, idxs):
"""idxs: list of (row_id, col_id) to delete
not inplace
"""
m_new = dok_matrix(m.shape)
idxs = set(idxs)
for i, j in zip(*m.nonzero()):
if (i, j) not in idxs:
m_new[i, j] = m[i, j]
return m_new.tocsr()
| mit |
equialgo/scikit-learn | sklearn/base.py | 7 | 19499 | """Base classes for all estimators."""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import copy
import warnings
import numpy as np
from scipy import sparse
from .externals import six
from .utils.fixes import signature
from . import __version__
##############################################################################
def _first_and_last_element(arr):
"""Returns first and last element of numpy array or sparse matrix."""
if isinstance(arr, np.ndarray) or hasattr(arr, 'data'):
# numpy array or sparse matrix with .data attribute
data = arr.data if sparse.issparse(arr) else arr
return data.flat[0], data.flat[-1]
else:
# Sparse matrices without .data attribute. Only dok_matrix at
# the time of writing, in this case indexing is fast
return arr[0, 0], arr[-1, -1]
def clone(estimator, safe=True):
"""Constructs a new estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It yields a new estimator
with the same parameters that has not been fit on any data.
Parameters
----------
estimator : estimator object, or list, tuple or set of objects
The estimator or group of estimators to be cloned
safe : boolean, optional
If safe is false, clone will fall back to a deepcopy on objects
that are not estimators.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, 'get_params'):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn estimator "
"as it does not implement a 'get_params' methods."
% (repr(estimator), type(estimator)))
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in six.iteritems(new_object_params):
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if param1 is param2:
# this should always happen
continue
if isinstance(param1, np.ndarray):
# For most ndarrays, we do not test for complete equality
if not isinstance(param2, type(param1)):
equality_test = False
elif (param1.ndim > 0
and param1.shape[0] > 0
and isinstance(param2, np.ndarray)
and param2.ndim > 0
and param2.shape[0] > 0):
equality_test = (
param1.shape == param2.shape
and param1.dtype == param2.dtype
and (_first_and_last_element(param1) ==
_first_and_last_element(param2))
)
else:
equality_test = np.all(param1 == param2)
elif sparse.issparse(param1):
# For sparse matrices equality doesn't work
if not sparse.issparse(param2):
equality_test = False
elif param1.size == 0 or param2.size == 0:
equality_test = (
param1.__class__ == param2.__class__
and param1.size == 0
and param2.size == 0
)
else:
equality_test = (
param1.__class__ == param2.__class__
and (_first_and_last_element(param1) ==
_first_and_last_element(param2))
and param1.nnz == param2.nnz
and param1.shape == param2.shape
)
else:
# fall back on standard equality
equality_test = param1 == param2
if equality_test:
warnings.warn("Estimator %s modifies parameters in __init__."
" This behavior is deprecated as of 0.18 and "
"support for this behavior will be removed in 0.20."
% type(estimator).__name__, DeprecationWarning)
else:
raise RuntimeError('Cannot clone object %s, as the constructor '
'does not seem to set parameter %s' %
(estimator, name))
return new_object
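# A minimal usage sketch (illustrative only):
#
#   from sklearn.linear_model import LogisticRegression
#   est = LogisticRegression(C=2.0)
#   est_clone = clone(est)               # new, unfitted estimator
#   est_clone.get_params()['C']          # -> 2.0, parameters are carried over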
###############################################################################
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params : dict
The dictionary to pretty print
offset : int
The offset in characters to add at the begin of each line.
printer : callable
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
###############################################################################
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The latter have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
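    # Illustrative example of the nested-parameter convention (names are
    # hypothetical):
    #
    #   pipe = Pipeline([('scaler', StandardScaler()), ('svc', SVC())])
    #   pipe.set_params(svc__C=10)   # updates C on the 'svc' step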
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
def __getstate__(self):
if type(self).__module__.startswith('sklearn.'):
return dict(self.__dict__.items(), _sklearn_version=__version__)
else:
return dict(self.__dict__.items())
def __setstate__(self, state):
if type(self).__module__.startswith('sklearn.'):
pickle_version = state.pop("_sklearn_version", "pre-0.18")
if pickle_version != __version__:
warnings.warn(
"Trying to unpickle estimator {0} from version {1} when "
"using version {2}. This might lead to breaking code or "
"invalid results. Use at your own risk.".format(
self.__class__.__name__, pickle_version, __version__),
UserWarning)
self.__dict__.update(state)
###############################################################################
class ClassifierMixin(object):
"""Mixin class for all classifiers in scikit-learn."""
_estimator_type = "classifier"
def score(self, X, y, sample_weight=None):
"""Returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
###############################################################################
class RegressorMixin(object):
"""Mixin class for all regression estimators in scikit-learn."""
_estimator_type = "regressor"
def score(self, X, y, sample_weight=None):
"""Returns the coefficient of determination R^2 of the prediction.
The coefficient R^2 is defined as (1 - u/v), where u is the regression
sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual
sum of squares ((y_true - y_true.mean()) ** 2).sum().
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
"""
from .metrics import r2_score
return r2_score(y, self.predict(X), sample_weight=sample_weight,
multioutput='variance_weighted')
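    # Worked example of the formula above (illustrative): for y_true = [1, 2, 3]
    # and y_pred = [1, 2, 4], u = (3-4)**2 = 1 and v = (1-2)**2 + (2-2)**2 +
    # (3-2)**2 = 2, so R^2 = 1 - u/v = 0.5.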
###############################################################################
class ClusterMixin(object):
"""Mixin class for all cluster estimators in scikit-learn."""
_estimator_type = "clusterer"
def fit_predict(self, X, y=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
class BiclusterMixin(object):
"""Mixin class for all bicluster estimators in scikit-learn"""
@property
def biclusters_(self):
"""Convenient way to get row and column indicators together.
Returns the ``rows_`` and ``columns_`` members.
"""
return self.rows_, self.columns_
def get_indices(self, i):
"""Row and column indices of the i'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Returns
-------
row_ind : np.array, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : np.array, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
"""
rows = self.rows_[i]
columns = self.columns_[i]
return np.nonzero(rows)[0], np.nonzero(columns)[0]
def get_shape(self, i):
"""Shape of the i'th bicluster.
Returns
-------
shape : (int, int)
Number of rows and columns (resp.) in the bicluster.
"""
indices = self.get_indices(i)
return tuple(len(i) for i in indices)
def get_submatrix(self, i, data):
"""Returns the submatrix corresponding to bicluster `i`.
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
"""
from .utils.validation import check_array
data = check_array(data, accept_sparse='csr')
row_ind, col_ind = self.get_indices(i)
return data[row_ind[:, np.newaxis], col_ind]
###############################################################################
class TransformerMixin(object):
"""Mixin class for all transformers in scikit-learn."""
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
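# Illustrative sketch (not part of scikit-learn): StandardScaler inherits
# TransformerMixin, so the unsupervised (arity-1) branch above makes
# ``fit_transform(X)`` equivalent to ``fit(X).transform(X)``.
def _transformer_mixin_demo():
    import numpy as np
    from sklearn.preprocessing import StandardScaler
    X = np.array([[0.0, 10.0], [2.0, 14.0], [4.0, 18.0]])
    assert np.allclose(StandardScaler().fit_transform(X),
                       StandardScaler().fit(X).transform(X))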
class DensityMixin(object):
"""Mixin class for all density estimators in scikit-learn."""
_estimator_type = "DensityEstimator"
def score(self, X, y=None):
"""Returns the score of the model on the data X
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
score : float
"""
pass
###############################################################################
class MetaEstimatorMixin(object):
"""Mixin class for all meta estimators in scikit-learn."""
# this is just a tag for the moment
###############################################################################
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier."""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Returns True if the given estimator is (probably) a regressor."""
return getattr(estimator, "_estimator_type", None) == "regressor"
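# Illustrative sketch (not part of scikit-learn): the ``_estimator_type`` tag
# set by ClassifierMixin / RegressorMixin is what the two helpers above read.
def _estimator_type_demo():
    from sklearn.linear_model import LinearRegression, LogisticRegression
    assert is_regressor(LinearRegression())
    assert is_classifier(LogisticRegression())
    assert not is_classifier(LinearRegression())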
| bsd-3-clause |
nikitasingh981/scikit-learn | sklearn/linear_model/passive_aggressive.py | 14 | 12060 | # Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int, RandomState instance or None, optional, default=None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
.. versionadded:: 0.17
parameter *class_weight* to automatically weight samples.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
.. versionadded:: 0.19
parameter *average* to use weights averaging in SGD
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="hinge", n_jobs=1, random_state=None,
warm_start=False, class_weight=None, average=False):
super(PassiveAggressiveClassifier, self).__init__(
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
average=average,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
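# Illustrative sketch (not part of scikit-learn): fitting the classifier
# defined above on a tiny, made-up linearly separable problem; only the
# prediction shape is asserted, since the fitted boundary depends on the run.
def _passive_aggressive_classifier_demo():
    import numpy as np
    X = np.array([[-2.0, -1.0], [-1.0, -1.0], [1.0, 1.0], [2.0, 1.0]])
    y = np.array([0, 0, 1, 1])
    clf = PassiveAggressiveClassifier(C=1.0, n_iter=5, random_state=0)
    clf.fit(X, y)
    assert clf.predict(X).shape == (4,)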
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int, RandomState instance or None, optional, default=None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
.. versionadded:: 0.19
parameter *average* to use weights averaging in SGD
Attributes
----------
coef_ : array, shape = [n_features]
Weights assigned to the features.
intercept_ : array, shape = [1]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False,
average=False):
super(PassiveAggressiveRegressor, self).__init__(
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start,
average=average)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
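# Illustrative sketch (not part of scikit-learn): fitting the regressor
# defined above on a small, made-up noiseless linear target; only the
# prediction shape is asserted.
def _passive_aggressive_regressor_demo():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(20, 3)
    y = X.dot(np.array([1.0, 2.0, 3.0]))
    reg = PassiveAggressiveRegressor(C=1.0, n_iter=5, random_state=0)
    reg.fit(X, y)
    assert reg.predict(X).shape == (20,)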
| bsd-3-clause |
ashariati/rpg_svo | svo_analysis/src/svo_analysis/analyse_dataset.py | 17 | 1178 | # -*- coding: utf-8 -*-
import associate
import numpy as np
import matplotlib.pyplot as plt
import yaml
def loadDataset(filename):
file = open(filename)
data = file.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
D = np.array([[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"], dtype=np.float64)
return D
dataset_dir = '/home/cforster/Datasets/SlamBenchmark/px4_r2'
trajectory_data = dataset_dir+'/groundtruth.txt'
stepsize = 10
# load dataset
data = loadDataset(trajectory_data)
n = data.shape[0]
steps = np.arange(0,n,stepsize)
# compute trajectory length
last_pos = data[0,1:4]
trajectory_length = 0
for i in steps[1:]:
new_pos = data[i,1:4]
trajectory_length += np.linalg.norm(new_pos-last_pos)
last_pos = new_pos
print 'trajectory length = ' + str(trajectory_length) + 'm'
print 'height mean = ' + str(np.mean(data[:,3])) + 'm'
print 'height median = ' + str(np.median(data[:,3])) + 'm'
print 'height std = ' + str(np.std(data[:,3])) + 'm'
print 'duration = ' + str(data[-1,0]-data[0,0]) + 's'
print 'speed = ' + str(trajectory_length/(data[-1,0]-data[0,0])) + 'm/s'
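# Illustrative cross-check (not in the original script): the loop above is
# equivalent to summing the norms of successive position differences over the
# same subsampled steps (assumes a NumPy version where np.linalg.norm accepts
# an ``axis`` argument).
trajectory_length_vec = np.sum(np.linalg.norm(np.diff(data[steps, 1:4], axis=0), axis=1))
assert np.isclose(trajectory_length_vec, trajectory_length)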
| gpl-3.0 |
mkoledoye/mds_examples | core/mds.py | 2 | 22924 | '''
Multi-dimensional Scaling (MDS)
variant:
-- classical -> _smacof_single
-- anchored -> _smacof_with_anchors_single
-- MDS-RFID -> _smacof_with_distance_recovery_single
'''
from __future__ import division
import operator
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.metrics import euclidean_distances
from sklearn.utils import check_random_state, check_array, check_symmetric
from sklearn.externals.joblib import Parallel
from sklearn.externals.joblib import delayed
from sklearn.isotonic import IsotonicRegression
# modifications were made to the original code from sklearn.manifold.MDS
'''
New BSD License
Copyright (c) 2007-2016 The scikit-learn developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of the Scikit-learn Developers nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
'''
def _smacof_with_anchors_single(config, similarities, metric=True, n_components=2, init=None,
max_iter=300, verbose=0, eps=1e-3, random_state=None):
"""
Computes multidimensional scaling using SMACOF algorithm
Parameters
----------
config : Config object
configuration object for anchor-tag deployment parameters
similarities: symmetric ndarray, shape [n * n]
similarities between the points
metric: boolean, optional, default: True
compute metric or nonmetric SMACOF algorithm
n_components: int, optional, default: 2
number of dimensions in which to immerse the similarities
overwritten if initial array is provided.
init: {None or ndarray}, optional
if None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array
max_iter: int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run
verbose: int, optional, default: 0
level of verbosity
eps: float, optional, default: 1e-3
relative tolerance w.r.t. stress at which to declare convergence
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Returns
-------
X: ndarray (n_samples, n_components), float
coordinates of the n_samples points in a n_components-space
stress_: float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points)
n_iter : int
Number of iterations run
last_positions: ndarray [X1,...,Xn]
An array of computed Xs.
"""
NO_OF_TAGS, NO_OF_ANCHORS = config.no_of_tags, config.no_of_anchors
similarities = check_symmetric(similarities, raise_exception=True)
n_samples = similarities.shape[0]
random_state = check_random_state(random_state)
sim_flat = ((1 - np.tri(n_samples)) * similarities).ravel()
sim_flat_w = sim_flat[sim_flat != 0]
if init is None:
# Randomly choose initial configuration
X = random_state.rand(n_samples * n_components)
X = X.reshape((n_samples, n_components))
# uncomment the following if weight matrix W is not hollow
#X[:-2] = Xa
else:
# overrides the parameter p
n_components = init.shape[1]
if n_samples != init.shape[0]:
raise ValueError("init matrix should be of shape (%d, %d)" %
(n_samples, n_components))
X = init
old_stress = None
ir = IsotonicRegression()
# setup weight matrix
weights = np.ones((n_samples, n_samples))
if getattr(config, 'missingdata', None):
weights[-NO_OF_TAGS:, -NO_OF_TAGS:] = 0
diag = np.arange(n_samples)
weights[diag, diag] = 0
last_n_configs = []
Xa = config.anchors
for it in range(max_iter):
# Compute distance and monotonic regression
dis = euclidean_distances(X)
if metric:
disparities = similarities
else:
dis_flat = dis.ravel()
# similarities with 0 are considered as missing values
dis_flat_w = dis_flat[sim_flat != 0]
# Compute the disparities using a monotonic regression
disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)
disparities = dis_flat.copy()
disparities[sim_flat != 0] = disparities_flat
disparities = disparities.reshape((n_samples, n_samples))
disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) /
(disparities ** 2).sum())
# Compute stress
stress = (weights.ravel()*(dis.ravel() - disparities.ravel()) ** 2).sum() / 2
#stress = ((dis[:-NO_OF_TAGS, -NO_OF_TAGS:].ravel() - disparities[:-NO_OF_TAGS, -NO_OF_TAGS:].ravel()) ** 2).sum()
# Update X using the Guttman transform
dis[dis == 0] = 1e5
ratio = weights*disparities / dis
B = - ratio
B[diag, diag] = 0
B[diag, diag] = -B.sum(axis=1)
# Apply update to only tag configuration since anchor config is already known
V = - weights
V[diag, diag] += weights.sum(axis=1)
# V_inv = np.linalg.pinv(V)
V12 = V[-NO_OF_TAGS:, :-NO_OF_TAGS]
B11 = B[-NO_OF_TAGS:, -NO_OF_TAGS:]
Zu = X[-NO_OF_TAGS:]
B12 = B[-NO_OF_TAGS:, :-NO_OF_TAGS]
V11_inv = np.linalg.inv(V[-NO_OF_TAGS:, -NO_OF_TAGS:])
Xu = V11_inv.dot(B11.dot(Zu) + (B12 - V12).dot(Xa))
# merge known anchors config with new tags config
X = np.concatenate((Xa, Xu))
last_n_configs.append(X)
#X = (1/n_samples)*B.dot(X)
#dis = np.sqrt((X ** 2).sum(axis=1)).sum()
dis = (weights*dis**2).sum() / 2
if verbose >= 2:
print('it: %d, stress %s' % (it, stress))
if old_stress is not None:
if(old_stress - stress / dis) < eps:
if verbose:
print('breaking at iteration %d with stress %s' % (it,
stress))
break
old_stress = stress / dis
return X, stress, it + 1, np.array(last_n_configs)
def _smacof_single(config, similarities, metric=True, n_components=2, init=None,
max_iter=300, verbose=0, eps=1e-3, random_state=None, estimated_dist_weights=0):
"""
Computes multidimensional scaling using SMACOF algorithm
Parameters
----------
config : Config object
configuration object for anchor-tag deployment parameters
similarities: symmetric ndarray, shape [n * n]
similarities between the points
metric: boolean, optional, default: True
compute metric or nonmetric SMACOF algorithm
n_components: int, optional, default: 2
number of dimensions in which to immerse the similarities
overwritten if initial array is provided.
init: {None or ndarray}, optional
if None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array
max_iter: int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run
verbose: int, optional, default: 0
level of verbosity
eps: float, optional, default: 1e-3
relative tolerance w.r.t. stress at which to declare convergence
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Returns
-------
X: ndarray (n_samples, n_components), float
coordinates of the n_samples points in a n_components-space
stress_: float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points)
n_iter : int
Number of iterations run
last_positions: ndarray [X1,...,Xn]
An array of computed Xs.
"""
NO_OF_TAGS, NO_OF_ANCHORS = config.no_of_tags, config.no_of_anchors
similarities = check_symmetric(similarities, raise_exception=True)
n_samples = similarities.shape[0]
random_state = check_random_state(random_state)
sim_flat = ((1 - np.tri(n_samples)) * similarities).ravel()
sim_flat_w = sim_flat[sim_flat != 0]
if init is None:
# Randomly choose initial configuration
X = random_state.rand(n_samples * n_components)
X = X.reshape((n_samples, n_components))
else:
# overrides the parameter p
n_components = init.shape[1]
if n_samples != init.shape[0]:
raise ValueError("init matrix should be of shape (%d, %d)" %
(n_samples, n_components))
X = init
old_stress = None
ir = IsotonicRegression()
# setup weight matrix
weights = np.ones((n_samples, n_samples))
weights[-NO_OF_TAGS:, -NO_OF_TAGS:] = estimated_dist_weights
diag = np.arange(n_samples)
weights[diag, diag] = 0
last_n_configs = []
for it in range(max_iter):
# Compute distance and monotonic regression
dis = euclidean_distances(X)
if metric:
disparities = similarities
else:
dis_flat = dis.ravel()
# similarities with 0 are considered as missing values
dis_flat_w = dis_flat[sim_flat != 0]
# Compute the disparities using a monotonic regression
disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)
disparities = dis_flat.copy()
disparities[sim_flat != 0] = disparities_flat
disparities = disparities.reshape((n_samples, n_samples))
disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) /
(disparities ** 2).sum())
# Compute stress
stress = (weights.ravel()*(dis.ravel() - disparities.ravel()) ** 2).sum() / 2
#print(((dis[-2:, -2:].ravel() - disparities[-2:, -2:].ravel()) ** 2).sum())
# Update X using the Guttman transform
dis[dis == 0] = 1e5
ratio = disparities / dis
B = - ratio
B[diag, diag] += ratio.sum(axis=1)
# Apply update to only tag configuration since anchor config is already known
V = - weights
V[diag, diag] += weights.sum(axis=1)
V_inv = np.linalg.pinv(V)
X = V_inv.dot(np.dot(B, X))
last_n_configs.append(X)
stress = (weights.ravel()*(dis.ravel() - disparities.ravel()) ** 2).sum() / 2
#dis = np.sqrt((X ** 2).sum(axis=1)).sum()
dis[np.arange(n_samples), np.arange(n_samples)] = 0
dis = (weights*dis**2).sum() / 2
#dis = np.sqrt((X ** 2).sum(axis=1)).sum()
if verbose >= 2:
print('it: %d, stress %s' % (it, stress))
if old_stress is not None:
if(old_stress - stress / dis) < eps:
if verbose:
print('breaking at iteration %d with stress %s' % (it,
stress))
break
old_stress = stress / dis
return X, stress, it + 1, np.array(last_n_configs)
def _smacof_with_distance_recovery_single(config, similarities, *args, **kwargs):
recover_tag_distances(config, similarities)
return _smacof_single(config, similarities, estimated_dist_weights=0.7, *args, **kwargs)
def _classical_mds_with_distance_recovery_single(config, prox_arr, *args, **kwargs):
    # the original body referenced an undefined ``mds_RFID`` object and an
    # undefined constant ``M``; reuse the module-level helper below and take
    # the embedding dimension from the keyword arguments instead
    recover_tag_distances(config, prox_arr)
    n_components = kwargs.get('n_components', 2)
    # Apply double centering
    sz = prox_arr.shape[0]
    cent_arr = np.eye(sz) - np.ones(sz)/sz
    B = -cent_arr.dot(prox_arr**2).dot(cent_arr)/2
    # Determine the n_components largest eigenvalues and corresponding eigenvectors
    eig_vals, eig_vecs = np.linalg.eig(B)
    eig_vals_vecs = zip(*sorted(zip(eig_vals, eig_vecs.T), key=operator.itemgetter(0), reverse=True)[:n_components])
    eig_vals, eig_vecs = map(np.array, eig_vals_vecs)
    # configuration X of n points/coordinates that optimise the cost function
    coords = eig_vecs.T.dot((np.eye(n_components)*eig_vals)**0.5)
    return coords, 0, 0, np.array([])
def recover_tag_distances(config, prox_arr):
NO_OF_TAGS, NO_OF_ANCHORS = config.no_of_tags, config.no_of_anchors
for j in range(NO_OF_ANCHORS, NO_OF_TAGS+NO_OF_ANCHORS):
for i in range(j, NO_OF_TAGS+NO_OF_ANCHORS):
if i == j:
continue
prox_arr[i, j] = prox_arr[j, i] = np.mean(np.absolute([prox_arr[i,a]-prox_arr[j,a] for a in range(NO_OF_ANCHORS)]))
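# Illustrative sketch (not part of this module): with two anchors and two tags
# the missing tag-tag entry becomes mean(|d(t1,a) - d(t2,a)|) over the anchors;
# the Config-like object below is a made-up stand-in exposing only the two
# attributes this helper reads.
def _recover_tag_distances_demo():
    import numpy as np
    class _FakeConfig(object):
        no_of_anchors = 2
        no_of_tags = 2
    # node order: anchor0, anchor1, tag0, tag1; the tag-tag block is unknown
    prox = np.array([[0.0, 4.0, 3.0, 5.0],
                     [4.0, 0.0, 2.0, 1.0],
                     [3.0, 2.0, 0.0, 0.0],
                     [5.0, 1.0, 0.0, 0.0]])
    recover_tag_distances(_FakeConfig(), prox)
    assert prox[2, 3] == prox[3, 2] == 1.5   # mean(|3-5|, |2-1|)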
VARIANTS = {'_smacof_with_anchors_single': _smacof_with_anchors_single,
'_smacof_single': _smacof_single,
'_smacof_with_distance_recovery_single': _smacof_with_distance_recovery_single}
def smacof(config, variant, similarities, metric=True, n_components=2, init=None, n_init=8,
n_jobs=1, max_iter=300, verbose=0, eps=1e-3, random_state=None,
return_n_iter=False):
"""
Computes multidimensional scaling using SMACOF (Scaling by Majorizing a
Complicated Function) algorithm
The SMACOF algorithm is a multidimensional scaling algorithm: it minimizes
an objective function, the *stress*, using a majorization technique. The
Stress Majorization, also known as the Guttman Transform, guarantees a
monotone convergence of Stress, and is more powerful than traditional
techniques such as gradient descent.
The SMACOF algorithm for metric MDS can be summarized by the following steps:
1. Set an initial start configuration, randomly or not.
2. Compute the stress
3. Compute the Guttman Transform
4. Iterate 2 and 3 until convergence.
The nonmetric algorithm adds a monotonic regression step before computing
the stress.
Parameters
----------
config : Config object
configuration object for anchor-tag deployment parameters
variant : str
variant of MDS algorithm to be used for computing configuration
similarities : symmetric ndarray, shape (n_samples, n_samples)
similarities between the points
metric : boolean, optional, default: True
compute metric or nonmetric SMACOF algorithm
n_components : int, optional, default: 2
number of dimensions in which to immerse the similarities
overridden if initial array is provided.
init : {None or ndarray of shape (n_samples, n_components)}, optional
if None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array
n_init : int, optional, default: 8
Number of time the smacof algorithm will be run with different
initialisation. The final results will be the best output of the
n_init consecutive runs in terms of stress.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
max_iter : int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run
verbose : int, optional, default: 0
level of verbosity
eps : float, optional, default: 1e-3
relative tolerance w.r.t. stress at which to declare convergence
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
X : ndarray (n_samples,n_components)
Coordinates of the n_samples points in a n_components-space
stress : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points)
n_iter : int
The number of iterations corresponding to the best stress.
Returned only if `return_n_iter` is set to True
last_positions: ndarray [X1,...,Xn]
An array of computed Xs from the selected mds/smacof variant,
used for displaying trails showing convergence in animation.
Notes
-----
"Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
Groenen P. Springer Series in Statistics (1997)
"Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
Psychometrika, 29 (1964)
"Multidimensional scaling by optimizing goodness of fit to a nonmetric
hypothesis" Kruskal, J. Psychometrika, 29, (1964)
"""
similarities = check_array(similarities)
random_state = check_random_state(random_state)
if hasattr(init, '__array__'):
init = np.asarray(init).copy()
if not n_init == 1:
warnings.warn(
'Explicit initial positions passed: '
'performing only one init of the MDS instead of %d'
% n_init)
n_init = 1
best_pos, best_stress = None, None
smacof_variant = VARIANTS[variant]
if n_jobs == 1:
for it in range(n_init):
pos, stress, n_iter_, last_n_pos = smacof_variant(
config, similarities, metric=metric,
n_components=n_components, init=init,
max_iter=max_iter, verbose=verbose,
eps=eps, random_state=random_state)
if best_stress is None or stress < best_stress:
best_stress = stress
best_pos = pos.copy()
best_iter = n_iter_
best_last_n_pos = last_n_pos
else:
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))(
delayed(smacof_variant)(
config, similarities, metric=metric, n_components=n_components,
init=init, max_iter=max_iter, verbose=verbose, eps=eps,
random_state=seed)
for seed in seeds)
positions, stress, n_iters, last_n_pos = zip(*results)
best = np.argmin(stress)
best_stress = stress[best]
best_pos = positions[best]
best_iter = n_iters[best]
best_last_n_pos = last_n_pos[best]
if return_n_iter:
return best_pos, best_stress, best_iter, best_last_n_pos
else:
return best_pos, best_stress, best_last_n_pos
class MDS(BaseEstimator):
"""Multidimensional scaling
Read more in the :ref:`User Guide <multidimensional_scaling>`.
Parameters
----------
config : Config object
configuration object for anchor-tag deployment parameters
algorithm : str
MDS algorithm to be used for computing configuration
metric : boolean, optional, default: True
compute metric or nonmetric SMACOF (Scaling by Majorizing a
Complicated Function) algorithm
n_components : int, optional, default: 2
number of dimensions in which to immerse the similarities
overridden if initial array is provided.
n_init : int, optional, default: 4
Number of time the smacof algorithm will be run with different
initialisation. The final results will be the best output of the
n_init consecutive runs in terms of stress.
max_iter : int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run
verbose : int, optional, default: 0
level of verbosity
eps : float, optional, default: 1e-3
relative tolerance w.r.t. stress at which to declare convergence
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
dissimilarity : string
Which dissimilarity measure to use.
Supported are 'euclidean' and 'precomputed'.
Attributes
----------
embedding_ : array-like, shape [n_components, n_samples]
Stores the position of the dataset in the embedding space
stress_ : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points)
References
----------
"Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
Groenen P. Springer Series in Statistics (1997)
"Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
Psychometrika, 29 (1964)
"Multidimensional scaling by optimizing goodness of fit to a nonmetric
hypothesis" Kruskal, J. Psychometrika, 29, (1964)
"""
def __init__(self, config, algorithm, n_components=2, metric=True, n_init=4,
max_iter=300, verbose=0, eps=1e-3, n_jobs=1,
random_state=None, dissimilarity="euclidean"):
self.n_components = n_components
self.dissimilarity = dissimilarity
self.metric = metric
self.n_init = n_init
self.max_iter = max_iter
self.eps = eps
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.variant = algorithm
self.config = config
@property
def _pairwise(self):
return self.dissimilarity == "precomputed"
def fit(self, X, y=None, init=None):
"""
Computes the position of the points in the embedding space
Parameters
----------
X : array, shape=[n_samples, n_features], or [n_samples, n_samples] \
if dissimilarity='precomputed'
Input data.
init : {None or ndarray, shape (n_samples,)}, optional
If None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array.
"""
self.fit_transform(X, init=init)
return self
def fit_transform(self, X, y=None, init=None):
"""
Fit the data from X, and returns the embedded coordinates
Parameters
----------
X : array, shape=[n_samples, n_features], or [n_samples, n_samples] \
if dissimilarity='precomputed'
Input data.
init : {None or ndarray, shape (n_samples,)}, optional
If None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array.
"""
X = check_array(X)
if X.shape[0] == X.shape[1] and self.dissimilarity != "precomputed":
warnings.warn("The MDS API has changed. ``fit`` now constructs an"
" dissimilarity matrix from data. To use a custom "
"dissimilarity matrix, set "
"``dissimilarity='precomputed'``.")
if self.dissimilarity == "precomputed":
self.dissimilarity_matrix_ = X
elif self.dissimilarity == "euclidean":
self.dissimilarity_matrix_ = euclidean_distances(X)
else:
raise ValueError("Proximity must be 'precomputed' or 'euclidean'."
" Got %s instead" % str(self.dissimilarity))
self.embedding_, self.stress_, self.n_iter_, self.last_n_embeddings = smacof(self.config, self.variant,
self.dissimilarity_matrix_, metric=self.metric,
n_components=self.n_components, init=init, n_init=self.n_init,
n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose,
eps=self.eps, random_state=self.random_state,
return_n_iter=True)
return self.embedding_, self.last_n_embeddings | mit |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/scipy/ndimage/filters.py | 24 | 42327 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import math
import numpy
from . import _ni_support
from . import _nd_image
from scipy.misc import doccer
from scipy._lib._version import NumpyVersion
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
'prewitt', 'sobel', 'generic_laplace', 'laplace',
'gaussian_laplace', 'generic_gradient_magnitude',
'gaussian_gradient_magnitude', 'correlate', 'convolve',
'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
'maximum_filter1d', 'minimum_filter', 'maximum_filter',
'rank_filter', 'median_filter', 'percentile_filter',
'generic_filter1d', 'generic_filter']
_input_doc = \
"""input : array_like
Input array to filter."""
_axis_doc = \
"""axis : int, optional
The axis of `input` along which to calculate. Default is -1."""
_output_doc = \
"""output : array, optional
The `output` parameter passes an array in which to store the
filter output."""
_size_foot_doc = \
"""size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2).
"""
_mode_doc = \
"""mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'"""
_cval_doc = \
"""cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0"""
_origin_doc = \
"""origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0.0."""
_extra_arguments_doc = \
"""extra_arguments : sequence, optional
Sequence of extra positional arguments to pass to passed function"""
_extra_keywords_doc = \
"""extra_keywords : dict, optional
dict of extra keyword arguments to pass to passed function"""
docdict = {
'input': _input_doc,
'axis': _axis_doc,
'output': _output_doc,
'size_foot': _size_foot_doc,
'mode': _mode_doc,
'cval': _cval_doc,
'origin': _origin_doc,
'extra_arguments': _extra_arguments_doc,
'extra_keywords': _extra_keywords_doc,
}
docfiller = doccer.filldoc(docdict)
@docfiller
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional correlation along the given axis.
The lines of the array along the given axis are correlated with the
given weights.
Parameters
----------
%(input)s
weights : array
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
weights = numpy.asarray(weights, dtype=numpy.float64)
if weights.ndim != 1 or weights.shape[0] < 1:
raise RuntimeError('no filter weights given')
if not weights.flags.contiguous:
weights = weights.copy()
axis = _ni_support._check_axis(axis, input.ndim)
if (len(weights) // 2 + origin < 0) or (len(weights) // 2 +
origin > len(weights)):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate1d(input, weights, axis, output, mode, cval,
origin)
return return_value
@docfiller
def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a one-dimensional convolution along the given axis.
The lines of the array along the given axis are convolved with the
given weights.
Parameters
----------
%(input)s
weights : ndarray
One-dimensional sequence of numbers.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
convolve1d : ndarray
Convolved array with same shape as input
"""
weights = weights[::-1]
origin = -origin
if not len(weights) & 1:
origin -= 1
return correlate1d(input, weights, axis, output, mode, cval, origin)
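# Illustrative sketch (not part of scipy): for an odd-length kernel the origin
# adjustment above is a no-op, so convolution is exactly correlation with the
# reversed kernel.
def _convolve1d_demo():
    import numpy
    x = numpy.array([0.0, 1.0, 2.0, 5.0, 3.0])
    w = numpy.array([1.0, 2.0, 3.0])
    assert numpy.allclose(convolve1d(x, w), correlate1d(x, w[::-1]))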
@docfiller
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""One-dimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar
standard deviation for Gaussian kernel
%(axis)s
order : {0, 1, 2, 3}, optional
An order of 0 corresponds to convolution with a Gaussian
kernel. An order of 1, 2, or 3 corresponds to convolution with
the first, second or third derivatives of a Gaussian. Higher
order derivatives are not implemented
%(output)s
%(mode)s
%(cval)s
truncate : float, optional
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter1d : ndarray
"""
if order not in range(4):
raise ValueError('Order outside 0..3 not implemented')
sd = float(sigma)
# make the radius of the filter equal to truncate standard deviations
lw = int(truncate * sd + 0.5)
weights = [0.0] * (2 * lw + 1)
weights[lw] = 1.0
sum = 1.0
sd = sd * sd
# calculate the kernel:
for ii in range(1, lw + 1):
tmp = math.exp(-0.5 * float(ii * ii) / sd)
weights[lw + ii] = tmp
weights[lw - ii] = tmp
sum += 2.0 * tmp
for ii in range(2 * lw + 1):
weights[ii] /= sum
# implement first, second and third order derivatives:
if order == 1: # first derivative
weights[lw] = 0.0
for ii in range(1, lw + 1):
x = float(ii)
tmp = -x / sd * weights[lw + ii]
weights[lw + ii] = -tmp
weights[lw - ii] = tmp
elif order == 2: # second derivative
weights[lw] *= -1.0 / sd
for ii in range(1, lw + 1):
x = float(ii)
tmp = (x * x / sd - 1.0) * weights[lw + ii] / sd
weights[lw + ii] = tmp
weights[lw - ii] = tmp
elif order == 3: # third derivative
weights[lw] = 0.0
sd2 = sd * sd
for ii in range(1, lw + 1):
x = float(ii)
tmp = (3.0 - x * x / sd) * x * weights[lw + ii] / sd2
weights[lw + ii] = -tmp
weights[lw - ii] = tmp
return correlate1d(input, weights, axis, output, mode, cval, 0)
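# Illustrative sketch (not part of scipy): the kernel built above is normalized
# to sum to one, so a zeroth-order filter leaves a constant signal unchanged
# (up to floating point).
def _gaussian_filter1d_demo():
    import numpy
    out = gaussian_filter1d(numpy.ones(7), sigma=2.0)
    assert numpy.allclose(out, 1.0)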
@docfiller
def gaussian_filter(input, sigma, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""Multidimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
Standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
order : {0, 1, 2, 3} or sequence from same set, optional
The order of the filter along each axis is given as a sequence
of integers, or as a single number. An order of 0 corresponds
to convolution with a Gaussian kernel. An order of 1, 2, or 3
corresponds to convolution with the first, second or third
derivatives of a Gaussian. Higher order derivatives are not
implemented
%(output)s
%(mode)s
%(cval)s
truncate : float
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
The multidimensional filter is implemented as a sequence of
one-dimensional convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
Examples
--------
>>> from scipy.ndimage import gaussian_filter
>>> a = np.arange(50, step=2).reshape((5,5))
>>> a
array([[ 0, 2, 4, 6, 8],
[10, 12, 14, 16, 18],
[20, 22, 24, 26, 28],
[30, 32, 34, 36, 38],
[40, 42, 44, 46, 48]])
>>> gaussian_filter(a, sigma=1)
array([[ 4, 6, 8, 9, 11],
[10, 12, 14, 15, 17],
[20, 22, 24, 25, 27],
[29, 31, 33, 34, 36],
[35, 37, 39, 40, 42]])
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
orders = _ni_support._normalize_sequence(order, input.ndim)
if not set(orders).issubset(set(range(4))):
raise ValueError('Order outside 0..3 not implemented')
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sigmas[ii], orders[ii])
for ii in range(len(axes)) if sigmas[ii] > 1e-15]
if len(axes) > 0:
for axis, sigma, order in axes:
gaussian_filter1d(input, sigma, axis, order, output,
mode, cval, truncate)
input = output
else:
output[...] = input[...]
return return_value
@docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Prewitt filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> ascent = misc.ascent()
>>> result = ndimage.prewitt(ascent)
>>> plt.gray() # show the filtered result in grayscale
>>> plt.imshow(result)
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 1, 1], ii, output, mode, cval, 0,)
return return_value
@docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Sobel filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> ascent = misc.ascent()
>>> result = ndimage.sobel(ascent)
>>> plt.gray() # show the filtered result in grayscale
>>> plt.imshow(result)
"""
input = numpy.asarray(input)
axis = _ni_support._check_axis(axis, input.ndim)
output, return_value = _ni_support._get_output(output, input)
correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 2, 1], ii, output, mode, cval, 0)
return return_value
@docfiller
def generic_laplace(input, derivative2, output=None, mode="reflect",
cval=0.0,
extra_arguments=(),
extra_keywords=None):
"""N-dimensional Laplace filter using a provided second derivative function
Parameters
----------
%(input)s
derivative2 : callable
Callable with the following signature::
derivative2(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
%(output)s
%(mode)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
derivative2(input, axes[0], output, mode, cval,
*extra_arguments, **extra_keywords)
for ii in range(1, len(axes)):
tmp = derivative2(input, axes[ii], output.dtype, mode, cval,
*extra_arguments, **extra_keywords)
output += tmp
else:
output[...] = input[...]
return return_value
@docfiller
def laplace(input, output=None, mode="reflect", cval=0.0):
"""N-dimensional Laplace filter based on approximate second derivatives.
Parameters
----------
%(input)s
%(output)s
%(mode)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> ascent = misc.ascent()
>>> result = ndimage.laplace(ascent)
>>> plt.gray() # show the filtered result in grayscale
>>> plt.imshow(result)
"""
def derivative2(input, axis, output, mode, cval):
return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
return generic_laplace(input, derivative2, output, mode, cval)
@docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
cval=0.0, **kwargs):
"""Multidimensional Laplace filter using gaussian second derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
it is equal for all axes.
%(output)s
%(mode)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> ascent = misc.ascent()
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> result = ndimage.gaussian_laplace(ascent, sigma=1)
>>> ax1.imshow(result)
>>> result = ndimage.gaussian_laplace(ascent, sigma=3)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
def derivative2(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 2
return gaussian_filter(input, sigma, order, output, mode, cval,
**kwargs)
return generic_laplace(input, derivative2, output, mode, cval,
extra_arguments=(sigma,),
extra_keywords=kwargs)
@docfiller
def generic_gradient_magnitude(input, derivative, output=None,
mode="reflect", cval=0.0,
extra_arguments=(), extra_keywords=None):
"""Gradient magnitude using a provided gradient function.
Parameters
----------
%(input)s
derivative : callable
Callable with the following signature::
derivative(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
`derivative` can assume that `input` and `output` are ndarrays.
Note that the output from `derivative` is modified inplace;
be careful to copy important inputs before returning them.
%(output)s
%(mode)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
derivative(input, axes[0], output, mode, cval,
*extra_arguments, **extra_keywords)
numpy.multiply(output, output, output)
for ii in range(1, len(axes)):
tmp = derivative(input, axes[ii], output.dtype, mode, cval,
*extra_arguments, **extra_keywords)
numpy.multiply(tmp, tmp, tmp)
output += tmp
# This allows the sqrt to work with a different default casting
numpy.sqrt(output, output, casting='unsafe')
else:
output[...] = input[...]
return return_value
@docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
mode="reflect", cval=0.0, **kwargs):
"""Multidimensional gradient magnitude using Gaussian derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
it is equal for all axes.
%(output)s
%(mode)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
"""
input = numpy.asarray(input)
def derivative(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 1
return gaussian_filter(input, sigma, order, output, mode,
cval, **kwargs)
return generic_gradient_magnitude(input, derivative, output, mode,
cval, extra_arguments=(sigma,),
extra_keywords=kwargs)
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
convolution):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
weights = numpy.asarray(weights, dtype=numpy.float64)
wshape = [ii for ii in weights.shape if ii > 0]
if len(wshape) != input.ndim:
raise RuntimeError('filter weights array has incorrect shape.')
if convolution:
weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
for ii in range(len(origins)):
origins[ii] = -origins[ii]
if not weights.shape[ii] & 1:
origins[ii] -= 1
for origin, lenw in zip(origins, wshape):
if (lenw // 2 + origin < 0) or (lenw // 2 + origin > lenw):
raise ValueError('invalid origin')
if not weights.flags.contiguous:
weights = weights.copy()
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate(input, weights, output, mode, cval, origins)
return return_value
@docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multi-dimensional correlation.
The array is correlated with the given kernel.
Parameters
----------
input : array-like
input array to filter
weights : ndarray
array of weights, same number of dimensions as input
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
origin : scalar, optional
The ``origin`` parameter controls the placement of the filter.
Default 0
See Also
--------
convolve : Convolve an image with a kernel.
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, False)
@docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multidimensional convolution.
The array is convolved with the given kernel.
Parameters
----------
input : array_like
Input array to filter.
weights : array_like
Array of weights, same number of dimensions as input
output : ndarray, optional
The `output` parameter passes an array in which to store the
filter output.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
the `mode` parameter determines how the array borders are
handled. For 'constant' mode, values beyond borders are set to be
`cval`. Default is 'reflect'.
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
origin : array_like, optional
The `origin` parameter controls the placement of the filter,
relative to the centre of the current element of the input.
Default of 0 is equivalent to ``(0,)*input.ndim``.
Returns
-------
result : ndarray
The result of convolution of `input` with `weights`.
See Also
--------
correlate : Correlate an image with a kernel.
Notes
-----
Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
W is the `weights` kernel,
j is the n-D spatial index over :math:`W`,
I is the `input` and k is the coordinate of the center of
W, specified by `origin` in the input parameters.
Examples
--------
Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
because in this case borders (i.e. where the `weights` kernel, centered
on any one value, extends beyond an edge of `input`) are treated as zeros.
>>> a = np.array([[1, 2, 0, 0],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
>>> from scipy import ndimage
>>> ndimage.convolve(a, k, mode='constant', cval=0.0)
array([[11, 10, 7, 4],
[10, 3, 11, 11],
[15, 12, 14, 7],
[12, 3, 7, 0]])
Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
with 1.0's (and then extracting only the original region of the result).
>>> ndimage.convolve(a, k, mode='constant', cval=1.0)
array([[13, 11, 8, 7],
[11, 3, 11, 14],
[16, 12, 14, 10],
[15, 6, 10, 5]])
With ``mode='reflect'`` (the default), outer values are reflected at the
edge of `input` to fill in missing values.
>>> b = np.array([[2, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
>>> ndimage.convolve(b, k, mode='reflect')
array([[5, 0, 0],
[3, 0, 0],
[1, 0, 0]])
This includes diagonally at the corners.
>>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
>>> ndimage.convolve(b, k)
array([[4, 2, 0],
[3, 2, 0],
[1, 1, 0]])
With ``mode='nearest'``, the single nearest value to an edge in
`input` is repeated as many times as needed to match the overlapping
`weights`.
>>> c = np.array([[2, 0, 1],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0]])
>>> ndimage.convolve(c, k, mode='nearest')
array([[7, 0, 3],
[5, 0, 2],
[3, 0, 1]])
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, True)
@docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional uniform filter along the given axis.
The lines of the array along the given axis are filtered with a
uniform filter of given size.
Parameters
----------
%(input)s
size : int
length of uniform filter
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
origin)
return return_value
@docfiller
def uniform_filter(input, size=3, output=None, mode="reflect",
cval=0.0, origin=0):
"""Multi-dimensional uniform filter.
Parameters
----------
%(input)s
size : int or sequence of ints, optional
The sizes of the uniform filter are given for each axis as a
sequence, or as a single number, in which case the size is
equal for all axes.
%(output)s
%(mode)s
%(cval)s
%(origin)s
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional uniform filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
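    Examples
    --------
    A small sketch of the separable behaviour described above, assuming the
    default ``mode='reflect'`` extension:
    >>> import numpy as np
    >>> from scipy import ndimage
    >>> a = np.array([[1., 1., 1.],
    ...               [4., 4., 4.],
    ...               [7., 7., 7.]])
    >>> ndimage.uniform_filter(a, size=3)
    array([[2., 2., 2.],
           [4., 4., 4.],
           [6., 6., 6.]])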
"""
input = numpy.asarray(input)
output, return_value = _ni_support._get_output(output, input)
sizes = _ni_support._normalize_sequence(size, input.ndim)
origins = _ni_support._normalize_sequence(origin, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if len(axes) > 0:
for axis, size, origin in axes:
uniform_filter1d(input, int(size), axis, output, mode,
cval, origin)
input = output
else:
output[...] = input[...]
return return_value
@docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional minimum filter along the given axis.
The lines of the array along the given axis are filtered with a
minimum filter of given size.
Parameters
----------
%(input)s
size : int
length along which to calculate 1D minimum
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Notes
-----
This function implements the MINLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
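    Examples
    --------
    An illustrative example, assuming the default ``mode='reflect'``
    extension:
    >>> from scipy import ndimage
    >>> ndimage.minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
    array([2, 0, 0, 0, 1, 1, 0, 0])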
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 1)
return return_value
@docfiller
def maximum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a one-dimensional maximum filter along the given axis.
The lines of the array along the given axis are filtered with a
maximum filter of given size.
Parameters
----------
%(input)s
size : int
Length along which to calculate the 1-D maximum.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
maximum1d : ndarray, None
Maximum-filtered array with same shape as input.
None if `output` is not None
Notes
-----
This function implements the MAXLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
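    Examples
    --------
    An illustrative example, assuming the default ``mode='reflect'``
    extension:
    >>> from scipy import ndimage
    >>> ndimage.maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
    array([8, 8, 8, 4, 9, 9, 9, 9])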
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = _ni_support._check_axis(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output, return_value = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 0)
return return_value
def _min_or_max_filter(input, size, footprint, structure, output, mode,
cval, origin, minimum):
if structure is None:
if footprint is None:
if size is None:
raise RuntimeError("no footprint provided")
separable = True
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
if numpy.alltrue(numpy.ravel(footprint), axis=0):
size = footprint.shape
footprint = None
separable = True
else:
separable = False
else:
structure = numpy.asarray(structure, dtype=numpy.float64)
separable = False
if footprint is None:
footprint = numpy.ones(structure.shape, bool)
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
origins = _ni_support._normalize_sequence(origin, input.ndim)
if separable:
sizes = _ni_support._normalize_sequence(size, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if minimum:
filter_ = minimum_filter1d
else:
filter_ = maximum_filter1d
if len(axes) > 0:
for axis, size, origin in axes:
filter_(input, int(size), axis, output, mode, cval, origin)
input = output
else:
output[...] = input[...]
else:
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
if structure is not None:
if len(structure.shape) != input.ndim:
raise RuntimeError('structure array has incorrect shape')
if not structure.flags.contiguous:
structure = structure.copy()
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter(input, footprint, structure, output,
mode, cval, origins, minimum)
return return_value
@docfiller
def minimum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional minimum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
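    Examples
    --------
    For a one-dimensional input this reduces to `minimum_filter1d`; the
    values below assume the default ``mode='reflect'`` extension:
    >>> from scipy import ndimage
    >>> ndimage.minimum_filter([2, 8, 0, 4, 1, 9, 9, 0], size=3)
    array([2, 0, 0, 0, 1, 1, 0, 0])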
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 1)
@docfiller
def maximum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional maximum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
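    Examples
    --------
    A small illustrative example, assuming the default ``mode='reflect'``
    extension:
    >>> import numpy as np
    >>> from scipy import ndimage
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> ndimage.maximum_filter(a, size=3)
    array([[5, 5, 4, 4],
           [5, 5, 7, 7],
           [9, 9, 7, 7],
           [9, 9, 7, 7]])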
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 0)
@docfiller
def _rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0, operation='rank'):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
filter_size = numpy.where(footprint, 1, 0).sum()
if operation == 'median':
rank = filter_size // 2
elif operation == 'percentile':
percentile = rank
if percentile < 0.0:
percentile += 100.0
if percentile < 0 or percentile > 100:
raise RuntimeError('invalid percentile')
if percentile == 100.0:
rank = filter_size - 1
else:
rank = int(float(filter_size) * percentile / 100.0)
if rank < 0:
rank += filter_size
if rank < 0 or rank >= filter_size:
raise RuntimeError('rank not within filter footprint size')
if rank == 0:
return minimum_filter(input, None, footprint, output, mode, cval,
origins)
elif rank == filter_size - 1:
return maximum_filter(input, None, footprint, output, mode, cval,
origins)
else:
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.rank_filter(input, rank, footprint, output, mode, cval,
origins)
return return_value
@docfiller
def rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional rank filter.
Parameters
----------
%(input)s
rank : int
        The rank parameter may be less than zero, i.e., rank = -1
        indicates the largest element.
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
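    Examples
    --------
    With a window of three elements, ``rank=1`` selects the middle (median)
    value; the values below assume the default ``mode='reflect'`` extension:
    >>> from scipy import ndimage
    >>> ndimage.rank_filter([2, 8, 0, 4, 1, 9, 9, 0], rank=1, size=3)
    array([2, 2, 4, 1, 4, 9, 9, 0])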
"""
return _rank_filter(input, rank, size, footprint, output, mode, cval,
origin, 'rank')
@docfiller
def median_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""
Calculates a multidimensional median filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
Returns
-------
median_filter : ndarray
Return of same shape as `input`.
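    Examples
    --------
    A small illustrative example, assuming the default ``mode='reflect'``
    extension:
    >>> from scipy import ndimage
    >>> ndimage.median_filter([2, 8, 0, 4, 1, 9, 9, 0], size=3)
    array([2, 2, 4, 1, 4, 9, 9, 0])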
"""
return _rank_filter(input, 0, size, footprint, output, mode, cval,
origin, 'median')
@docfiller
def percentile_filter(input, percentile, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""Calculates a multi-dimensional percentile filter.
Parameters
----------
%(input)s
percentile : scalar
        The percentile parameter may be less than zero, i.e.,
        percentile = -20 equals percentile = 80.
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
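    Examples
    --------
    With ``percentile=50`` this is equivalent to a median filter; the values
    below assume the default ``mode='reflect'`` extension:
    >>> from scipy import ndimage
    >>> ndimage.percentile_filter([2, 8, 0, 4, 1, 9, 9, 0], percentile=50,
    ...                           size=3)
    array([2, 2, 4, 1, 4, 9, 9, 0])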
"""
return _rank_filter(input, percentile, size, footprint, output, mode,
cval, origin, 'percentile')
@docfiller
def generic_filter1d(input, function, filter_size, axis=-1,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords = None):
"""Calculate a one-dimensional filter along the given axis.
`generic_filter1d` iterates over the lines of the array, calling the
    given function at each line. The arguments passed to the function are the
    input line and the output line. The input and output lines are 1D
double arrays. The input line is extended appropriately according
to the filter size and origin. The output line must be modified
in-place with the result.
Parameters
----------
%(input)s
function : callable
Function to apply along given axis.
filter_size : scalar
Length of the filter.
%(axis)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
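    Examples
    --------
    A sketch of a correlation with weights ``[1, 2, 3]`` expressed through
    the callback; the input line is extended by the default ``mode='reflect'``
    rule, so the three slices below cover the left, center and right
    neighbours of each output element:
    >>> import numpy as np
    >>> from scipy import ndimage
    >>> def fnc(iline, oline):
    ...     oline[...] = iline[:-2] + 2 * iline[1:-1] + 3 * iline[2:]
    >>> a = np.arange(12).reshape(3, 4)
    >>> ndimage.generic_filter1d(a, fnc, 3)
    array([[ 3,  8, 14, 17],
           [27, 32, 38, 41],
           [51, 56, 62, 65]])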
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input)
if filter_size < 1:
raise RuntimeError('invalid filter size')
axis = _ni_support._check_axis(axis, input.ndim)
if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
filter_size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter1d(input, function, filter_size, axis, output,
mode, cval, origin, extra_arguments, extra_keywords)
return return_value
@docfiller
def generic_filter(input, function, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords = None):
"""Calculates a multi-dimensional filter using the given function.
At each element the provided function is called. The input values
within the filter footprint at that element are passed to the function
as a 1D array of double values.
Parameters
----------
%(input)s
function : callable
Function to apply at each element.
%(size_foot)s
%(output)s
%(mode)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
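    Examples
    --------
    A sketch using ``numpy.amax`` as the callback, which reproduces
    `maximum_filter` (default ``mode='reflect'`` extension assumed):
    >>> import numpy as np
    >>> from scipy import ndimage
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> ndimage.generic_filter(a, np.amax, size=3)
    array([[5, 5, 4, 4],
           [5, 5, 7, 7],
           [9, 9, 7, 7],
           [9, 9, 7, 7]])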
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint)
footprint = footprint.astype(bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
output, return_value = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter(input, function, footprint, output, mode,
cval, origins, extra_arguments, extra_keywords)
return return_value
| mit |
rgommers/statsmodels | statsmodels/examples/tsa/arma_plots.py | 33 | 2516 | '''Plot acf and pacf for some ARMA(1,1)
'''
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.tsa.arima_process as tsp
from statsmodels.sandbox.tsa.fftarma import ArmaFft as FftArmaProcess
import statsmodels.tsa.stattools as tss
from statsmodels.graphics.tsaplots import plotacf
np.set_printoptions(precision=2)
arcoefs = [0.9, 0., -0.5] #[0.9, 0.5, 0.1, 0., -0.5]
macoefs = [0.9, 0., -0.5] #[0.9, 0.5, 0.1, 0., -0.5]
nsample = 1000
nburnin = 1000
sig = 1
fig = plt.figure(figsize=(8, 13))
fig.suptitle('ARMA: Autocorrelation (left) and Partial Autocorrelation (right)')
subplotcount = 1
nrows = 4
for arcoef in arcoefs[:-1]:
for macoef in macoefs[:-1]:
ar = np.r_[1., -arcoef]
ma = np.r_[1., macoef]
#y = tsp.arma_generate_sample(ar,ma,nsample, sig, burnin)
#armaprocess = FftArmaProcess(ar, ma, nsample) #TODO: make n optional
#armaprocess.plot4()
armaprocess = tsp.ArmaProcess(ar, ma)
acf = armaprocess.acf(20)[:20]
pacf = armaprocess.pacf(20)[:20]
ax = fig.add_subplot(nrows, 2, subplotcount)
plotacf(acf, ax=ax)
## ax.set_title('Autocorrelation \nar=%s, ma=%rs' % (ar, ma),
## size='xx-small')
ax.text(0.7, 0.6, 'ar =%s \nma=%s' % (ar, ma),
transform=ax.transAxes,
horizontalalignment='left', #'right',
size='xx-small')
ax.set_xlim(-1,20)
subplotcount +=1
ax = fig.add_subplot(nrows, 2, subplotcount)
plotacf(pacf, ax=ax)
## ax.set_title('Partial Autocorrelation \nar=%s, ma=%rs' % (ar, ma),
## size='xx-small')
ax.text(0.7, 0.6, 'ar =%s \nma=%s' % (ar, ma),
transform=ax.transAxes,
horizontalalignment='left', #'right',
size='xx-small')
ax.set_xlim(-1,20)
subplotcount +=1
axs = fig.axes
### turn off the 2nd column y tick labels
##for ax in axs[1::2]:#[:,1].flat:
## for label in ax.get_yticklabels(): label.set_visible(False)
# turn off all but the bottom xtick labels
for ax in axs[:-2]:#[:-1,:].flat:
for label in ax.get_xticklabels(): label.set_visible(False)
# use a MaxNLocator on the first column y axis if you have a bunch of
# rows to avoid bunching; example below uses at most 3 ticks
import matplotlib.ticker as mticker
for ax in axs: #[::2]:#[:,1].flat:
ax.yaxis.set_major_locator( mticker.MaxNLocator(3 ))
plt.show()
| bsd-3-clause |
asmitagupta/crowdsource-platform | fixtures/createJson.py | 6 | 2462 | __author__ = 'Megha'
# Script to transfer csv containing data about various models to json
# Input csv file constituting of the model data
# Output json file representing the csv data as json object
# Assumes model name to be first line
# Field names of the model on the second line
# Data seperated by __DELIM__
# Example:
# L01 ModelName: registrationmodel
# L02 FieldNames: user,activation_key,created_timestamp,last_updated
# L03 Data: 1,qwer,2015-05-01T00:17:40.085Z,2015-05-01T00:17:40.085Z
# L04 Data: 2,assd,2015-05-01T00:17:40.085Z,2015-05-01T00:17:40.085Z
import numpy as np
import pandas as pd
import json as json
__MODULE_NAME__ = 7 # Number of lines in each model block (model name, field names, 5 data rows)
__INPUT_FILE__ = 'meghaWorkerData.csv'
__OUTPUT_FILE__ = 'meghaWorkerData.json'
__NEWLINE__ = '\n'
__KEY1__ = 0
__KEY2__ = 0
__DELIM__ = ','
__APPEND__ = 'crowdsourcing.'
__KEY_MODEL__ = 'model'
__KEY_FIELDS__ = 'fields'
__KEY_PK__ = 'pk'
def create_dict(input_dict, module, data_collection):
for key, value in input_dict.items():
data_dict = {}
data_dict[__KEY_FIELDS__] = value
data_dict[__KEY_PK__] = key
data_dict[__KEY_MODEL__] = __APPEND__ + module
data_collection.append(data_dict)
return data_collection
def create_data_json(__FILE__):
    in_fp = open(__FILE__, 'rb')  # use the filename passed to the function
file_lines = in_fp.readlines()
in_fp.close()
data_collection = []
for line_no in range(0, len(file_lines)):
if line_no % __MODULE_NAME__ == 0:
columns = file_lines[line_no + 1].strip(__NEWLINE__).split(__DELIM__)
instance1 = file_lines[line_no + 2].strip(__NEWLINE__).split(__DELIM__)
instance2 = file_lines[line_no + 3].strip(__NEWLINE__).split(__DELIM__)
instance3 = file_lines[line_no + 4].strip(__NEWLINE__).split(__DELIM__)
instance4 = file_lines[line_no + 5].strip(__NEWLINE__).split(__DELIM__)
instance5 = file_lines[line_no + 6].strip(__NEWLINE__).split(__DELIM__)
data = np.array([instance1,instance2,instance3,instance4,instance5])
df = pd.DataFrame(data, columns = columns)
create_dict(df.transpose().to_dict(), file_lines[line_no].strip(__NEWLINE__), data_collection)
del(df)
print data_collection
out_fp = open(__OUTPUT_FILE__, 'wb')
out_fp.write(json.dumps(data_collection, indent = 2))
out_fp.close()
if __name__ == '__main__':
create_data_json(__INPUT_FILE__) | mit |
kaushiksk/DeepNLP | Training-Sessions/Session II - Word Embeddings/word2vec/run.py | 2 | 2166 | import random
import numpy as np
from cs224d.data_utils import *
import matplotlib.pyplot as plt
from word2vec import *
from sgd import *
# Reset the random seed to make sure that everyone gets the same results
random.seed(314)
dataset = StanfordSentiment()
tokens = dataset.tokens()
nWords = len(tokens)
# We are going to train 10-dimensional vectors for this assignment
dimVectors = 10
# Context size
C = 5
# Reset the random seed to make sure that everyone gets the same results
random.seed(31415)
np.random.seed(9265)
wordVectors = np.concatenate(((np.random.rand(nWords, dimVectors) - .5) / \
dimVectors, np.zeros((nWords, dimVectors))), axis=0)
wordVectors0 = sgd(
lambda vec: word2vec_sgd_wrapper(skipgram, tokens, vec, dataset, C,
negSamplingCostAndGradient),
wordVectors, 0.3, 40000, None, True, PRINT_EVERY=10)
# It has already run for 40000 iterations. The weights are stored in
# saved_params_40000.npy
# Run it for more iterations. It will resume from the 40000th.
# Change 40000 above to a different number
print "sanity check: cost at convergence should be around or below 10"
# sum the input and output word vectors
wordVectors = (wordVectors0[:nWords,:] + wordVectors0[nWords:,:])
# Visualize the word vectors you trained
_, wordVectors0, _ = load_saved_params()
wordVectors = (wordVectors0[:nWords,:] + wordVectors0[nWords:,:])
visualizeWords = ["the", "a", "an", ",", ".", "?", "!", "``", "''", "--",
"good", "great", "cool", "brilliant", "wonderful", "well", "amazing",
"worth", "sweet", "enjoyable", "boring", "bad", "waste", "dumb",
"annoying"]
visualizeIdx = [tokens[word] for word in visualizeWords]
visualizeVecs = wordVectors[visualizeIdx, :]
temp = (visualizeVecs - np.mean(visualizeVecs, axis=0))
covariance = 1.0 / len(visualizeIdx) * temp.T.dot(temp)
U,S,V = np.linalg.svd(covariance)
coord = temp.dot(U[:,0:2])
for i in xrange(len(visualizeWords)):
plt.text(coord[i,0], coord[i,1], visualizeWords[i],
bbox=dict(facecolor='green', alpha=0.1))
plt.xlim((np.min(coord[:,0]), np.max(coord[:,0])))
plt.ylim((np.min(coord[:,1]), np.max(coord[:,1])))
plt.savefig('q3_word_vectors.png')
plt.show() | mit |
kaslusimoes/SummerSchool2016 | simulation-multiple-variations-random.py | 1 | 4619 | #! /bin/env python2
# coding: utf-8
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import random as rd
from pickle import dump
class Data:
def __init__(self):
self.m_list1 = []
self.m_list2 = []
N = 100
M = 100
MAX = N + M + 1
MAX_EDGE = 380
MAX_DEG = 450
ITERATIONS = 50000
S1 = 0.
T1 = 1.
S2 = 0.
T2 = 1.
beta = 0.5
NUMGRAPH = 10
NSIM = 10
NAME = "lucasmedeiros"
# initial fraction of cooperators
p1, p2 = .5, .5
# number of cooperators
cc1, cc2 = 0, 0
# fraction of cooperators
r1, r2 = np.zeros(ITERATIONS + 1, dtype=np.float), np.zeros(ITERATIONS + 1, dtype=np.float)
payoff = np.array(
[
[1, S1],
[T1, 0]
]
, dtype=np.float, ndmin=2)
payoff2 = np.array(
[
[1, S2],
[T2, 0]
]
, dtype=np.float, ndmin=2)
def interaction(x, y):
if x < N:
return payoff[g.node[x]['strategy']][g.node[y]['strategy']]
else:
return payoff2[g.node[x]['strategy']][g.node[y]['strategy']]
def change_prob(x, y):
return 1. / (1 + np.exp(-beta * (y - x)))
def complete():
return nx.complete_bipartite_graph(N, M)
def random():
g = nx.Graph()
g.add_nodes_from(np.arange(0, N + M, 1, dtype=np.int))
while g.number_of_edges() < MAX_EDGE:
a, b = rd.randint(0, N - 1), rd.randint(N, N + M - 1)
if b not in g[a]:
g.add_edge(a, b)
return g
def set_initial_strategy(g):
global cc1, cc2
coop = range(0, int(p1 * N), 1) + range(N, int(p2 * M) + N, 1)
cc1 = int(p1 * N)
defect = set(range(0, N + M, 1)) - set(coop)
cc2 = int(p2 * M)
coop = dict(zip(coop, len(coop) * [0]))
defect = dict(zip(defect, len(defect) * [1]))
nx.set_node_attributes(g, 'strategy', coop)
nx.set_node_attributes(g, 'strategy', defect)
def fitness(x):
ret = 0
for i in g.neighbors(x):
ret += interaction(x, i)
return ret
def simulate():
global cc1, cc2
it = 0
while it < ITERATIONS:
it += 1
if it % 2:
a = rd.randint(0, N - 1)
else:
a = rd.randint(N, N + M - 1)
if len(g.neighbors(a)) == 0:
it -= 1
continue
b = g.neighbors(a)[rd.randint(0, len(g.neighbors(a)) - 1)]
b = g.neighbors(b)[rd.randint(0, len(g.neighbors(b)) - 1)]
if a == b:
it -= 1
continue
assert (a < N and b < N) or (a >= N and b >= N)
if g.node[a]['strategy'] != g.node[b]['strategy']:
fa, fb = fitness(a), fitness(b)
l = np.random.random()
p = change_prob(fa, fb)
if l <= p:
if a < N:
if g.node[a]['strategy'] == 0:
cc1 -= 1
else:
cc1 += 1
else:
if g.node[a]['strategy'] == 0:
cc2 -= 1
else:
cc2 += 1
nx.set_node_attributes(g, 'strategy', { a:g.node[b]['strategy'] })
r1[it] = float(cc1) / N
r2[it] = float(cc2) / M
nbins = 10
T1range = np.linspace(1,2,3)
S1range = np.linspace(-1,0,3)
T2range = np.linspace(1,2,nbins)
S2range = np.linspace(-1,0,nbins)
mag1 = np.zeros((nbins, nbins), dtype=np.float)
mag2 = np.zeros((nbins, nbins), dtype=np.float)
for G in xrange(NUMGRAPH):
g = random()
data = Data()
for S1 in S1range:
for T1 in T1range:
global payoff, payoff2
mag1 = np.zeros((nbins, nbins), dtype=np.float)
mag2 = np.zeros((nbins, nbins), dtype=np.float)
i = 0
payoff = np.array([
[1, S1],
[T1, 0]], dtype=np.float, ndmin=2)
for S2 in S2range:
j = 0
for T2 in T2range:
payoff2 = np.array([
[1, S2],
[T2, 0]], dtype=np.float, ndmin=2)
for SS in xrange(NSIM):
set_initial_strategy(g)
simulate()
                        mag1[i][j] += np.mean(r1[-1000:])  # accumulate over the NSIM runs; averaged below
                        mag2[i][j] += np.mean(r2[-1000:])
j += 1
i += 1
mag1 /= NSIM
mag2 /= NSIM
data.m_list1.append((S1, T1, S2, T2, mag1))
data.m_list2.append((S1, T1, S2, T2, mag2))
f = open('random graph {1} {0}.grph'.format(G, NAME), 'w')
dump(data,f,2)
f.close()
print("Finished Random Graph {0}".format(G))
| apache-2.0 |
mhogg/BMDanalyse | BMDanalyse/MatplotlibWidget.py | 1 | 1608 | # -*- coding: utf-8 -*-
# Copyright (C) 2016 Michael Hogg
# This file is part of BMDanalyse - See LICENSE.txt for information on usage and redistribution
# Fixes error in pyqtgraph when importing NavigationToolbar2QTAgg. This class is not available
# in matplotlib any longer (it was renamed to NavigationToolbar2QT)
from pyqtgraph.Qt import QtGui, QtCore, USE_PYSIDE
import matplotlib
if USE_PYSIDE:
matplotlib.rcParams['backend.qt4']='PySide'
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
#from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
class MatplotlibWidget(QtGui.QWidget):
"""
Implements a Matplotlib figure inside a QWidget.
    Use getFigure() and draw() to interact with matplotlib.
Example::
mw = MatplotlibWidget()
subplot = mw.getFigure().add_subplot(111)
subplot.plot(x,y)
mw.draw()
"""
def __init__(self, size=(5.0, 4.0), dpi=100):
QtGui.QWidget.__init__(self)
self.fig = Figure(size, dpi=dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.toolbar = NavigationToolbar(self.canvas, self)
self.vbox = QtGui.QVBoxLayout()
self.vbox.addWidget(self.toolbar)
self.vbox.addWidget(self.canvas)
self.setLayout(self.vbox)
def getFigure(self):
return self.fig
def draw(self):
self.canvas.draw()
| mit |
andrewnc/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 77 | 1820 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
harisbal/pandas | pandas/core/indexing.py | 4 | 91629 | # pylint: disable=W0223
import textwrap
import warnings
import numpy as np
from pandas._libs.indexing import _NDFrameIndexerBase
from pandas.util._decorators import Appender
from pandas.errors import AbstractMethodError
import pandas.compat as compat
from pandas.compat import range, zip
from pandas.core.dtypes.common import (
is_integer_dtype,
is_integer, is_float,
is_list_like,
is_sequence,
is_iterator,
is_scalar,
is_sparse,
ensure_platform_int)
from pandas.core.dtypes.generic import ABCDataFrame, ABCPanel, ABCSeries
from pandas.core.dtypes.missing import isna, _infer_fill_value
import pandas.core.common as com
from pandas.core.index import Index, MultiIndex
# the supported indexers
def get_indexers_list():
return [
('ix', _IXIndexer),
('iloc', _iLocIndexer),
('loc', _LocIndexer),
('at', _AtIndexer),
('iat', _iAtIndexer),
]
# "null slice"
_NS = slice(None, None)
# the public IndexSlicerMaker
class _IndexSlice(object):
"""
Create an object to more easily perform multi-index slicing
See Also
--------
MultiIndex.remove_unused_levels : New MultiIndex with no unused levels.
Notes
-----
See :ref:`Defined Levels <advanced.shown_levels>`
for further info on slicing a MultiIndex.
Examples
--------
>>> midx = pd.MultiIndex.from_product([['A0','A1'], ['B0','B1','B2','B3']])
>>> columns = ['foo', 'bar']
>>> dfmi = pd.DataFrame(np.arange(16).reshape((len(midx), len(columns))),
index=midx, columns=columns)
Using the default slice command:
>>> dfmi.loc[(slice(None), slice('B0', 'B1')), :]
foo bar
A0 B0 0 1
B1 2 3
A1 B0 8 9
B1 10 11
Using the IndexSlice class for a more intuitive command:
>>> idx = pd.IndexSlice
>>> dfmi.loc[idx[:, 'B0':'B1'], :]
foo bar
A0 B0 0 1
B1 2 3
A1 B0 8 9
B1 10 11
"""
def __getitem__(self, arg):
return arg
IndexSlice = _IndexSlice()
class IndexingError(Exception):
pass
class _NDFrameIndexer(_NDFrameIndexerBase):
_valid_types = None
_exception = KeyError
axis = None
def __call__(self, axis=None):
# we need to return a copy of ourselves
new_self = self.__class__(self.name, self.obj)
if axis is not None:
axis = self.obj._get_axis_number(axis)
new_self.axis = axis
return new_self
def __iter__(self):
raise NotImplementedError('ix is not iterable')
def __getitem__(self, key):
if type(key) is tuple:
key = tuple(com.apply_if_callable(x, self.obj)
for x in key)
try:
values = self.obj._get_value(*key)
if is_scalar(values):
return values
except Exception:
pass
return self._getitem_tuple(key)
else:
# we by definition only have the 0th axis
axis = self.axis or 0
key = com.apply_if_callable(key, self.obj)
return self._getitem_axis(key, axis=axis)
def _get_label(self, label, axis=None):
if axis is None:
axis = self.axis or 0
if self.ndim == 1:
# for perf reasons we want to try _xs first
# as its basically direct indexing
# but will fail when the index is not present
# see GH5667
return self.obj._xs(label, axis=axis)
elif isinstance(label, tuple) and isinstance(label[axis], slice):
raise IndexingError('no slices here, handle elsewhere')
return self.obj._xs(label, axis=axis)
def _get_loc(self, key, axis=None):
if axis is None:
axis = self.axis
return self.obj._ixs(key, axis=axis)
def _slice(self, obj, axis=None, kind=None):
if axis is None:
axis = self.axis
return self.obj._slice(obj, axis=axis, kind=kind)
def _get_setitem_indexer(self, key):
if self.axis is not None:
return self._convert_tuple(key, is_setter=True)
axis = self.obj._get_axis(0)
if isinstance(axis, MultiIndex) and self.name != 'iloc':
try:
return axis.get_loc(key)
except Exception:
pass
if isinstance(key, tuple):
try:
return self._convert_tuple(key, is_setter=True)
except IndexingError:
pass
if isinstance(key, range):
return self._convert_range(key, is_setter=True)
try:
return self._convert_to_indexer(key, is_setter=True)
except TypeError as e:
# invalid indexer type vs 'other' indexing errors
if 'cannot do' in str(e):
raise
raise IndexingError(key)
def __setitem__(self, key, value):
if isinstance(key, tuple):
key = tuple(com.apply_if_callable(x, self.obj)
for x in key)
else:
key = com.apply_if_callable(key, self.obj)
indexer = self._get_setitem_indexer(key)
self._setitem_with_indexer(indexer, value)
def _validate_key(self, key, axis):
"""
Ensure that key is valid for current indexer.
Parameters
----------
key : scalar, slice or list-like
The key requested
axis : int
Dimension on which the indexing is being made
Raises
------
TypeError
If the key (or some element of it) has wrong type
IndexError
If the key (or some element of it) is out of bounds
KeyError
If the key was not found
"""
        raise AbstractMethodError(self)
def _has_valid_tuple(self, key):
""" check the key for valid keys across my indexer """
for i, k in enumerate(key):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
try:
self._validate_key(k, i)
except ValueError:
raise ValueError("Location based indexing can only have "
"[{types}] types"
.format(types=self._valid_types))
def _is_nested_tuple_indexer(self, tup):
if any(isinstance(ax, MultiIndex) for ax in self.obj.axes):
return any(is_nested_tuple(tup, ax) for ax in self.obj.axes)
return False
def _convert_tuple(self, key, is_setter=False):
keyidx = []
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
for i in range(self.ndim):
if i == axis:
keyidx.append(self._convert_to_indexer(
key, axis=axis, is_setter=is_setter))
else:
keyidx.append(slice(None))
else:
for i, k in enumerate(key):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter)
keyidx.append(idx)
return tuple(keyidx)
def _convert_range(self, key, is_setter=False):
""" convert a range argument """
return list(key)
def _convert_scalar_indexer(self, key, axis):
# if we are accessing via lowered dim, use the last dim
if axis is None:
axis = 0
ax = self.obj._get_axis(min(axis, self.ndim - 1))
# a scalar
return ax._convert_scalar_indexer(key, kind=self.name)
def _convert_slice_indexer(self, key, axis):
# if we are accessing via lowered dim, use the last dim
ax = self.obj._get_axis(min(axis, self.ndim - 1))
return ax._convert_slice_indexer(key, kind=self.name)
def _has_valid_setitem_indexer(self, indexer):
return True
def _has_valid_positional_setitem_indexer(self, indexer):
""" validate that an positional indexer cannot enlarge its target
will raise if needed, does not modify the indexer externally
"""
if isinstance(indexer, dict):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
else:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
for ax, i in zip(self.obj.axes, indexer):
if isinstance(i, slice):
# should check the stop slice?
pass
elif is_list_like_indexer(i):
# should check the elements?
pass
elif is_integer(i):
if i >= len(ax):
raise IndexError("{name} cannot enlarge its target "
"object".format(name=self.name))
elif isinstance(i, dict):
raise IndexError("{name} cannot enlarge its target object"
.format(name=self.name))
return True
def _setitem_with_indexer(self, indexer, value):
self._has_valid_setitem_indexer(indexer)
# also has the side effect of consolidating in-place
from pandas import Series
info_axis = self.obj._info_axis_number
# maybe partial set
take_split_path = self.obj._is_mixed_type
# if there is only one block/type, still have to take split path
# unless the block is one-dimensional or it can hold the value
if not take_split_path and self.obj._data.blocks:
blk, = self.obj._data.blocks
if 1 < blk.ndim: # in case of dict, keys are indices
val = list(value.values()) if isinstance(value,
dict) else value
take_split_path = not blk._can_hold_element(val)
if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes):
for i, ax in zip(indexer, self.obj.axes):
# if we have any multi-indexes that have non-trivial slices
# (not null slices) then we must take the split path, xref
# GH 10360
if (isinstance(ax, MultiIndex) and
not (is_integer(i) or com.is_null_slice(i))):
take_split_path = True
break
if isinstance(indexer, tuple):
nindexer = []
for i, idx in enumerate(indexer):
if isinstance(idx, dict):
# reindex the axis to the new value
# and set inplace
key, _ = convert_missing_indexer(idx)
# if this is the items axes, then take the main missing
# path first
# this correctly sets the dtype and avoids cache issues
# essentially this separates out the block that is needed
# to possibly be modified
if self.ndim > 1 and i == self.obj._info_axis_number:
# add the new item, and set the value
# must have all defined axes if we have a scalar
# or a list-like on the non-info axes if we have a
# list-like
len_non_info_axes = [
len(_ax) for _i, _ax in enumerate(self.obj.axes)
if _i != i
]
if any(not l for l in len_non_info_axes):
if not is_list_like_indexer(value):
raise ValueError("cannot set a frame with no "
"defined index and a scalar")
self.obj[key] = value
return self.obj
# add a new item with the dtype setup
self.obj[key] = _infer_fill_value(value)
new_indexer = convert_from_missing_indexer_tuple(
indexer, self.obj.axes)
self._setitem_with_indexer(new_indexer, value)
return self.obj
# reindex the axis
# make sure to clear the cache because we are
# just replacing the block manager here
# so the object is the same
index = self.obj._get_axis(i)
labels = index.insert(len(index), key)
self.obj._data = self.obj.reindex(labels, axis=i)._data
self.obj._maybe_update_cacher(clear=True)
self.obj._is_copy = None
nindexer.append(labels.get_loc(key))
else:
nindexer.append(idx)
indexer = tuple(nindexer)
else:
indexer, missing = convert_missing_indexer(indexer)
if missing:
# reindex the axis to the new value
# and set inplace
if self.ndim == 1:
index = self.obj.index
new_index = index.insert(len(index), indexer)
# we have a coerced indexer, e.g. a float
# that matches in an Int64Index, so
# we will not create a duplicate index, rather
# index to that element
# e.g. 0.0 -> 0
# GH12246
if index.is_unique:
new_indexer = index.get_indexer([new_index[-1]])
if (new_indexer != -1).any():
return self._setitem_with_indexer(new_indexer,
value)
# this preserves dtype of the value
new_values = Series([value])._values
if len(self.obj._values):
try:
new_values = np.concatenate([self.obj._values,
new_values])
except TypeError:
as_obj = self.obj.astype(object)
new_values = np.concatenate([as_obj,
new_values])
self.obj._data = self.obj._constructor(
new_values, index=new_index, name=self.obj.name)._data
self.obj._maybe_update_cacher(clear=True)
return self.obj
elif self.ndim == 2:
# no columns and scalar
if not len(self.obj.columns):
raise ValueError("cannot set a frame with no defined "
"columns")
# append a Series
if isinstance(value, Series):
value = value.reindex(index=self.obj.columns,
copy=True)
value.name = indexer
# a list-list
else:
# must have conforming columns
if is_list_like_indexer(value):
if len(value) != len(self.obj.columns):
raise ValueError("cannot set a row with "
"mismatched columns")
value = Series(value, index=self.obj.columns,
name=indexer)
self.obj._data = self.obj.append(value)._data
self.obj._maybe_update_cacher(clear=True)
return self.obj
# set using setitem (Panel and > dims)
elif self.ndim >= 3:
return self.obj.__setitem__(indexer, value)
# set
item_labels = self.obj._get_axis(info_axis)
# align and set the values
if take_split_path:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
if isinstance(value, ABCSeries):
value = self._align_series(indexer, value)
info_idx = indexer[info_axis]
if is_integer(info_idx):
info_idx = [info_idx]
labels = item_labels[info_idx]
# if we have a partial multiindex, then need to adjust the plane
# indexer here
if (len(labels) == 1 and
isinstance(self.obj[labels[0]].axes[0], MultiIndex)):
item = labels[0]
obj = self.obj[item]
index = obj.index
idx = indexer[:info_axis][0]
plane_indexer = tuple([idx]) + indexer[info_axis + 1:]
lplane_indexer = length_of_indexer(plane_indexer[0], index)
# require that we are setting the right number of values that
# we are indexing
if is_list_like_indexer(value) and np.iterable(
value) and lplane_indexer != len(value):
if len(obj[idx]) != len(value):
raise ValueError("cannot set using a multi-index "
"selection indexer with a different "
"length than the value")
# make sure we have an ndarray
value = getattr(value, 'values', value).ravel()
# we can directly set the series here
# as we select a slice indexer on the mi
idx = index._convert_slice_indexer(idx)
obj._consolidate_inplace()
obj = obj.copy()
obj._data = obj._data.setitem(indexer=tuple([idx]),
value=value)
self.obj[item] = obj
return
# non-mi
else:
plane_indexer = indexer[:info_axis] + indexer[info_axis + 1:]
if info_axis > 0:
plane_axis = self.obj.axes[:info_axis][0]
lplane_indexer = length_of_indexer(plane_indexer[0],
plane_axis)
else:
lplane_indexer = 0
def setter(item, v):
s = self.obj[item]
pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
# perform the equivalent of a setitem on the info axis
# as we have a null slice or a slice with full bounds
# which means essentially reassign to the columns of a
# multi-dim object
# GH6149 (null slice), GH10408 (full bounds)
if (isinstance(pi, tuple) and
all(com.is_null_slice(idx) or
com.is_full_slice(idx, len(self.obj))
for idx in pi)):
s = v
else:
# set the item, possibly having a dtype change
s._consolidate_inplace()
s = s.copy()
s._data = s._data.setitem(indexer=pi, value=v)
s._maybe_update_cacher(clear=True)
# reset the sliced object if unique
self.obj[item] = s
def can_do_equal_len():
""" return True if we have an equal len settable """
if (not len(labels) == 1 or not np.iterable(value) or
is_scalar(plane_indexer[0])):
return False
item = labels[0]
index = self.obj[item].index
values_len = len(value)
# equal len list/ndarray
if len(index) == values_len:
return True
elif lplane_indexer == values_len:
return True
return False
# we need an iterable, with a ndim of at least 1
# eg. don't pass through np.array(0)
if is_list_like_indexer(value) and getattr(value, 'ndim', 1) > 0:
# we have an equal len Frame
if isinstance(value, ABCDataFrame) and value.ndim > 1:
sub_indexer = list(indexer)
multiindex_indexer = isinstance(labels, MultiIndex)
for item in labels:
if item in value:
sub_indexer[info_axis] = item
v = self._align_series(
tuple(sub_indexer), value[item],
multiindex_indexer)
else:
v = np.nan
setter(item, v)
# we have an equal len ndarray/convertible to our labels
elif np.array(value).ndim == 2:
# note that this coerces the dtype if we are mixed
# GH 7551
value = np.array(value, dtype=object)
if len(labels) != value.shape[1]:
raise ValueError('Must have equal len keys and value '
'when setting with an ndarray')
for i, item in enumerate(labels):
# setting with a list, recoerces
setter(item, value[:, i].tolist())
# we have an equal len list/ndarray
elif can_do_equal_len():
setter(labels[0], value)
# per label values
else:
if len(labels) != len(value):
raise ValueError('Must have equal len keys and value '
'when setting with an iterable')
for item, v in zip(labels, value):
setter(item, v)
else:
# scalar
for item in labels:
setter(item, value)
else:
if isinstance(indexer, tuple):
indexer = maybe_convert_ix(*indexer)
# if we are setting on the info axis ONLY
# set using those methods to avoid block-splitting
# logic here
if (len(indexer) > info_axis and
is_integer(indexer[info_axis]) and
all(com.is_null_slice(idx)
for i, idx in enumerate(indexer)
if i != info_axis) and
item_labels.is_unique):
self.obj[item_labels[indexer[info_axis]]] = value
return
if isinstance(value, (ABCSeries, dict)):
# TODO(EA): ExtensionBlock.setitem this causes issues with
# setting for extensionarrays that store dicts. Need to decide
# if it's worth supporting that.
value = self._align_series(indexer, Series(value))
elif isinstance(value, ABCDataFrame):
value = self._align_frame(indexer, value)
if isinstance(value, ABCPanel):
value = self._align_panel(indexer, value)
# check for chained assignment
self.obj._check_is_chained_assignment_possible()
# actually do the set
self.obj._consolidate_inplace()
self.obj._data = self.obj._data.setitem(indexer=indexer,
value=value)
self.obj._maybe_update_cacher(clear=True)
def _align_series(self, indexer, ser, multiindex_indexer=False):
"""
Parameters
----------
indexer : tuple, slice, scalar
The indexer used to get the locations that will be set to
`ser`
ser : pd.Series
The values to assign to the locations specified by `indexer`
multiindex_indexer : boolean, optional
Defaults to False. Should be set to True if `indexer` was from
a `pd.MultiIndex`, to avoid unnecessary broadcasting.
        Returns
        -------
`np.array` of `ser` broadcast to the appropriate shape for assignment
to the locations selected by `indexer`
"""
if isinstance(indexer, (slice, np.ndarray, list, Index)):
indexer = tuple([indexer])
if isinstance(indexer, tuple):
# flatten np.ndarray indexers
def ravel(i):
return i.ravel() if isinstance(i, np.ndarray) else i
indexer = tuple(map(ravel, indexer))
aligners = [not com.is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
single_aligner = sum_aligners == 1
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
obj = self.obj
# are we a single alignable value on a non-primary
# dim (e.g. panel: 1,2, or frame: 0) ?
# hence need to align to a single axis dimension
# rather that find all valid dims
# frame
if is_frame:
single_aligner = single_aligner and aligners[0]
# panel
elif is_panel:
single_aligner = (single_aligner and
(aligners[1] or aligners[2]))
# we have a frame, with multiple indexers on both axes; and a
# series, so need to broadcast (see GH5206)
if (sum_aligners == self.ndim and
all(is_sequence(_) for _ in indexer)):
ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
# single indexer
if len(indexer) > 1 and not multiindex_indexer:
len_indexer = len(indexer[1])
ser = np.tile(ser, len_indexer).reshape(len_indexer, -1).T
return ser
for i, idx in enumerate(indexer):
ax = obj.axes[i]
# multiple aligners (or null slices)
if is_sequence(idx) or isinstance(idx, slice):
if single_aligner and com.is_null_slice(idx):
continue
new_ix = ax[idx]
if not is_list_like_indexer(new_ix):
new_ix = Index([new_ix])
else:
new_ix = Index(new_ix)
if ser.index.equals(new_ix) or not len(new_ix):
return ser._values.copy()
return ser.reindex(new_ix)._values
# 2 dims
elif single_aligner and is_frame:
# reindex along index
ax = self.obj.axes[1]
if ser.index.equals(ax) or not len(ax):
return ser._values.copy()
return ser.reindex(ax)._values
# >2 dims
elif single_aligner:
broadcast = []
for n, labels in enumerate(self.obj._get_plane_axes(i)):
# reindex along the matching dimensions
if len(labels & ser.index):
ser = ser.reindex(labels)
else:
broadcast.append((n, len(labels)))
# broadcast along other dims
ser = ser._values.copy()
for (axis, l) in broadcast:
shape = [-1] * (len(broadcast) + 1)
shape[axis] = l
ser = np.tile(ser, l).reshape(shape)
if self.obj.ndim == 3:
ser = ser.T
return ser
elif is_scalar(indexer):
ax = self.obj._get_axis(1)
if ser.index.equals(ax):
return ser._values.copy()
return ser.reindex(ax)._values
raise ValueError('Incompatible indexer with Series')
def _align_frame(self, indexer, df):
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
if isinstance(indexer, tuple):
idx, cols = None, None
sindexers = []
for i, ix in enumerate(indexer):
ax = self.obj.axes[i]
if is_sequence(ix) or isinstance(ix, slice):
if isinstance(ix, np.ndarray):
ix = ix.ravel()
if idx is None:
idx = ax[ix]
elif cols is None:
cols = ax[ix]
else:
break
else:
sindexers.append(i)
# panel
if is_panel:
# need to conform to the convention
# as we are not selecting on the items axis
# and we have a single indexer
# GH 7763
if len(sindexers) == 1 and sindexers[0] != 0:
df = df.T
if idx is None:
idx = df.index
if cols is None:
cols = df.columns
if idx is not None and cols is not None:
if df.index.equals(idx) and df.columns.equals(cols):
val = df.copy()._values
else:
val = df.reindex(idx, columns=cols)._values
return val
elif ((isinstance(indexer, slice) or is_list_like_indexer(indexer)) and
is_frame):
ax = self.obj.index[indexer]
if df.index.equals(ax):
val = df.copy()._values
else:
# we have a multi-index and are trying to align
# with a particular, level GH3738
if (isinstance(ax, MultiIndex) and
isinstance(df.index, MultiIndex) and
ax.nlevels != df.index.nlevels):
raise TypeError("cannot align on a multi-index with out "
"specifying the join levels")
val = df.reindex(index=ax)._values
return val
elif is_scalar(indexer) and is_panel:
idx = self.obj.axes[1]
cols = self.obj.axes[2]
# by definition we are indexing on the 0th axis
# a passed in dataframe which is actually a transpose
# of what is needed
if idx.equals(df.index) and cols.equals(df.columns):
return df.copy()._values
return df.reindex(idx, columns=cols)._values
raise ValueError('Incompatible indexer with DataFrame')
def _align_panel(self, indexer, df):
raise NotImplementedError("cannot set using an indexer with a Panel "
"yet!")
def _getitem_tuple(self, tup):
try:
return self._getitem_lowerdim(tup)
except IndexingError:
pass
# no multi-index, so validate all of the indexers
self._has_valid_tuple(tup)
# ugly hack for GH #836
if self._multi_take_opportunity(tup):
return self._multi_take(tup)
# no shortcut needed
retval = self.obj
for i, key in enumerate(tup):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if com.is_null_slice(key):
continue
retval = getattr(retval, self.name)._getitem_axis(key, axis=i)
return retval
def _multi_take_opportunity(self, tup):
"""
Check whether there is the possibility to use ``_multi_take``.
Currently the limit is that all axes being indexed must be indexed with
list-likes.
Parameters
----------
tup : tuple
Tuple of indexers, one per axis
Returns
-------
boolean: Whether the current indexing can be passed through _multi_take
"""
if not all(is_list_like_indexer(x) for x in tup):
return False
# just too complicated
if any(com.is_bool_indexer(x) for x in tup):
return False
return True
def _multi_take(self, tup):
"""
Create the indexers for the passed tuple of keys, and execute the take
operation. This allows the take operation to be executed all at once -
rather than once for each dimension - improving efficiency.
Parameters
----------
tup : tuple
Tuple of indexers, one per axis
Returns
-------
values: same type as the object being indexed
"""
# GH 836
o = self.obj
d = {axis: self._get_listlike_indexer(key, axis)
for (key, axis) in zip(tup, o._AXIS_ORDERS)}
return o._reindex_with_indexers(d, copy=True, allow_dups=True)
def _convert_for_reindex(self, key, axis=None):
return key
def _handle_lowerdim_multi_index_axis0(self, tup):
# we have an axis0 multi-index, handle or raise
try:
# fast path for series or for tup devoid of slices
return self._get_label(tup, axis=self.axis)
except TypeError:
# slices are unhashable
pass
except Exception as e1:
if isinstance(tup[0], (slice, Index)):
raise IndexingError("Handle elsewhere")
# raise the error if we are not sorted
ax0 = self.obj._get_axis(0)
if not ax0.is_lexsorted_for_tuple(tup):
raise e1
return None
def _getitem_lowerdim(self, tup):
# we can directly get the axis result since the axis is specified
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
return self._getitem_axis(tup, axis=axis)
# we may have a nested tuples indexer here
if self._is_nested_tuple_indexer(tup):
return self._getitem_nested_tuple(tup)
# we maybe be using a tuple to represent multiple dimensions here
ax0 = self.obj._get_axis(0)
# ...but iloc should handle the tuple as simple integer-location
# instead of checking it as multiindex representation (GH 13797)
if isinstance(ax0, MultiIndex) and self.name != 'iloc':
result = self._handle_lowerdim_multi_index_axis0(tup)
if result is not None:
return result
if len(tup) > self.obj.ndim:
raise IndexingError("Too many indexers. handle elsewhere")
# to avoid wasted computation
# df.ix[d1:d2, 0] -> columns first (True)
# df.ix[0, ['C', 'B', A']] -> rows first (False)
for i, key in enumerate(tup):
if is_label_like(key) or isinstance(key, tuple):
section = self._getitem_axis(key, axis=i)
# we have yielded a scalar ?
if not is_list_like_indexer(section):
return section
elif section.ndim == self.ndim:
# we're in the middle of slicing through a MultiIndex
# revise the key wrt to `section` by inserting an _NS
new_key = tup[:i] + (_NS,) + tup[i + 1:]
else:
new_key = tup[:i] + tup[i + 1:]
# unfortunately need an odious kludge here because of
# DataFrame transposing convention
if (isinstance(section, ABCDataFrame) and i > 0 and
len(new_key) == 2):
a, b = new_key
new_key = b, a
if len(new_key) == 1:
new_key, = new_key
# Slices should return views, but calling iloc/loc with a null
# slice returns a new object.
if com.is_null_slice(new_key):
return section
# This is an elided recursive call to iloc/loc/etc'
return getattr(section, self.name)[new_key]
raise IndexingError('not applicable')
def _getitem_nested_tuple(self, tup):
# we have a nested tuple so have at least 1 multi-index level
        # we should be able to match up the dimensionality here
# we have too many indexers for our dim, but have at least 1
# multi-index dimension, try to see if we have something like
# a tuple passed to a series with a multi-index
if len(tup) > self.ndim:
result = self._handle_lowerdim_multi_index_axis0(tup)
if result is not None:
return result
# this is a series with a multi-index specified a tuple of
# selectors
return self._getitem_axis(tup, axis=self.axis)
# handle the multi-axis by taking sections and reducing
# this is iterative
obj = self.obj
axis = 0
for i, key in enumerate(tup):
if com.is_null_slice(key):
axis += 1
continue
current_ndim = obj.ndim
obj = getattr(obj, self.name)._getitem_axis(key, axis=axis)
axis += 1
# if we have a scalar, we are done
if is_scalar(obj) or not hasattr(obj, 'ndim'):
break
# has the dim of the obj changed?
# GH 7199
if obj.ndim < current_ndim:
# GH 7516
# if had a 3 dim and are going to a 2d
# axes are reversed on a DataFrame
if i >= 1 and current_ndim == 3 and obj.ndim == 2:
obj = obj.T
axis -= 1
return obj
def _getitem_axis(self, key, axis=None):
if axis is None:
axis = self.axis or 0
if is_iterator(key):
key = list(key)
self._validate_key(key, axis)
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
return self._get_slice_axis(key, axis=axis)
elif (is_list_like_indexer(key) and
not (isinstance(key, tuple) and
isinstance(labels, MultiIndex))):
if hasattr(key, 'ndim') and key.ndim > 1:
raise ValueError('Cannot index with multidimensional key')
return self._getitem_iterable(key, axis=axis)
else:
# maybe coerce a float scalar to integer
key = labels._maybe_cast_indexer(key)
if is_integer(key):
if axis == 0 and isinstance(labels, MultiIndex):
try:
return self._get_label(key, axis=axis)
except (KeyError, TypeError):
if self.obj.index.levels[0].is_integer():
raise
# this is the fallback! (for a non-float, non-integer index)
if not labels.is_floating() and not labels.is_integer():
return self._get_loc(key, axis=axis)
return self._get_label(key, axis=axis)
def _get_listlike_indexer(self, key, axis, raise_missing=False):
"""
Transform a list-like of keys into a new index and an indexer.
Parameters
----------
key : list-like
Target labels
axis: int
Dimension on which the indexing is being made
raise_missing: bool
Whether to raise a KeyError if some labels are not found. Will be
removed in the future, and then this method will always behave as
if raise_missing=True.
Raises
------
KeyError
If at least one key was requested but none was found, and
raise_missing=True.
Returns
-------
keyarr: Index
New index (coinciding with 'key' if the axis is unique)
values : array-like
An indexer for the return object; -1 denotes keys not found
"""
o = self.obj
ax = o._get_axis(axis)
# Have the index compute an indexer or return None
# if it cannot handle:
indexer, keyarr = ax._convert_listlike_indexer(key,
kind=self.name)
# We only act on all found values:
if indexer is not None and (indexer != -1).all():
self._validate_read_indexer(key, indexer, axis,
raise_missing=raise_missing)
return ax[indexer], indexer
if ax.is_unique:
# If we are trying to get actual keys from empty Series, we
# patiently wait for a KeyError later on - otherwise, convert
if len(ax) or not len(key):
key = self._convert_for_reindex(key, axis)
indexer = ax.get_indexer_for(key)
keyarr = ax.reindex(keyarr)[0]
else:
keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr)
self._validate_read_indexer(keyarr, indexer,
o._get_axis_number(axis),
raise_missing=raise_missing)
return keyarr, indexer
def _getitem_iterable(self, key, axis=None):
"""
        Index current object with an iterable key (which can be a boolean
indexer, or a collection of keys).
Parameters
----------
key : iterable
Target labels, or boolean indexer
axis: int, default None
Dimension on which the indexing is being made
Raises
------
KeyError
If no key was found. Will change in the future to raise if not all
keys were found.
IndexingError
If the boolean indexer is unalignable with the object being
indexed.
Returns
-------
scalar, DataFrame, or Series: indexed value(s),
"""
if axis is None:
axis = self.axis or 0
self._validate_key(key, axis)
labels = self.obj._get_axis(axis)
if com.is_bool_indexer(key):
# A boolean indexer
key = check_bool_indexer(labels, key)
inds, = key.nonzero()
return self.obj._take(inds, axis=axis)
else:
# A collection of keys
keyarr, indexer = self._get_listlike_indexer(key, axis,
raise_missing=False)
return self.obj._reindex_with_indexers({axis: [keyarr, indexer]},
copy=True, allow_dups=True)
def _validate_read_indexer(self, key, indexer, axis, raise_missing=False):
"""
Check that indexer can be used to return a result (e.g. at least one
element was found, unless the list of keys was actually empty).
Parameters
----------
key : list-like
Target labels (only used to show correct error message)
indexer: array-like of booleans
Indices corresponding to the key (with -1 indicating not found)
axis: int
Dimension on which the indexing is being made
raise_missing: bool
Whether to raise a KeyError if some labels are not found. Will be
removed in the future, and then this method will always behave as
if raise_missing=True.
Raises
------
KeyError
If at least one key was requested but none was found, and
raise_missing=True.
"""
ax = self.obj._get_axis(axis)
if len(key) == 0:
return
# Count missing values:
missing = (indexer < 0).sum()
if missing:
if missing == len(indexer):
raise KeyError(
u"None of [{key}] are in the [{axis}]".format(
key=key, axis=self.obj._get_axis_name(axis)))
# We (temporarily) allow for some missing keys with .loc, except in
# some cases (e.g. setting) in which "raise_missing" will be False
if not(self.name == 'loc' and not raise_missing):
not_found = list(set(key) - set(ax))
raise KeyError("{} not in index".format(not_found))
# we skip the warning on Categorical/Interval
# as this check is actually done (check for
# non-missing values), but a bit later in the
# code, so we want to avoid warning & then
# just raising
_missing_key_warning = textwrap.dedent("""
Passing list-likes to .loc or [] with any missing label will raise
KeyError in the future, you can use .reindex() as an alternative.
See the documentation here:
https://pandas.pydata.org/pandas-docs/stable/indexing.html#deprecate-loc-reindex-listlike""") # noqa
if not (ax.is_categorical() or ax.is_interval()):
warnings.warn(_missing_key_warning,
FutureWarning, stacklevel=6)
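    # Illustrative sketch of the rule enforced above (not part of the original
    # source; the example objects are hypothetical):
    #   >>> s = pd.Series([1, 2], index=['a', 'b'])
    #   >>> s.loc[['a', 'x']]   # some labels missing -> FutureWarning, reindex-like result
    #   >>> s.loc[['x', 'y']]   # no labels found     -> KeyError("None of [...] are in the [index]")
    # With raise_missing=True the partially-missing case raises KeyError as well.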
def _convert_to_indexer(self, obj, axis=None, is_setter=False,
raise_missing=False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
if axis is None:
axis = self.axis or 0
labels = self.obj._get_axis(axis)
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
# try to find out correct indexer, if not type correct raise
try:
obj = self._convert_scalar_indexer(obj, axis)
except TypeError:
# but we will allow setting
if is_setter:
pass
# see if we are positional in nature
is_int_index = labels.is_integer()
is_int_positional = is_integer(obj) and not is_int_index
# if we are a label return me
try:
return labels.get_loc(obj)
except LookupError:
if isinstance(obj, tuple) and isinstance(labels, MultiIndex):
if is_setter and len(obj) == labels.nlevels:
return {'key': obj}
raise
except TypeError:
pass
except (ValueError):
if not is_int_positional:
raise
# a positional
if is_int_positional:
# if we are setting and its not a valid location
# its an insert which fails by definition
if is_setter:
# always valid
if self.name == 'loc':
return {'key': obj}
# a positional
if (obj >= self.obj.shape[axis] and
not isinstance(labels, MultiIndex)):
raise ValueError("cannot set by positional indexing with "
"enlargement")
return obj
if is_nested_tuple(obj, labels):
return labels.get_locs(obj)
elif is_list_like_indexer(obj):
if com.is_bool_indexer(obj):
obj = check_bool_indexer(labels, obj)
inds, = obj.nonzero()
return inds
else:
# When setting, missing keys are not allowed, even with .loc:
kwargs = {'raise_missing': True if is_setter else
raise_missing}
return self._get_listlike_indexer(obj, axis, **kwargs)[1]
else:
try:
return labels.get_loc(obj)
except LookupError:
# allow a not found key only if we are a setter
if not is_list_like_indexer(obj) and is_setter:
return {'key': obj}
raise
def _tuplify(self, loc):
tup = [slice(None, None) for _ in range(self.ndim)]
tup[0] = loc
return tuple(tup)
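    # Minimal sketch of _tuplify (illustrative only): on a 2-D object,
    # self._tuplify(3) builds (3, slice(None, None)), i.e. "row 3, all columns".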
def _get_slice_axis(self, slice_obj, axis=None):
obj = self.obj
if axis is None:
axis = self.axis or 0
if not need_slice(slice_obj):
return obj.copy(deep=False)
indexer = self._convert_slice_indexer(slice_obj, axis)
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis, kind='iloc')
else:
return self.obj._take(indexer, axis=axis)
class _IXIndexer(_NDFrameIndexer):
"""A primarily label-location based indexer, with integer position
fallback.
Warning: Starting in 0.20.0, the .ix indexer is deprecated, in
favor of the more strict .iloc and .loc indexers.
``.ix[]`` supports mixed integer and label based access. It is
primarily label based, but will fall back to integer positional
access unless the corresponding axis is of integer type.
``.ix`` is the most general indexer and will support any of the
inputs in ``.loc`` and ``.iloc``. ``.ix`` also supports floating
point label schemes. ``.ix`` is exceptionally useful when dealing
with mixed positional and label based hierarchical indexes.
However, when an axis is integer based, ONLY label based access
and not positional access is supported. Thus, in such cases, it's
usually better to be explicit and use ``.iloc`` or ``.loc``.
See more at :ref:`Advanced Indexing <advanced>`.
"""
def __init__(self, name, obj):
_ix_deprecation_warning = textwrap.dedent("""
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated""") # noqa
warnings.warn(_ix_deprecation_warning,
DeprecationWarning, stacklevel=2)
super(_IXIndexer, self).__init__(name, obj)
@Appender(_NDFrameIndexer._validate_key.__doc__)
def _validate_key(self, key, axis):
if isinstance(key, slice):
return True
elif com.is_bool_indexer(key):
return True
elif is_list_like_indexer(key):
return True
else:
self._convert_scalar_indexer(key, axis)
return True
def _convert_for_reindex(self, key, axis=None):
"""
Transform a list of keys into a new array ready to be used as axis of
the object we return (e.g. including NaNs).
Parameters
----------
key : list-like
Target labels
axis: int
Where the indexing is being made
Returns
-------
list-like of labels
"""
if axis is None:
axis = self.axis or 0
labels = self.obj._get_axis(axis)
if com.is_bool_indexer(key):
key = check_bool_indexer(labels, key)
return labels[key]
if isinstance(key, Index):
keyarr = labels._convert_index_indexer(key)
else:
# asarray can be unsafe, NumPy strings are weird
keyarr = com.asarray_tuplesafe(key)
if is_integer_dtype(keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
keyarr = labels._convert_arr_indexer(keyarr)
if not labels.is_integer():
keyarr = ensure_platform_int(keyarr)
return labels.take(keyarr)
return keyarr
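# Hedged illustration of the integer-key path in _convert_for_reindex above
# (not part of the original source): a plain list such as [0, 2] is first
# turned into an integer ndarray, and when the axis itself is not integer
# typed the keys are treated positionally via labels.take, e.g.
#   >>> pd.Index(['a', 'b', 'c']).take([0, 2])   # -> Index(['a', 'c'])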
class _LocationIndexer(_NDFrameIndexer):
_exception = Exception
def __getitem__(self, key):
if type(key) is tuple:
key = tuple(com.apply_if_callable(x, self.obj)
for x in key)
try:
if self._is_scalar_access(key):
return self._getitem_scalar(key)
except (KeyError, IndexError, AttributeError):
pass
return self._getitem_tuple(key)
else:
# we by definition only have the 0th axis
axis = self.axis or 0
maybe_callable = com.apply_if_callable(key, self.obj)
return self._getitem_axis(maybe_callable, axis=axis)
def _is_scalar_access(self, key):
raise NotImplementedError()
def _getitem_scalar(self, key):
raise NotImplementedError()
def _getitem_axis(self, key, axis=None):
raise NotImplementedError()
def _getbool_axis(self, key, axis=None):
if axis is None:
axis = self.axis or 0
labels = self.obj._get_axis(axis)
key = check_bool_indexer(labels, key)
inds, = key.nonzero()
try:
return self.obj._take(inds, axis=axis)
except Exception as detail:
raise self._exception(detail)
def _get_slice_axis(self, slice_obj, axis=None):
""" this is pretty simple as we just have to deal with labels """
if axis is None:
axis = self.axis or 0
obj = self.obj
if not need_slice(slice_obj):
return obj.copy(deep=False)
labels = obj._get_axis(axis)
indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop,
slice_obj.step, kind=self.name)
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis, kind='iloc')
else:
return self.obj._take(indexer, axis=axis)
class _LocIndexer(_LocationIndexer):
"""
Access a group of rows and columns by label(s) or a boolean array.
``.loc[]`` is primarily label based, but may also be used with a
boolean array.
Allowed inputs are:
- A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is
interpreted as a *label* of the index, and **never** as an
integer position along the index).
- A list or array of labels, e.g. ``['a', 'b', 'c']``.
- A slice object with labels, e.g. ``'a':'f'``.
.. warning:: Note that contrary to usual python slices, **both** the
start and the stop are included
- A boolean array of the same length as the axis being sliced,
e.g. ``[True, False, True]``.
- A ``callable`` function with one argument (the calling Series, DataFrame
or Panel) and that returns valid output for indexing (one of the above)
See more at :ref:`Selection by Label <indexing.label>`
See Also
--------
DataFrame.at : Access a single value for a row/column label pair
DataFrame.iloc : Access group of rows and columns by integer position(s)
DataFrame.xs : Returns a cross-section (row(s) or column(s)) from the
Series/DataFrame.
Series.loc : Access group of values using labels
Examples
--------
**Getting values**
>>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=['cobra', 'viper', 'sidewinder'],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
cobra 1 2
viper 4 5
sidewinder 7 8
Single label. Note this returns the row as a Series.
>>> df.loc['viper']
max_speed 4
shield 5
Name: viper, dtype: int64
List of labels. Note using ``[[]]`` returns a DataFrame.
>>> df.loc[['viper', 'sidewinder']]
max_speed shield
viper 4 5
sidewinder 7 8
Single label for row and column
>>> df.loc['cobra', 'shield']
2
Slice with labels for row and single label for column. As mentioned
above, note that both the start and stop of the slice are included.
>>> df.loc['cobra':'viper', 'max_speed']
cobra 1
viper 4
Name: max_speed, dtype: int64
Boolean list with the same length as the row axis
>>> df.loc[[False, False, True]]
max_speed shield
sidewinder 7 8
Conditional that returns a boolean Series
>>> df.loc[df['shield'] > 6]
max_speed shield
sidewinder 7 8
Conditional that returns a boolean Series with column labels specified
>>> df.loc[df['shield'] > 6, ['max_speed']]
max_speed
sidewinder 7
Callable that returns a boolean Series
>>> df.loc[lambda df: df['shield'] == 8]
max_speed shield
sidewinder 7 8
**Setting values**
Set value for all items matching the list of labels
>>> df.loc[['viper', 'sidewinder'], ['shield']] = 50
>>> df
max_speed shield
cobra 1 2
viper 4 50
sidewinder 7 50
Set value for an entire row
>>> df.loc['cobra'] = 10
>>> df
max_speed shield
cobra 10 10
viper 4 50
sidewinder 7 50
Set value for an entire column
>>> df.loc[:, 'max_speed'] = 30
>>> df
max_speed shield
cobra 30 10
viper 30 50
sidewinder 30 50
Set value for rows matching callable condition
>>> df.loc[df['shield'] > 35] = 0
>>> df
max_speed shield
cobra 30 10
viper 0 0
sidewinder 0 0
**Getting values on a DataFrame with an index that has integer labels**
Another example using integers for the index
>>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=[7, 8, 9], columns=['max_speed', 'shield'])
>>> df
max_speed shield
7 1 2
8 4 5
9 7 8
Slice with integer labels for rows. As mentioned above, note that both
the start and stop of the slice are included.
>>> df.loc[7:9]
max_speed shield
7 1 2
8 4 5
9 7 8
**Getting values with a MultiIndex**
A number of examples using a DataFrame with a MultiIndex
>>> tuples = [
... ('cobra', 'mark i'), ('cobra', 'mark ii'),
... ('sidewinder', 'mark i'), ('sidewinder', 'mark ii'),
... ('viper', 'mark ii'), ('viper', 'mark iii')
... ]
>>> index = pd.MultiIndex.from_tuples(tuples)
>>> values = [[12, 2], [0, 4], [10, 20],
... [1, 4], [7, 1], [16, 36]]
>>> df = pd.DataFrame(values, columns=['max_speed', 'shield'], index=index)
>>> df
max_speed shield
cobra mark i 12 2
mark ii 0 4
sidewinder mark i 10 20
mark ii 1 4
viper mark ii 7 1
mark iii 16 36
Single label. Note this returns a DataFrame with a single index.
>>> df.loc['cobra']
max_speed shield
mark i 12 2
mark ii 0 4
Single index tuple. Note this returns a Series.
>>> df.loc[('cobra', 'mark ii')]
max_speed 0
shield 4
Name: (cobra, mark ii), dtype: int64
Single label for row and column. Similar to passing in a tuple, this
returns a Series.
>>> df.loc['cobra', 'mark i']
max_speed 12
shield 2
Name: (cobra, mark i), dtype: int64
Single tuple. Note using ``[[]]`` returns a DataFrame.
>>> df.loc[[('cobra', 'mark ii')]]
max_speed shield
cobra mark ii 0 4
Single tuple for the index with a single label for the column
>>> df.loc[('cobra', 'mark i'), 'shield']
2
Slice from index tuple to single label
>>> df.loc[('cobra', 'mark i'):'viper']
max_speed shield
cobra mark i 12 2
mark ii 0 4
sidewinder mark i 10 20
mark ii 1 4
viper mark ii 7 1
mark iii 16 36
Slice from index tuple to index tuple
>>> df.loc[('cobra', 'mark i'):('viper', 'mark ii')]
max_speed shield
cobra mark i 12 2
mark ii 0 4
sidewinder mark i 10 20
mark ii 1 4
viper mark ii 7 1
Raises
------
KeyError:
when any items are not found
"""
_valid_types = ("labels (MUST BE IN THE INDEX), slices of labels (BOTH "
"endpoints included! Can be slices of integers if the "
"index is integers), listlike of labels, boolean")
_exception = KeyError
@Appender(_NDFrameIndexer._validate_key.__doc__)
def _validate_key(self, key, axis):
# valid for a collection of labels (we check their presence later)
# slice of labels (where start-end in labels)
# slice of integers (only if in the labels)
# boolean
if isinstance(key, slice):
return
if com.is_bool_indexer(key):
return
if not is_list_like_indexer(key):
self._convert_scalar_indexer(key, axis)
def _is_scalar_access(self, key):
# this is a shortcut accessor to both .loc and .iloc
# that provide the equivalent access of .at and .iat
# a) avoid getting things via sections and (to minimize dtype changes)
# b) provide a performant path
if not hasattr(key, '__len__'):
return False
if len(key) != self.ndim:
return False
for i, k in enumerate(key):
if not is_scalar(k):
return False
ax = self.obj.axes[i]
if isinstance(ax, MultiIndex):
return False
if not ax.is_unique:
return False
return True
def _getitem_scalar(self, key):
# a fast-path to scalar access
# if not, raise
values = self.obj._get_value(*key)
return values
def _get_partial_string_timestamp_match_key(self, key, labels):
"""Translate any partial string timestamp matches in key, returning the
new key (GH 10331)"""
if isinstance(labels, MultiIndex):
if (isinstance(key, compat.string_types) and
labels.levels[0].is_all_dates):
# Convert key '2016-01-01' to
# ('2016-01-01'[, slice(None, None, None)]+)
key = tuple([key] + [slice(None)] * (len(labels.levels) - 1))
if isinstance(key, tuple):
# Convert (..., '2016-01-01', ...) in tuple to
# (..., slice('2016-01-01', '2016-01-01', None), ...)
new_key = []
for i, component in enumerate(key):
if (isinstance(component, compat.string_types) and
labels.levels[i].is_all_dates):
new_key.append(slice(component, component, None))
else:
new_key.append(component)
key = tuple(new_key)
return key
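    # Hedged example of the translation above (not from the original source):
    # with a MultiIndex whose first level holds dates,
    #   df.loc['2016-01-01']         -> key ('2016-01-01', slice(None)), then
    #                                   (slice('2016-01-01', '2016-01-01', None), slice(None))
    #   df.loc[('2016-01-01', 'A')]  -> (slice('2016-01-01', '2016-01-01', None), 'A')
    # so partial-string timestamps act like label slices on the date level.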
def _getitem_axis(self, key, axis=None):
if axis is None:
axis = self.axis or 0
if is_iterator(key):
key = list(key)
labels = self.obj._get_axis(axis)
key = self._get_partial_string_timestamp_match_key(key, labels)
if isinstance(key, slice):
self._validate_key(key, axis)
return self._get_slice_axis(key, axis=axis)
elif com.is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
elif is_list_like_indexer(key):
# convert various list-like indexers
# to a list of keys
# we will use the *values* of the object
# and NOT the index if its a PandasObject
if isinstance(labels, MultiIndex):
if isinstance(key, (ABCSeries, np.ndarray)) and key.ndim <= 1:
# Series, or 0,1 ndim ndarray
# GH 14730
key = list(key)
elif isinstance(key, ABCDataFrame):
# GH 15438
raise NotImplementedError("Indexing a MultiIndex with a "
"DataFrame key is not "
"implemented")
elif hasattr(key, 'ndim') and key.ndim > 1:
raise NotImplementedError("Indexing a MultiIndex with a "
"multidimensional key is not "
"implemented")
if (not isinstance(key, tuple) and len(key) > 1 and
not isinstance(key[0], tuple)):
key = tuple([key])
# an iterable multi-selection
if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)):
if hasattr(key, 'ndim') and key.ndim > 1:
raise ValueError('Cannot index with multidimensional key')
return self._getitem_iterable(key, axis=axis)
# nested tuple slicing
if is_nested_tuple(key, labels):
locs = labels.get_locs(key)
indexer = [slice(None)] * self.ndim
indexer[axis] = locs
return self.obj.iloc[tuple(indexer)]
# fall thru to straight lookup
self._validate_key(key, axis)
return self._get_label(key, axis=axis)
class _iLocIndexer(_LocationIndexer):
"""
Purely integer-location based indexing for selection by position.
``.iloc[]`` is primarily integer position based (from ``0`` to
``length-1`` of the axis), but may also be used with a boolean
array.
Allowed inputs are:
- An integer, e.g. ``5``.
- A list or array of integers, e.g. ``[4, 3, 0]``.
- A slice object with ints, e.g. ``1:7``.
- A boolean array.
- A ``callable`` function with one argument (the calling Series, DataFrame
or Panel) and that returns valid output for indexing (one of the above).
This is useful in method chains, when you don't have a reference to the
calling object, but would like to base your selection on some value.
``.iloc`` will raise ``IndexError`` if a requested indexer is
out-of-bounds, except *slice* indexers which allow out-of-bounds
indexing (this conforms with python/numpy *slice* semantics).
    See more at :ref:`Selection by Position <indexing.integer>`.
See Also
--------
DataFrame.iat : Fast integer location scalar accessor.
DataFrame.loc : Purely label-location based indexer for selection by label.
Series.iloc : Purely integer-location based indexing for
selection by position.
Examples
--------
>>> mydict = [{'a': 1, 'b': 2, 'c': 3, 'd': 4},
... {'a': 100, 'b': 200, 'c': 300, 'd': 400},
... {'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }]
>>> df = pd.DataFrame(mydict)
>>> df
a b c d
0 1 2 3 4
1 100 200 300 400
2 1000 2000 3000 4000
**Indexing just the rows**
With a scalar integer.
>>> type(df.iloc[0])
<class 'pandas.core.series.Series'>
>>> df.iloc[0]
a 1
b 2
c 3
d 4
Name: 0, dtype: int64
With a list of integers.
>>> df.iloc[[0]]
a b c d
0 1 2 3 4
>>> type(df.iloc[[0]])
<class 'pandas.core.frame.DataFrame'>
>>> df.iloc[[0, 1]]
a b c d
0 1 2 3 4
1 100 200 300 400
With a `slice` object.
>>> df.iloc[:3]
a b c d
0 1 2 3 4
1 100 200 300 400
2 1000 2000 3000 4000
With a boolean mask the same length as the index.
>>> df.iloc[[True, False, True]]
a b c d
0 1 2 3 4
2 1000 2000 3000 4000
With a callable, useful in method chains. The `x` passed
to the ``lambda`` is the DataFrame being sliced. This selects
    the rows whose index label is even.
>>> df.iloc[lambda x: x.index % 2 == 0]
a b c d
0 1 2 3 4
2 1000 2000 3000 4000
**Indexing both axes**
You can mix the indexer types for the index and columns. Use ``:`` to
select the entire axis.
With scalar integers.
>>> df.iloc[0, 1]
2
With lists of integers.
>>> df.iloc[[0, 2], [1, 3]]
b d
0 2 4
2 2000 4000
With `slice` objects.
>>> df.iloc[1:3, 0:3]
a b c
1 100 200 300
2 1000 2000 3000
With a boolean array whose length matches the columns.
>>> df.iloc[:, [True, False, True, False]]
a c
0 1 3
1 100 300
2 1000 3000
With a callable function that expects the Series or DataFrame.
>>> df.iloc[:, lambda df: [0, 2]]
a c
0 1 3
1 100 300
2 1000 3000
"""
_valid_types = ("integer, integer slice (START point is INCLUDED, END "
"point is EXCLUDED), listlike of integers, boolean array")
_exception = IndexError
def _validate_key(self, key, axis):
if com.is_bool_indexer(key):
if hasattr(key, 'index') and isinstance(key.index, Index):
if key.index.inferred_type == 'integer':
raise NotImplementedError("iLocation based boolean "
"indexing on an integer type "
"is not available")
raise ValueError("iLocation based boolean indexing cannot use "
"an indexable as a mask")
return
if isinstance(key, slice):
return
elif is_integer(key):
self._validate_integer(key, axis)
elif isinstance(key, tuple):
# a tuple should already have been caught by this point
# so don't treat a tuple as a valid indexer
raise IndexingError('Too many indexers')
elif is_list_like_indexer(key):
# check that the key does not exceed the maximum size of the index
arr = np.array(key)
len_axis = len(self.obj._get_axis(axis))
if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis):
raise IndexError("positional indexers are out-of-bounds")
else:
raise ValueError("Can only index by location with "
"a [{types}]".format(types=self._valid_types))
def _has_valid_setitem_indexer(self, indexer):
self._has_valid_positional_setitem_indexer(indexer)
def _is_scalar_access(self, key):
# this is a shortcut accessor to both .loc and .iloc
# that provide the equivalent access of .at and .iat
# a) avoid getting things via sections and (to minimize dtype changes)
# b) provide a performant path
if not hasattr(key, '__len__'):
return False
if len(key) != self.ndim:
return False
for i, k in enumerate(key):
if not is_integer(k):
return False
ax = self.obj.axes[i]
if not ax.is_unique:
return False
return True
def _getitem_scalar(self, key):
# a fast-path to scalar access
# if not, raise
values = self.obj._get_value(*key, takeable=True)
return values
def _validate_integer(self, key, axis):
"""
Check that 'key' is a valid position in the desired axis.
Parameters
----------
key : int
Requested position
axis : int
Desired axis
Returns
-------
None
Raises
------
IndexError
If 'key' is not a valid position in axis 'axis'
"""
len_axis = len(self.obj._get_axis(axis))
if key >= len_axis or key < -len_axis:
raise IndexError("single positional indexer is out-of-bounds")
def _getitem_tuple(self, tup):
self._has_valid_tuple(tup)
try:
return self._getitem_lowerdim(tup)
except IndexingError:
pass
retval = self.obj
axis = 0
for i, key in enumerate(tup):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if com.is_null_slice(key):
axis += 1
continue
retval = getattr(retval, self.name)._getitem_axis(key, axis=axis)
# if the dim was reduced, then pass a lower-dim the next time
if retval.ndim < self.ndim:
axis -= 1
# try to get for the next axis
axis += 1
return retval
def _get_slice_axis(self, slice_obj, axis=None):
if axis is None:
axis = self.axis or 0
obj = self.obj
if not need_slice(slice_obj):
return obj.copy(deep=False)
slice_obj = self._convert_slice_indexer(slice_obj, axis)
if isinstance(slice_obj, slice):
return self._slice(slice_obj, axis=axis, kind='iloc')
else:
return self.obj._take(slice_obj, axis=axis)
def _get_list_axis(self, key, axis=None):
"""
Return Series values by list or array of integers
Parameters
----------
key : list-like positional indexer
axis : int (can only be zero)
Returns
-------
Series object
"""
if axis is None:
axis = self.axis or 0
try:
return self.obj._take(key, axis=axis)
except IndexError:
# re-raise with different error message
raise IndexError("positional indexers are out-of-bounds")
def _getitem_axis(self, key, axis=None):
if axis is None:
axis = self.axis or 0
if isinstance(key, slice):
return self._get_slice_axis(key, axis=axis)
if isinstance(key, list):
key = np.asarray(key)
if com.is_bool_indexer(key):
self._validate_key(key, axis)
return self._getbool_axis(key, axis=axis)
# a list of integers
elif is_list_like_indexer(key):
return self._get_list_axis(key, axis=axis)
# a single integer
else:
if not is_integer(key):
raise TypeError("Cannot index by location index with a "
"non-integer key")
# validate the location
self._validate_integer(key, axis)
return self._get_loc(key, axis=axis)
def _convert_to_indexer(self, obj, axis=None, is_setter=False):
""" much simpler as we only have to deal with our valid types """
if axis is None:
axis = self.axis or 0
        # we may need to convert a float key
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
elif is_float(obj):
return self._convert_scalar_indexer(obj, axis)
try:
self._validate_key(obj, axis)
return obj
except ValueError:
raise ValueError("Can only index by location with "
"a [{types}]".format(types=self._valid_types))
class _ScalarAccessIndexer(_NDFrameIndexer):
""" access scalars quickly """
def _convert_key(self, key, is_setter=False):
return list(key)
def __getitem__(self, key):
if not isinstance(key, tuple):
# we could have a convertible item here (e.g. Timestamp)
if not is_list_like_indexer(key):
key = tuple([key])
else:
raise ValueError('Invalid call for scalar access (getting)!')
key = self._convert_key(key)
return self.obj._get_value(*key, takeable=self._takeable)
def __setitem__(self, key, value):
if isinstance(key, tuple):
key = tuple(com.apply_if_callable(x, self.obj)
for x in key)
else:
# scalar callable may return tuple
key = com.apply_if_callable(key, self.obj)
if not isinstance(key, tuple):
key = self._tuplify(key)
if len(key) != self.obj.ndim:
raise ValueError('Not enough indexers for scalar access '
'(setting)!')
key = list(self._convert_key(key, is_setter=True))
key.append(value)
self.obj._set_value(*key, takeable=self._takeable)
class _AtIndexer(_ScalarAccessIndexer):
"""
Access a single value for a row/column label pair.
Similar to ``loc``, in that both provide label-based lookups. Use
``at`` if you only need to get or set a single value in a DataFrame
or Series.
See Also
--------
DataFrame.iat : Access a single value for a row/column pair by integer
position
DataFrame.loc : Access a group of rows and columns by label(s)
Series.at : Access a single value using a label
Examples
--------
>>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]],
... index=[4, 5, 6], columns=['A', 'B', 'C'])
>>> df
A B C
4 0 2 3
5 0 4 1
6 10 20 30
Get value at specified row/column pair
>>> df.at[4, 'B']
2
Set value at specified row/column pair
>>> df.at[4, 'B'] = 10
>>> df.at[4, 'B']
10
Get value within a Series
>>> df.loc[5].at['B']
4
Raises
------
KeyError
When label does not exist in DataFrame
"""
_takeable = False
def _convert_key(self, key, is_setter=False):
""" require they keys to be the same type as the index (so we don't
fallback)
"""
# allow arbitrary setting
if is_setter:
return list(key)
for ax, i in zip(self.obj.axes, key):
if ax.is_integer():
if not is_integer(i):
raise ValueError("At based indexing on an integer index "
"can only have integer indexers")
else:
if is_integer(i) and not ax.holds_integer():
raise ValueError("At based indexing on an non-integer "
"index can only have non-integer "
"indexers")
return key
class _iAtIndexer(_ScalarAccessIndexer):
"""
Access a single value for a row/column pair by integer position.
Similar to ``iloc``, in that both provide integer-based lookups. Use
``iat`` if you only need to get or set a single value in a DataFrame
or Series.
See Also
--------
DataFrame.at : Access a single value for a row/column label pair
DataFrame.loc : Access a group of rows and columns by label(s)
DataFrame.iloc : Access a group of rows and columns by integer position(s)
Examples
--------
>>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]],
... columns=['A', 'B', 'C'])
>>> df
A B C
0 0 2 3
1 0 4 1
2 10 20 30
Get value at specified row/column pair
>>> df.iat[1, 2]
1
Set value at specified row/column pair
>>> df.iat[1, 2] = 10
>>> df.iat[1, 2]
10
Get value within a series
>>> df.loc[0].iat[1]
2
Raises
------
IndexError
When integer position is out of bounds
"""
_takeable = True
def _has_valid_setitem_indexer(self, indexer):
self._has_valid_positional_setitem_indexer(indexer)
def _convert_key(self, key, is_setter=False):
""" require integer args (and convert to label arguments) """
for a, i in zip(self.obj.axes, key):
if not is_integer(i):
raise ValueError("iAt based indexing can only have integer "
"indexers")
return key
def length_of_indexer(indexer, target=None):
"""return the length of a single non-tuple indexer which could be a slice
"""
if target is not None and isinstance(indexer, slice):
target_len = len(target)
start = indexer.start
stop = indexer.stop
step = indexer.step
if start is None:
start = 0
elif start < 0:
start += target_len
if stop is None or stop > target_len:
stop = target_len
elif stop < 0:
stop += target_len
if step is None:
step = 1
elif step < 0:
step = -step
return (stop - start + step - 1) // step
elif isinstance(indexer, (ABCSeries, Index, np.ndarray, list)):
return len(indexer)
elif not is_list_like_indexer(indexer):
return 1
raise AssertionError("cannot find the length of the indexer")
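# Worked example of the slice arithmetic above (illustrative only): for
# indexer = slice(1, 10, 3) against a target of length 12 we get start=1,
# stop=10, step=3, and (10 - 1 + 3 - 1) // 3 == 3, which matches
# len(range(1, 10, 3)) == 3.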
def convert_to_index_sliceable(obj, key):
"""if we are index sliceable, then return my slicer, otherwise return None
"""
idx = obj.index
if isinstance(key, slice):
return idx._convert_slice_indexer(key, kind='getitem')
elif isinstance(key, compat.string_types):
# we are an actual column
if obj._data.items.contains(key):
return None
# We might have a datetimelike string that we can translate to a
# slice here via partial string indexing
if idx.is_all_dates:
try:
return idx._get_string_slice(key)
except (KeyError, ValueError, NotImplementedError):
return None
return None
def check_bool_indexer(ax, key):
# boolean indexing, need to check that the data are aligned, otherwise
# disallowed
# this function assumes that is_bool_indexer(key) == True
result = key
if isinstance(key, ABCSeries) and not key.index.equals(ax):
result = result.reindex(ax)
mask = isna(result._values)
if mask.any():
raise IndexingError('Unalignable boolean Series provided as '
'indexer (index of the boolean Series and of '
                                'the indexed object do not match)')
result = result.astype(bool)._values
elif is_sparse(result):
result = result.to_dense()
result = np.asarray(result, dtype=bool)
else:
# is_bool_indexer has already checked for nulls in the case of an
# object array key, so no check needed here
result = np.asarray(result, dtype=bool)
return result
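# Hedged illustration of the alignment rule above (not part of the original
# source; the objects are hypothetical): a boolean Series indexer is reindexed
# to the target axis first, so
#   >>> s = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
#   >>> mask = pd.Series([True, False, True], index=['c', 'b', 'a'])
#   >>> s[mask]    # aligns on labels -> values at 'a' and 'c'
# while a mask whose index does not cover the axis raises IndexingError.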
def check_setitem_lengths(indexer, value, values):
"""Validate that value and indexer are the same length.
    A special case is allowed when the indexer is a boolean array
and the number of true values equals the length of ``value``. In
this case, no exception is raised.
Parameters
----------
indexer : sequence
The key for the setitem
value : array-like
The value for the setitem
values : array-like
The values being set into
Returns
-------
None
Raises
------
ValueError
When the indexer is an ndarray or list and the lengths don't
match.
"""
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (isinstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
len(indexer[indexer]) == len(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different length than the value")
# slice
elif isinstance(indexer, slice):
if is_list_like(value) and len(values):
if len(value) != length_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
def convert_missing_indexer(indexer):
""" reverse convert a missing indexer, which is a dict
return the scalar indexer and a boolean indicating if we converted
"""
if isinstance(indexer, dict):
# a missing key (but not a tuple indexer)
indexer = indexer['key']
if isinstance(indexer, bool):
raise KeyError("cannot use a single bool to index into setitem")
return indexer, True
return indexer, False
def convert_from_missing_indexer_tuple(indexer, axes):
""" create a filtered indexer that doesn't have any missing indexers """
def get_indexer(_i, _idx):
return (axes[_i].get_loc(_idx['key']) if isinstance(_idx, dict) else
_idx)
return tuple(get_indexer(_i, _idx) for _i, _idx in enumerate(indexer))
def maybe_convert_indices(indices, n):
"""
Attempt to convert indices into valid, positive indices.
If we have negative indices, translate to positive here.
If we have indices that are out-of-bounds, raise an IndexError.
Parameters
----------
indices : array-like
The array of indices that we are to convert.
n : int
The number of elements in the array that we are indexing.
Returns
-------
valid_indices : array-like
An array-like of positive indices that correspond to the ones
that were passed in initially to this function.
Raises
------
IndexError : one of the converted indices either exceeded the number
of elements (specified by `n`) OR was still negative.
"""
if isinstance(indices, list):
indices = np.array(indices)
if len(indices) == 0:
# If list is empty, np.array will return float and cause indexing
# errors.
return np.empty(0, dtype=np.intp)
mask = indices < 0
if mask.any():
indices = indices.copy()
indices[mask] += n
mask = (indices >= n) | (indices < 0)
if mask.any():
raise IndexError("indices are out-of-bounds")
return indices
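# Sketch of the conversion above (illustrative only):
#   maybe_convert_indices([0, -1, -2], n=5)  ->  array([0, 4, 3])
# whereas any index >= n, or still negative after adding n (e.g. -6 with n=5),
# raises IndexError("indices are out-of-bounds").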
def validate_indices(indices, n):
"""Perform bounds-checking for an indexer.
-1 is allowed for indicating missing values.
Parameters
----------
indices : ndarray
n : int
length of the array being indexed
Raises
------
ValueError
Examples
--------
>>> validate_indices([1, 2], 3)
# OK
>>> validate_indices([1, -2], 3)
ValueError
>>> validate_indices([1, 2, 3], 3)
IndexError
>>> validate_indices([-1, -1], 0)
# OK
>>> validate_indices([0, 1], 0)
IndexError
"""
if len(indices):
min_idx = indices.min()
if min_idx < -1:
msg = ("'indices' contains values less than allowed ({} < {})"
.format(min_idx, -1))
raise ValueError(msg)
max_idx = indices.max()
if max_idx >= n:
raise IndexError("indices are out-of-bounds")
def maybe_convert_ix(*args):
"""
We likely want to take the cross-product
"""
ixify = True
for arg in args:
if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)):
ixify = False
if ixify:
return np.ix_(*args)
else:
return args
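# Hedged example of the cross-product behaviour (not part of the original
# source): with positional row/column arrays, np.ix_ builds an open mesh, so
#   maybe_convert_ix([0, 2], [1, 3])
# addresses the 2x2 block at rows {0, 2} and columns {1, 3} rather than the
# two elements (0, 1) and (2, 3) that plain fancy indexing would select.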
def is_nested_tuple(tup, labels):
# check for a compatible nested tuple and multiindexes among the axes
if not isinstance(tup, tuple):
return False
for i, k in enumerate(tup):
if is_list_like(k) or isinstance(k, slice):
return isinstance(labels, MultiIndex)
return False
def is_list_like_indexer(key):
# allow a list_like, but exclude NamedTuples which can be indexers
return is_list_like(key) and not (isinstance(key, tuple) and
type(key) is not tuple)
def is_label_like(key):
# select a label or row
return not isinstance(key, slice) and not is_list_like_indexer(key)
def need_slice(obj):
return (obj.start is not None or obj.stop is not None or
(obj.step is not None and obj.step != 1))
def maybe_droplevels(index, key):
# drop levels
original_index = index
if isinstance(key, tuple):
for _ in key:
try:
index = index.droplevel(0)
except ValueError:
# we have dropped too much, so back out
return original_index
else:
try:
index = index.droplevel(0)
except ValueError:
pass
return index
def _non_reducing_slice(slice_):
"""
    Ensure that a slice doesn't reduce to a Series or scalar.
    Any user-passed `subset` should have this called on it
to make sure we're always working with DataFrames.
"""
# default to column slice, like DataFrame
    # ['A', 'B'] -> IndexSlice[:, ['A', 'B']]
kinds = tuple(list(compat.string_types) + [ABCSeries, np.ndarray, Index,
list])
if isinstance(slice_, kinds):
slice_ = IndexSlice[:, slice_]
def pred(part):
# true when slice does *not* reduce
return isinstance(part, slice) or is_list_like(part)
if not is_list_like(slice_):
if not isinstance(slice_, slice):
# a 1-d slice, like df.loc[1]
slice_ = [[slice_]]
else:
# slice(a, b, c)
slice_ = [slice_] # to tuplize later
else:
slice_ = [part if pred(part) else [part] for part in slice_]
return tuple(slice_)
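# Rough illustration of the normalisation above (assumed behaviour, derived
# from the code rather than the original docs):
#   _non_reducing_slice('A')         -> (slice(None, None, None), ['A'])
#   _non_reducing_slice(['A', 'B'])  -> (slice(None, None, None), ['A', 'B'])
#   _non_reducing_slice(slice(1, 3)) -> (slice(1, 3),)
# so a styler subset always addresses a 2-D block instead of a Series/scalar.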
def _maybe_numeric_slice(df, slice_, include_bool=False):
"""
    Pick nice defaults for background_gradient that don't break
    with non-numeric data, but if ``slice_`` is passed, go with that.
"""
if slice_ is None:
dtypes = [np.number]
if include_bool:
dtypes.append(bool)
slice_ = IndexSlice[:, df.select_dtypes(include=dtypes).columns]
return slice_
| bsd-3-clause |
jlramalheira/sudoku | tests/io_tests.py | 1 | 1224 | import unittest
import os.path
import math
import sudoku.io
import networkx as nx
import matplotlib.pyplot as plt
class IoTests(unittest.TestCase):
def test_read(self):
filepath = os.path.join(
os.path.dirname(__file__),
'../rsc/9/sample-1.sdk')
graph = sudoku.io.read(filepath)
order = int(math.pow(len(graph.node), 1 / 4))
node_degree = (3 * (order ** 2)) - (2 * (order - 1)) - 3
nodes_degree = [len(graph.neighbors(n)) for n in graph.node]
self.assertTrue(
expr=all([d == nodes_degree[0] for d in nodes_degree]),
msg='All vertexes must have the same degree.')
self.assertTrue(
expr=nodes_degree[0] == node_degree,
            msg='The vertex degree must match the expected value.')
def test_read_draw(self):
filepath = os.path.join(
os.path.dirname(__file__),
'../rsc/9/sample-1.sdk')
        graph = sudoku.io.read(filepath)
        nx.draw_circular(graph)
plt.show()
def test_write(self):
filepath = os.path.join(
os.path.dirname(__file__),
'../rsc/9/sample-1.sdk')
graph = sudoku.io.read(filepath)
sudoku.io.print(graph)
| mit |
ppries/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 17 | 9144 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words"""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
words = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
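# Rough illustration of the windowing above (not part of the original
# tutorial; the example words are hypothetical): with skip_window=1 and
# num_skips=2 each centre word is paired with both of its immediate
# neighbours, so a fragment like ['brown', 'fox', 'jumps'] contributes the
# (input, label) pairs (fox -> brown) and (fox -> jumps), in a random order.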
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print("Initialized")
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run())
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")
| apache-2.0 |
djgagne/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 68 | 43439 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
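    # Note on the recurrence implemented above (illustrative): the running
    # averages are updated as avg_i = (i * avg_{i-1} + w_i) / (i + 1), i.e.
    # the plain mean of w_0 .. w_i, which is what the average=True SGD
    # estimators are expected to reproduce with a constant learning rate.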
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
        # partial_fit with class_weight='balanced' not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
        # Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([[-1, -1]])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([[3, 2]])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([[-1, -1]])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.] * 3)
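    # Background for the uniform-probability assertion above (a hedged reading
    # of the mapping described in SGDClassifier.predict_proba): for
    # modified_huber the binary probability is (clip(d, -1, 1) + 1) / 2, so a
    # decision value below -1 yields a hard zero for every class; the
    # one-vs-all normalization then falls back to a uniform 1 / n_classes.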
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
        # Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
        # build a very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
        # fit another model with balanced class_weight set in the constructor
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
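    # For reference, "balanced" weights classes inversely to their frequency;
    # a sketch of the formula used by compute_class_weight:
    #   weight[c] = n_samples / (n_classes * np.bincount(y)[c])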
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give small weights to the class-1 samples
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
        assert_equal(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
        assert_equal(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
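    # The t_ equality above relies on fit(n_iter=2) and two partial_fit passes
    # performing the same number of weight updates; t_ is expected to end up as
    # n_iter * n_samples + 1 in both cases (a hedged reading of the counter).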
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
# ground_truth linear model that generate y from X and to which the
# models should converge if the regularizer would be set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
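    # Both estimators use the same elastic-net penalty (sketch):
    #   alpha * (l1_ratio * ||w||_1 + 0.5 * (1 - l1_ratio) * ||w||_2 ** 2)
    # which is why their coefficients should agree once the data term dominates
    # and both optimizers have converged.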
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
        assert_equal(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
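# l1_ratio interpolates between the two pure penalties: values near 1 recover
# the 'l1' solution and values near 0 recover the 'l2' solution, which is
# exactly what the two comparisons above check.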
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
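# The unscaled features reach ~1e300, so the squared_hinge gradient (which is
# proportional to the feature values) overflows float64 almost immediately;
# rescaling to [0, 1] with MinMaxScaler keeps every update finite.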
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
ahoyosid/scikit-learn | sklearn/preprocessing/tests/test_label.py | 48 | 18419 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
@ignore_warnings
def test_label_binarizer_column_y():
# first for binary classification vs multi-label with 1 possible class
# lists are multi-label, array is multi-class :-/
inp_list = [[1], [2], [1]]
inp_array = np.array(inp_list)
multilabel_indicator = np.array([[1, 0], [0, 1], [1, 0]])
binaryclass_array = np.array([[0], [1], [0]])
lb_1 = LabelBinarizer()
out_1 = lb_1.fit_transform(inp_list)
lb_2 = LabelBinarizer()
out_2 = lb_2.fit_transform(inp_array)
assert_array_equal(out_1, multilabel_indicator)
assert_array_equal(out_2, binaryclass_array)
# second for multiclass classification vs multi-label with multiple
# classes
inp_list = [[1], [2], [1], [3]]
inp_array = np.array(inp_list)
# the indicator matrix output is the same in this case
indicator = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]])
lb_1 = LabelBinarizer()
out_1 = lb_1.fit_transform(inp_list)
lb_2 = LabelBinarizer()
out_2 = lb_2.fit_transform(inp_array)
assert_array_equal(out_1, out_2)
assert_array_equal(out_2, indicator)
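# A hedged note on why the two inputs above differ: type_of_target treats a
# list of label lists as (legacy) multilabel sequences, while a column array of
# the same values is treated as ordinary binary/multiclass targets.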
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
            # With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
        # With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
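# The inverse transform above thresholds at the midpoint of the two labels,
# (neg_label + pos_label) / 2, so anything encoded as pos_label maps back to a
# positive and anything encoded as neg_label maps back to a negative.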
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
njustesen/coep-starcraft | broodwar_strategy_evolver/evolution/evolution_timing.py | 1 | 2950 | import numpy as np
import tqdm
from broodwar_strategy_evolver.starcraft.unit_repository import UnitRepository
from broodwar_strategy_evolver.starcraft.starcraft import Race, Type
from broodwar_strategy_evolver.starcraft.forward_model import ForwardModel, GameState
from broodwar_strategy_evolver.evolution.evolution import Evolution
from broodwar_strategy_evolver.evolution.evolution import CrossoverMethod
import matplotlib.pyplot as plt
import time
unit_repo = UnitRepository()
own_units = np.zeros(len(unit_repo.units))
own_units[unit_repo.get_by_name("Probe").id] = 18
own_units[unit_repo.get_by_name("Assimilator").id] = 1
own_units[unit_repo.get_by_name("Gateway").id] = 1
own_units[unit_repo.get_by_name("Cybernetics Core").id] = 1
own_units[unit_repo.get_by_name("Nexus").id] = 1
own_units[unit_repo.get_by_name("Pylon").id] = 3
'''
opp_units = np.zeros(len(unit_repo.units))
opp_units[unit_repo.get_by_name("Nexus").id] = 1
opp_units[unit_repo.get_by_name("Probe").id] = 4
opp_units[unit_repo.get_by_name("Zealot").id] = 0
opp_units[unit_repo.get_by_name("Dragoon").id] = 12
'''
opp_units = np.zeros(len(unit_repo.units))
opp_units[unit_repo.get_by_name("Drone").id] = 18
opp_units[unit_repo.get_by_name("Zergling").id] = 0
opp_units[unit_repo.get_by_name("Overlord").id] = 3
opp_units[unit_repo.get_by_name("Spawning Pool").id] = 1
opp_units[unit_repo.get_by_name("Hatchery").id] = 1
own_techs = np.zeros(len(unit_repo.techs))
own_upgrades = np.zeros(len(unit_repo.upgrades))
gamestate = GameState(own_race=Race.PROTOSS,
opp_race=Race.ZERG,
own_units=own_units,
opp_units=opp_units,
own_techs=own_techs,
own_upgrades=own_upgrades,
own_units_under_construction=[],
own_techs_under_construction=[],
own_upgrades_under_construction=[])
gamestate.frame = 24 * 60 * 4
minutes = 8
horizon = int(24*60*minutes)
print("Horizon=" + str(horizon))
evolution_a = None
tests = 50
generations = 100
timing = []
for test in tqdm.trange(tests):
evolution_a = Evolution(gamestate=gamestate,
pop_size=64,
horizon=horizon,
crossover_method=CrossoverMethod.TWO_POINT,
bellman=2,
add_mutate_prob=0.5,
remove_mutate_prob=0.5,
swap_mutate_prob=0.5,
clone_mutate_prob=0.5,
chain_mutate_prob=0.0,
survival_rate=0.25)
for gen in range(generations):
        start = int(round(time.time() * 1000))
evolution_a.update()
t = int(round(time.time() * 1000)) - start
timing.append(t)
print("Avg: + " + str(np.average(timing)) + ", +/- " + str(np.std(timing)))
| gpl-3.0 |
alsrgv/tensorflow | tensorflow/contrib/metrics/python/ops/metric_ops_test.py | 5 | 282544 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metric_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
NAN = float('nan')
metrics = metrics_lib
def _enqueue_vector(sess, queue, values, shape=None):
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
sess.run(
queue.enqueue(constant_op.constant(values, dtype=dtype, shape=shape)))
def _binary_2d_label_to_sparse_value(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch, xi])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
shape = [len(labels), len(labels[0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64), np.array(values, np.int64),
np.array(shape, np.int64))
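# Worked example for the converter above (hand-checked sketch):
#   [[0, 1, 0], [1, 0, 0]] -> indices [[0, 0], [1, 0]], values [1, 0],
#   shape [2, 3]; each value is the column index of a 1 in its row.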
def _binary_2d_label_to_sparse(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_2d_label_to_sparse_value(labels))
def _binary_3d_label_to_sparse_value(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
    labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
for d0, labels_d0 in enumerate(labels):
for d1, labels_d1 in enumerate(labels_d0):
d2 = 0
for class_id, label in enumerate(labels_d1):
if label == 1:
values.append(class_id)
indices.append([d0, d1, d2])
d2 += 1
else:
assert label == 0
shape = [len(labels), len(labels[0]), len(labels[0][0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64), np.array(values, np.int64),
np.array(shape, np.int64))
def _binary_3d_label_to_sparse(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
    labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_3d_label_to_sparse_value(labels))
def _assert_nan(test_case, actual):
test_case.assertTrue(math.isnan(actual), 'Expected NAN, got %s.' % actual)
def _assert_metric_variables(test_case, expected):
test_case.assertEquals(
set(expected), set(v.name for v in variables.local_variables()))
test_case.assertEquals(
set(expected),
set(v.name for v in ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
class StreamingMeanTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean(array_ops.ones([4, 3]))
_assert_metric_variables(self, ('mean/count:0', 'mean/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(1.475, sess.run(update_op), 5)
self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def test1dWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test2dWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
class StreamingMeanTensorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_tensor(array_ops.ones([4, 3]))
_assert_metric_variables(self,
('mean/total_tensor:0', 'mean/count_tensor:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))
def testMultiDimensional(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
shape=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
shape=(2, 2, 2))
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(2):
sess.run(update_op)
self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]], sess.run(mean))
def testUpdateOpsReturnsCurrentValue(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
self.assertAllClose([[0, 1]], sess.run(update_op), 5)
self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)
def testWeighted1d(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
def testWeighted2d_1(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
def testWeighted2d_2(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[0, 0.5]], sess.run(mean), 5)
class StreamingAccuracyTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
name='my_accuracy')
_assert_metric_variables(self,
('my_accuracy/count:0', 'my_accuracy/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 4))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels)
def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 3))
weights = array_ops.ones((9, 3))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels, weights)
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=2)
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_accuracy = accuracy.eval()
for _ in range(10):
self.assertEqual(initial_accuracy, accuracy.eval())
def testMultipleUpdates(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
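      # Of the four (prediction, label) pairs only (0, 0) and (1, 1) match, so
      # after all four updates the accuracy is 2 / 4 = 0.5.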
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, accuracy.eval())
def testEffectivelyEquivalentSizes(self):
predictions = array_ops.ones((40, 1))
labels = array_ops.ones((40,))
with self.cached_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval())
self.assertEqual(1.0, accuracy.eval())
  def testEffectivelyEquivalentSizesWithStaticShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = array_ops.expand_dims(ops.convert_to_tensor([100, 1, 1]),
1) # shape 3, 1
with self.cached_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
      # If streaming_accuracy did not flatten the weights, the accuracy would
      # be 0.33333334 because of the broadcast of the (3, 1) weights. Due to
      # flattening, it will be higher than .95.
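      # Sanity check of those numbers from this test's inputs (not from the
      # library): only the first element is predicted correctly, so flattened
      # weights give 100 / (100 + 1 + 1) ~= 0.98, while broadcasting against
      # the (3, 1) weights would give (100 + 1 + 1) / (3 * 102) = 1 / 3.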
self.assertGreater(update_op.eval(), .95)
self.assertGreater(accuracy.eval(), .95)
def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = [[100], [1], [1]] # shape 3, 1
weights_placeholder = array_ops.placeholder(
dtype=dtypes_lib.int32, name='weights')
feed_dict = {weights_placeholder: weights}
with self.cached_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights_placeholder)
sess.run(variables.local_variables_initializer())
      # If streaming_accuracy did not flatten the weights, the accuracy would
      # be 0.33333334 because of the broadcast of the (3, 1) weights (see the
      # arithmetic in the test above). Due to flattening, it will be higher
      # than .95.
self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)
def testMultipleUpdatesWithWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.int64, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
weights = weights_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
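      # The two mismatched pairs receive zero weight, so the weighted accuracy
      # is (1 + 1) / (1 + 1 + 0 + 0) = 1.0.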
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, accuracy.eval())
class StreamingTruePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('true_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tp, tp_update_op = metrics.streaming_true_positives(
predictions, labels)
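          # Only the element at row 0, column 2 has prediction == label == 1,
          # so a single update should report one true positive.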
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(1, tp_update_op.eval())
self.assertEqual(1, tp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
tp, tp_update_op = metrics.streaming_true_positives(
predictions, labels, weights=37.0)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(37.0, tp_update_op.eval())
self.assertEqual(37.0, tp.eval())
class StreamingFalseNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('false_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fn, fn_update_op = metrics.streaming_false_negatives(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(2, fn_update_op.eval())
self.assertEqual(2, fn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
fn, fn_update_op = metrics.streaming_false_negatives(
predictions, labels, weights=((3.0,), (5.0,), (7.0,)))
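      # The false negatives are at (row 0, col 1) and (row 1, col 0); with the
      # per-row weights 3.0 and 5.0 this sums to 8.0.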
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(8.0, fn_update_op.eval())
self.assertEqual(8.0, fn.eval())
class StreamingFalsePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('false_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fp, fp_update_op = metrics.streaming_false_positives(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(4, fp_update_op.eval())
self.assertEqual(4, fp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
fp, fp_update_op = metrics.streaming_false_positives(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0), (19.0, 23.0,
29.0, 31.0)))
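      # The false positives are at (0, 0), (1, 1), (1, 2) and (1, 3), so the
      # weighted total is 1.0 + 11.0 + 13.0 + 17.0 = 42.0.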
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(42.0, fp_update_op.eval())
self.assertEqual(42.0, fp.eval())
class StreamingTrueNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('true_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tn, tn_update_op = metrics.streaming_true_negatives(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(5, tn_update_op.eval())
self.assertEqual(5, tn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
tn, tn_update_op = metrics.streaming_true_negatives(
predictions, labels, weights=((0.0, 2.0, 3.0, 5.0),))
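      # The true negatives are at (0, 3) and in all of row 2; with the (1, 4)
      # weights broadcast across rows this gives 5.0 + (0.0 + 2.0 + 3.0 + 5.0)
      # = 15.0.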
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(15.0, tn_update_op.eval())
self.assertEqual(15.0, tn.eval())
class StreamingTruePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('true_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
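    # The label-1 entries carry prediction scores 0.2, 0.8 and 0.2, so the
    # thresholds 0.15, 0.5 and 0.85 yield 3, 1 and 0 true positives.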
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tp.eval())
self.assertAllEqual((3, 1, 0), tp_update_op.eval())
self.assertAllEqual((3, 1, 0), tp.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, weights=37.0, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tp.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp_update_op.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp.eval())
class StreamingFalseNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(
0.15,
0.5,
0.85,
))
_assert_metric_variables(self, ('false_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fn.eval())
self.assertAllEqual((0, 2, 3), fn_update_op.eval())
self.assertAllEqual((0, 2, 3), fn.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions,
labels,
weights=((3.0,), (5.0,), (7.0,)),
thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fn.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn_update_op.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn.eval())
class StreamingFalsePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('false_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fp.eval())
self.assertAllEqual((7, 4, 2), fp_update_op.eval())
self.assertAllEqual((7, 4, 2), fp.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0), (19.0, 23.0,
29.0, 31.0)),
thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fp.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp_update_op.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp.eval())
class StreamingTrueNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('true_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tn.eval())
self.assertAllEqual((2, 5, 7), tn_update_op.eval())
self.assertAllEqual((2, 5, 7), tn.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions,
labels,
weights=((0.0, 2.0, 3.0, 5.0),),
thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tn.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn_update_op.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn.eval())
class StreamingPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self, ('precision/false_positives/count:0',
'precision/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_precision = precision.eval()
for _ in range(10):
self.assertEqual(initial_precision, precision.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op))
self.assertAlmostEqual(1, precision.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, precision.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted1d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted2d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(1 - inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, precision.eval())
def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
predictions = constant_op.constant([0, 0, 0, 0])
labels = constant_op.constant([0, 0, 0, 0])
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0.0, precision.eval())
class StreamingRecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_recall(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('recall/false_negatives/count:0', 'recall/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertEqual(initial_recall, recall.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, recall.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, recall.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
class StreamingFPRTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('false_positive_rate/false_positives/count:0',
'false_positive_rate/true_negatives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_fpr = fpr.eval()
for _ in range(10):
self.assertEqual(initial_fpr, fpr.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fpr.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, fpr.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fp = 2.0 + 5.0
weighted_f = (2.0 + 2.0) + (5.0 + 5.0)
expected_fpr = weighted_fp / weighted_f
self.assertAlmostEqual(expected_fpr, update_op.eval())
self.assertAlmostEqual(expected_fpr, fpr.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fp = 1.0 + 3.0
weighted_f = (1.0 + 4.0) + (2.0 + 3.0)
expected_fpr = weighted_fp / weighted_f
self.assertAlmostEqual(expected_fpr, update_op.eval())
self.assertAlmostEqual(expected_fpr, fpr.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, fpr.eval())
def testZeroFalsePositivesAndTrueNegativesGivesZeroFPR(self):
predictions = array_ops.ones((1, 4))
labels = array_ops.ones((1, 4))
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fpr.eval())
class StreamingFNRTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('false_negative_rate/false_negatives/count:0',
'false_negative_rate/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_fnr = fnr.eval()
for _ in range(10):
self.assertEqual(initial_fnr, fnr.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fnr.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, fnr.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fn = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
expected_fnr = weighted_fn / weighted_t
self.assertAlmostEqual(expected_fnr, update_op.eval())
self.assertAlmostEqual(expected_fnr, fnr.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fn = 2.0 + 4.0
weighted_t = (2.0 + 3.0) + (1.0 + 4.0)
expected_fnr = weighted_fn / weighted_t
self.assertAlmostEqual(expected_fnr, update_op.eval())
self.assertAlmostEqual(expected_fnr, fnr.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, fnr.eval())
def testZeroFalseNegativesAndTruePositivesGivesZeroFNR(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fnr.eval())
class StreamingCurvePointsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metric_ops.streaming_curve_points(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('curve_points/true_positives:0', 'curve_points/false_negatives:0',
'curve_points/false_positives:0', 'curve_points/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
points, _ = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [points])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def _testValueTensorIsIdempotent(self, curve):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(high=2, size=(10, 3)), dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels, predictions=predictions, curve=curve)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
initial_points = points.eval()
sess.run(update_op)
self.assertAllClose(initial_points, points.eval())
def testValueTensorIsIdempotentROC(self):
self._testValueTensorIsIdempotent(curve='ROC')
def testValueTensorIsIdempotentPR(self):
self._testValueTensorIsIdempotent(curve='PR')
def _testCase(self, labels, predictions, curve, expected_points):
with self.cached_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels=labels_tensor,
predictions=predictions_tensor,
num_thresholds=3,
curve=curve)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAllClose(expected_points, points.eval())
def testEdgeCasesROC(self):
self._testCase([[1]], [[1]], 'ROC', [[0, 1], [0, 1], [0, 0]])
self._testCase([[0]], [[0]], 'ROC', [[1, 1], [0, 1], [0, 1]])
self._testCase([[0]], [[1]], 'ROC', [[1, 1], [1, 1], [0, 1]])
self._testCase([[1]], [[0]], 'ROC', [[0, 1], [0, 0], [0, 0]])
def testManyValuesROC(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'ROC',
[[1.0, 1.0], [0.0, 0.75], [0.0, 0.0]])
def testEdgeCasesPR(self):
self._testCase([[1]], [[1]], 'PR', [[1, 1], [1, 1], [0, 1]])
self._testCase([[0]], [[0]], 'PR', [[1, 0], [1, 1], [1, 1]])
self._testCase([[0]], [[1]], 'PR', [[1, 0], [1, 0], [1, 1]])
self._testCase([[1]], [[0]], 'PR', [[1, 1], [0, 1], [0, 1]])
def testManyValuesPR(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'PR',
[[1.0, 4.0 / 6.0], [0.75, 1.0], [0.0, 1.0]])
def _np_auc(predictions, labels, weights=None):
"""Computes the AUC explicitly using Numpy.
Args:
predictions: an ndarray with shape [N].
labels: an ndarray with shape [N].
weights: an ndarray with shape [N].
Returns:
the area under the ROC curve.
"""
if weights is None:
weights = np.ones(np.size(predictions))
is_positive = labels > 0
num_positives = np.sum(weights[is_positive])
num_negatives = np.sum(weights[~is_positive])
# Sort descending:
inds = np.argsort(-predictions)
sorted_labels = labels[inds]
sorted_weights = weights[inds]
is_positive = sorted_labels > 0
tp = np.cumsum(sorted_weights * is_positive) / num_positives
return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives
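# A minimal, hypothetical usage sketch of _np_auc (not part of the test suite):
# a single positive that outranks a single negative yields an AUC of 1.0, and
# reversing the scores yields 0.0:
#
#   _np_auc(np.array([0.9, 0.1]), np.array([1, 0]))  # -> 1.0
#   _np_auc(np.array([0.1, 0.9]), np.array([1, 0]))  # -> 0.0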
class StreamingAUCTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_auc(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('auc/true_positives:0', 'auc/false_negatives:0',
'auc/false_positives:0', 'auc/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_auc(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in range(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testPredictionsOutOfRange(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, -1, 1, -1], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
_, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertRaises(errors_impl.InvalidArgumentError, update_op.eval)
def testAllCorrect(self):
self.allCorrectAsExpected('ROC')
def allCorrectAsExpected(self, curve):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_auc(predictions, labels, curve=curve)
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op))
self.assertAlmostEqual(0.5, auc.eval())
def testWeighted1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([2], shape=(1, 1))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(0.5, auc.eval(), 5)
def testWeighted2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([1, 2, 3, 4], shape=(1, 4))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.7, sess.run(update_op), 5)
self.assertAlmostEqual(0.7, auc.eval(), 5)
def testAUCPRSpecialCase(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.79166, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)
def testAnotherAUCPRSpecialCase(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.610317, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)
def testThirdAUCPRSpecialCase(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.90277, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0, sess.run(update_op))
self.assertAlmostEqual(0, auc.eval())
def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testRecallOneAndPrecisionOneGivesOnePRAUC(self):
with self.cached_session() as sess:
predictions = array_ops.ones([4], dtype=dtypes_lib.float32)
labels = array_ops.ones([4])
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=num_samples)
noise = np.random.normal(0.0, scale=0.2, size=num_samples)
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
def _enqueue_as_batches(x, enqueue_ops):
x_batches = x.astype(np.float32).reshape((num_batches, batch_size))
x_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(num_batches):
enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))
return x_queue.dequeue()
for weights in (None, np.ones(num_samples),
np.random.exponential(scale=1.0, size=num_samples)):
expected_auc = _np_auc(predictions, labels, weights)
with self.cached_session() as sess:
enqueue_ops = [[] for i in range(num_batches)]
tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)
tf_labels = _enqueue_as_batches(labels, enqueue_ops)
tf_weights = (
_enqueue_as_batches(weights, enqueue_ops)
if weights is not None else None)
for i in range(num_batches):
sess.run(enqueue_ops[i])
auc, update_op = metrics.streaming_auc(
tf_predictions,
tf_labels,
curve='ROC',
num_thresholds=500,
weights=tf_weights)
sess.run(variables.local_variables_initializer())
for i in range(num_batches):
sess.run(update_op)
        # Since this is only approximate, we can't expect a six-digit match,
        # although with a larger number of samples/thresholds the accuracy
        # should improve.
self.assertAlmostEqual(expected_auc, auc.eval(), 2)
class StreamingDynamicAUCTest(test.TestCase):
def setUp(self):
super(StreamingDynamicAUCTest, self).setUp()
np.random.seed(1)
ops.reset_default_graph()
def testUnknownCurve(self):
with self.assertRaisesRegexp(
ValueError, 'curve must be either ROC or PR, TEST_CURVE unknown'):
metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
curve='TEST_CURVE')
def testVars(self):
metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)), predictions=array_ops.ones((10, 1)))
_assert_metric_variables(self, [
'dynamic_auc/concat_labels/array:0', 'dynamic_auc/concat_labels/size:0',
'dynamic_auc/concat_preds/array:0', 'dynamic_auc/concat_preds/size:0'
])
def testMetricsCollection(self):
my_collection_name = '__metrics__'
auc, _ = metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertEqual(ops.get_collection(my_collection_name), [auc])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in xrange(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in xrange(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testAllLabelsOnes(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1., 1., 1.])
labels = constant_op.constant([1, 1, 1])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, auc.eval())
def testAllLabelsZeros(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1., 1., 1.])
labels = constant_op.constant([0, 0, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, auc.eval())
def testNonZeroOnePredictions(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2.5, -2.5, 2.5, -2.5], dtype=dtypes_lib.float32)
labels = constant_op.constant([1, 0, 1, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
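      # Both positives outrank both negatives, so the ranking-based AUC is
      # exactly 1.0 even though the scores are not in [0, 1].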
self.assertAlmostEqual(auc.eval(), 1.0)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0, 1, 0])
labels = constant_op.constant([0, 1, 1, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.5, auc.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, auc.eval())
def testExceptionOnIncompatibleShapes(self):
with self.cached_session() as sess:
predictions = array_ops.ones([5])
labels = array_ops.zeros([6])
with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
def testExceptionOnGreaterThanOneLabel(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)
labels = constant_op.constant([2, 1, 0])
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is >1.*'):
sess.run(update_op)
def testExceptionOnNegativeLabel(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)
labels = constant_op.constant([1, 0, -1])
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is <0.*'):
sess.run(update_op)
def testWithMultipleUpdates(self):
batch_size = 10
num_batches = 100
labels = np.array([])
predictions = np.array([])
tf_labels = variables.VariableV1(
array_ops.ones(batch_size, dtypes_lib.int32),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.int32)
tf_predictions = variables.VariableV1(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(tf_labels, tf_predictions)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in xrange(num_batches):
new_labels = np.random.randint(0, 2, size=batch_size)
noise = np.random.normal(0.0, scale=0.2, size=batch_size)
new_predictions = 0.4 + 0.2 * new_labels + noise
labels = np.concatenate([labels, new_labels])
predictions = np.concatenate([predictions, new_predictions])
sess.run(tf_labels.assign(new_labels))
sess.run(tf_predictions.assign(new_predictions))
sess.run(update_op)
expected_auc = _np_auc(predictions, labels)
self.assertAlmostEqual(expected_auc, auc.eval())
def testAUCPRReverseIncreasingPredictions(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1])
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-5)
def testAUCPRJumbledPredictions(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81], dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1])
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-6)
def testAUCPRPredictionsLessThanHalf(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-5)
def testWithWeights(self):
batch_size = 10
num_batches = 100
labels = np.array([])
predictions = np.array([])
weights = np.array([])
tf_labels = variables.VariableV1(
array_ops.ones(batch_size, dtypes_lib.int32),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.int32)
tf_predictions = variables.VariableV1(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
tf_weights = variables.VariableV1(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(tf_labels,
tf_predictions,
weights=tf_weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in xrange(num_batches):
new_labels = np.random.randint(0, 2, size=batch_size)
noise = np.random.uniform(-0.2, 0.2, size=batch_size)
new_predictions = 0.4 + 0.2 * new_labels + noise
new_weights = np.random.uniform(0.0, 3.0, size=batch_size)
labels = np.concatenate([labels, new_labels])
predictions = np.concatenate([predictions, new_predictions])
weights = np.concatenate([weights, new_weights])
sess.run([tf_labels.assign(new_labels),
tf_predictions.assign(new_predictions),
tf_weights.assign(new_weights)])
sess.run(update_op)
expected_auc = _np_auc(predictions, labels, weights)
self.assertAlmostEqual(expected_auc, auc.eval())
class AucWithConfidenceIntervalsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def _testResultsEqual(self, expected_dict, gotten_result):
"""Tests that 2 results (dicts) represent the same data.
Args:
      expected_dict: A dictionary whose keys are the field names of the AUC
        result and whose values are lists of floats.
      gotten_result: An AucWithConfidenceIntervalData object.
"""
gotten_dict = {k: t.eval() for k, t in gotten_result._asdict().items()}
self.assertItemsEqual(
list(expected_dict.keys()), list(gotten_dict.keys()))
for key, expected_values in expected_dict.items():
self.assertAllClose(expected_values, gotten_dict[key])
def _testCase(self, predictions, labels, expected_result, weights=None):
"""Performs a test given a certain scenario of labels, predictions, weights.
Args:
predictions: The predictions tensor. Of type float32.
labels: The labels tensor. Of type bool.
expected_result: The expected result (dict) that maps to tensors.
weights: Optional weights tensor.
"""
with self.cached_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.int64)
weights_tensor = None
if weights:
weights_tensor = constant_op.constant(weights, dtype=dtypes_lib.float32)
gotten_result, update_op = (
metric_ops.auc_with_confidence_intervals(
labels=labels_tensor,
predictions=predictions_tensor,
weights=weights_tensor))
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self._testResultsEqual(expected_result, gotten_result)
def testAucAllCorrect(self):
self._testCase(
predictions=[0., 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],
labels=[0, 0, 1, 0, 0, 1, 0, 1, 1, 0],
expected_result={
'auc': 0.66666667,
'lower': 0.27826795,
'upper': 0.91208512,
})
def testAucUnorderedInput(self):
self._testCase(
predictions=[1.0, 0.6, 0., 0.3, 0.4, 0.2, 0.5, 0.3, 0.6, 0.8],
labels=[0, 1, 0, 1, 0, 0, 1, 0, 0, 1],
expected_result={
'auc': 0.66666667,
'lower': 0.27826795,
'upper': 0.91208512,
})
def testAucWithWeights(self):
self._testCase(
predictions=[0., 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],
labels=[0, 0, 1, 0, 0, 1, 0, 1, 1, 0],
weights=[0.5, 0.6, 1.2, 1.5, 2.0, 2.0, 1.5, 1.2, 0.6, 0.5],
expected_result={
'auc': 0.65151515,
'lower': 0.28918604,
'upper': 0.89573906,
})
def testAucEqualOne(self):
self._testCase(
predictions=[0, 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],
labels=[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
expected_result={
'auc': 1.0,
'lower': 1.0,
'upper': 1.0,
})
def testAucEqualZero(self):
self._testCase(
predictions=[0, 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],
labels=[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
expected_result={
'auc': 0.0,
'lower': 0.0,
'upper': 0.0,
})
def testNonZeroOnePredictions(self):
self._testCase(
predictions=[2.5, -2.5, .5, -.5, 1],
labels=[1, 0, 1, 0, 0],
expected_result={
'auc': 0.83333333,
'lower': 0.15229267,
'upper': 0.99286517,
})
def testAllLabelsOnes(self):
self._testCase(
predictions=[1., 1., 1., 1., 1.],
labels=[1, 1, 1, 1, 1],
expected_result={
'auc': 0.,
'lower': 0.,
'upper': 0.,
})
def testAllLabelsZeros(self):
self._testCase(
predictions=[0., 0., 0., 0., 0.],
labels=[0, 0, 0, 0, 0],
expected_result={
'auc': 0.,
'lower': 0.,
'upper': 0.,
})
def testWeightSumLessThanOneAll(self):
self._testCase(
predictions=[1., 1., 0., 1., 0., 0.],
labels=[1, 1, 1, 0, 0, 0],
weights=[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
expected_result={
'auc': 0.,
'lower': 0.,
'upper': 0.,
})
def testWithMultipleUpdates(self):
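    # Everything seen by the streaming metric is also accumulated in NumPy
    # arrays and compared against the _np_auc reference. Note that only the
    # point estimate (auc.auc) is checked here, not the lower/upper
    # confidence bounds.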
batch_size = 50
num_batches = 100
labels = np.array([])
predictions = np.array([])
tf_labels = variables.VariableV1(
array_ops.ones(batch_size, dtypes_lib.int32),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.int32)
tf_predictions = variables.VariableV1(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
auc, update_op = metrics.auc_with_confidence_intervals(tf_labels,
tf_predictions)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in xrange(num_batches):
new_labels = np.random.randint(0, 2, size=batch_size)
noise = np.random.normal(0.0, scale=0.2, size=batch_size)
new_predictions = 0.4 + 0.2 * new_labels + noise
labels = np.concatenate([labels, new_labels])
predictions = np.concatenate([predictions, new_predictions])
sess.run(tf_labels.assign(new_labels))
sess.run(tf_predictions.assign(new_predictions))
sess.run(update_op)
expected_auc = _np_auc(predictions, labels)
self.assertAllClose(expected_auc, auc.auc.eval())
def testExceptionOnFloatLabels(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0, 1, 0], dtypes_lib.float32)
labels = constant_op.constant([0.7, 0, 1, 0, 1])
_, update_op = metrics.auc_with_confidence_intervals(labels, predictions)
sess.run(variables.local_variables_initializer())
      self.assertRaises(TypeError, sess.run, update_op)
def testExceptionOnGreaterThanOneLabel(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0, 1, 0], dtypes_lib.float32)
labels = constant_op.constant([2, 1, 0, 1, 0])
_, update_op = metrics.auc_with_confidence_intervals(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is >1.*'):
sess.run(update_op)
def testExceptionOnNegativeLabel(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0, 1, 0], dtypes_lib.float32)
labels = constant_op.constant([1, 0, -1, 1, 0])
_, update_op = metrics.auc_with_confidence_intervals(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is <0.*'):
sess.run(update_op)
class StreamingPrecisionRecallAtEqualThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def _testResultsEqual(self, expected_dict, gotten_result, eps=None):
"""Tests that 2 results (dicts) represent the same data.
Args:
expected_dict: A dictionary with keys that are the names of properties
of PrecisionRecallData and whose values are lists of floats.
gotten_result: A PrecisionRecallData object.
eps: Epsilon value to use for testing output values. If unspecified, use
default from assertAllClose.
"""
gotten_dict = {k: t.eval() for k, t in gotten_result._asdict().items()}
self.assertItemsEqual(list(expected_dict.keys()), list(gotten_dict.keys()))
for key, expected_values in expected_dict.items():
if eps is not None:
self.assertAllClose(expected_values, gotten_dict[key], atol=eps)
else:
self.assertAllClose(expected_values, gotten_dict[key])
def testVars(self):
metric_ops.precision_recall_at_equal_thresholds(
labels=constant_op.constant([True], dtype=dtypes_lib.bool),
predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32))
_assert_metric_variables(
self, ('precision_recall_at_equal_thresholds/variables/tp_buckets:0',
'precision_recall_at_equal_thresholds/variables/fp_buckets:0'))
def testVarsWithName(self):
metric_ops.precision_recall_at_equal_thresholds(
labels=constant_op.constant([True], dtype=dtypes_lib.bool),
predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32),
name='foo')
_assert_metric_variables(
self, ('foo/variables/tp_buckets:0', 'foo/variables/fp_buckets:0'))
def testValuesAreIdempotent(self):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(size=(10, 3)) > 0.5, dtype=dtypes_lib.bool)
result, update_op = metric_ops.precision_recall_at_equal_thresholds(
labels=labels, predictions=predictions)
with self.cached_session() as sess:
# Run several updates.
sess.run(variables.local_variables_initializer())
for _ in range(3):
sess.run(update_op)
# Then verify idempotency.
initial_result = {
k: value.eval().tolist()
for k, value in result._asdict().items()
}
for _ in range(3):
self._testResultsEqual(initial_result, result)
def _testCase(self,
predictions,
labels,
expected_result,
dtype=dtypes_lib.float32,
eps=None,
weights=None):
"""Performs a test given a certain scenario of labels, predictions, weights.
Args:
predictions: The predictions tensor. Of type dtype.
labels: The labels tensor. Of type bool.
expected_result: The expected result (dict) that maps to tensors.
dtype: Data type to use for predictions and weights tensor. Default
is float32.
eps: Epsilon value to use for testing output values. If unspecified, use
default from assertAllClose.
weights: Optional weights tensor.
"""
with self.cached_session() as sess:
predictions_tensor = constant_op.constant(predictions, dtype=dtype)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.bool)
weights_tensor = None
if weights:
weights_tensor = constant_op.constant(weights, dtype=dtype)
gotten_result, update_op = (
metric_ops.precision_recall_at_equal_thresholds(
labels=labels_tensor,
predictions=predictions_tensor,
weights=weights_tensor,
num_thresholds=3))
self.assertEqual(gotten_result.tp.dtype, dtype)
self.assertEqual(gotten_result.fp.dtype, dtype)
self.assertEqual(gotten_result.tn.dtype, dtype)
self.assertEqual(gotten_result.fn.dtype, dtype)
self.assertEqual(gotten_result.precision.dtype, dtype)
self.assertEqual(gotten_result.recall.dtype, dtype)
self.assertEqual(gotten_result.thresholds.dtype, dtype)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self._testResultsEqual(expected_result, gotten_result, eps=eps)
def testAllTruePositives(self):
self._testCase(
[[1]], [[True]], {
'tp': [1, 1, 1],
'fp': [0, 0, 0],
'tn': [0, 0, 0],
'fn': [0, 0, 0],
'precision': [1.0, 1.0, 1.0],
'recall': [1.0, 1.0, 1.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllTrueNegatives(self):
self._testCase(
[[0]], [[False]], {
'tp': [0, 0, 0],
'fp': [1, 0, 0],
'tn': [0, 1, 1],
'fn': [0, 0, 0],
'precision': [0.0, 0.0, 0.0],
'recall': [0.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllFalsePositives(self):
self._testCase(
[[1]], [[False]], {
'tp': [0, 0, 0],
'fp': [1, 1, 1],
'tn': [0, 0, 0],
'fn': [0, 0, 0],
'precision': [0.0, 0.0, 0.0],
'recall': [0.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllFalseNegatives(self):
self._testCase(
[[0]], [[True]], {
'tp': [1, 0, 0],
'fp': [0, 0, 0],
'tn': [0, 0, 0],
'fn': [0, 1, 1],
'precision': [1.0, 0.0, 0.0],
'recall': [1.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testManyValues(self):
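    # Expected counts at the three thresholds [0.0, 0.5, 1.0]:
    #   t=0.0: every example is predicted positive -> tp=4, fp=2,
    #          precision=2/3, recall=1.
    #   t=0.5: only 0.6, 0.7 and 0.8 are predicted positive -> tp=3, fp=0,
    #          tn=2, fn=1, precision=1, recall=3/4.
    #   t=1.0: nothing is predicted positive -> tn=2, fn=4, and precision
    #          and recall fall to 0.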
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]], {
'tp': [4, 3, 0],
'fp': [2, 0, 0],
'tn': [0, 2, 2],
'fn': [0, 1, 4],
'precision': [2.0 / 3.0, 1.0, 0.0],
'recall': [1.0, 0.75, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testManyValuesWithWeights(self):
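    # Same predictions/labels as above, but weighted. At t=0.0 the positives
    # carry weight 0.0+0.0+0.5+1.0=1.5 (tp) and the negatives 0.5+2.0=2.5
    # (fp), giving precision 1.5/4.0=0.375 and recall 1.0. At t=0.5 the only
    # missed positive (0.2) has weight 0, so recall stays 1.0 while precision
    # rises to 1.0. At t=1.0 everything is predicted negative.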
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]], {
'tp': [1.5, 1.5, 0.0],
'fp': [2.5, 0.0, 0.0],
'tn': [0.0, 2.5, 2.5],
'fn': [0.0, 0.0, 1.5],
'precision': [0.375, 1.0, 0.0],
'recall': [1.0, 1.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
},
weights=[[0.0, 0.5, 2.0, 0.0, 0.5, 1.0]])
def testFloat64(self):
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]], {
'tp': [4, 3, 0],
'fp': [2, 0, 0],
'tn': [0, 2, 2],
'fn': [0, 1, 4],
'precision': [2.0 / 3.0, 1.0, 0.0],
'recall': [1.0, 0.75, 0.0],
'thresholds': [0.0, 0.5, 1.0],
},
dtype=dtypes_lib.float64)
def testFloat16(self):
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]], {
'tp': [4, 3, 0],
'fp': [2, 0, 0],
'tn': [0, 2, 2],
'fn': [0, 1, 4],
'precision': [2.0 / 3.0, 1.0, 0.0],
'recall': [1.0, 0.75, 0.0],
'thresholds': [0.0, 0.5, 1.0],
},
dtype=dtypes_lib.float16,
eps=1e-3)
class StreamingSpecificityAtSensitivityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7)
_assert_metric_variables(self,
('specificity_at_sensitivity/true_positives:0',
'specificity_at_sensitivity/false_negatives:0',
'specificity_at_sensitivity/false_positives:0',
'specificity_at_sensitivity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_specificity = specificity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSensitivity(self):
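    # To recall 4 of the 5 positives (sensitivity 0.8) the threshold must
    # drop to about 0.45; no negative scores at or above that threshold, so
    # all 5 negatives are true negatives and the specificity is 1.0.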
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.8)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op))
self.assertAlmostEqual(1.0, specificity.eval())
def testSomeCorrectLowSensitivity(self):
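    # A threshold of about 0.26 recovers 2 of the 5 positives (sensitivity
    # 0.4). The negatives scoring 0.3 and 0.4 then become false positives,
    # leaving 3 of 5 negatives correct, i.e. specificity 0.6.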
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted1d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [3]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted2d(self):
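    # With per-example weights 1..10 the positives carry weights 6..10
    # (total 40). A threshold of about 0.26 captures the two positives with
    # weights 9 and 10 (weighted sensitivity 19/40 >= 0.4); the negatives
    # scoring 0.3 and 0.4 (weights 3 and 4) become false positives, so the
    # weighted specificity is (1+2+5)/(1+2+5+3+4) = 8/15.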
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
class StreamingSensitivityAtSpecificityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7)
_assert_metric_variables(self,
('sensitivity_at_specificity/true_positives:0',
'sensitivity_at_specificity/false_negatives:0',
'sensitivity_at_specificity/false_positives:0',
'sensitivity_at_specificity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_sensitivity = sensitivity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSpecificity(self):
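    # A threshold of about 0.4 keeps 4 of the 5 negatives below it
    # (specificity 0.8) while 4 of the 5 positives score at or above it, so
    # the reported sensitivity is 0.8.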
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.8)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, specificity.eval())
def testSomeCorrectLowSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, weights=weights, specificity=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.675, sess.run(update_op))
self.assertAlmostEqual(0.675, specificity.eval())
# TODO(nsilberman): Break this up into two sets of tests.
class StreamingPrecisionRecallThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'precision_at_thresholds/true_positives:0',
'precision_at_thresholds/false_positives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
prec, _ = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
rec, _ = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, precision_op = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
_, recall_op = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [precision_op, recall_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run([prec_op, rec_op])
# Then verify idempotency.
initial_prec = prec.eval()
initial_rec = rec.eval()
for _ in range(10):
self.assertAllClose(initial_prec, prec.eval())
self.assertAllClose(initial_rec, rec.eval())
# TODO(nsilberman): fix tests (passing but incorrect).
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertEqual(1, prec.eval())
self.assertEqual(1, rec.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.5, prec.eval())
self.assertAlmostEqual(0.5, rec.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval())
self.assertAlmostEqual(0, rec.eval())
def testWeights1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testWeights2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testExtremeThresholds(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.75, prec_low.eval())
self.assertAlmostEqual(0.0, prec_high.eval())
self.assertAlmostEqual(1.0, rec_low.eval())
self.assertAlmostEqual(0.0, rec_high.eval())
def testZeroLabelsPredictions(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval(), 6)
self.assertAlmostEqual(0, rec.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
tp = 0
fp = 0
fn = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
fp += 1
else:
if labels[i] == 1:
fn += 1
else:
tn += 1
epsilon = 1e-7
expected_prec = tp / (epsilon + tp + fp)
expected_rec = tp / (epsilon + tp + fn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.cached_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
prec, prec_op = metrics.streaming_precision_at_thresholds(
tf_predictions, tf_labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run([prec_op, rec_op])
      # Since this is only approximate, we can't expect a 6-digit match,
      # although with a higher number of samples/thresholds the accuracy
      # should improve.
self.assertAlmostEqual(expected_prec, prec.eval(), 2)
self.assertAlmostEqual(expected_rec, rec.eval(), 2)
class StreamingFPRThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'false_positive_rate_at_thresholds/false_positives:0',
'false_positive_rate_at_thresholds/true_negatives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
fpr, _ = metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [fpr])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(fpr_op)
# Then verify idempotency.
initial_fpr = fpr.eval()
for _ in range(10):
self.assertAllClose(initial_fpr, fpr.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertEqual(0, fpr.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.5, fpr.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(1, fpr.eval())
def testWeights1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testWeights2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testExtremeThresholds(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(1.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testZeroLabelsPredictions(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0, fpr.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
fp = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 0:
fp += 1
else:
if labels[i] == 0:
tn += 1
epsilon = 1e-7
expected_fpr = fp / (epsilon + fp + tn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.cached_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run(fpr_op)
      # Since this is only approximate, we can't expect a 6-digit match,
      # although with a higher number of samples/thresholds the accuracy
      # should improve.
self.assertAlmostEqual(expected_fpr, fpr.eval(), 2)
class RecallAtPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7)
_assert_metric_variables(self, ('recall_at_precision/true_positives:0',
'recall_at_precision/false_negatives:0',
'recall_at_precision/false_positives:0',
'recall_at_precision/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertAlmostEqual(initial_recall, recall.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=1.0)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, recall.eval())
def testSomeCorrectHighPrecision(self):
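    # The four highest-scoring examples are all positive, so a threshold
    # just below 0.7 yields precision 1.0 (at or above the 0.8 target) while
    # recovering 4 of the 5 positives, i.e. recall 0.8.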
predictions_values = [1, .9, .8, .7, .6, .5, .4, .3]
labels_values = [1, 1, 1, 1, 0, 0, 0, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.8)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, recall.eval())
def testSomeCorrectLowPrecision(self):
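    # Apart from the example scoring 0.1, only the two top-scoring examples
    # are positive. Precision stays at or above the 0.4 target only while at
    # most the five highest scores are predicted positive, and in that regime
    # just those two positives are recovered, so the best achievable recall
    # is 2/3; recovering the last positive would push precision down to 3/10.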
predictions_values = [1, .9, .8, .7, .6, .5, .4, .3, .2, .1]
labels_values = [1, 1, 0, 0, 0, 0, 0, 0, 0, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
target_recall = 2.0 / 3.0
self.assertAlmostEqual(target_recall, sess.run(update_op))
self.assertAlmostEqual(target_recall, recall.eval())
def testWeighted(self):
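    # The positives carry total weight 1+1+1=3. The two top-scoring positives
    # (weight 1 each) can be recovered at precision >= 0.4 (2/(2+3)=0.4 once
    # the weight-3 negative is included); recovering the last positive drags
    # precision down to 3/10, so the expected recall is 2/3.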
predictions_values = [1, .9, .8, .7, .6]
labels_values = [1, 1, 0, 0, 1]
weights_values = [1, 1, 3, 4, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, weights=weights, precision=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
target_recall = 2.0 / 3.0
self.assertAlmostEqual(target_recall, sess.run(update_op))
self.assertAlmostEqual(target_recall, recall.eval())
def _test_strict_mode(self, strict_mode, target_precision, expected_recall):
num_thresholds = 11
predictions_values = [.2, .3, .5, .6, .7, .8, .9, .9, .9, .1]
labels_values = [1, 1, 0, 0, 0, 0, 0, 0, 0, 1]
# Resulting thresholds and the corresponding precision and recall values at
# each threshold:
# Thresholds [0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9]
# precisions: [0.3 0.2 0.1 0 0 0 0 0 0]
# recalls: [1.0 0.7 0.3 0 0 0 0 0 0]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels,
predictions,
num_thresholds=num_thresholds,
precision=target_precision,
strict_mode=strict_mode)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_recall, sess.run(update_op))
self.assertAlmostEqual(expected_recall, recall.eval())
def testStrictMode_Off(self):
    # strict_mode is turned off, so we return the recall at the threshold whose
    # precision (0.3) is closest to the target precision (0.9). The recall
    # corresponding to that threshold is 1.0.
self._test_strict_mode(
strict_mode=False, target_precision=0.9, expected_recall=1.0)
def testStrictMode_OnAndFail(self):
# strict_mode is turned on and we fail to reach the target precision at any
# threshold.
# Target precision: 0.9
# Diff: [-0.6 -0.7 -0.8 -0.9 -0.9 -0.9 -0.9 -0.9 -0.9]
# Reciprocal: [-1.6 -1.4 -1.3 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1]
    # Max index: 3, and the corresponding precision is 0, which is smaller than
    # the target precision 0.9. As a result, the expected recall is 0.
self._test_strict_mode(
strict_mode=True, target_precision=0.9, expected_recall=.0)
def testStrictMode_OnAndSucceed(self):
    # strict_mode is on and we can reach the target precision at a certain
    # threshold.
# Target precision: 0.2
# Diff: [0.1 0 -0.1 -0.2 -0.2 -0.2 -0.2 -0.2 -0.2]
# Reciprocal: [10 infty -10.0 -5.0 -5.0 -5.0 -5.0 -5.0 -5.0]
    # Max index: 1, and the corresponding precision is 0.2, which is no smaller
    # than the target precision 0.2. In this case, we return the recall at
    # index 1, which is 2.0/3 (approximately 0.7).
self._test_strict_mode(
strict_mode=True, target_precision=0.2, expected_recall=2.0 / 3)
class PrecisionAtRecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.precision_at_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
target_recall=0.7)
_assert_metric_variables(self,
('precision_at_recall/true_positives:0',
'precision_at_recall/false_negatives:0',
'precision_at_recall/false_positives:0',
'precision_at_recall/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.precision_at_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
target_recall=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.precision_at_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
target_recall=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=1)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_precision = precision.eval()
for _ in range(10):
self.assertAlmostEqual(initial_precision, precision.eval(), places=5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, precision.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = 1.0 - predictions
label_prior = math_ops.reduce_mean(labels)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(sess.run(label_prior), sess.run(update_op))
self.assertEqual(sess.run(label_prior), precision.eval())
def testSomeCorrectHighRecall(self):
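    # To reach the 0.8 recall target the threshold must drop to about 0.45,
    # which captures 4 of the 5 positives plus the negative scoring 0.5,
    # giving precision 4/5 = 0.8.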
predictions_values = [0.1, 0.2, 0.5, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.8)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, precision.eval())
def testSomeCorrectLowRecall(self):
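    # A recall of 0.4 needs only the two highest-scoring positives (0.9 and
    # 0.6); at a threshold of about 0.6 the negative scoring 0.7 is also
    # predicted positive, so precision is 2/3.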
predictions_values = [0.1, 0.2, 0.7, 0.3, 0.0, 0.1, 0.45, 0.5, 0.6, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(2.0/3, sess.run(update_op))
self.assertAlmostEqual(2.0/3, precision.eval())
def testWeighted_multipleLabelDtypes(self):
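    # The positives carry weights 6..10 (total 40), so a weighted recall of
    # at least 0.8 requires the four positives scoring 0.22 and above
    # (weight 34). At that threshold the negatives scoring 0.3 and 0.4
    # (weights 4 and 5) are false positives, giving precision 34/43.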
for label_dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions_values = [
0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.22, 0.25, 0.31, 0.35]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = math_ops.cast(labels_values, dtype=label_dtype)
weights = constant_op.constant(weights_values)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.8, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(34.0/43, sess.run(update_op))
self.assertAlmostEqual(34.0/43, precision.eval())
class StreamingFNRThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'false_negative_rate_at_thresholds/false_negatives:0',
'false_negative_rate_at_thresholds/true_positives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
fnr, _ = metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [fnr])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(fnr_op)
# Then verify idempotency.
initial_fnr = fnr.eval()
for _ in range(10):
self.assertAllClose(initial_fnr, fnr.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertEqual(0, fnr.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.5, fnr.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(1, fnr.eval())
def testWeights1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)
self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)
def testWeights2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)
self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)
def testExtremeThresholds(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval())
self.assertAlmostEqual(1.0, fnr_high.eval())
def testZeroLabelsPredictions(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0, fnr.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
fn = 0
tp = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
if labels[i] == 1:
fn += 1
epsilon = 1e-7
expected_fnr = fn / (epsilon + fn + tp)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.cached_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run(fnr_op)
      # Since this is only approximate, we can't expect a 6-digit match,
      # although with a higher number of samples/thresholds the accuracy
      # should improve.
self.assertAlmostEqual(expected_fnr, fnr.eval(), 2)
# TODO(ptucker): Remove when we remove `streaming_recall_at_k`.
# This op will be deprecated soon in favor of `streaming_sparse_recall_at_k`.
# Until then, this test validates that both ops yield the same results.
class StreamingRecallAtKTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
self._batch_size = 4
self._num_classes = 3
self._np_predictions = np.matrix(('0.1 0.2 0.7;'
'0.6 0.2 0.2;'
'0.0 0.9 0.1;'
'0.2 0.0 0.8'))
self._np_labels = [0, 0, 0, 0]
def testVars(self):
metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),
k=1)
_assert_metric_variables(self,
('recall_at_1/count:0', 'recall_at_1/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),
k=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),
k=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testSingleUpdateKIs1(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=1)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=1)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.25, sess.run(update_op))
self.assertEqual(0.25, recall.eval())
self.assertEqual(0.25, sess.run(sp_update_op))
self.assertEqual(0.25, sp_recall.eval())
def testSingleUpdateKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=2)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, recall.eval())
self.assertEqual(0.5, sess.run(sp_update_op))
self.assertEqual(0.5, sp_recall.eval())
def testSingleUpdateKIs3(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=3)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=3)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
def testSingleUpdateSomeMissingKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
weights = constant_op.constant(
[0, 1, 0, 1], shape=(self._batch_size,), dtype=dtypes_lib.float32)
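    # Only the second and fourth examples carry nonzero weight, and both have
    # their label 0 within the top-2 predictions, so weighted recall@2 = 1.0.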
recall, update_op = metrics.streaming_recall_at_k(
predictions, labels, k=2, weights=weights)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions,
array_ops.reshape(labels, (self._batch_size, 1)),
k=2,
weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
class StreamingSparsePrecisionTest(test.TestCase):
def _test_streaming_sparse_precision_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
labels=labels,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_k(self,
predictions,
labels,
k,
expected,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
predictions = constant_op.constant(predictions, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_k(
predictions, labels, k, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_top_k(
top_k_predictions, labels, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def test_top_k_rank_invalid(self):
with self.cached_session():
# top_k_predictions has rank < 2.
top_k_predictions = [9, 4, 6, 2, 0]
sp_labels = sparse_tensor.SparseTensorValue(
indices=np.array([[
0,
], [
1,
], [
2,
]], np.int64),
values=np.array([2, 7, 8], np.int64),
dense_shape=np.array([
10,
], np.int64))
with self.assertRaises(ValueError):
precision, _ = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int64),
labels=sp_labels)
variables.variables_initializer(variables.local_variables()).run()
precision.eval()
def test_average_precision(self):
# Example 1.
# Matches example here:
# fastml.com/what-you-wanted-to-know-about-mean-average-precision
labels_ex1 = (0, 1, 2, 3, 4)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
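    # Ranked by score, the hits for example 1 occur at positions 2 and 4, so
    # average precision at k accumulates precision@i at each hit position i and
    # divides by k (the test uses k <= 4 with 5 relevant labels, so the divisor
    # here is simply k).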
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
# Example 2.
labels_ex2 = (0, 2, 4, 5, 6)
labels = np.array([labels_ex2], dtype=np.int64)
predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
predictions = (predictions_ex2,)
predictions_top_k_ex2 = (1, 3, 0, 6, 5)
precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,
(precision_ex2[2] + precision_ex2[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex2[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=precision_ex2[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex2[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=avg_precision_ex2[i])
    # For both examples combined, we expect both precision and average
    # precision to be the average of the 2 examples.
labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
predictions = (predictions_ex1, predictions_ex2)
streaming_precision = [
(ex1 + ex2) / 2 for ex1, ex2 in zip(precision_ex1, precision_ex2)
]
streaming_average_precision = [
(ex1 + ex2) / 2
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=streaming_precision[i])
predictions_top_k = (predictions_top_k_ex1[:k], predictions_top_k_ex2[:k])
self._test_streaming_sparse_precision_at_top_k(
predictions_top_k, labels, expected=streaming_precision[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=streaming_average_precision[i])
self._test_streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels, expected=streaming_average_precision[i])
    # For weighted examples, we expect the streaming average precision to be
    # the weighted average of the 2 examples.
weights = (0.3, 0.6)
streaming_average_precision = [
(weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_average_precision_at_k(
predictions,
labels,
k,
expected=streaming_average_precision[i],
weights=weights)
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k], predictions_top_k_ex2[:k]),
labels,
expected=streaming_average_precision[i],
weights=weights)
def test_average_precision_some_labels_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
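    # Class ids -1 and 7 fall outside [0, n_classes=7) and are ignored, so the
    # remaining labels (0, 1, 2, 3, 4) reproduce Example 1 of
    # test_average_precision and the expected values are identical.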
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
def test_average_precision_at_top_k_static_shape_check(self):
predictions_top_k = array_ops.placeholder(
shape=(2, None), dtype=dtypes_lib.int64)
labels = np.array(((1,), (2,)), dtype=np.int64)
# Fails due to non-static predictions_idx shape.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels)
predictions_top_k = (2, 1)
    # Fails since the rank of predictions_idx is less than 2.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels)
predictions_top_k = ((2,), (1,))
# Valid static shape.
metric_ops.streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels)
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,1,2 have 0 predictions, classes -1 and 4 are out of range.
for class_id in (-1, 0, 1, 2, 4):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2, class_id=3)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_labels(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=3.0 / 10)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1,
3]],
# values -1 and 10 are outside the [0, n_classes) range and are ignored.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=3.0 / 10)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_3d_no_labels(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 predictions, all correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=4.0 / 4, class_id=2)
# Class 5: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=5)
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=7)
# All classes: 20 predictions, 7 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=7.0 / 20)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=7.0 / 20)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: no predictions.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 0, 1], [0, 0, 1, 0]]
expected_precision = 0.5
with self.cached_session():
_, precision = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_precision, precision.eval())
class StreamingSparseRecallTest(test.TestCase):
def _test_streaming_sparse_recall_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_sparse_recall_at_top_k(self,
labels,
top_k_predictions,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metric_ops.sparse_recall_at_top_k(
labels=labels,
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
# Classes 0,1 have 0 labels, 0 predictions, classes -1 and 4 are out of
# range.
for labels in (sparse_labels, dense_labels):
for class_id in (-1, 0, 1, 4):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_one_label_at_k1_no_predictions(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 0 predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=2)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, class_id=3)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2)
def _test_one_label_at_k1_weighted(self, labels):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
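    # Class 3 is labeled only in the first example, so its recall is 1.0
    # whenever that example has nonzero weight and NaN (no weighted labels)
    # whenever it does not; the weight values only rescale the numerator and
    # denominator together.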
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=3, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=3, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.6 / 0.9, weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.6 / 0.9, weights=(0.6, 0.3))
def test_one_label_at_k1_weighted_sparse_labels(self):
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
self._test_one_label_at_k1_weighted(sparse_labels)
def test_one_label_at_k1_weighted_dense_labels(self):
dense_labels = np.array([[3], [2]], dtype=np.int64)
self._test_one_label_at_k1_weighted(dense_labels)
def test_three_labels_at_k5_nan(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 8: 1 label, no predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=8)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=8)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=2)
      # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=7)
# All classes: 6 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=3.0 / 6)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 6)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) count in denominator."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1,
3]],
# values -1 and 10 are outside the [0, n_classes) range.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=2.0 / 2,
class_id=2)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=2.0 / 2, class_id=2)
    # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=1.0 / 1,
class_id=5)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=0.0 / 1,
class_id=7)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=0.0 / 1, class_id=7)
# All classes: 8 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions, labels=sp_labels, k=5, expected=3.0 / 8)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=3.0 / 8)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
dense_labels = np.array(
[[[2, 7, 8], [1, 2, 5]], [
[1, 2, 5],
[2, 7, 8],
]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_3d_no_predictions(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
dense_labels = np.array(
[[[2, 7, 8], [1, 2, 5]], [
[1, 2, 5],
[2, 7, 8],
]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,8 have 0 predictions, >=1 label.
for class_id in (1, 8):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 labels, all correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=4.0 / 4, class_id=2)
# Class 5: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=5)
# Class 7: 2 labels, 1 incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, class_id=7)
# All classes: 12 labels, 7 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=7.0 / 12)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=7.0 / 12)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 2 labels, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
# Class 7: No labels.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 1, 0], [0, 0, 0, 1]]
expected_recall = 0.5
with self.cached_session():
_, recall = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_recall, recall.eval())
class StreamingMeanAbsoluteErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_absolute_error/count:0', 'mean_absolute_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_absolute_error(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
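    # Only the second and fourth elements carry nonzero weight; their absolute
    # errors are |4 - 3| = 1 and |8 - 3| = 5, so the weighted mean is 6 / 2 = 3.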
error, update_op = metrics.streaming_mean_absolute_error(
predictions, labels, weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(3, sess.run(update_op))
self.assertEqual(3, error.eval())
class StreamingMeanRelativeErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_relative_error/count:0', 'mean_relative_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
normalizer = random_ops.random_normal((10, 3), seed=3)
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateNormalizedByLabels(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
np.divide(np.absolute(np_predictions - np_labels), np_labels))
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(np_labels, shape=(1, 4))
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(expected_error, sess.run(update_op))
self.assertEqual(expected_error, error.eval())
def testSingleUpdateNormalizedByZeros(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=array_ops.zeros_like(labels))
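    # Where the normalizer is zero, the relative error is presumably treated as
    # 0, so the overall metric is expected to be 0.0 here.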
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.0, sess.run(update_op))
self.assertEqual(0.0, error.eval())
class StreamingMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_squared_error/count:0', 'mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError(self):
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
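    # Squared errors are (2-1)^2 + (4-3)^2 + (6-2)^2 = 1 + 1 + 16 = 18, so the
    # mean squared error is 18 / 3 = 6.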
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(6, sess.run(update_op))
self.assertEqual(6, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
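    # Only the second and fourth elements carry nonzero weight; their squared
    # errors are 1 and 25, so the weighted mean is 26 / 2 = 13.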
error, update_op = metrics.streaming_mean_squared_error(
predictions, labels, weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(13, sess.run(update_op))
self.assertEqual(13, error.eval())
def testMultipleBatchesOfSizeOne(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
error, update_op = metrics.streaming_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
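      # Running update_op twice consumes both queued batches. The total squared
      # error is (81 + 25 + 16) + (36 + 1 + 49) = 208 over 6 elements.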
sess.run(update_op)
self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
self.assertAlmostEqual(208.0 / 6, error.eval(), 5)
def testMetricsComputedConcurrently(self):
with self.cached_session() as sess:
# Create the queue that populates one set of predictions.
preds_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue0, [10, 8, 6])
_enqueue_vector(sess, preds_queue0, [-4, 3, -1])
predictions0 = preds_queue0.dequeue()
# Create the queue that populates one set of predictions.
preds_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue1, [0, 1, 1])
_enqueue_vector(sess, preds_queue1, [1, 1, 0])
predictions1 = preds_queue1.dequeue()
# Create the queue that populates one set of labels.
labels_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue0, [1, 3, 2])
_enqueue_vector(sess, labels_queue0, [2, 4, 6])
labels0 = labels_queue0.dequeue()
# Create the queue that populates another set of labels.
labels_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue1, [-5, -3, -1])
_enqueue_vector(sess, labels_queue1, [5, 4, 3])
labels1 = labels_queue1.dequeue()
mse0, update_op0 = metrics.streaming_mean_squared_error(
predictions0, labels0, name='msd0')
mse1, update_op1 = metrics.streaming_mean_squared_error(
predictions1, labels1, name='msd1')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1])
sess.run([update_op0, update_op1])
mse0, mse1 = sess.run([mse0, mse1])
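      # After both batches: mse0 = (81 + 25 + 16 + 36 + 1 + 49) / 6 = 208 / 6
      # and mse1 = (25 + 16 + 4 + 16 + 9 + 9) / 6 = 79 / 6.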
self.assertAlmostEqual(208.0 / 6, mse0, 5)
self.assertAlmostEqual(79.0 / 6, mse1, 5)
def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
mae, ma_update_op = metrics.streaming_mean_absolute_error(
predictions, labels)
mse, ms_update_op = metrics.streaming_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
sess.run([ma_update_op, ms_update_op])
sess.run([ma_update_op, ms_update_op])
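      # Over both batches the absolute errors sum to 18 + 14 = 32 and the
      # squared errors to 122 + 86 = 208, giving MAE 32/6 and MSE 208/6.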
self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)
self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)
class StreamingRootMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('root_mean_squared_error/count:0', 'root_mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
0.0, shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(0.0, shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, rmse.eval())
def testSingleUpdateWithError(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(6), update_op.eval(), 5)
self.assertAlmostEqual(math.sqrt(6), rmse.eval(), 5)
def testSingleUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
rmse, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels, weights)
sess.run(variables.local_variables_initializer())
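      # With weights [0, 1, 0, 1] only the 2nd and 4th squared errors count:
      # (4-3)^2 = 1 and (8-3)^2 = 25, so the weighted MSE is 26/2 = 13 and the
      # expected RMSE is sqrt(13).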
self.assertAlmostEqual(math.sqrt(13), sess.run(update_op))
self.assertAlmostEqual(math.sqrt(13), rmse.eval(), 5)
class StreamingCovarianceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_covariance(
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])))
_assert_metric_variables(self, (
'covariance/comoment:0',
'covariance/count:0',
'covariance/mean_label:0',
'covariance/mean_prediction:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
cov, _ = metrics.streaming_covariance(
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [cov])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_covariance(
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
cov, update_op = metrics.streaming_covariance(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_cov = cov.eval()
for _ in range(10):
self.assertEqual(initial_cov, cov.eval())
def testSingleUpdateIdentical(self):
with self.cached_session() as sess:
predictions = math_ops.cast(math_ops.range(10), dtypes_lib.float32)
labels = math_ops.cast(math_ops.range(10), dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op), 5)
self.assertAlmostEqual(expected_cov, cov.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, update_op.eval())
self.assertAlmostEqual(expected_cov, cov.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 7], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 3, 1], shape=(1, 4), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(
predictions, labels, weights=weights)
expected_cov = np.cov(
[2, 4, 6, 8], [1, 3, 2, 7], fweights=[0, 1, 3, 1])[0, 1]
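      # The test uses integer weights, so the expected value can be computed
      # with np.cov's fweights (frequency weights: each sample is counted
      # `weight` times), which the weighted streaming update should match.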
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op))
self.assertAlmostEqual(expected_cov, cov.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(prev_expected_cov,
sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(expected_cov,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_cov, sess.run(cov, feed_dict=feed_dict),
5)
prev_expected_cov = expected_cov
def testMultiUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(prev_expected_cov,
sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(
predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(expected_cov,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_cov, sess.run(cov, feed_dict=feed_dict),
5)
prev_expected_cov = expected_cov
class StreamingPearsonRTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_pearson_correlation(
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])))
_assert_metric_variables(self, (
'pearson_r/covariance/comoment:0',
'pearson_r/covariance/count:0',
'pearson_r/covariance/mean_label:0',
'pearson_r/covariance/mean_prediction:0',
'pearson_r/variance_labels/count:0',
'pearson_r/variance_labels/comoment:0',
'pearson_r/variance_labels/mean_label:0',
'pearson_r/variance_labels/mean_prediction:0',
'pearson_r/variance_predictions/comoment:0',
'pearson_r/variance_predictions/count:0',
'pearson_r/variance_predictions/mean_label:0',
'pearson_r/variance_predictions/mean_prediction:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
pearson_r, _ = metrics.streaming_pearson_correlation(
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [pearson_r])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_pearson_correlation(
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_r = pearson_r.eval()
for _ in range(10):
self.assertEqual(initial_r, pearson_r.eval())
def testSingleUpdateIdentical(self):
with self.cached_session() as sess:
predictions = math_ops.cast(math_ops.range(10), dtypes_lib.float32)
labels = math_ops.cast(math_ops.range(10), dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions, labels)
expected_r = np.corrcoef(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op), 5)
self.assertAlmostEqual(expected_r, pearson_r.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions, labels)
expected_r = np.corrcoef([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, update_op.eval())
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
predictions = np.array([2, 4, 6, 8])
labels = np.array([1, 3, 2, 7])
weights = np.array([0, 1, 3, 1])
predictions_t = constant_op.constant(
predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels_t = constant_op.constant(
labels, shape=(1, 4), dtype=dtypes_lib.float32)
weights_t = constant_op.constant(
weights, shape=(1, 4), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
cmat = np.cov(predictions, labels, fweights=weights)
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
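      # Pearson r is the covariance normalized by both standard deviations:
      # r = cov(x, y) / sqrt(var(x) * var(y)), which is what the expression
      # above computes from the weighted covariance matrix cmat.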
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op))
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(prev_expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
expected_r = np.corrcoef(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(expected_r,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(prev_expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
cmat = np.cov(
predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
self.assertAlmostEqual(expected_r,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndSingletonBatches(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
weights = (np.arange(n).reshape(n // stride, stride) % stride == 0)
for row in weights:
np.random.shuffle(row)
# Now, weights is one-hot by row - one item per batch has non-zero weight.
weights = weights.reshape((n,))
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
cmat = np.cov(
predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
actual_r = sess.run(update_op, feed_dict=feed_dict)
self.assertEqual(np.isnan(expected_r), np.isnan(actual_r))
self.assertEqual(
np.isnan(expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(expected_r):
self.assertAlmostEqual(expected_r, actual_r, 5)
self.assertAlmostEqual(expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
class StreamingMeanCosineDistanceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1)
_assert_metric_variables(self, (
'mean_cosine_distance/count:0',
'mean_cosine_distance/total:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=1)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError1(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 5)
self.assertAlmostEqual(1, error.eval(), 5)
def testSingleUpdateWithError2(self):
np_predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
np_labels = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.0, error.eval(), 5)
def testSingleUpdateWithErrorAndWeights1(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithErrorAndWeights2(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 1], shape=(3, 1, 1), dtype=dtypes_lib.float32)
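    # Cosine distance is 1 - cos(angle) per row; the two weighted rows give
    # 1 - (-1) = 2 and 1 - 0 = 1, so the weighted mean distance is 3/2 = 1.5.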
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.5, update_op.eval())
self.assertEqual(1.5, error.eval())
class PcntBelowThreshTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_percentage_less(values=array_ops.ones((10,)), threshold=2)
_assert_metric_variables(self, (
'percentage_below_threshold/count:0',
'percentage_below_threshold/total:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testOneUpdate(self):
with self.cached_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, name='low')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1, update_op2])
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.75, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
def testSomePresentOneUpdate(self):
with self.cached_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, weights=weights, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, weights=weights, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, weights=weights, name='low')
sess.run(variables.local_variables_initializer())
self.assertListEqual([1.0, 0.5, 0.0],
sess.run([update_op0, update_op1, update_op2]))
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.5, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
class StreamingMeanIOUTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2)
_assert_metric_variables(self, ('mean_iou/total_confusion_matrix:0',))
def testMetricsCollections(self):
my_collection_name = '__metrics__'
mean_iou, _ = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean_iou])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10, 3])
labels = array_ops.ones([10, 4])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(predictions, labels, num_classes=2)
def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10])
labels = array_ops.ones([10])
weights = array_ops.zeros([9])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(
predictions, labels, num_classes=2, weights=weights)
def testValueTensorIsIdempotent(self):
num_classes = 3
predictions = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=2)
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes=num_classes)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_miou = miou.eval()
for _ in range(10):
self.assertEqual(initial_miou, miou.eval())
def testMultipleUpdates(self):
num_classes = 3
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 2.0, 1.0 / 4.0, 0.])
self.assertEqual(desired_output, miou.eval())
def testMultipleUpdatesWithWeights(self):
num_classes = 2
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
weights = weights_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
for _ in range(6):
sess.run(update_op)
desired_output = np.mean([2.0 / 3.0, 1.0 / 2.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testMultipleUpdatesWithMissingClass(self):
    # Test the case where there are no predictions and labels for
# one class, and thus there is one row and one column with
# zero entries in the confusion matrix.
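    # Because that class has an empty row and column, its IoU is undefined and
    # the expected mean below averages only the two classes that do occur.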
num_classes = 3
with self.cached_session() as sess:
# Create the queue that populates the predictions.
# There is no prediction for class 2.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
      # There is no label for class 2.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 3.0, 2.0 / 4.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
predictions = array_ops.concat([
constant_op.constant(0, shape=[5]),
constant_op.constant(1, shape=[5])
], 0)
labels = array_ops.concat([
constant_op.constant(0, shape=[3]),
constant_op.constant(1, shape=[7])
], 0)
num_classes = 2
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
confusion_matrix = update_op.eval()
self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)
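      # Per-class IoU is diag / (row_sum + col_sum - diag): 3 / (3 + 5 - 3) = 3/5
      # for class 0 and 5 / (7 + 5 - 5) = 5/7 for class 1, as used below.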
desired_miou = np.mean([3. / 5., 5. / 7.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testAllCorrect(self):
predictions = array_ops.zeros([40])
labels = array_ops.zeros([40])
num_classes = 1
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertEqual(40, update_op.eval()[0])
self.assertEqual(1.0, miou.eval())
def testAllWrong(self):
predictions = array_ops.zeros([40])
labels = array_ops.ones([40])
num_classes = 2
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
self.assertEqual(0., miou.eval())
def testResultsWithSomeMissing(self):
predictions = array_ops.concat([
constant_op.constant(0, shape=[5]),
constant_op.constant(1, shape=[5])
], 0)
labels = array_ops.concat([
constant_op.constant(0, shape=[3]),
constant_op.constant(1, shape=[7])
], 0)
num_classes = 2
weights = array_ops.concat([
constant_op.constant(0, shape=[1]),
constant_op.constant(1, shape=[8]),
constant_op.constant(0, shape=[1])
], 0)
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
desired_miou = np.mean([2. / 4., 4. / 6.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testMissingClassInLabels(self):
labels = constant_op.constant([[[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 0, 1]],
[[1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]])
predictions = constant_op.constant(
[[[0, 0, 2, 1, 1, 0], [0, 1, 2, 2, 0, 1]], [[0, 0, 2, 1, 1, 1],
[1, 1, 2, 0, 0, 0]]])
num_classes = 3
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[7, 4, 3], [3, 5, 2], [0, 0, 0]], update_op.eval())
self.assertAlmostEqual(1 / 3 * (7 / (7 + 3 + 7) + 5 / (5 + 4 + 5) + 0 /
(0 + 5 + 0)), miou.eval())
def testMissingClassOverallSmall(self):
labels = constant_op.constant([0])
predictions = constant_op.constant([0])
num_classes = 2
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[1, 0], [0, 0]], update_op.eval())
self.assertAlmostEqual(1, miou.eval())
def testMissingClassOverallLarge(self):
labels = constant_op.constant([[[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 0, 1]],
[[1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]])
predictions = constant_op.constant(
[[[0, 0, 1, 1, 0, 0], [1, 1, 0, 0, 1, 1]], [[0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0]]])
num_classes = 3
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[9, 5, 0], [3, 7, 0], [0, 0, 0]], update_op.eval())
self.assertAlmostEqual(1 / 2 * (9 / (9 + 3 + 5) + 7 / (7 + 5 + 3)),
miou.eval())
class StreamingConcatTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_concat(values=array_ops.ones((10,)))
_assert_metric_variables(self, (
'streaming_concat/array:0',
'streaming_concat/size:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
value, _ = metrics.streaming_concat(
values=array_ops.ones((10,)), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [value])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_concat(
values=array_ops.ones((10,)), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testNextArraySize(self):
next_array_size = metric_ops._next_array_size # pylint: disable=protected-access
with self.cached_session():
self.assertEqual(next_array_size(2, growth_factor=2).eval(), 2)
self.assertEqual(next_array_size(3, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(4, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(5, growth_factor=2).eval(), 8)
self.assertEqual(next_array_size(6, growth_factor=2).eval(), 8)
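      # _next_array_size grows the backing array geometrically: it returns the
      # smallest power of growth_factor that is at least the requested size,
      # hence 3 -> 4 and 5 -> 8 in the checks above.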
def testStreamingConcat(self):
with self.cached_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([0, 1, 2, 3, 4], concatenated.eval())
sess.run([update_op], feed_dict={values: [5, 6, 7, 8, 9]})
self.assertAllEqual(np.arange(10), concatenated.eval())
def testStreamingConcatStringValues(self):
with self.cached_session() as sess:
values = array_ops.placeholder(dtypes_lib.string, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertItemsEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: ['a', 'b', 'c']})
self.assertItemsEqual([b'a', b'b', b'c'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['d', 'e']})
self.assertItemsEqual([b'a', b'b', b'c', b'd', b'e'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['f', 'g', 'h', 'i', 'j']})
self.assertItemsEqual(
[b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j'],
concatenated.eval())
def testStreamingConcatMaxSize(self):
with self.cached_session() as sess:
values = math_ops.range(3)
concatenated, update_op = metrics.streaming_concat(values, max_size=5)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
def testStreamingConcat2D(self):
with self.cached_session() as sess:
values = array_ops.reshape(math_ops.range(3), (3, 1))
concatenated, update_op = metrics.streaming_concat(values, axis=-1)
sess.run(variables.local_variables_initializer())
for _ in range(10):
sess.run([update_op])
self.assertAllEqual([[0] * 10, [1] * 10, [2] * 10], concatenated.eval())
def testStreamingConcatErrors(self):
with self.assertRaises(ValueError):
metrics.streaming_concat(array_ops.placeholder(dtypes_lib.float32))
values = array_ops.zeros((2, 3))
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=-3, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=2, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(
array_ops.placeholder(dtypes_lib.float32, [None, None]))
def testStreamingConcatReset(self):
with self.cached_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run(variables.local_variables_initializer())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([3, 4], concatenated.eval())
class AggregateMetricsTest(test.TestCase):
def testAggregateNoMetricsRaisesValueError(self):
with self.assertRaises(ValueError):
metrics.aggregate_metrics()
def testAggregateSingleMetricReturnsOneItemLists(self):
values = array_ops.ones((10, 4))
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean(values))
self.assertEqual(len(value_tensors), 1)
self.assertEqual(len(update_ops), 1)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, update_ops[0].eval())
self.assertEqual(1, value_tensors[0].eval())
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean_absolute_error(predictions, labels),
metrics.streaming_mean_squared_error(predictions, labels))
self.assertEqual(len(value_tensors), 2)
self.assertEqual(len(update_ops), 2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, update_ops[0].eval())
self.assertEqual(4, update_ops[1].eval())
self.assertEqual(2, value_tensors[0].eval())
self.assertEqual(4, value_tensors[1].eval())
class AggregateMetricMapTest(test.TestCase):
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
names_to_values, names_to_updates = metrics.aggregate_metric_map({
'm1': metrics.streaming_mean_absolute_error(predictions, labels),
'm2': metrics.streaming_mean_squared_error(predictions, labels),
})
self.assertEqual(2, len(names_to_values))
self.assertEqual(2, len(names_to_updates))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, names_to_updates['m1'].eval())
self.assertEqual(4, names_to_updates['m2'].eval())
self.assertEqual(2, names_to_values['m1'].eval())
self.assertEqual(4, names_to_values['m2'].eval())
class CountTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.count(array_ops.ones([4, 3]))
_assert_metric_variables(self, ['count/count:0'])
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.count(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.count(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testReturnType(self):
c, op = metrics.count(array_ops.ones([4, 3]))
self.assertTrue(isinstance(c, ops.Tensor))
self.assertTrue(isinstance(op, ops.Operation) or isinstance(op, ops.Tensor))
def testBasic(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
result, update_op = metrics.count(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(8.0, sess.run(result), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
result, update_op = metrics.count(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(2.0, sess.run(update_op), 5)
self.assertAlmostEqual(4.0, sess.run(update_op), 5)
self.assertAlmostEqual(6.0, sess.run(update_op), 5)
self.assertAlmostEqual(8.0, sess.run(update_op), 5)
self.assertAlmostEqual(8.0, sess.run(result), 5)
def test1dWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [0.5])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1.2])
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual(3.4, result.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 0.5, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1.2, shape=(1,))
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual(3.4, result.eval(), 5)
def test2dWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1.1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual(4.1, result.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1.1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual(4.1, result.eval(), 5)
class CohenKappaTest(test.TestCase):
def _confusion_matrix_to_samples(self, confusion_matrix):
x, y = confusion_matrix.shape
pairs = []
for label in range(x):
for feature in range(y):
pairs += [label, feature] * confusion_matrix[label, feature]
pairs = np.array(pairs).reshape((-1, 2))
return pairs[:, 0], pairs[:, 1]
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2)
_assert_metric_variables(self, (
'cohen_kappa/po:0',
'cohen_kappa/pe_row:0',
'cohen_kappa/pe_col:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
kappa, _ = metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [kappa])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 1), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 1), maxval=3, dtype=dtypes_lib.int64, seed=2)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 3)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_kappa = kappa.eval()
for _ in range(10):
self.assertAlmostEqual(initial_kappa, kappa.eval(), 5)
def testBasic(self):
confusion_matrix = np.array([[9, 3, 1], [4, 8, 2], [2, 1, 6]])
# overall total = 36
# po = [9, 8, 6], sum(po) = 23
# pe_row = [15, 12, 9], pe_col = [13, 14, 9], so pe = [5.42, 4.67, 2.25]
# finally, kappa = (sum(po) - sum(pe)) / (N - sum(pe))
# = (23 - 12.34) / (36 - 12.34)
# = 0.45
# see: http://psych.unl.edu/psycrs/handcomp/hckappa.PDF
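    # Equivalently, with observed agreement p_o = 23/36 and chance agreement
    # p_e = 12.34/36, kappa = (p_o - p_e) / (1 - p_e), which also gives ~0.45.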
expect = 0.45
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
dtypes = [dtypes_lib.int16, dtypes_lib.int32, dtypes_lib.int64]
    shapes = [
        (len(labels,)),   # 1-dim
        (len(labels), 1)  # 2-dim
    ]
weights = [None, np.ones_like(labels)]
for dtype in dtypes:
for shape in shapes:
for weight in weights:
with self.cached_session() as sess:
predictions_tensor = constant_op.constant(
np.reshape(predictions, shape), dtype=dtype)
labels_tensor = constant_op.constant(
np.reshape(labels, shape), dtype=dtype)
kappa, update_op = metrics.cohen_kappa(
labels_tensor, predictions_tensor, 3, weights=weight)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 2)
self.assertAlmostEqual(expect, kappa.eval(), 2)
def testAllCorrect(self):
inputs = np.arange(0, 100) % 4
# confusion matrix
    # [[25, 0, 0, 0],
    #  [0, 25, 0, 0],
    #  [0, 0, 25, 0],
    #  [0, 0, 0, 25]]
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(inputs, inputs)
expect = 1.0
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 4)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testAllIncorrect(self):
labels = np.arange(0, 100) % 4
predictions = (labels + 1) % 4
# confusion matrix
    # [[0, 25, 0, 0],
    #  [0, 0, 25, 0],
    #  [0, 0, 0, 25],
    #  [25, 0, 0, 0]]
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(labels, predictions)
expect = -0.333333333333
with self.cached_session() as sess:
predictions = constant_op.constant(predictions, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 4)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testWeighted(self):
confusion_matrix = np.array([[9, 3, 1], [4, 8, 2], [2, 1, 6]])
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
num_samples = np.sum(confusion_matrix, dtype=np.int32)
weights = (np.arange(0, num_samples) % 5) / 5.0
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(
# labels, predictions, sample_weight=weights)
expect = 0.453466583385
with self.cached_session() as sess:
predictions = constant_op.constant(predictions, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels)
kappa, update_op = metrics.cohen_kappa(
labels, predictions, 4, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testWithMultipleUpdates(self):
confusion_matrix = np.array([[90, 30, 10, 20], [40, 80, 20, 30],
[20, 10, 60, 35], [15, 25, 30, 25]])
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
num_samples = np.sum(confusion_matrix, dtype=np.int32)
weights = (np.arange(0, num_samples) % 5) / 5.0
num_classes = confusion_matrix.shape[0]
batch_size = num_samples // 10
predictions_t = array_ops.placeholder(
dtypes_lib.float32, shape=(batch_size,))
labels_t = array_ops.placeholder(dtypes_lib.int32, shape=(batch_size,))
weights_t = array_ops.placeholder(dtypes_lib.float32, shape=(batch_size,))
kappa, update_op = metrics.cohen_kappa(
labels_t, predictions_t, num_classes, weights=weights_t)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for idx in range(0, num_samples, batch_size):
batch_start, batch_end = idx, idx + batch_size
sess.run(
update_op,
feed_dict={
labels_t: labels[batch_start:batch_end],
predictions_t: predictions[batch_start:batch_end],
weights_t: weights[batch_start:batch_end]
})
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(
# labels_np, predictions_np, sample_weight=weights_np)
expect = 0.289965397924
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testInvalidNumClasses(self):
predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 1))
labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 1))
with self.assertRaisesRegexp(ValueError, 'num_classes'):
metrics.cohen_kappa(labels, predictions, 1)
def testInvalidDimension(self):
predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 1))
invalid_labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 2))
with self.assertRaises(ValueError):
metrics.cohen_kappa(invalid_labels, predictions, 3)
invalid_predictions = array_ops.placeholder(
dtypes_lib.float32, shape=(4, 2))
labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 1))
with self.assertRaises(ValueError):
metrics.cohen_kappa(labels, invalid_predictions, 3)
def testConditionalPackingOptimization(self):
placeholder = array_ops.placeholder(dtypes_lib.float32, [None])
values, update_op = metric_ops.streaming_concat(placeholder)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for feed in range(10):
sess.run(update_op, feed_dict={placeholder: [feed]})
print(sess.run(values))
if __name__ == '__main__':
test.main()
| apache-2.0 |
trevstanhope/agri-vision | agrivision.py | 1 | 27286 | """
Agri-Vision
Precision Agriculture and Soil Sensing Group (PASS)
McGill University, Department of Bioresource Engineering
IDEAS:
- Rotation compensation --> take Hough Line of plants to estimate row angle
"""
__author__ = 'Trevor Stanhope'
__version__ = '2.01'
## Libraries
import cv2, cv
import serial
import pymongo
from bson import json_util
from pymongo import MongoClient
import json
import numpy as np
from matplotlib import pyplot as plt
import thread
import gps
import time
import sys
from datetime import datetime
import ast
import os
## Constants
try:
CONFIG_FILE = '%s' % sys.argv[1]
except Exception as err:
settings = open('settings.cfg').read()
CONFIG_FILE = settings.rstrip()
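# Intended invocation (inferred from the fallback above): either pass a config path
# on the command line, e.g. `python agrivision.py config/example.json`, or keep the
# default path on the first line of settings.cfg. The filename shown is illustrative.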
def pretty_print(task, msg, *args):
date = datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S.%f")
print "%s\t%s\t%s" % (date, task, msg)
## Class
class AgriVision:
def __init__(self, config_file):
# Load Config
pretty_print("CONFIG", "Loading %s" % config_file)
self.config = json.loads(open(config_file).read())
for key in self.config:
try:
getattr(self, key)
except AttributeError as error:
setattr(self, key, self.config[key])
# Initializers
self.init_log() # it's best to run the log first to catch all events
self.init_cameras()
self.init_controller()
self.init_pid()
self.init_db()
self.init_gps()
if self.DISPLAY_ON: self.init_display()
# Initialize Cameras
def init_cameras(self):
# Setting variables
pretty_print('CAM', 'Initializing CV Variables')
if self.CAMERA_ROTATED:
self.CAMERA_HEIGHT, self.CAMERA_WIDTH = self.CAMERA_WIDTH, self.CAMERA_HEIGHT # flip dimensions if rotated
self.CAMERA_CENTER = self.CAMERA_WIDTH / 2
if self.VERBOSE:
pretty_print('CAM', 'Camera Width: %d px' % self.CAMERA_WIDTH)
pretty_print('CAM', 'Camera Height: %d px' % self.CAMERA_HEIGHT)
pretty_print('CAM', 'Camera Center: %d px' % self.CAMERA_CENTER)
pretty_print('CAM', 'Camera Depth: %d cm' % self.CAMERA_DEPTH)
pretty_print('CAM', 'Camera FOV: %f rad' % self.CAMERA_FOV)
if self.VERBOSE:
pretty_print('INIT', 'Image Center: %d px' % self.CAMERA_CENTER)
self.GROUND_WIDTH = 2 * self.CAMERA_DEPTH * np.tan(self.CAMERA_FOV / 2.0)
pretty_print('CAM', 'Ground Width: %d cm' % self.GROUND_WIDTH)
pretty_print('CAM', 'Error Tolerance: +/- %d cm' % self.ERROR_TOLERANCE)
self.PIXEL_PER_CM = self.CAMERA_WIDTH / self.GROUND_WIDTH
pretty_print('CAM', 'Pixel-per-cm: %d px/cm' % self.PIXEL_PER_CM)
self.PIXEL_RANGE = int(self.PIXEL_PER_CM * self.ERROR_TOLERANCE)
pretty_print('CAM', 'Pixel Range: +/- %d px' % self.PIXEL_RANGE)
self.PIXEL_MIN = self.CAMERA_CENTER - self.PIXEL_RANGE
self.PIXEL_MAX = self.CAMERA_CENTER + self.PIXEL_RANGE
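        # Worked example with made-up numbers (not from any shipped config):
        # CAMERA_DEPTH = 100 cm, CAMERA_FOV = 0.5 rad and CAMERA_WIDTH = 640 px
        # give GROUND_WIDTH = 2 * 100 * tan(0.25) ~= 51 cm and PIXEL_PER_CM ~= 12.5,
        # so ERROR_TOLERANCE = 2 cm maps to a band of roughly +/- 25 px
        # around CAMERA_CENTER.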
# Set Thresholds
self.threshold_min = np.array([self.HUE_MIN, self.SAT_MIN, self.VAL_MIN], np.uint8)
self.threshold_max = np.array([self.HUE_MAX, self.SAT_MAX, self.VAL_MAX], np.uint8)
# Attempt to set each camera index/name
pretty_print('CAM', 'Initializing Cameras')
self.cameras = []
self.images = []
for i in range(self.CAMERAS):
try:
if self.VERBOSE: pretty_print('CAM', 'Attaching Camera #%d' % i)
cam = cv2.VideoCapture(i)
cam.set(cv.CV_CAP_PROP_SATURATION, self.CAMERA_SATURATION)
cam.set(cv.CV_CAP_PROP_BRIGHTNESS, self.CAMERA_BRIGHTNESS)
cam.set(cv.CV_CAP_PROP_CONTRAST, self.CAMERA_CONTRAST)
cam.set(cv.CV_CAP_PROP_FPS, self.CAMERA_FPS)
if not self.CAMERA_ROTATED:
cam.set(cv.CV_CAP_PROP_FRAME_WIDTH, self.CAMERA_WIDTH)
cam.set(cv.CV_CAP_PROP_FRAME_HEIGHT, self.CAMERA_HEIGHT)
else:
cam.set(cv.CV_CAP_PROP_FRAME_WIDTH, self.CAMERA_HEIGHT)
cam.set(cv.CV_CAP_PROP_FRAME_HEIGHT, self.CAMERA_WIDTH)
self.cameras.append(cam)
self.images.append(np.zeros((self.CAMERA_HEIGHT, self.CAMERA_WIDTH, 3), np.uint8))
if self.VERBOSE: pretty_print('CAM', 'Camera #%d OK' % i)
except Exception as error:
pretty_print('CAM', 'ERROR: %s' % str(error))
# Initialize Database
def init_db(self):
self.LOG_NAME = datetime.strftime(datetime.now(), self.LOG_FORMAT)
self.MONGO_NAME = datetime.strftime(datetime.now(), self.MONGO_FORMAT)
if self.VERBOSE: pretty_print('DB', 'Initializing MongoDB')
if self.VERBOSE: pretty_print('DB', 'Connecting to MongoDB: %s' % self.MONGO_NAME)
if self.VERBOSE: pretty_print('DB', 'New session: %s' % self.LOG_NAME)
try:
self.client = MongoClient()
self.database = self.client[self.MONGO_NAME]
self.collection = self.database[self.LOG_NAME]
if self.VERBOSE: pretty_print('DB', 'Setup OK')
except Exception as error:
pretty_print('DB', 'ERROR: %s' % str(error))
# Initialize PID Controller
def init_pid(self):
        if self.VERBOSE: pretty_print('PID', 'Initializing Electro-Hydraulics')
if self.VERBOSE: pretty_print('PID', 'PWM Minimum: %d' % self.PWM_MIN)
if self.VERBOSE: pretty_print('PID', 'PWM Maximum: %d' % self.PWM_MAX)
        self.CENTER_PWM = int((self.PWM_MIN + self.PWM_MAX) / 2.0) # midpoint of the PWM range
if self.VERBOSE: pretty_print('PID', 'PWM Center: %d' % self.CENTER_PWM)
try:
if self.VERBOSE: pretty_print('PID', 'Default Number of Averages: %d' % self.NUM_AVERAGES)
self.offset_history = [self.CAMERA_CENTER] * self.NUM_AVERAGES
if self.VERBOSE: pretty_print('PID', 'Setup OK')
except Exception as error:
pretty_print('PID', 'ERROR: %s' % str(error))
self.average = 0
self.estimated = 0
self.pwm = 0
# Initialize Log
def init_log(self):
if self.VERBOSE: pretty_print('LOG', 'Initializing Log')
self.LOG_NAME = datetime.strftime(datetime.now(), self.LOG_FORMAT)
if self.VERBOSE: pretty_print('LOG', 'New log file: %s' % self.LOG_NAME)
try:
self.log = open('logs/' + self.LOG_NAME + '.csv', 'w')
            self.log.write(','.join(['time', 'lat', 'long', 'speed', 'estimate', 'average', 'pwm', '\n'])) # header matches the columns written by log_file()
if self.VERBOSE: pretty_print('LOG', 'Setup OK')
except Exception as error:
pretty_print('ERROR', str(error))
# Initialize Controller
def init_controller(self):
if self.VERBOSE: pretty_print('CTRL', 'Initializing controller ...')
try:
if self.VERBOSE: pretty_print('CTRL', 'Device: %s' % str(self.SERIAL_DEVICE))
if self.VERBOSE: pretty_print('CTRL', 'Baud Rate: %s' % str(self.SERIAL_BAUD))
self.controller = serial.Serial(self.SERIAL_DEVICE, self.SERIAL_BAUD)
pretty_print('CTRL', 'Setup OK')
except Exception as error:
pretty_print('CTRL', 'ERROR: %s' % str(error))
# Initialize GPS
def init_gps(self):
if self.VERBOSE: pretty_print('GPS', 'Initializing GPS ...')
self.latitude = 0
self.longitude = 0
self.speed = 0
try:
            if self.VERBOSE: pretty_print('GPS', 'Enabling GPS ...')
self.gpsd = gps.gps()
self.gpsd.stream(gps.WATCH_ENABLE)
thread.start_new_thread(self.update_gps, ())
except Exception as err:
pretty_print('GPS', 'WARNING: GPS not available! %s' % str(err))
# Display
def init_display(self):
if self.VERBOSE: pretty_print('INIT', 'Initializing Display')
try:
self.updating = False
if self.DISPLAY_ON:
thread.start_new_thread(self.update_display, ())
except Exception as error:
pretty_print('DISP', 'ERROR: %s' % str(error))
## Rotate image
def rotate_image(self, bgr):
bgr = cv2.transpose(bgr)
return bgr
def capture_images(self):
a = time.time()
pretty_print('CAM', 'Capturing Images ...')
images = []
for i in range(self.CAMERAS):
pretty_print('CAM', 'Attempting on Camera #%d' % i)
try:
(s, bgr) = self.cameras[i].read()
if s and (self.images[i] is not None):
if self.CAMERA_ROTATED: bgr = self.rotate_image(bgr)
if np.all(bgr==self.images[i]):
images.append(None)
pretty_print('CAM', 'ERROR: Frozen frame')
else:
pretty_print('CAM', 'Capture successful: %s' % str(bgr.shape))
images.append(bgr)
else:
pretty_print('CAM', 'ERROR: Capture failed')
                    self.images[i] = np.zeros((self.CAMERA_HEIGHT, self.CAMERA_WIDTH, 3), np.uint8)
                    images.append(None) # keep one entry per camera so downstream indexing stays aligned
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
images.append(None)
b = time.time()
pretty_print('CAM', '... %.2f ms' % ((b - a) * 1000))
return images
## Plant Segmentation Filter
"""
    1. BGR --> HSV
2. Set minimum saturation equal to the mean saturation
3. Set minimum value equal to the mean value
4. Take hues within range from green-yellow to green-blue
"""
def plant_filter(self, images):
pretty_print('BPPD', 'Filtering for plants ...')
a = time.time()
masks = []
for bgr in images:
if bgr is not None:
try:
hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
self.threshold_min[1] = np.percentile(hsv[:,:,1], 100 * self.SAT_MIN / 255.0) # overwrite the saturation minima
self.threshold_min[2] = np.percentile(hsv[:,:,2], 100 * self.VAL_MIN / 255.0) # overwrite the value minima
self.threshold_max[1] = 255
self.threshold_max[2] = np.percentile(hsv[:,:,2], 100 * self.VAL_MAX / 255.0)
mask = cv2.inRange(hsv, self.threshold_min, self.threshold_max)
masks.append(mask)
if self.VERBOSE: pretty_print('BPPD', 'Mask Number #%d was successful' % len(masks))
except Exception as error:
pretty_print('BPPD', str(error))
else:
if self.VERBOSE: pretty_print('BPPD', 'Mask Number #%d is blank' % len(masks))
masks.append(None)
b = time.time()
if self.VERBOSE: pretty_print('BPPD', '... %.2f ms' % ((b - a) * 1000))
return masks
## Find Plants
"""
1. Calculates the column summation of the mask
2. Calculates the 95th percentile threshold of the column sum array
3. Finds indicies which are greater than or equal to the threshold
4. Finds the median of this array of indices
5. Repeat for each mask
"""
def find_offset(self, masks):
a = time.time()
offsets = []
sums = []
for mask in masks:
if mask is not None:
try:
column_sum = mask.sum(axis=0) # vertical summation
threshold = np.percentile(column_sum, self.THRESHOLD_PERCENTILE)
                    probable = np.nonzero(column_sum >= threshold) # returns a 1-tuple of index arrays
if self.DEBUG:
fig = plt.figure()
plt.plot(range(self.CAMERA_WIDTH), column_sum)
plt.show()
time.sleep(0.1)
plt.close(fig)
num_probable = len(probable[0])
if (num_probable == self.CAMERA_WIDTH * self.THRESHOLD_PERCENTILE / 100.0):
print "WARNING"
time.sleep(1)
best = int(np.median(probable[0]))
sum = column_sum[best]
centroid = best - self.CAMERA_CENTER
offsets.append(centroid)
sums.append(sum)
except Exception as error:
pretty_print('OFF', '%s' % str(error))
if self.VERBOSE: pretty_print('OFF', 'Detected offsets: %s' % str(offsets))
b = time.time()
if self.VERBOSE: pretty_print('OFF', '... %.2f ms' % ((b - a) * 1000))
return offsets, sums
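    # Worked example for find_offset (illustrative, with made-up numbers): if a mask's
    # column sum is [0, 0, 9, 10, 8, 0] and THRESHOLD_PERCENTILE is 95, the threshold is
    # 9.75, the only probable column is index 3, the median "best" column is 3, and the
    # returned offset is 3 - CAMERA_CENTER with a sum of 10.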
## Best Guess for row based on multiple offsets from indices
"""
    1. Take the offset from whichever camera produced the strongest column sum
    2. Append it to the offset history (bounded to NUM_AVERAGES entries)
    3. Return the estimate, the moving average of the history, and their difference
"""
def estimate_row(self, indices, sums):
a = time.time()
if self.VERBOSE: pretty_print('ROW', 'Smoothing offset estimation ...')
try:
indices = np.array(indices)
sums = np.array(sums)
est = indices[np.argmax(sums)]
except Exception as error:
pretty_print('ROW', 'ERROR: %s' % str(error))
est = self.CAMERA_CENTER
self.offset_history.append(est)
while len(self.offset_history) > self.NUM_AVERAGES:
self.offset_history.pop(0)
avg = int(np.mean(self.offset_history)) #!TODO
diff = est - avg #!TODO can be a little more clever e.g. np.gradient, np.convolve
if self.VERBOSE:
pretty_print('ROW', 'Est = %.2f' % est)
pretty_print('ROW', 'Avg = %.2f' % avg)
pretty_print('ROW', 'Diff.= %.2f' % diff)
b = time.time()
if self.VERBOSE: pretty_print('ROW', '... %.2f ms' % ((b - a) * 1000))
return est, avg, diff
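    # Worked example for estimate_row (illustrative, with made-up numbers): given
    # offsets [12, -3] and column sums [800, 1500], the camera with the stronger sum
    # wins, so est = -3; it is appended to offset_history (capped at NUM_AVERAGES
    # entries), avg is the mean of that history, and diff = est - avg.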
## Control Hydraulics
"""
Calculates the PID output for the PWM controller
Arguments: est, avg, diff
Requires: PWM_MAX, PWM_MIN, CENTER_PWM
    Returns: pwm (int), volts (float)
"""
def calculate_output(self, estimate, average, diff):
a = time.time()
if self.VERBOSE: pretty_print('PID', 'Calculating PID Output ...')
try:
p = estimate * self.P_COEF
i = average * self.I_COEF
d = diff * self.D_COEF
if self.VERBOSE: pretty_print('PID', "P = %.1f" % p)
if self.VERBOSE: pretty_print('PID', "I = %.1f" % i)
if self.VERBOSE: pretty_print('PID', "D = %.1f" % d)
pwm = int(p + i + d + self.CENTER_PWM) # offset to zero
if pwm > self.PWM_MAX: pwm = self.PWM_MAX
elif pwm < self.PWM_MIN: pwm = self.PWM_MIN
volts = round((pwm * (self.MAX_VOLTAGE - self.MIN_VOLTAGE) / (self.PWM_MAX - self.PWM_MIN) + self.MIN_VOLTAGE), 2)
if self.VERBOSE: pretty_print('PID', 'PWM = %d (%.2f V)' % (pwm, volts))
except Exception as error:
pretty_print('PID', 'ERROR: %s' % str(error))
pwm = self.CENTER_PWM
b = time.time()
if self.VERBOSE: pretty_print('PID', '... %.2f ms' % ((b - a) * 1000))
return pwm, volts
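    # Worked example for calculate_output (illustrative, with made-up gains): with
    # P_COEF=0.5, I_COEF=0.1, D_COEF=0.2, estimate=40 px, average=30 px and diff=10 px,
    # the raw output is 0.5*40 + 0.1*30 + 0.2*10 + CENTER_PWM = 25 + CENTER_PWM,
    # which is then clamped to [PWM_MIN, PWM_MAX] and converted to a voltage readout.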
## Control Hydraulics
"""
1. Get PWM response corresponding to average offset
2. Send PWM response over serial to controller
"""
def set_controller(self, pwm):
a = time.time()
if self.VERBOSE: pretty_print('CTRL', 'Setting controller state ...')
try:
try:
assert self.controller is not None
self.controller.write(str(pwm) + '\n') # Write to PWM adaptor
if self.VERBOSE: pretty_print('CTRL', 'Wrote successfully')
except Exception as error:
pretty_print('CTRL', 'ERROR: %s' % str(error))
except Exception as error:
pretty_print('CTRL', 'ERROR: %s' % str(error))
b = time.time()
if self.VERBOSE: pretty_print('CTRL', '... %.2f ms' % ((b - a) * 1000))
## Log to Mongo
"""
1. Log results to the database
2. Returns Doc ID
"""
def log_db(self, sample):
if self.VERBOSE: pretty_print('DB', 'Logging to Database ...')
try:
assert self.collection is not None
doc_id = self.collection.insert(sample)
if self.VERBOSE: pretty_print('DB', 'Doc ID: %s' % str(doc_id))
except Exception as error:
pretty_print('DB', 'ERROR: %s' % str(error))
return doc_id
## Log to File
"""
1. Open new text file
2. For each document in session, print parameters to file
"""
def log_file(self, sample):
if self.VERBOSE: pretty_print('LOG', 'Logging to File')
try:
assert self.log is not None
time = str(sample['time'])
latitude = str(sample['lat'])
longitude = str(sample['long'])
speed = str(sample['speed'])
estimate = str(sample['estimate'])
average = str(sample['average'])
pwm = str(sample['pwm'])
self.log.write(','.join([time, latitude, longitude, speed, estimate, average, pwm,'\n']))
except Exception as error:
pretty_print('LOG', 'ERROR: %s' % str(error))
## Update the Display
"""
0. Check for concurrent update process
1. Draw lines on RGB images
2. Draw lines on ABP masks
3. Output GUI display
"""
def update_display(self):
a = time.time()
if self.updating:
            return # if the display is already updating, skip this frame (reduces CPU load)
else:
self.updating = True
if self.VERBOSE: pretty_print('DISP', 'Displaying Images ...')
try:
pwm = self.pwm
average = self.average + self.CAMERA_CENTER
estimated = self.estimated + self.CAMERA_CENTER
masks = self.masks
images = self.images
volts = self.volts
output_images = []
distance = round((average - self.CAMERA_CENTER) / float(self.PIXEL_PER_CM), 1)
            if self.VERBOSE: pretty_print('DISP', 'Offset Distance: %.1f cm' % distance)
for i in xrange(self.CAMERAS):
try:
if self.VERBOSE: pretty_print('DISP', 'Image #%d' % (i+1))
img = images[i]
mask = masks[i]
if img is None: img = np.zeros((self.CAMERA_HEIGHT, self.CAMERA_WIDTH, 3), np.uint8)
if mask is None: mask = np.zeros((self.CAMERA_HEIGHT, self.CAMERA_WIDTH), np.uint8)
(h, w, d) = img.shape
if self.VERBOSE: pretty_print('DISP', 'Mask shape: %s' % str(mask.shape))
if self.VERBOSE: pretty_print('DISP', 'Img shape: %s' % str(img.shape))
if self.HIGHLIGHT:
img = np.dstack((mask, mask, mask))
img[:, self.PIXEL_MIN, 2] = 255
img[:, self.PIXEL_MAX, 2] = 255
img[:, self.CAMERA_CENTER, 1] = 255
img[:, average, 0] = 255
if self.VERBOSE: pretty_print('DISP', 'Highlighted detected plants')
else:
cv2.line(img, (self.PIXEL_MIN, 0), (self.PIXEL_MIN, self.CAMERA_HEIGHT), (0,0,255), 1)
cv2.line(img, (self.PIXEL_MAX, 0), (self.PIXEL_MAX, self.CAMERA_HEIGHT), (0,0,255), 1)
cv2.line(img, (average, 0), (average, self.CAMERA_HEIGHT), (0,255,0), 2)
cv2.line(img, (self.CAMERA_CENTER, 0), (self.CAMERA_CENTER, self.CAMERA_HEIGHT), (255,255,255), 1)
output_images.append(img)
except Exception as error:
pretty_print('DISP', 'ERROR: %s' % str(error))
if self.VERBOSE: pretty_print('DISP', 'Stacking images ...')
output_small = np.hstack(output_images)
pad = np.zeros((self.CAMERA_HEIGHT * 0.1, self.CAMERAS * self.CAMERA_WIDTH, 3), np.uint8) # add blank space
output_padded = np.vstack([output_small, pad])
if self.VERBOSE: pretty_print('DISP', 'Padded image')
output_large = cv2.resize(output_padded, (self.DISPLAY_WIDTH, self.DISPLAY_HEIGHT))
# Offset Distance
if average - self.CAMERA_CENTER >= 0:
distance_str = str("+%2.1f cm" % distance)
            elif average - self.CAMERA_CENTER < 0:
distance_str = str("%2.1f cm" % distance)
cv2.putText(output_large, distance_str, (int(self.DISPLAY_WIDTH * 0.01), int(self.DISPLAY_WIDTH * 0.74)), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 4)
# Output Voltage
volts_str = str("%2.1f V" % volts)
cv2.putText(output_large, volts_str, (int(self.DISPLAY_WIDTH * 0.82), int(self.DISPLAY_WIDTH * 0.74)), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 4)
# Arrow
if average - self.CAMERA_CENTER >= 0:
p = (int(self.DISPLAY_WIDTH * 0.45), int(self.DISPLAY_WIDTH * 0.72))
q = (int(self.DISPLAY_WIDTH * 0.55), int(self.DISPLAY_WIDTH * 0.72))
            elif average - self.CAMERA_CENTER < 0:
p = (int(self.DISPLAY_WIDTH * 0.55), int(self.DISPLAY_WIDTH * 0.72))
q = (int(self.DISPLAY_WIDTH * 0.45), int(self.DISPLAY_WIDTH * 0.72))
color = (255,255,255)
thickness = 8
line_type = 8
shift = 0
arrow_magnitude=20
cv2.line(output_large, p, q, color, thickness, line_type, shift) # draw arrow tail
angle = np.arctan2(p[1]-q[1], p[0]-q[0])
p = (int(q[0] + arrow_magnitude * np.cos(angle + np.pi/4)), # starting point of first line of arrow head
int(q[1] + arrow_magnitude * np.sin(angle + np.pi/4)))
cv2.line(output_large, p, q, color, thickness, line_type, shift) # draw first half of arrow head
p = (int(q[0] + arrow_magnitude * np.cos(angle - np.pi/4)), # starting point of second line of arrow head
int(q[1] + arrow_magnitude * np.sin(angle - np.pi/4)))
cv2.line(output_large, p, q, color, thickness, line_type, shift) # draw second half of arrow head
# Draw GUI
cv2.namedWindow('Agri-Vision', cv2.WINDOW_NORMAL)
if self.FULLSCREEN: cv2.setWindowProperty('Agri-Vision', cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
if self.VERBOSE: pretty_print('DISP', 'Output shape: %s' % str(output_large.shape))
cv2.imshow('Agri-Vision', output_large)
if cv2.waitKey(5) == 0:
pass
except Exception as error:
pretty_print('DISP', str(error))
self.updating = False
b = time.time()
if self.VERBOSE: pretty_print('DISP', '... %.2f ms' % ((b - a) * 1000))
## Update GPS
"""
1. Get the most recent GPS data
2. Set global variables for lat, long and speed
"""
def update_gps(self):
while True:
time.sleep(1) # GPS update time
self.gpsd.next()
self.latitude = self.gpsd.fix.latitude
self.longitude = self.gpsd.fix.longitude
self.speed = self.gpsd.fix.speed
            pretty_print('GPS', '%.6f N %.6f E' % (self.latitude, self.longitude))
## Close
"""
Function to shutdown application safely
1. Close windows
2. Disable controller
3. Release capture interfaces
"""
def close(self):
if self.VERBOSE: pretty_print('SYSTEM', 'Shutting Down ...')
time.sleep(1)
try:
if self.VERBOSE: pretty_print('CTRL', 'Closing Controller ...')
self.controller.close() ## Disable controller
time.sleep(0.5)
except Exception as error:
pretty_print('CTRL', 'ERROR: %s' % str(error))
for i in range(len(self.cameras)):
try:
if self.VERBOSE: pretty_print('CAM', 'Closing Camera #%d ...' % i)
self.cameras[i].release() ## Disable cameras
time.sleep(0.5)
except Exception as error:
pretty_print('CAM', 'ERROR: %s' % str(error))
cv2.destroyAllWindows() ## Close windows
## Run
"""
Function for Run-time loop
1. Get initial time
2. Capture images
3. Generate mask filter for plant matter
4. Calculate indices of rows
5. Estimate row from both images
6. Get number of averages
7. Calculate moving average
8. Send PWM response to controller
9. Throttle to desired frequency
10. Log results to DB
11. Display results
"""
def run(self):
while True:
try:
images = self.capture_images()
masks = self.plant_filter(images)
offsets, sums = self.find_offset(masks)
(est, avg, diff) = self.estimate_row(offsets, sums)
pwm, volts = self.calculate_output(est, avg, diff)
                self.set_controller(pwm) # set_controller has no return value
sample = {
'offsets' : offsets,
'estimated' : est,
'average' : avg,
'differential' : diff,
'pwm': pwm,
'time' : datetime.strftime(datetime.now(), self.TIME_FORMAT),
'long' : self.longitude,
'lat' : self.latitude,
'speed' : self.speed,
}
self.pwm = pwm
self.images = images
self.masks = masks
self.average = avg
self.estimated = est
self.volts = volts
if self.MONGO_ON: doc_id = self.log_db(sample)
if self.LOGFILE_ON: self.log_file(sample)
if self.DISPLAY_ON:
try:
os.environ['DISPLAY']
thread.start_new_thread(self.update_display, ())
except Exception as error:
pretty_print('SYS', 'ERROR: %s' % str(error))
except KeyboardInterrupt as error:
self.close()
break
except UnboundLocalError as error:
pass
## Main
if __name__ == '__main__':
session = AgriVision(CONFIG_FILE)
session.run()
| mit |
vrkrishn/FBHacks | src/Backend/VideoDB.py | 1 | 3435 | import numpy as np
from scipy.ndimage import gaussian_filter1d
class VideoSelect(object):
def smoothFeature(self, feature, sigma):
return gaussian_filter1d(feature, sigma)
def normalizeFeature(self, feature):
scale = np.max(feature) - np.min(feature)
return (feature - np.min(feature))/float(scale)
def getLocalMaxima(self, feature, times):
maxima = []
for i in xrange(len(feature)):
leftMaxima = (i == 0) or (feature[i] > feature[i-1])
rightMaxima = (i == len(feature) - 1) or (feature[i] > feature[i+1])
if (leftMaxima and rightMaxima):
maxima.append((times[i], feature[i]))
return maxima
def getSmoothedIntervals(self, maxima, windowSize, sigma):
if (len(maxima) == 0):
return []
nClusters = 1
clusters = [0 for i in xrange(len(maxima))]
for i in xrange(1,len(maxima)):
last_time, last_value = maxima[i-1]
time, value = maxima[i]
if (abs(time - last_time) <= windowSize):
clusters[i] = clusters[i-1]
else:
clusters[i] = clusters[i-1] + 1
nClusters = nClusters + 1
intervals = []
# Get the start time for the first maxima
currentCluster = 0
startMaxima = maxima[0]
endMaxima = maxima[0]
maxValue = maxima[0][1]
for i in xrange(1, len(clusters)):
if (clusters[i] == currentCluster):
#increment the interval time
endMaxima = maxima[i]
if (maxima[i][1] > maxValue):
maxValue = maxima[i][1]
else:
                # Close the interval spanning startMaxima..endMaxima for the finished cluster
                # interval is (start_time, end_time, max_value)
intervals.append((startMaxima[0]-windowSize/2, endMaxima[0]+windowSize/2, maxValue))
startMaxima = maxima[i]
endMaxima = maxima[i]
maxValue = maxima[i][1]
intervals.append((startMaxima[0]-windowSize/2, endMaxima[0]+windowSize/2, maxValue))
return intervals
def sortIntervalsByValue(self, intervals):
return sorted(intervals, key=lambda interval: -interval[2])
def getTopKVideoClips(self, sortedIntervals, k):
if (k > len(sortedIntervals)):
return sortedIntervals
else:
return sortedIntervals[:k]
def processFeature(self, times, feature, windowSize, k, sigma):
smoothed = self.smoothFeature(feature, sigma)
normalized = self.normalizeFeature(smoothed)
maxima = self.getLocalMaxima(normalized, times)
intervals = self.getSmoothedIntervals(maxima, windowSize,sigma)
sortedIntervals = self.sortIntervalsByValue(intervals)
topK = self.getTopKVideoClips(sortedIntervals, k)
return topK
if __name__ == '__main__':
import matplotlib.pyplot as plt
V = VideoSelect()
n = 100
data = np.random.rand(n)
#data[10] = 20
#data[15] = 30
times = np.array(range(n))
k = 3
sigma = 2
windows = [3, 5,10]
plt.subplot(len(windows)+3,1,1)
plt.plot(times, data)
smoothed = V.smoothFeature(data, sigma)
plt.subplot(len(windows)+3,1,2)
plt.plot(times, smoothed)
normalized = V.normalizeFeature(smoothed)
maxima = V.getLocalMaxima(normalized, times)
plt.subplot(len(windows)+3,1,3)
plt.plot([m[0] for m in maxima], [m[1] for m in maxima])
for i in xrange(len(windows)):
w = windows[i]
topInterval = V.processFeature(times, data, w, k, sigma)
plt.subplot(len(windows)+3,1,4+i)
print(len(topInterval))
for interval in topInterval:
plt.plot([interval[0], interval[1]], [interval[2], interval[2]])
plt.axis([min(times), max(times), 0, 2])
plt.show()
| mit |
f3r/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | 8061 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
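# Illustrative usage sketch (not part of the original module); it assumes numpy is
# importable and simply renders a NumPy-style docstring to Sphinx/reST text:
#
#     import numpy as np
#     doc = get_doc_object(np.mean, config={'use_plots': False})
#     print(str(doc))   # reST output with field lists, rubrics, autosummary tables, ...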
| bsd-3-clause |
gph82/PyEMMA | pyemma/coordinates/clustering/interface.py | 1 | 13210 |
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 18.02.2015
@author: marscher
'''
from __future__ import absolute_import
import os
from pyemma._base.model import Model
from pyemma._ext.sklearn.base import ClusterMixin
from pyemma.coordinates.clustering import regspatial
from pyemma.coordinates.transform.transformer import StreamingTransformer
from pyemma.util.discrete_trajectories import index_states, sample_indexes_by_state
from pyemma.util.files import mkdir_p
from six.moves import range, zip
import numpy as np
class AbstractClustering(StreamingTransformer, Model, ClusterMixin):
"""
provides a common interface for cluster algorithms.
Parameters
----------
metric: str, default='euclidean'
metric to pass to c extension
n_jobs: int or None, default=None
How much threads to use during assignment
If None, all available CPUs will be used.
"""
def __init__(self, metric='euclidean', n_jobs=None):
super(AbstractClustering, self).__init__()
self.metric = metric
self._clustercenters = None
self._previous_stride = -1
self._dtrajs = []
self._overwrite_dtrajs = False
self._index_states = []
self.n_jobs = n_jobs
@property
def n_jobs(self):
""" Returns number of jobs/threads to use during assignment of data.
Returns
-------
        The number of jobs. If n_jobs was set to None, this is the number of available CPUs/cores, or the value of the 'OMP_NUM_THREADS' environment variable if that is set.
Notes
-----
By setting the environment variable 'OMP_NUM_THREADS' to an integer,
one will override the default argument of n_jobs (currently None).
"""
assert isinstance(self._n_jobs, int)
return self._n_jobs
@n_jobs.setter
def n_jobs(self, val):
""" set number of jobs/threads to use via assignment of data.
Parameters
----------
val: int or None
            a positive int for the number of jobs, or None to use all available resources.
Notes
-----
"""
from pyemma.util.reflection import get_default_args
def_args = get_default_args(self.__init__)
# default value from constructor?
if val == def_args['n_jobs']:
omp_threads_from_env = os.getenv('OMP_NUM_THREADS', None)
import psutil
n_cpus = psutil.cpu_count()
if omp_threads_from_env:
try:
self._n_jobs = int(omp_threads_from_env)
self.logger.info("number of threads obtained from env variable"
" 'OMP_NUM_THREADS'=%s" % omp_threads_from_env)
except ValueError as ve:
self.logger.warning("could not parse env variable 'OMP_NUM_THREADS'."
"Value='%s'. Error=%s" % (omp_threads_from_env, ve))
self._n_jobs = n_cpus
else:
self._n_jobs = n_cpus
else:
self._n_jobs = int(val)
@property
def clustercenters(self):
""" Array containing the coordinates of the calculated cluster centers. """
return self._clustercenters
@clustercenters.setter
def clustercenters(self, val):
val = np.asarray(val, dtype='float32', order='C')
self._clustercenters = val
@property
def overwrite_dtrajs(self):
"""
Should existing dtraj files be overwritten. Set this property to True to overwrite.
"""
return self._overwrite_dtrajs
@overwrite_dtrajs.setter
def overwrite_dtrajs(self, value):
self._overwrite_dtrajs = value
@property
def dtrajs(self):
"""Discrete trajectories (assigned data to cluster centers)."""
if len(self._dtrajs) == 0: # nothing assigned yet, doing that now
self._dtrajs = self.assign(stride=1)
return self._dtrajs # returning what we have saved
@property
def index_clusters(self):
"""Returns trajectory/time indexes for all the clusters
Returns
-------
indexes : list of ndarray( (N_i, 2) )
For each state, all trajectory and time indexes where this cluster occurs.
Each matrix has a number of rows equal to the number of occurrences of the corresponding state,
with rows consisting of a tuple (i, t), where i is the index of the trajectory and t is the time index
within the trajectory.
"""
if len(self._dtrajs) == 0: # nothing assigned yet, doing that now
self._dtrajs = self.assign()
if len(self._index_states) == 0: # has never been run
self._index_states = index_states(self._dtrajs)
return self._index_states
def sample_indexes_by_cluster(self, clusters, nsample, replace=True):
"""Samples trajectory/time indexes according to the given sequence of states.
Parameters
----------
clusters : iterable of integers
It contains the cluster indexes to be sampled
nsample : int
Number of samples per cluster. If replace = False, the number of returned samples per cluster could be smaller
if less than nsample indexes are available for a cluster.
replace : boolean, optional
Whether the sample is with or without replacement
Returns
-------
indexes : list of ndarray( (N, 2) )
List of the sampled indices by cluster.
Each element is an index array with a number of rows equal to N=len(sequence), with rows consisting of a
tuple (i, t), where i is the index of the trajectory and t is the time index within the trajectory.
"""
        # Check if the catalogue (index_states) has already been built
if len(self._index_states) == 0: # has never been run
self._index_states = index_states(self.dtrajs)
return sample_indexes_by_state(self._index_states[clusters], nsample, replace=replace)
def _transform_array(self, X):
"""get closest index of point in :attr:`clustercenters` to x."""
dtraj = np.empty(X.shape[0], dtype=self.output_type())
regspatial.assign(X.astype(np.float32, order='C', copy=False),
self.clustercenters, dtraj, self.metric, self.n_jobs)
res = dtraj[:, None] # always return a column vector in this function
return res
def dimension(self):
"""output dimension of clustering algorithm (always 1)."""
return 1
def output_type(self):
return np.int32
def assign(self, X=None, stride=1):
"""
Assigns the given trajectory or list of trajectories to cluster centers by using the discretization defined
        by this clustering method (usually a Voronoi tessellation).
You can assign multiple times with different strides. The last result of assign will be saved and is available
as the attribute :func:`dtrajs`.
Parameters
----------
X : ndarray(T, n) or list of ndarray(T_i, n), optional, default = None
Optional input data to map, where T is the number of time steps and n is the number of dimensions.
When a list is provided they can have differently many time steps, but the number of dimensions need
to be consistent. When X is not provided, the result of assign is identical to get_output(), i.e. the
data used for clustering will be assigned. If X is given, the stride argument is not accepted.
stride : int, optional, default = 1
If set to 1, all frames of the input data will be assigned. Note that this could cause this calculation
to be very slow for large data sets. Since molecular dynamics data is usually
correlated at short timescales, it is often sufficient to obtain the discretization at a longer stride.
Note that the stride option used to conduct the clustering is independent of the assign stride.
This argument is only accepted if X is not given.
Returns
-------
Y : ndarray(T, dtype=int) or list of ndarray(T_i, dtype=int)
The discretized trajectory: int-array with the indexes of the assigned clusters, or list of such int-arrays.
If called with a list of trajectories, Y will also be a corresponding list of discrete trajectories
"""
if X is None:
# if the stride did not change and the discrete trajectory is already present,
# just return it
            if self._previous_stride == stride and len(self._dtrajs) > 0: # compare by value, not identity
return self._dtrajs
self._previous_stride = stride
# map to column vectors
mapped = self.get_output(stride=stride, chunk=self.chunksize)
# flatten and save
self._dtrajs = [np.transpose(m)[0] for m in mapped]
# return
return self._dtrajs
else:
if stride != 1:
raise ValueError('assign accepts either X or stride parameters, but not both. If you want to map '+
'only a subset of your data, extract the subset yourself and pass it as X.')
# map to column vector(s)
mapped = self.transform(X)
# flatten
if isinstance(mapped, np.ndarray):
mapped = np.transpose(mapped)[0]
else:
mapped = [np.transpose(m)[0] for m in mapped]
# return
return mapped
def save_dtrajs(self, trajfiles=None, prefix='',
output_dir='.',
output_format='ascii',
extension='.dtraj'):
"""saves calculated discrete trajectories. Filenames are taken from
given reader. If data comes from memory dtrajs are written to a default
filename.
Parameters
----------
trajfiles : list of str (optional)
names of input trajectory files, will be used generate output files.
prefix : str
prepend prefix to filenames.
output_dir : str
save files to this directory.
output_format : str
if format is 'ascii' dtrajs will be written as csv files, otherwise
they will be written as NumPy .npy files.
extension : str
file extension to append (eg. '.itraj')
"""
if extension[0] != '.':
extension = '.' + extension
# obtain filenames from input (if possible, reader is a featurereader)
if output_format == 'ascii':
from msmtools.dtraj import write_discrete_trajectory as write_dtraj
else:
from msmtools.dtraj import save_discrete_trajectory as write_dtraj
import os.path as path
output_files = []
if trajfiles is not None: # have filenames available?
for f in trajfiles:
p, n = path.split(f) # path and file
basename, _ = path.splitext(n)
if prefix != '':
name = "%s_%s%s" % (prefix, basename, extension)
else:
name = "%s%s" % (basename, extension)
# name = path.join(p, name)
output_files.append(name)
else:
for i in range(len(self.dtrajs)):
                if prefix != '':
name = "%s_%i%s" % (prefix, i, extension)
else:
name = str(i) + extension
output_files.append(name)
assert len(self.dtrajs) == len(output_files)
if not os.path.exists(output_dir):
mkdir_p(output_dir)
for filename, dtraj in zip(output_files, self.dtrajs):
dest = path.join(output_dir, filename)
self._logger.debug('writing dtraj to "%s"' % dest)
try:
if path.exists(dest) and not self.overwrite_dtrajs:
raise EnvironmentError('Attempted to write dtraj "%s" which already existed. To automatically'
' overwrite existing files, set source.overwrite_dtrajs=True.' % dest)
write_dtraj(dest, dtraj)
except IOError:
self._logger.exception('Exception during writing dtraj to "%s"' % dest)
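# Illustrative usage sketch (not part of the original module). It assumes the concrete
# k-means subclass exposed as pyemma.coordinates.cluster_kmeans; any AbstractClustering
# subclass offers the same assign()/dtrajs/save_dtrajs interface:
#
#     import numpy as np
#     import pyemma.coordinates as coor
#     data = [np.random.randn(1000, 2), np.random.randn(500, 2)]
#     clustering = coor.cluster_kmeans(data, k=10)
#     dtrajs = clustering.assign()              # one int array per input trajectory
#     clustering.save_dtrajs(prefix='demo', output_dir='.')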
| lgpl-3.0 |
chenyyx/scikit-learn-doc-zh | examples/zh/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| gpl-3.0 |
mhostetter/gnuradio | gr-filter/examples/fir_filter_fff.py | 47 | 4014 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_fff(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = analog.noise_source_f(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_float, self._nsamps)
self.filt0 = filter.fir_filter_fff(self._decim, taps)
self.vsnk_src = blocks.vector_sink_f()
self.vsnk_out = blocks.vector_sink_f()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_fff(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
saketkc/statsmodels | statsmodels/tools/print_version.py | 23 | 7951 | #!/usr/bin/env python
from __future__ import print_function
from statsmodels.compat.python import reduce
import sys
from os.path import dirname
def safe_version(module, attr='__version__'):
if not isinstance(attr, list):
attr = [attr]
try:
return reduce(getattr, [module] + attr)
except AttributeError:
return "Cannot detect version"
def _show_versions_only():
print("\nINSTALLED VERSIONS")
print("------------------")
print("Python: %d.%d.%d.%s.%s" % sys.version_info[:])
try:
import os
(sysname, nodename, release, version, machine) = os.uname()
print("OS: %s %s %s %s" % (sysname, release, version, machine))
print("byteorder: %s" % sys.byteorder)
print("LC_ALL: %s" % os.environ.get('LC_ALL', "None"))
print("LANG: %s" % os.environ.get('LANG', "None"))
except:
pass
try:
from statsmodels import version
has_sm = True
except ImportError:
has_sm = False
print('\nStatsmodels\n===========\n')
if has_sm:
print('Installed: %s' % safe_version(version, 'full_version'))
else:
print('Not installed')
print("\nRequired Dependencies\n=====================\n")
try:
import Cython
print("cython: %s" % safe_version(Cython))
except ImportError:
print("cython: Not installed")
try:
import numpy
print("numpy: %s" % safe_version(numpy, ['version', 'version']))
except ImportError:
print("numpy: Not installed")
try:
import scipy
print("scipy: %s" % safe_version(scipy, ['version', 'version']))
except ImportError:
print("scipy: Not installed")
try:
import pandas
print("pandas: %s" % safe_version(pandas, ['version', 'version']))
except ImportError:
print("pandas: Not installed")
try:
import dateutil
print(" dateutil: %s" % safe_version(dateutil))
except ImportError:
print(" dateutil: not installed")
try:
import patsy
print("patsy: %s" % safe_version(patsy))
except ImportError:
print("patsy: Not installed")
print("\nOptional Dependencies\n=====================\n")
try:
import matplotlib as mpl
print("matplotlib: %s" % safe_version(mpl))
except ImportError:
print("matplotlib: Not installed")
try:
from cvxopt import info
print("cvxopt: %s" % safe_version(info, 'version'))
except ImportError:
print("cvxopt: Not installed")
print("\nDeveloper Tools\n================\n")
try:
import IPython
print("IPython: %s" % safe_version(IPython))
except ImportError:
print("IPython: Not installed")
try:
import jinja2
print(" jinja2: %s" % safe_version(jinja2))
except ImportError:
print(" jinja2: Not installed")
try:
import sphinx
print("sphinx: %s" % safe_version(sphinx))
except ImportError:
print("sphinx: Not installed")
try:
import pygments
print(" pygments: %s" % safe_version(pygments))
except ImportError:
print(" pygments: Not installed")
try:
import nose
print("nose: %s" % safe_version(nose))
except ImportError:
print("nose: Not installed")
try:
import virtualenv
print("virtualenv: %s" % safe_version(virtualenv))
except ImportError:
print("virtualenv: Not installed")
print("\n")
def show_versions(show_dirs=True):
if not show_dirs:
        return _show_versions_only()
print("\nINSTALLED VERSIONS")
print("------------------")
print("Python: %d.%d.%d.%s.%s" % sys.version_info[:])
try:
import os
(sysname, nodename, release, version, machine) = os.uname()
print("OS: %s %s %s %s" % (sysname, release, version, machine))
print("byteorder: %s" % sys.byteorder)
print("LC_ALL: %s" % os.environ.get('LC_ALL', "None"))
print("LANG: %s" % os.environ.get('LANG', "None"))
except:
pass
try:
import statsmodels
from statsmodels import version
has_sm = True
except ImportError:
has_sm = False
print('\nStatsmodels\n===========\n')
if has_sm:
print('Installed: %s (%s)' % (safe_version(version, 'full_version'),
dirname(statsmodels.__file__)))
else:
print('Not installed')
print("\nRequired Dependencies\n=====================\n")
try:
import Cython
print("cython: %s (%s)" % (safe_version(Cython),
dirname(Cython.__file__)))
except ImportError:
print("cython: Not installed")
try:
import numpy
print("numpy: %s (%s)" % (safe_version(numpy, ['version', 'version']),
dirname(numpy.__file__)))
except ImportError:
print("numpy: Not installed")
try:
import scipy
print("scipy: %s (%s)" % (safe_version(scipy, ['version', 'version']),
dirname(scipy.__file__)))
except ImportError:
print("scipy: Not installed")
try:
import pandas
print("pandas: %s (%s)" % (safe_version(pandas, ['version',
'version']),
dirname(pandas.__file__)))
except ImportError:
print("pandas: Not installed")
try:
import dateutil
print(" dateutil: %s (%s)" % (safe_version(dateutil),
dirname(dateutil.__file__)))
except ImportError:
print(" dateutil: not installed")
try:
import patsy
print("patsy: %s (%s)" % (safe_version(patsy),
dirname(patsy.__file__)))
except ImportError:
print("patsy: Not installed")
print("\nOptional Dependencies\n=====================\n")
try:
import matplotlib as mpl
print("matplotlib: %s (%s)" % (safe_version(mpl),
dirname(mpl.__file__)))
except ImportError:
print("matplotlib: Not installed")
try:
from cvxopt import info
print("cvxopt: %s (%s)" % (safe_version(info, 'version'),
dirname(info.__file__)))
except ImportError:
print("cvxopt: Not installed")
print("\nDeveloper Tools\n================\n")
try:
import IPython
print("IPython: %s (%s)" % (safe_version(IPython),
dirname(IPython.__file__)))
except ImportError:
print("IPython: Not installed")
try:
import jinja2
print(" jinja2: %s (%s)" % (safe_version(jinja2),
dirname(jinja2.__file__)))
except ImportError:
print(" jinja2: Not installed")
try:
import sphinx
print("sphinx: %s (%s)" % (safe_version(sphinx),
dirname(sphinx.__file__)))
except ImportError:
print("sphinx: Not installed")
try:
import pygments
print(" pygments: %s (%s)" % (safe_version(pygments),
dirname(pygments.__file__)))
except ImportError:
print(" pygments: Not installed")
try:
import nose
print("nose: %s (%s)" % (safe_version(nose), dirname(nose.__file__)))
except ImportError:
print("nose: Not installed")
try:
import virtualenv
print("virtualenv: %s (%s)" % (safe_version(virtualenv),
dirname(virtualenv.__file__)))
except ImportError:
print("virtualenv: Not installed")
print("\n")
if __name__ == "__main__":
show_versions()
| bsd-3-clause |
simonelanucara/script | k_meam_multiple_raster.py | 1 | 1805 | import os
import glob
import gdal
import numpy as np
from sklearn import cluster
directory_to_check = "/home/jovyan/work/ClippingFeatures/" # Which directory do you want to start with?
def my_function(directory):
# get rasters' file names
fnames = glob.glob('*_max_values_ndvi.tif')
# read general properties of the first raster (assuming all the rasters share these properties)
ds = gdal.Open(fnames[0], 0)
gt = ds.GetGeoTransform()
sr = ds.GetProjection()
xsize = ds.RasterXSize
ysize = ds.RasterYSize
nd = ds.GetRasterBand(1).GetNoDataValue()
del ds
arrays = []
for fn in fnames:
ds = gdal.Open(fn, 0)
arr = ds.ReadAsArray() # 2D array (rows by columns)
arrays.append(arr)
del ds
# arr = np.stack(arrays) # 3D array (date by rows by columns)
    X = arr.reshape((-1,1)) # NOTE: with the stack commented out above, only the last raster read is clustered
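    # Illustrative sketch (not part of the original script): to cluster all rasters
    # jointly rather than only the last one, the per-file arrays could be stacked and
    # reshaped so that each pixel is a sample with one feature per raster:
    #
    #     stack = np.stack(arrays)                  # (n_rasters, rows, cols)
    #     X = stack.reshape(stack.shape[0], -1).T   # (rows*cols, n_rasters)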
#calculate k_means
k_means = cluster.KMeans(n_clusters=4)
k_means.fit(X)
X_cluster = k_means.labels_
X_cluster = X_cluster.reshape(arr.shape)
[cols, rows] = arr.shape
# create the output raster
out_fn = ('k_means_max_values_ndvi.tif')
driver = gdal.GetDriverByName('GTiff')
out_ds = driver.Create(out_fn, rows, cols, 1, gdal.GDT_Byte) # you might want to change the pixel type
out_ds.SetGeoTransform(gt)
out_ds.SetProjection(sr)
out_band = out_ds.GetRasterBand(1)
out_band.SetNoDataValue(nd)
out_band.WriteArray(X_cluster)
# outDataRaster.GetRasterBand(1).WriteArray(X_cluster)
directories = [os.path.abspath(x[0]) for x in os.walk(directory_to_check)]
directories.remove(os.path.abspath(directory_to_check)) # If you don't want your main directory included
for i in directories:
os.chdir(i) # Change working Directory
my_function(i) # Run your function
| gpl-3.0 |
Ginkgo-Biloba/Misc-Python | sklearn/SKLearn3KNNForR.py | 1 | 1126 | # coding = utf-8
"""
3.9 Using kNN for regression
http://git.oschina.net/wizardforcel/sklearn-cb/blob/master/3.md
"""
import numpy as np
from sklearn import datasets
iris = datasets.load_iris()
print(iris.feature_names)
x = iris.data[:, [0, 1]]
y = iris.data[:, 2]
# Use linear regression as a baseline for comparison
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(x, y)
lrPred = lr.predict(x)
print("lr MSE: {:f}".format(np.power(y - lrPred, 2).mean()))
# Use kNN regression
from sklearn.neighbors import KNeighborsRegressor
knnr = KNeighborsRegressor(n_neighbors=10)
knnr.fit(x, y)
knnrPred = knnr.predict(x)
print("knnr MSE: {:f}".format(np.power(y - knnrPred, 2).mean()))
# Regression uses the 10 nearest points
from matplotlib import pyplot as plt
plt.style.use("ggplot")
(fig, ax) = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(7, 7))
ax[0].scatter(x[:, 0], x[:, 1], s=lrPred*10, label="Linear regression prediction", color="orange")
ax[0].legend()
ax[0].set_title("Predictions")
ax[1].scatter(x[:, 0], x[:, 1], s=knnrPred*10, label="$k$-NN regression prediction", color="green")
ax[1].legend()
fig.tight_layout()
fig.show()
| gpl-3.0 |
GrumpyNounours/PySeidon | pyseidon/validationClass/valTable.py | 2 | 3469 | #!/usr/bin/python2.7
# encoding: utf-8
import pandas as pd
# Custom error
from pyseidon.utilities.pyseidon_error import PyseidonError
# ALTERNATE VERSION FOR ANDY
def valTable(struct, filename, vars, save_csv=False, debug=False, debug_plot=False):
'''
    Computes validation statistics for each requested variable of a site dictionary
    ('struct') and returns them as a pandas DataFrame; when save_csv is True the
    table is also written to '<filename>_val.csv'.
'''
# initialize lists
kind, name, ovORun, RMSE, CF, SD, POF, NOF, MDPO, MDNO, skill, r2, phase = \
[], [], [], [], [], [], [], [], [], [], [], [], []
bias, pbias, NRMSE, NSE, corr, SI, gear = [], [], [], [], [], [], []
# append to the lists the stats from each site for each variable
for var in vars:
(kind, name, ovORun, RMSE, CF, SD, POF, NOF, MDPO, MDNO, skill, r2, phase, bias, pbias, NRMSE, NSE, corr, SI, gear) \
= siteStats(struct, var, kind, name, ovORun, RMSE, CF, SD, POF, NOF, MDPO, MDNO, skill, r2, phase,
bias, pbias, NRMSE, NSE, corr, SI, gear, debug=False, debug_plot=False)
# put stats into dict and create dataframe
val_dict = {'Type':kind, 'ovORun':ovORun, 'RMSE':RMSE, 'CF':CF, 'SD':SD, 'POF':POF,
'NOF':NOF, 'MDPO':MDPO, 'MDNO':MDNO, 'skill':skill, 'r2':r2, 'phase':phase,
'bias':bias, 'pbias':pbias,'NRMSE':NRMSE, 'NSE':NSE, 'corr':corr, 'SI':SI, 'gear':gear}
table = pd.DataFrame(data=val_dict, index=name, columns=val_dict.keys())
# export as .csv file
if save_csv:
out_file = '{}_val.csv'.format(filename)
table.to_csv(out_file)
return table
def siteStats(site, variable, type, name, ovORun, RMSE, CF, SD, POF, NOF, MDPO, MDNO, skill, r2, phase,
bias, pbias, NRMSE, NSE, corr, SI, gear, debug=False, debug_plot=False):
"""
Takes in the run (an array of dictionaries) and the type of the run (a
string). Also takes in the list representing each statistic.
"""
if debug: print "siteStats..."
# check if it's a tidegauge site
if ((site['type'] != 'TideGauge') and (variable != 'tg')):
stats = site['{}_val'.format(variable)]
type.append(variable)
name.append(site['name'].split('/')[-1].split('.')[0])
elif ((site['type'] == 'TideGauge') and (variable == 'tg')):
stats = site['tg_val']
type.append('elev')
name.append(site['name'].split('/')[-1].split('.')[0])
# do nothing if a tidegauge is encountered but variable isn't tg
else:
raise PyseidonError("---The variable tg is missing---")
# add the statistics to the list, round to 2 decimal places
ovORun.append(stats['ovORun'])
RMSE.append(round(stats['RMSE'], 2))
CF.append(round(stats['CF'], 2))
SD.append(round(stats['SD'], 2))
POF.append(round(stats['POF'], 2))
NOF.append(round(stats['NOF'], 2))
MDPO.append(stats['MDPO'])
MDNO.append(stats['MDNO'])
skill.append(round(stats['skill'], 2))
r2.append(round(stats['r_squared'], 2))
phase.append(stats['phase'])
bias.append(round(stats['bias'], 2))
pbias.append(round(stats['pbias'], 2))
NRMSE.append(round(stats['NRMSE'], 2))
NSE.append(round(stats['NSE'], 2))
corr.append(round(stats['CORR'], 2))
SI.append(round(stats['SI'], 2))
gear.append(site['type'])
if debug: print "...siteStats done."
return (type, name, ovORun, RMSE, CF, SD, POF, NOF, MDPO, MDNO, skill, r2, phase, bias, pbias, NRMSE, NSE, corr, SI, gear)
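# Illustrative usage sketch (not part of the original module): build the validation
# table for the 'speed' variable of a single hypothetical ADCP site. The site name,
# file name and statistic values below are all made up.
if __name__ == '__main__':
    _keys = ['ovORun', 'RMSE', 'CF', 'SD', 'POF', 'NOF', 'MDPO', 'MDNO', 'skill',
             'r_squared', 'phase', 'bias', 'pbias', 'NRMSE', 'NSE', 'CORR', 'SI']
    _site = {'name': 'demo/adcp_site.nc', 'type': 'ADCP',
             'speed_val': dict((k, 0.0) for k in _keys)}
    print valTable(_site, 'demo', ['speed'], save_csv=False)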
| agpl-3.0 |
BIDS-collaborative/EDAM | data/LH/meanshift.py | 1 | 4584 | import pandas as pd, numpy as np, warnings, sklearn
from sklearn.cluster import AffinityPropagation, DBSCAN, MeanShift, estimate_bandwidth
from sklearn.decomposition import PCA
from sklearn import metrics
import matplotlib.pyplot as plt
from itertools import cycle
def load_data(fileName, dropFirstColumn = True):
df = pd.read_csv(fileName)
if dropFirstColumn:
df = df.drop(df[[0]], axis = 1)
return df.as_matrix()
def predictAffinityPropagation(X, labels_true):
#ranX, ranY = shuffle(X, y, random_state=0)
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
def predictDBSCAN(X, labels_true):
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
# print('Estimated number of clusters: %d' % n_clusters_)
# print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
# print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
# print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
# print("Adjusted Rand Index: %0.3f"
# % metrics.adjusted_rand_score(labels_true, labels))
# print("Adjusted Mutual Information: %0.3f"
# % metrics.adjusted_mutual_info_score(labels_true, labels))
# print("Silhouette Coefficient: %0.3f"
# % metrics.silhouette_score(X, labels))
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
def predictMeanShift(X, labels):
# The following bandwidth can be automatically detected using
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
results = ms.fit_predict(X)
print list(results)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
# Create a PCA model.
pca_2 = PCA(2)
# Fit the PCA model on the numeric columns from earlier.
plot_columns = pca_2.fit_transform(X)
# Make a scatter plot of each game, shaded according to cluster assignment.
plt.scatter(x=plot_columns[:,0], y=plot_columns[:,1], c=results)
plt.title("Mean Shift- 4 clusters")
# Show the plot.
plt.show()
warnings.filterwarnings('ignore')
X = load_data("pier_ne_data.csv")
col_mean = np.nanmean(X,axis=0)
inds = np.where(np.isnan(X))
X[inds]=np.take(col_mean,inds[1])
y = np.divide(np.ravel(load_data("pier_ne_labels.csv", False)), 2)
predictMeanShift(X, y)
| bsd-2-clause |
lavanyag9713/ml_lab_ecsc_306 | labwork/lab7/sci-learn/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| apache-2.0 |
LiaoPan/scikit-learn | sklearn/cluster/bicluster.py | 211 | 19443 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized`.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
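    Examples
    --------
    A minimal usage sketch added here for illustration only (the toy matrix
    and the parameter values are arbitrary, not part of the original docs)::
        import numpy as np
        from sklearn.cluster import SpectralCoclustering
        X = np.array([[1, 1, 0, 0], [1, 1, 0, 0],
                      [0, 0, 1, 1], [0, 0, 1, 1]])
        model = SpectralCoclustering(n_clusters=2, random_state=0)
        model.fit(X)
        model.row_labels_       # bicluster label of each row
        model.column_labels_    # bicluster label of each column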
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized`.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
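    Examples
    --------
    An illustrative sketch only (the data and parameter values are arbitrary,
    not taken from the original documentation)::
        import numpy as np
        from sklearn.cluster import SpectralBiclustering
        rng = np.random.RandomState(0)
        X = rng.rand(20, 15)
        model = SpectralBiclustering(n_clusters=(2, 3), random_state=0)
        model.fit(X)
        model.row_labels_       # row partition label of each row
        model.column_labels_    # column partition label of each column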
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
| bsd-3-clause |
BeiLuoShiMen/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_agg.py | 69 | 11729 | """
An agg http://antigrain.com/ backend
Features that are implemented
* capstyles and join styles
* dashes
* linewidth
* lines, rectangles, ellipses
* clipping to a rectangle
* output to RGBA and PNG
* alpha blending
* DPI scaling properly - everything scales properly (dashes, linewidths, etc)
* draw polygon
* freetype2 w/ ft2font
TODO:
* allow save to file handle
* integrate screen dpi w/ ppi and text
"""
from __future__ import division
import numpy as npy
from matplotlib import verbose, rcParams
from matplotlib.backend_bases import RendererBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, maxdict
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont
from matplotlib.ft2font import FT2Font, LOAD_FORCE_AUTOHINT
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox
from _backend_agg import RendererAgg as _RendererAgg
from matplotlib import _png
backend_version = 'v2.2'
class RendererAgg(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles
"""
debug=1
texd = maxdict(50) # a cache of tex image rasters
_fontd = maxdict(50)
def __init__(self, width, height, dpi):
if __debug__: verbose.report('RendererAgg.__init__', 'debug-annoying')
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
if __debug__: verbose.report('RendererAgg.__init__ width=%s, height=%s'%(width, height), 'debug-annoying')
self._renderer = _RendererAgg(int(width), int(height), dpi, debug=False)
if __debug__: verbose.report('RendererAgg.__init__ _RendererAgg done',
'debug-annoying')
#self.draw_path = self._renderer.draw_path # see below
self.draw_markers = self._renderer.draw_markers
self.draw_path_collection = self._renderer.draw_path_collection
self.draw_quad_mesh = self._renderer.draw_quad_mesh
self.draw_image = self._renderer.draw_image
self.copy_from_bbox = self._renderer.copy_from_bbox
self.restore_region = self._renderer.restore_region
self.tostring_rgba_minimized = self._renderer.tostring_rgba_minimized
self.mathtext_parser = MathTextParser('Agg')
self.bbox = Bbox.from_bounds(0, 0, self.width, self.height)
if __debug__: verbose.report('RendererAgg.__init__ done',
'debug-annoying')
def draw_path(self, gc, path, transform, rgbFace=None):
nmax = rcParams['agg.path.chunksize'] # here at least for testing
npts = path.vertices.shape[0]
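        # Long, unfilled, simplifiable paths are drawn in chunks of roughly
        # ``nmax`` vertices (rcParam ``agg.path.chunksize``); each chunk's
        # code array is made to start with a MOVETO so the pieces join up.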
if nmax > 100 and npts > nmax and path.should_simplify and rgbFace is None:
nch = npy.ceil(npts/float(nmax))
chsize = int(npy.ceil(npts/nch))
i0 = npy.arange(0, npts, chsize)
i1 = npy.zeros_like(i0)
i1[:-1] = i0[1:] - 1
i1[-1] = npts
for ii0, ii1 in zip(i0, i1):
v = path.vertices[ii0:ii1,:]
c = path.codes
if c is not None:
c = c[ii0:ii1]
c[0] = Path.MOVETO # move to end of last chunk
p = Path(v, c)
self._renderer.draw_path(gc, p, transform, rgbFace)
else:
self._renderer.draw_path(gc, path, transform, rgbFace)
def draw_mathtext(self, gc, x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if __debug__: verbose.report('RendererAgg.draw_mathtext',
'debug-annoying')
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
x = int(x) + ox
y = int(y) - oy
self._renderer.draw_text_image(font_image, x, y + 1, angle, gc)
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
Render the text
"""
if __debug__: verbose.report('RendererAgg.draw_text', 'debug-annoying')
if ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
font = self._get_agg_font(prop)
if font is None: return None
if len(s) == 1 and ord(s) > 127:
font.load_char(ord(s), flags=LOAD_FORCE_AUTOHINT)
else:
# We pass '0' for angle here, since it will be rotated (in raster
# space) in the following call to draw_text_image).
font.set_text(s, 0, flags=LOAD_FORCE_AUTOHINT)
font.draw_glyphs_to_bitmap()
#print x, y, int(x), int(y)
self._renderer.draw_text_image(font.get_image(), int(x), int(y) + 1, angle, gc)
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
        with FontProperties prop
# passing rgb is a little hack to make cacheing in the
# texmanager more efficient. It is not meant to be used
# outside the backend
"""
if ismath=='TeX':
# todo: handle props
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
m,n = Z.shape
# TODO: descent of TeX text (I am imitating backend_ps here -JKS)
return n, m, 0
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
font = self._get_agg_font(prop)
font.set_text(s, 0.0, flags=LOAD_FORCE_AUTOHINT) # the width and height of unrotated string
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def draw_tex(self, gc, x, y, s, prop, angle):
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
im = self.texd.get(key)
if im is None:
Z = texmanager.get_grey(s, size, self.dpi)
Z = npy.array(Z * 255.0, npy.uint8)
self._renderer.draw_text_image(Z, x, y, angle, gc)
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def _get_agg_font(self, prop):
"""
Get the font for text instance t, cacheing for efficiency
"""
if __debug__: verbose.report('RendererAgg._get_agg_font',
'debug-annoying')
key = hash(prop)
font = self._fontd.get(key)
if font is None:
fname = findfont(prop)
font = self._fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self._fontd[fname] = font
self._fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, self.dpi)
return font
def points_to_pixels(self, points):
"""
convert point measures to pixes using dpi and the pixels per
inch of the display
"""
if __debug__: verbose.report('RendererAgg.points_to_pixels',
'debug-annoying')
return points*self.dpi/72.0
def tostring_rgb(self):
if __debug__: verbose.report('RendererAgg.tostring_rgb',
'debug-annoying')
return self._renderer.tostring_rgb()
def tostring_argb(self):
if __debug__: verbose.report('RendererAgg.tostring_argb',
'debug-annoying')
return self._renderer.tostring_argb()
def buffer_rgba(self,x,y):
if __debug__: verbose.report('RendererAgg.buffer_rgba',
'debug-annoying')
return self._renderer.buffer_rgba(x,y)
def clear(self):
self._renderer.clear()
def option_image_nocomposite(self):
# It is generally faster to composite each image directly to
# the Figure, and there's no file size benefit to compositing
# with the Agg backend
return True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if __debug__: verbose.report('backend_agg.new_figure_manager',
'debug-annoying')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasAgg(thisFig)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasAgg(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def copy_from_bbox(self, bbox):
renderer = self.get_renderer()
return renderer.copy_from_bbox(bbox)
def restore_region(self, region):
renderer = self.get_renderer()
return renderer.restore_region(region)
def draw(self):
"""
Draw the figure using the renderer
"""
if __debug__: verbose.report('FigureCanvasAgg.draw', 'debug-annoying')
self.renderer = self.get_renderer()
self.figure.draw(self.renderer)
def get_renderer(self):
l, b, w, h = self.figure.bbox.bounds
key = w, h, self.figure.dpi
try: self._lastKey, self.renderer
except AttributeError: need_new_renderer = True
else: need_new_renderer = (self._lastKey != key)
if need_new_renderer:
self.renderer = RendererAgg(w, h, self.figure.dpi)
self._lastKey = key
return self.renderer
def tostring_rgb(self):
if __debug__: verbose.report('FigureCanvasAgg.tostring_rgb',
'debug-annoying')
return self.renderer.tostring_rgb()
def tostring_argb(self):
if __debug__: verbose.report('FigureCanvasAgg.tostring_argb',
'debug-annoying')
return self.renderer.tostring_argb()
def buffer_rgba(self,x,y):
if __debug__: verbose.report('FigureCanvasAgg.buffer_rgba',
'debug-annoying')
return self.renderer.buffer_rgba(x,y)
def get_default_filetype(self):
return 'png'
def print_raw(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
if is_string_like(filename_or_obj):
filename_or_obj = file(filename_or_obj, 'wb')
renderer._renderer.write_rgba(filename_or_obj)
renderer.dpi = original_dpi
print_rgba = print_raw
def print_png(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
if is_string_like(filename_or_obj):
filename_or_obj = file(filename_or_obj, 'wb')
_png.write_png(renderer._renderer.buffer_rgba(0, 0),
renderer.width, renderer.height,
filename_or_obj, self.figure.dpi)
renderer.dpi = original_dpi
| agpl-3.0 |
rexshihaoren/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
ankurankan/scikit-learn | sklearn/decomposition/__init__.py | 99 | 1331 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD']
| bsd-3-clause |
BubuLK/sfepy | examples/diffusion/time_poisson_interactive.py | 2 | 9967 | #!/usr/bin/env python
"""
Transient Laplace equation (heat equation) with non-constant initial conditions
given by a function, using commands for interactive use.
The script allows setting various simulation parameters, namely:
- the diffusivity coefficient
- the max. initial condition value
- temperature field approximation order
- uniform mesh refinement
The example shows also how to probe the results.
In the SfePy top-level directory the following command can be used to get usage
information::
python examples/diffusion/time_poisson_interactive.py -h
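For example, the following (purely illustrative) invocation refines the mesh
once, uses a second order temperature approximation and probes the results,
showing the probing figures::
  python examples/diffusion/time_poisson_interactive.py -r 1 --order 2 -p -s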
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import assert_, output, ordered_iteritems, IndexedStruct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.problem import prepare_matrix
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.solvers.ts_solvers import SimpleTimeSteppingSolver
from sfepy.discrete.probes import LineProbe, CircleProbe
from sfepy.discrete.projections import project_by_component
def gen_probes(problem):
"""
Define a line probe and a circle probe.
"""
# Use enough points for higher order approximations.
n_point = 1000
p0, p1 = nm.array([0.0, 0.0, 0.0]), nm.array([0.1, 0.0, 0.0])
line = LineProbe(p0, p1, n_point, share_geometry=True)
# Workaround current probe code shortcoming.
line.set_options(close_limit=0.5)
centre = 0.5 * (p0 + p1)
normal = [0.0, 1.0, 0.0]
r = 0.019
circle = CircleProbe(centre, normal, r, n_point, share_geometry=True)
circle.set_options(close_limit=0.0)
probes = [line, circle]
labels = ['%s -> %s' % (p0, p1),
'circle(%s, %s, %s' % (centre, normal, r)]
return probes, labels
def probe_results(ax_num, T, dvel, probe, label):
"""
Probe the results using the given probe and plot the probed values.
"""
results = {}
pars, vals = probe(T)
results['T'] = (pars, vals)
pars, vals = probe(dvel)
results['dvel'] = (pars, vals)
fig = plt.figure(1)
ax = plt.subplot(2, 2, 2 * ax_num + 1)
ax.cla()
pars, vals = results['T']
ax.plot(pars, vals, label=r'$T$', lw=1, ls='-', marker='+', ms=3)
dx = 0.05 * (pars[-1] - pars[0])
ax.set_xlim(pars[0] - dx, pars[-1] + dx)
ax.set_ylabel('temperature')
ax.set_xlabel('probe %s' % label, fontsize=8)
ax.legend(loc='best', fontsize=10)
ax = plt.subplot(2, 2, 2 * ax_num + 2)
ax.cla()
pars, vals = results['dvel']
for ic in range(vals.shape[1]):
ax.plot(pars, vals[:, ic], label=r'$w_{%d}$' % (ic + 1),
lw=1, ls='-', marker='+', ms=3)
dx = 0.05 * (pars[-1] - pars[0])
ax.set_xlim(pars[0] - dx, pars[-1] + dx)
ax.set_ylabel('diffusion velocity')
ax.set_xlabel('probe %s' % label, fontsize=8)
ax.legend(loc='best', fontsize=10)
return fig, results
helps = {
'diffusivity' : 'the diffusivity coefficient [default: %(default)s]',
'ic_max' : 'the max. initial condition value [default: %(default)s]',
'order' : 'temperature field approximation order [default: %(default)s]',
'refine' : 'uniform mesh refinement level [default: %(default)s]',
'probe' : 'probe the results',
'show' : 'show the probing results figure, if --probe is used',
}
def main():
from sfepy import data_dir
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('--diffusivity', metavar='float', type=float,
action='store', dest='diffusivity',
default=1e-5, help=helps['diffusivity'])
parser.add_argument('--ic-max', metavar='float', type=float,
action='store', dest='ic_max',
default=2.0, help=helps['ic_max'])
parser.add_argument('--order', metavar='int', type=int,
action='store', dest='order',
default=2, help=helps['order'])
parser.add_argument('-r', '--refine', metavar='int', type=int,
action='store', dest='refine',
default=0, help=helps['refine'])
parser.add_argument('-p', '--probe',
action="store_true", dest='probe',
default=False, help=helps['probe'])
parser.add_argument('-s', '--show',
action="store_true", dest='show',
default=False, help=helps['show'])
options = parser.parse_args()
assert_((0 < options.order),
'temperature approximation order must be at least 1!')
output('using values:')
output(' diffusivity:', options.diffusivity)
output(' max. IC value:', options.ic_max)
output('uniform mesh refinement level:', options.refine)
mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.mesh')
domain = FEDomain('domain', mesh)
if options.refine > 0:
for ii in range(options.refine):
output('refine %d...' % ii)
domain = domain.refine()
output('... %d nodes %d elements'
% (domain.shape.n_nod, domain.shape.n_el))
omega = domain.create_region('Omega', 'all')
left = domain.create_region('Left',
'vertices in x < 0.00001', 'facet')
right = domain.create_region('Right',
'vertices in x > 0.099999', 'facet')
field = Field.from_args('fu', nm.float64, 'scalar', omega,
approx_order=options.order)
T = FieldVariable('T', 'unknown', field, history=1)
s = FieldVariable('s', 'test', field, primary_var_name='T')
m = Material('m', diffusivity=options.diffusivity * nm.eye(3))
integral = Integral('i', order=2*options.order)
t1 = Term.new('dw_diffusion(m.diffusivity, s, T)',
integral, omega, m=m, s=s, T=T)
t2 = Term.new('dw_volume_dot(s, dT/dt)',
integral, omega, s=s, T=T)
eq = Equation('balance', t1 + t2)
eqs = Equations([eq])
# Boundary conditions.
ebc1 = EssentialBC('T1', left, {'T.0' : 2.0})
ebc2 = EssentialBC('T2', right, {'T.0' : -2.0})
# Initial conditions.
def get_ic(coors, ic):
x, y, z = coors.T
return 2 - 40.0 * x + options.ic_max * nm.sin(4 * nm.pi * x / 0.1)
ic_fun = Function('ic_fun', get_ic)
ic = InitialCondition('ic', omega, {'T.0' : ic_fun})
pb = Problem('heat', equations=eqs)
pb.set_bcs(ebcs=Conditions([ebc1, ebc2]))
pb.set_ics(Conditions([ic]))
state0 = pb.get_initial_state()
init_fun, prestep_fun, _poststep_fun = pb.get_tss_functions(state0)
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({'is_linear' : True}, lin_solver=ls, status=nls_status)
tss = SimpleTimeSteppingSolver({'t0' : 0.0, 't1' : 100.0, 'n_step' : 11},
nls=nls, context=pb, verbose=True)
pb.set_solver(tss)
if options.probe:
# Prepare probe data.
probes, labels = gen_probes(pb)
ev = pb.evaluate
order = 2 * (options.order - 1)
gfield = Field.from_args('gu', nm.float64, 'vector', omega,
approx_order=options.order - 1)
dvel = FieldVariable('dvel', 'parameter', gfield,
primary_var_name='(set-to-None)')
cfield = Field.from_args('gu', nm.float64, 'scalar', omega,
approx_order=options.order - 1)
component = FieldVariable('component', 'parameter', cfield,
primary_var_name='(set-to-None)')
nls_options = {'eps_a' : 1e-16, 'i_max' : 1}
suffix = tss.ts.suffix
def poststep_fun(ts, vec):
_poststep_fun(ts, vec)
# Probe the solution.
dvel_qp = ev('ev_diffusion_velocity.%d.Omega(m.diffusivity, T)'
% order, copy_materials=False, mode='qp')
project_by_component(dvel, dvel_qp, component, order,
nls_options=nls_options)
all_results = []
for ii, probe in enumerate(probes):
fig, results = probe_results(ii, T, dvel, probe, labels[ii])
all_results.append(results)
plt.tight_layout()
fig.savefig('time_poisson_interactive_probe_%s.png'
% (suffix % ts.step), bbox_inches='tight')
for ii, results in enumerate(all_results):
output('probe %d (%s):' % (ii, probes[ii].name))
output.level += 2
for key, res in ordered_iteritems(results):
output(key + ':')
val = res[1]
output(' min: %+.2e, mean: %+.2e, max: %+.2e'
% (val.min(), val.mean(), val.max()))
output.level -= 2
else:
poststep_fun = _poststep_fun
pb.time_update(tss.ts)
state0.apply_ebc()
# This is required if {'is_linear' : True} is passed to Newton.
mtx = prepare_matrix(pb, state0)
pb.try_presolve(mtx)
tss_status = IndexedStruct()
tss(state0.get_vec(pb.active_only),
init_fun=init_fun, prestep_fun=prestep_fun, poststep_fun=poststep_fun,
status=tss_status)
output(tss_status)
if options.show:
plt.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
apache/spark | python/pyspark/sql/tests/test_pandas_udf.py | 22 | 10200 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.sql.functions import udf, pandas_udf, PandasUDFType
from pyspark.sql.types import DoubleType, StructType, StructField, LongType
from pyspark.sql.utils import ParseException, PythonException
from pyspark.rdd import PythonEvalType
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore[arg-type]
class PandasUDFTests(ReusedSQLTestCase):
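    # The tests below cover: return-type / eval-type resolution of pandas_udf
    # (plain call and decorator forms), validation of bad arguments,
    # propagation of StopIteration from user code, and Arrow safe-cast
    # behaviour for unsafe conversions and overflows.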
def test_pandas_udf_basic(self):
udf = pandas_udf(lambda x: x, DoubleType())
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, DoubleType(), PandasUDFType.SCALAR)
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'double', PandasUDFType.SCALAR)
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, StructType([StructField("v", DoubleType())]),
PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'v double',
functionType=PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, returnType='v double',
functionType=PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
def test_pandas_udf_decorator(self):
@pandas_udf(DoubleType())
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(returnType=DoubleType())
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
schema = StructType([StructField("v", DoubleType())])
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf('v double', PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf(schema, functionType=PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf(returnType='double', functionType=PandasUDFType.SCALAR)
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(returnType=schema, functionType=PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
def test_udf_wrong_arg(self):
with QuietTest(self.sc):
with self.assertRaises(ParseException):
@pandas_udf('blah')
def foo(x):
return x
with self.assertRaisesRegex(ValueError, 'Invalid return type.*None'):
@pandas_udf(functionType=PandasUDFType.SCALAR)
def foo(x):
return x
with self.assertRaisesRegex(ValueError, 'Invalid function'):
@pandas_udf('double', 100)
def foo(x):
return x
with self.assertRaisesRegex(ValueError, '0-arg pandas_udfs.*not.*supported'):
pandas_udf(lambda: 1, LongType(), PandasUDFType.SCALAR)
with self.assertRaisesRegex(ValueError, '0-arg pandas_udfs.*not.*supported'):
@pandas_udf(LongType(), PandasUDFType.SCALAR)
def zero_with_type():
return 1
with self.assertRaisesRegex(TypeError, 'Invalid return type'):
@pandas_udf(returnType=PandasUDFType.GROUPED_MAP)
def foo(df):
return df
with self.assertRaisesRegex(TypeError, 'Invalid return type'):
@pandas_udf(returnType='double', functionType=PandasUDFType.GROUPED_MAP)
def foo(df):
return df
with self.assertRaisesRegex(ValueError, 'Invalid function'):
@pandas_udf(returnType='k int, v double', functionType=PandasUDFType.GROUPED_MAP)
def foo(k, v, w):
return k
def test_stopiteration_in_udf(self):
def foo(x):
raise StopIteration()
def foofoo(x, y):
raise StopIteration()
exc_message = "Caught StopIteration thrown from user's code; failing the task"
df = self.spark.range(0, 100)
# plain udf (test for SPARK-23754)
self.assertRaisesRegex(
PythonException,
exc_message,
df.withColumn('v', udf(foo)('id')).collect
)
# pandas scalar udf
self.assertRaisesRegex(
PythonException,
exc_message,
df.withColumn(
'v', pandas_udf(foo, 'double', PandasUDFType.SCALAR)('id')
).collect
)
# pandas grouped map
self.assertRaisesRegex(
PythonException,
exc_message,
df.groupBy('id').apply(
pandas_udf(foo, df.schema, PandasUDFType.GROUPED_MAP)
).collect
)
self.assertRaisesRegex(
PythonException,
exc_message,
df.groupBy('id').apply(
pandas_udf(foofoo, df.schema, PandasUDFType.GROUPED_MAP)
).collect
)
# pandas grouped agg
self.assertRaisesRegex(
PythonException,
exc_message,
df.groupBy('id').agg(
pandas_udf(foo, 'double', PandasUDFType.GROUPED_AGG)('id')
).collect
)
def test_pandas_udf_detect_unsafe_type_conversion(self):
import pandas as pd
import numpy as np
values = [1.0] * 3
pdf = pd.DataFrame({'A': values})
df = self.spark.createDataFrame(pdf).repartition(1)
@pandas_udf(returnType="int")
def udf(column):
return pd.Series(np.linspace(0, 1, len(column)))
# Since 0.11.0, PyArrow supports the feature to raise an error for unsafe cast.
with self.sql_conf({
"spark.sql.execution.pandas.convertToArrowArraySafely": True}):
with self.assertRaisesRegex(Exception,
"Exception thrown when converting pandas.Series"):
df.select(['A']).withColumn('udf', udf('A')).collect()
# Disabling Arrow safe type check.
with self.sql_conf({
"spark.sql.execution.pandas.convertToArrowArraySafely": False}):
df.select(['A']).withColumn('udf', udf('A')).collect()
def test_pandas_udf_arrow_overflow(self):
import pandas as pd
df = self.spark.range(0, 1)
@pandas_udf(returnType="byte")
def udf(column):
return pd.Series([128] * len(column))
# When enabling safe type check, Arrow 0.11.0+ disallows overflow cast.
with self.sql_conf({
"spark.sql.execution.pandas.convertToArrowArraySafely": True}):
with self.assertRaisesRegex(Exception,
"Exception thrown when converting pandas.Series"):
df.withColumn('udf', udf('id')).collect()
# Disabling safe type check, let Arrow do the cast anyway.
with self.sql_conf({"spark.sql.execution.pandas.convertToArrowArraySafely": False}):
df.withColumn('udf', udf('id')).collect()
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
spallavolu/scikit-learn | benchmarks/bench_rcv1_logreg_convergence.py | 149 | 7173 | # Authors: Tom Dupre la Tour <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
def get_loss(w, intercept, myX, myy, C):
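    # Regularized mean logistic loss, matching the objective of the solvers
    # benchmarked below:
    #   mean(log(1 + exp(-y * (X.w + intercept)))) + ||w||^2 / (2 * C * n_samples)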
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
# compute the same step_size than in LR-sag
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
| bsd-3-clause |
theislab/scanpy | scanpy/tests/conftest.py | 1 | 2491 | import sys
from pathlib import Path
import matplotlib as mpl
mpl.use('agg')
from matplotlib import pyplot
from matplotlib.testing.compare import compare_images, make_test_filename
import pytest
import scanpy
scanpy.settings.verbosity = "hint"
# define this after importing scanpy but before running tests
IMPORTED = frozenset(sys.modules.keys())
@pytest.fixture(autouse=True)
def close_figures_on_teardown():
yield
pyplot.close("all")
def clear_loggers():
"""Remove handlers from all loggers
Fixes: https://github.com/theislab/scanpy/issues/1736
Code from: https://github.com/pytest-dev/pytest/issues/5502#issuecomment-647157873
"""
import logging
loggers = [logging.getLogger()] + list(logging.Logger.manager.loggerDict.values())
for logger in loggers:
handlers = getattr(logger, 'handlers', [])
for handler in handlers:
logger.removeHandler(handler)
@pytest.fixture(scope="session", autouse=True)
def close_logs_on_teardown(request):
request.addfinalizer(clear_loggers)
@pytest.fixture
def imported_modules():
return IMPORTED
@pytest.fixture
def check_same_image(add_nunit_attachment):
def _(pth1, pth2, *, tol: int, basename: str = ""):
def fmt_descr(descr):
if basename != "":
return f"{descr} ({basename})"
else:
return descr
pth1, pth2 = Path(pth1), Path(pth2)
try:
result = compare_images(str(pth1), str(pth2), tol=tol)
assert result is None, result
except Exception as e:
diff_pth = make_test_filename(pth2, 'failed-diff')
add_nunit_attachment(str(pth1), fmt_descr("Expected"))
add_nunit_attachment(str(pth2), fmt_descr("Result"))
if Path(diff_pth).is_file():
add_nunit_attachment(str(diff_pth), fmt_descr("Difference"))
raise e
return _
@pytest.fixture
def image_comparer(check_same_image):
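    """Return a factory building ``save_and_compare(basename, tol=...)``
    helpers, which save the current figure under ``path_actual`` and compare
    it to the expected image in ``path_expected`` (summary of the code below).
    """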
def make_comparer(path_expected: Path, path_actual: Path, *, tol: int):
def save_and_compare(basename, tol=tol):
path_actual.mkdir(parents=True, exist_ok=True)
out_path = path_actual / f'{basename}.png'
pyplot.savefig(out_path, dpi=40)
pyplot.close()
check_same_image(path_expected / f'{basename}.png', out_path, tol=tol)
return save_and_compare
return make_comparer
@pytest.fixture
def plt():
return pyplot
| bsd-3-clause |
AlexisEidelman/Til | til/pgm/liam2of.py | 2 | 6000 | # -*- coding:utf-8 -*-
'''
Created on 25 Apr 2013
@author: alexis_e
'''
from pandas import HDFStore, merge, DataFrame
import numpy as np
import pdb
import time
import os
from til import __path__ as path_til
from utils import of_name_to_til, concatenated_ranges
def table_for_of(simulation, period=None, check_validity=False, save_tables=False):
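    """Build the 'ind', 'men', 'foy' and 'fam' tables expected by OpenFisca
    from the Til simulation entities, optionally check their consistency,
    and either return them or save one 'survey_<year>' group per year to an
    intermediate HDF5 file (summary inferred from the code below).
    """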
temps = time.clock()
output_tab = os.path.join(path_til[0], "output", "to_run_leg.h5" )
# on travaille d'abord sur l'ensemble des tables puis on selectionne chaque annee
# on étudie d'abord la table individu pour pouvoir séléctionner les identifiants
# step 1
table = {}
entities = simulation.entities
entities_name = map( lambda e: e.name, simulation.entities)
def _get_entity(name):
position = entities_name.index(name)
return simulation.entities[position]
ind = _get_entity('person')
table['ind'] = DataFrame(ind.array.columns)
table['ind'] = table['ind'].rename(columns={'men': 'idmen', 'foy': 'idfoy', 'id': 'noi', 'statmarit': 'civilstate'})
    # create a new variable
table['ind']['ageq'] = table['ind']['age']/5 - 4
table['ind']['ageq'] = table['ind']['ageq']*(table['ind']['ageq'] > 0)
table['ind']['ageq'] = 12 + (table['ind']['ageq']-12)*(table['ind']['ageq'] < 12)
    #TODO: adjust for young widowers
# create fam entity
try:
table['ind'][['idfam','quifam']] = table['ind'].loc[:,['idmen','quimen']]
except:
pdb.set_trace()
    # # Fix the qui codes when the simulation does not guarantee that everyone is not left at qui==2
    # ## no longer needed because it is now done in the simulation, but it may be better to redo it here some day
    # ## because it takes time in the simulation
    # time_qui = time.clock()
    # for ent in ('men','foy'): # 'fam' some day...
    #     print "Deal with qui for ", ent
    #     qui= 'qui'+ent
    #     ident = 'id'+ent
    #     trav = table['ind'].ix[table['ind'][qui]==2, [ident,qui,'period']]
    #     for name, group in trav.groupby([ident,'period']):
    #         to_add = range(len(group))
    #         group[qui] = group[qui]+to_add
    #         table['ind'].ix[group[qui].index, qui] = group[qui]
    #     print "the qui values for ", ent, " are now fixed"
    # time_qui = time.clock() - time_qui
    # print "time spent handling the qui values:", time_qui
ind = table['ind']
for ent in ['men','foy']:
entity = _get_entity(of_name_to_til[ent])
table[ent] = DataFrame(entity.array.columns)
id = 'id' + ent
qui = 'qui' + ent
table[ent] = table[ent].rename(columns={'id': id})
        # work on the qui codes
nb_qui = ind.loc[ind[qui]>1, ['noi',id,qui]].groupby(id, sort=True).size()
if len(nb_qui)>0:
new_qui = concatenated_ranges(nb_qui) + 2
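            # assumption about the helper: concatenated_ranges([2, 3]) is taken
            # to return [0, 1, 0, 1, 2], so secondary members are renumbered
            # 2, 3, 4, ... within each unit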
table['ind'] = table['ind'].sort(id) #note the sort
col_qui = table['ind'][qui]
col_qui[col_qui>1] = new_qui
table['ind'][qui] = col_qui
        # information on qui == 0
qui0 = table['ind'].loc[table['ind']['qui' + ent]==0,['noi','idfoy','idmen','idfam','period']]
table[ent] = merge(table[ent], qui0, how='left', left_on=[id,'period'], right_on=[id,'period'])
if ent=='men':
            # nbinde is capped at 6 people, hence a maximum value of 5 in python
table[ent]['nbinde'] = (table[ent]['nb_persons']-1) * (table[ent]['nb_persons']-1 <=5) +5*(table[ent]['nb_persons']-1 >5)
table['fam'] = qui0
    # remove non-ordinary households
cond = (table['ind']['idmen'] >= 10) & (table['ind']['idfoy'] >= 10)
table['ind'] = table['ind'][cond]
table['men'] = table['men'][table['men']['idmen']>=10]
table['foy'] = table['foy'][table['foy']['idfoy']>=10]
table['fam'] = table['fam'][table['fam']['idfam']>=10]
# get years
years = np.unique(table['ind']['period'].values/100)
if period is not None:
years=[period]
print years
if check_validity:
for year in years:
ind = table['ind']
for ent in ['men','foy']: #fam
id = 'id' + ent
qui = 'qui' + ent
tab = table[ent]
try:
assert ind.groupby([id,qui]).size().max() == 1
except:
print ent
pb = ind.groupby([id,qui]).size() > 1
print(ind.groupby([id,qui]).size()[pb])
pdb.set_trace()
print(ind[ind[id]==43][['noi',id,qui]])
qui0 = ind[ind[qui]==0]
try:
assert qui0[id].isin(tab[id]).all()
except:
cond = tab[id].isin(qui0[id])
print(tab[~cond])
pdb.set_trace()
try:
assert tab[id].isin(qui0[id]).all()
except:
cond = tab[id].isin(qui0[id])
print(tab[~cond])
pdb.set_trace()
for year in years:
if save_tables:
try:
os.remove(output_tab)
except:
print("Attention, la table intermediaire n'a pas ete supprimee")
goal = HDFStore(output_tab)
goal.remove('survey_'+str(year))
for ent in ('ind','men','foy','fam'):
tab = table[ent].loc[table[ent]['period']/100==year]
key = 'survey_'+str(year) + '/'+ent
goal.put(key, tab)
goal.close()
else:
for ent in ('ind','men','foy','fam'):
table[ent] = table[ent].loc[table[ent]['period']/100==year]
return table
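# A hedged usage sketch (not in the original file): build the tables for a
# single year and write them to the intermediate HDF5 store.
#
# simulation = ...  # a Til/liam2 simulation object exposing .entities
# tables = table_for_of(simulation, period=2010, check_validity=True,
#                       save_tables=True)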
if __name__ == "__main__":
table_for_of() | gpl-3.0 |
GGiecold/PySCUBA | setup.py | 1 | 2816 | #!/usr/bin/env python
# PySCUBA/setup.py
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: [email protected], [email protected]
from codecs import open
from os import path
from setuptools import setup
exec(open(path.join('src', 'PySCUBA', '__version__.py')).read())
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README'), encoding = 'utf-8') as f:
long_description = f.read()
setup(name = 'PySCUBA',
version = __version__,
description = "Python for Single-cell Clustering Using Bifurcation Analysis",
long_description = long_description,
url = 'https://github.com/GGiecold/PySCUBA',
download_url = 'https://github.com/GGiecold/PySCUBA',
author = 'Gregory Giecold',
author_email = '[email protected]',
maintainer = 'Gregory Giecold',
maintainer_email = '[email protected]',
license = 'MIT License',
platforms = ('Any',),
install_requires = ['matplotlib>=1.4.3', 'numpy>=1.9.0', 'Pillow>=3.2.0',
'python-igraph', 'rpy2>=2.8.1', 'scipy>=0.17.0',
'setuptools', 'sklearn', 'Wand>=0.4.3'],
classifiers = ['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Healthcare Industry',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: User Interfaces', ],
packages = ['PySCUBA'],
package_dir = {'PySCUBA': 'src/PySCUBA'},
keywords = "bioinformatics biology clustering cytometry gap-statistics "
"genomics machine-learning pattern-recognition PCR principal-curve "
"qPCR RNASeq single-cell time-series unsupervised-learning",
entry_points = {
'console_scripts': ['PySCUBA = PySCUBA.__main__:main'],
}
)
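# Note (not part of the original setup script): given the console_scripts entry
# point above, installing the package (e.g. `pip install .`) should expose a
# `PySCUBA` command that simply dispatches to PySCUBA.__main__:main().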
| mit |
thientu/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
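# A brief usage sketch (not part of the original module): most loaders return a
# Bunch object exposing `data` and `target` arrays, e.g.
#
# >>> from sklearn.datasets import load_iris
# >>> iris = load_iris()
# >>> iris.data.shape
# (150, 4)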
| bsd-3-clause |
m11s/MissionPlanner | Lib/site-packages/numpy/lib/npyio.py | 53 | 59490 | __all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
import numpy as np
import format
import sys
import os
import sys
import itertools
import warnings
from operator import itemgetter
from cPickle import load as _cload, loads
from _datasource import DataSource
if sys.platform != 'cli':
from _compiled_base import packbits, unpackbits
else:
def packbits(*args, **kw):
raise NotImplementedError()
def unpackbits(*args, **kw):
raise NotImplementedError()
from _iotools import LineSplitter, NameValidator, StringConverter, \
ConverterError, ConverterLockError, ConversionWarning, \
_is_string_like, has_nested_fields, flatten_dtype, \
easy_dtype, _bytes_to_name
from numpy.compat import asbytes, asstr, asbytes_nested, bytes
if sys.version_info[0] >= 3:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
_string_like = _is_string_like
def seek_gzip_factory(f):
"""Use this factory to produce the class so that we can do a lazy
import on gzip.
"""
import gzip
class GzipFile(gzip.GzipFile):
def seek(self, offset, whence=0):
# figure out new position (we can only seek forwards)
if whence == 1:
offset = self.offset + offset
if whence not in [0, 1]:
raise IOError, "Illegal argument"
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
def tell(self):
return self.offset
if isinstance(f, str):
f = GzipFile(f)
elif isinstance(f, gzip.GzipFile):
# cast to our GzipFile if its already a gzip.GzipFile
g = GzipFile(fileobj=f.fileobj)
g.name = f.name
g.mode = f.mode
f = g
return f
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
self._obj = obj
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError, key
def zipfile_factory(*args, **kwargs):
import zipfile
if sys.version_info >= (2, 5):
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ".npy" extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ".npy" extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ".npy" extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.read(key)
if bytes.startswith(format.MAGIC_PREFIX):
value = BytesIO(bytes)
return format.read_array(value)
else:
return bytes
else:
raise KeyError, "%s is not a file in the archive" % key
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ".npy" extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None):
"""
Load a pickled, ``.npy``, or ``.npz`` binary file.
Parameters
----------
file : file-like object or string
The file to read. It must support ``seek()`` and ``read()`` methods.
If the filename extension is ``.gz``, the file is first decompressed.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode
(see `numpy.memmap`). The mode has no effect for pickled or
zipped files.
A memory-mapped array is stored on disk, and not directly loaded
into memory. However, it can be accessed and sliced like any
ndarray. Memory mapping is especially useful for accessing
small fragments of large files without reading the entire file
into memory.
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
save, savez, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever is stored in the
pickle is returned.
- If the file is a ``.npy`` file, then an array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif isinstance(file, gzip.GzipFile):
fid = seek_gzip_factory(file)
own_fid = True
else:
fid = file
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz)
own_fid = False
return NpzFile(fid, own_fid=True)
elif magic == format.MAGIC_PREFIX: # .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
else: # Try a pickle
try:
return _cload(fid)
except:
raise IOError, \
"Failed to interpret file %s as a pickle" % repr(file)
finally:
if own_fid:
fid.close()
def save(file, arr):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the .npz file, are 'arr_0', 'arr_1', etc. If keyword arguments
are given, the corresponding variable names, in the ``.npz`` file will
match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
*args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
**kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with *args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with **kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
See Also
--------
numpy.savez_compressed : Save several arrays into a compressed .npz file format
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : string
File name of .npz file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed .npz file format
"""
_savez(file, args, kwds, True)
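# A small usage sketch for savez_compressed (hedged; its docstring above has no
# Examples section). Keyword names become the member names of the archive:
#
# >>> a = np.arange(3)
# >>> np.savez_compressed('/tmp/compressed', a=a)
# >>> np.load('/tmp/compressed.npz')['a']
# array([0, 1, 2])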
def _savez(file, args, kwds, compress):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError, "Cannot use un-named variables and keyword %s" % key
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zip = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.iteritems():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zip.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zip.close()
# Adapted from matplotlib
def _getconv(dtype):
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
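# For instance, _getconv(np.dtype(int)) returns a converter that accepts '3' as
# well as '3.0' (int(float(x))), and _getconv(np.dtype(bool)) maps '0'/'1'
# strings to False/True via bool(int(x)).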
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File or filename to read. If the filename extension is ``.gz`` or
``.bz2``, the file is first decompressed.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a record
data-type, the resulting array will be 1-dimensional, and each row
will be interpreted as an element of the array. In this case, the
number of columns used must match the number of fields in the
data-type.
comments : str, optional
The character used to indicate the start of a comment; default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data:
``converters = {3: lambda s: float(s or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. The default is False.
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
if delimiter is not None:
delimiter = asbytes(delimiter)
user_converters = converters
if usecols is not None:
usecols = list(usecols)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
fh = seek_gzip_factory(fname)
elif fname.endswith('.bz2'):
import bz2
fh = bz2.BZ2File(fname)
else:
fh = open(fname, 'U')
elif hasattr(fname, 'readline'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
return [dt.base] * int(np.prod(dt.shape))
else:
types = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt = flatten_dtype(tp)
types.extend(flat_dt)
return types
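    # e.g. flatten_dtype(np.dtype([('x', int), ('y', [('s', int), ('t', float)])]))
    # yields the flat list of base dtypes [int, int, float], one entry per column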
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip()
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in xrange(skiprows):
fh.readline()
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
while not first_vals:
first_line = fh.readline()
if not first_line: # EOF reached
raise IOError('End-of-file reached before encountering data.')
first_vals = split_line(first_line)
N = len(usecols or first_vals)
dtype_types = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in xrange(N)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).iteritems():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
X.append(tuple([conv(val) for (conv, val) in zip(converters, vals)]))
finally:
if own_fh:
fh.close()
if len(dtype_types) > 1:
# We're dealing with a structured array, with a dtype such as
# [('x', int), ('y', [('s', int), ('t', float)])]
#
# First, create the array using a flattened dtype:
# [('x', int), ('s', int), ('t', float)]
#
# Then, view the array using the specified dtype.
try:
X = np.array(X, dtype=np.dtype([('', t) for t in dtype_types]))
X = X.view(dtype)
except TypeError:
# In the case we have an object dtype
X = np.array(X, dtype=dtype)
else:
X = np.array(X, dtype)
X = np.squeeze(X)
if unpack:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n'):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored.
delimiter : str
Character separating columns.
newline : str
.. versionadded:: 1.5.0
Character separating lines.
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
        ``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif type(fmt) is str:
if fmt.count('%') == 1:
fmt = [fmt, ]*ncol
format = delimiter.join(fmt)
elif fmt.count('%') != ncol:
raise AttributeError('fmt has wrong number of %% formats. %s'
% fmt)
else:
format = fmt
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
finally:
if own_fh:
fh.close()
import re
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
            file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skiprows` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File or filename to read. If the filename extension is `.gz` or
`.bz2`, the file is first decompressed.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skip_header : int, optional
The numbers of lines to skip at the beginning of the file.
skip_footer : int, optional
The numbers of lines to skip at the end of the file
converters : variable or None, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing_values : variable or None, optional
The set of strings corresponding to missing data.
filling_values : variable or None, optional
The set of values to be used as default when the data are missing.
usecols : sequence or None, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skiprows` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables names.
By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
Examples
---------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
# Py3 data conversions to bytes, for convenience
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing, unicode):
missing = asbytes(missing)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
errmsg = "The input argument 'converter' should be a valid dictionary "\
"(got '%s' instead)"
raise TypeError(errmsg % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
if isinstance(fname, basestring):
fhd = np.lib._datasource.open(fname, 'U')
own_fhd = True
elif not hasattr(fname, 'read'):
raise TypeError("The input should be a string or a filehandle. "\
"(got %s instead)" % type(fname))
else:
fhd = fname
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn("The use of `skiprows` is deprecated.\n"\
"Please use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in xrange(skip_header):
fhd.readline()
# Keep on until we find the first valid values
first_values = None
while not first_values:
first_line = fhd.readline()
if not first_line:
raise IOError('End-of-file reached before encountering data.')
if names is True:
if comments in first_line:
first_line = asbytes('').join(first_line.split(comments)[1:])
first_values = split_line(first_line)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = dtype.names
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn("The use of `missing` is deprecated.\n"\
"Please use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values or []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (i, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(i):
try:
i = names.index(i)
except ValueError:
continue
elif usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
# Find the value to test:
if len(first_line):
testing_value = first_values[i]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = map(itemgetter(i), rows)
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = itertools.imap(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
# if loose:
# conversionfuncs = [conv._loose_call for conv in converters]
# else:
# conversionfuncs = [conv._strict_call for conv in converters]
# for (i, vals) in enumerate(rows):
# rows[i] = tuple([convert(val)
# for (convert, val) in zip(conversionfuncs, vals)])
if loose:
rows = zip(*[map(converter._loose_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
else:
rows = zip(*[map(converter._strict_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = zip(names, column_types)
mdtype = zip(names, [np.bool] * len(column_types))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
errmsg = "Nested fields involving objects "\
"are not supported..."
raise NotImplementedError(errmsg)
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(masks,
dtype=np.dtype([('', np.bool)
for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for (i, ttype) in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.update(dtype=kwargs.get('dtype', None))
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
names = kwargs.get('names', True)
if names is None:
names = True
kwargs.update(dtype=kwargs.get('update', None),
delimiter=kwargs.get('delimiter', ",") or ",",
names=names,
case_sensitive=case_sensitive)
usemask = kwargs.get("usemask", False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
| gpl-3.0 |
mfjb/scikit-learn | sklearn/cross_validation.py | 10 | 62355 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'LabelShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
    in testing on all distinct subsets of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling by default).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
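# Illustrative sketch of the note above: with n=10 and n_folds=3 the test folds
# have sizes 4, 3 and 3 -- the first n % n_folds folds receive the extra sample.
def _example_kfold_fold_sizes(n=10, n_folds=3):
    return [len(test) for _, test in KFold(n, n_folds=n_folds)]  # -> [4, 3, 3]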
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds); the last one has the
    complementary size.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
    ``p`` different values of the labels, while the latter uses samples
    that are all assigned the same label.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
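# Illustrative sketch (label values are assumptions): LeavePLabelOut generates
# one split per way of choosing p of the unique labels, i.e. C(n_unique_labels, p).
def _example_leave_p_label_out_count():
    labels = [1, 1, 2, 3, 4]                 # 4 unique labels
    return len(LeavePLabelOut(labels, p=2))  # C(4, 2) == 6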
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
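# Illustrative sketch: how _validate_shuffle_split resolves float and int sizes.
# With n=10 and test_size=0.25, n_test = ceil(2.5) = 3 and n_train = 10 - 3 = 7.
def _example_validate_shuffle_split():
    return _validate_shuffle_split(10, 0.25, None)  # -> (7, 3)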
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
            # up here with fewer samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
                # We complete by randomly assigning the missing indexes
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
    test_fold : array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
'''Shuffle-Labels-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(labels, p=10)`` would be
``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.
    Note: The parameters ``test_size`` and ``train_size`` refer to labels, not
    to samples as they do in ShuffleSplit.
Parameters
----------
labels : array, [n_samples]
Labels of samples
n_iter : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
'''
def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
classes, label_indices = np.unique(labels, return_inverse=True)
super(LabelShuffleSplit, self).__init__(
len(classes),
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self.labels = labels
self.classes = classes
self.label_indices = label_indices
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.labels,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _iter_indices(self):
for label_train, label_test in super(LabelShuffleSplit,
self)._iter_indices():
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(self.label_indices, label_train))
test = np.flatnonzero(np.in1d(self.label_indices, label_test))
yield train, test
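# Illustrative sketch (label values are assumptions): LabelShuffleSplit keeps all
# samples sharing a label on the same side of every split.
def _example_label_shuffle_split():
    labels = np.array([1, 1, 2, 2, 3, 3])
    cv = LabelShuffleSplit(labels, n_iter=2, test_size=0.5, random_state=0)
    for train_idx, test_idx in cv:
        # no label value appears on both the train and the test side
        assert not np.intersect1d(labels[train_idx], labels[test_idx]).size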
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : integer or cross-validation generator, optional, default=3
A cross-validation generator to use. If int, determines the number
of folds in StratifiedKFold if estimator is a classifier and the
target y is binary or multiclass, or the number of folds in KFold
otherwise.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
preds = [p for p, _ in preds_blocks]
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_locs = np.empty(len(locs), dtype=int)
inv_locs[locs] = np.arange(len(locs))
# Check for sparse predictions
if sp.issparse(preds[0]):
preds = sp.vstack(preds, format=preds[0].format)
else:
preds = np.concatenate(preds)
return preds[inv_locs]
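# Illustrative sketch (assumes a scikit-learn estimator is importable at call
# time): cross_val_predict returns exactly one out-of-fold prediction per sample.
def _example_cross_val_predict():
    from sklearn.linear_model import LogisticRegression
    X = np.array([[0.], [1.], [2.], [3.], [4.], [5.]])
    y = np.array([0, 0, 0, 1, 1, 1])
    preds = cross_val_predict(LogisticRegression(), X, y, cv=3)
    return preds.shape == y.shape  # one prediction per input sample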
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
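# Illustrative sketch: _check_is_partition accepts any reordering of np.arange(n)
# and rejects index arrays with repeated or missing entries.
def _example_check_is_partition():
    return (_check_is_partition(np.array([2, 0, 1]), 3),   # True
            _check_is_partition(np.array([0, 0, 1]), 3))   # False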
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional, default=3
A cross-validation generator to use. If int, determines the number
of folds in StratifiedKFold if estimator is a classifier and the
target y is binary or multiclass, or the number of folds in KFold
otherwise.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
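# Illustrative sketch (assumes a scikit-learn estimator is importable at call
# time): cross_val_score returns one score per fold, here three accuracy values.
def _example_cross_val_score():
    from sklearn.linear_model import LogisticRegression
    X = np.array([[0.], [1.], [2.], [3.], [4.], [5.]])
    y = np.array([0, 0, 0, 1, 1, 1])
    return cross_val_score(LogisticRegression(), X, y, scoring='accuracy', cv=3)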
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
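# Illustrative sketch (the dummy estimator below is an assumption used only for
# the demo): for a "pairwise" estimator, _safe_split slices a square kernel
# matrix by the requested rows and the training columns.
def _example_safe_split_pairwise():
    class _PairwiseDummy(object):
        _pairwise = True
    K = np.arange(16).reshape(4, 4)          # precomputed 4x4 kernel matrix
    X_test, _ = _safe_split(_PairwiseDummy(), K, None, np.array([3]),
                            train_indices=np.array([0, 1, 2]))
    return X_test.shape  # -> (1, 3): one test row against the three train columns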
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
    """Return a shuffled copy of y; if labels are given, shuffle only
    within groups sharing the same label."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
None, in which case 3 fold is used, or another object, that
will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
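# Illustrative sketch: with an integer cv and a classifier, check_cv builds a
# StratifiedKFold on y; otherwise it falls back to a plain KFold over the samples.
def _example_check_cv():
    y = np.array([0, 0, 1, 1])
    cv_clf = check_cv(2, X=np.zeros((4, 1)), y=y, classifier=True)
    cv_reg = check_cv(2, X=np.zeros((4, 1)), y=y, classifier=False)
    return isinstance(cv_clf, StratifiedKFold), isinstance(cv_reg, KFold)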
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional, default=3
A cross-validation generator to use. If int, determines the number
of folds in StratifiedKFold if estimator is a classifier and the
target y is binary or multiclass, or the number of folds in KFold
otherwise.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
        the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a problem with nosetests
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
stratify = options.pop('stratify', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False  # to avoid a problem with nosetests
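# Illustrative sketch: the stratify option keeps the class proportions identical
# in both halves of the split.
def _example_train_test_split_stratified():
    X = np.arange(8).reshape(8, 1)
    y = np.array([0, 0, 0, 0, 1, 1, 1, 1])
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.5,
                                              stratify=y, random_state=0)
    # both halves contain two samples of each class
    return np.bincount(y_tr).tolist(), np.bincount(y_te).tolist()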
| bsd-3-clause |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/backends/wx_compat.py | 8 | 5224 | #!/usr/bin/env python
"""
A wx API adapter to hide differences between wxPython classic and phoenix.
It is assumed that the user code is selecting what version it wants to use;
here we just ensure that it meets the minimum required by matplotlib.
For an example see embedding_in_wx2.py
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from distutils.version import LooseVersion
missingwx = "Matplotlib backend_wx and backend_wxagg require wxPython >=2.8.12"
try:
import wx
backend_version = wx.VERSION_STRING
is_phoenix = 'phoenix' in wx.PlatformInfo
except ImportError:
raise ImportError(missingwx)
# Ensure we have the correct version imported
if LooseVersion(wx.VERSION_STRING) < LooseVersion("2.8.12"):
print(" wxPython version %s was imported." % backend_version)
raise ImportError(missingwx)
if is_phoenix:
# define all the wxPython phoenix stuff
# font styles, families and weight
fontweights = {
100: wx.FONTWEIGHT_LIGHT,
200: wx.FONTWEIGHT_LIGHT,
300: wx.FONTWEIGHT_LIGHT,
400: wx.FONTWEIGHT_NORMAL,
500: wx.FONTWEIGHT_NORMAL,
600: wx.FONTWEIGHT_NORMAL,
700: wx.FONTWEIGHT_BOLD,
800: wx.FONTWEIGHT_BOLD,
900: wx.FONTWEIGHT_BOLD,
'ultralight': wx.FONTWEIGHT_LIGHT,
'light': wx.FONTWEIGHT_LIGHT,
'normal': wx.FONTWEIGHT_NORMAL,
'medium': wx.FONTWEIGHT_NORMAL,
'semibold': wx.FONTWEIGHT_NORMAL,
'bold': wx.FONTWEIGHT_BOLD,
'heavy': wx.FONTWEIGHT_BOLD,
'ultrabold': wx.FONTWEIGHT_BOLD,
'black': wx.FONTWEIGHT_BOLD
}
fontangles = {
'italic': wx.FONTSTYLE_ITALIC,
'normal': wx.FONTSTYLE_NORMAL,
'oblique': wx.FONTSTYLE_SLANT}
# wxPython allows for portable font styles, choosing them appropriately
# for the target platform. Map some standard font names to the portable
# styles
    # QUESTION: Is it wise to agree on standard fontnames across all backends?
fontnames = {'Sans': wx.FONTFAMILY_SWISS,
'Roman': wx.FONTFAMILY_ROMAN,
'Script': wx.FONTFAMILY_SCRIPT,
'Decorative': wx.FONTFAMILY_DECORATIVE,
'Modern': wx.FONTFAMILY_MODERN,
'Courier': wx.FONTFAMILY_MODERN,
'courier': wx.FONTFAMILY_MODERN}
dashd_wx = {'solid': wx.PENSTYLE_SOLID,
'dashed': wx.PENSTYLE_SHORT_DASH,
'dashdot': wx.PENSTYLE_DOT_DASH,
'dotted': wx.PENSTYLE_DOT}
# functions changes
BitmapFromBuffer = wx.Bitmap.FromBufferRGBA
EmptyBitmap = wx.Bitmap
EmptyImage = wx.Image
Cursor = wx.Cursor
EventLoop = wx.GUIEventLoop
NamedColour = wx.Colour
StockCursor = wx.Cursor
else:
# define all the wxPython classic stuff
# font styles, families and weight
fontweights = {
100: wx.LIGHT,
200: wx.LIGHT,
300: wx.LIGHT,
400: wx.NORMAL,
500: wx.NORMAL,
600: wx.NORMAL,
700: wx.BOLD,
800: wx.BOLD,
900: wx.BOLD,
'ultralight': wx.LIGHT,
'light': wx.LIGHT,
'normal': wx.NORMAL,
'medium': wx.NORMAL,
'semibold': wx.NORMAL,
'bold': wx.BOLD,
'heavy': wx.BOLD,
'ultrabold': wx.BOLD,
'black': wx.BOLD
}
fontangles = {
'italic': wx.ITALIC,
'normal': wx.NORMAL,
'oblique': wx.SLANT}
# wxPython allows for portable font styles, choosing them appropriately
# for the target platform. Map some standard font names to the portable
# styles
    # QUESTION: Is it wise to agree on standard fontnames across all backends?
fontnames = {'Sans': wx.SWISS,
'Roman': wx.ROMAN,
'Script': wx.SCRIPT,
'Decorative': wx.DECORATIVE,
'Modern': wx.MODERN,
'Courier': wx.MODERN,
'courier': wx.MODERN}
dashd_wx = {'solid': wx.SOLID,
'dashed': wx.SHORT_DASH,
'dashdot': wx.DOT_DASH,
'dotted': wx.DOT}
# functions changes
BitmapFromBuffer = wx.BitmapFromBufferRGBA
EmptyBitmap = wx.EmptyBitmap
EmptyImage = wx.EmptyImage
Cursor = wx.StockCursor
EventLoop = wx.EventLoop
NamedColour = wx.NamedColour
StockCursor = wx.StockCursor
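# Illustrative sketch (assumes the caller has already created a wx.App): code
# written against the aliases above runs unchanged on classic and phoenix.
def _example_compat_usage():
    return Cursor(wx.CURSOR_HAND), EmptyBitmap(16, 16)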
def _AddTool(parent, wx_ids, text, bmp, tooltip_text):
if is_phoenix:
if text in ['Pan', 'Zoom']:
kind = wx.ITEM_CHECK
else:
kind = wx.ITEM_NORMAL
parent.AddTool(wx_ids[text], label=text,
bitmap=bmp,
bmpDisabled=wx.NullBitmap,
shortHelpString=text,
longHelpString=tooltip_text,
kind=kind)
else:
if text in ['Pan', 'Zoom']:
parent.AddCheckTool(
wx_ids[text],
bmp,
shortHelp=text,
longHelp=tooltip_text)
else:
parent.AddSimpleTool(wx_ids[text], bmp, text, tooltip_text)
| apache-2.0 |
jarn0ld/gnuradio | gr-dtv/examples/atsc_ctrlport_monitor.py | 21 | 6089 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import sys
import matplotlib
matplotlib.use("QT4Agg")
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from gnuradio.ctrlport.GNURadioControlPortClient import GNURadioControlPortClient
import scipy
from scipy import fftpack
"""
If a host is running the ATSC receiver chain with ControlPort
turned on, this script will connect to the host using the hostname and
port pair of the ControlPort instance and display metrics of the
receiver. The ATSC receiver chain publishes information about the success of the
Reed-Solomon decoder and Viterbi metrics for use here in displaying
the link quality. This also gets the equalizer taps of the receiver
and displays the frequency response.
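Example invocation (host name and ControlPort port number are placeholders
for a running flowgraph with ControlPort enabled):
    ./atsc_ctrlport_monitor.py localhost 9090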
"""
class atsc_ctrlport_monitor:
def __init__(self, host, port):
argv = [None, host, port]
radiosys = GNURadioControlPortClient(argv=argv, rpcmethod='thrift')
self.radio = radiosys.client
print self.radio
vt_init_key = 'dtv_atsc_viterbi_decoder0::decoder_metrics'
data = self.radio.getKnobs([vt_init_key])[vt_init_key]
init_metric = scipy.mean(data.value)
self._viterbi_metric = 100*[init_metric,]
table_col_labels = ('Num Packets', 'Error Rate', 'Packet Error Rate',
'Viterbi Metric', 'SNR')
self._fig = plt.figure(1, figsize=(12,12), facecolor='w')
self._sp0 = self._fig.add_subplot(4,1,1)
self._sp1 = self._fig.add_subplot(4,1,2)
self._sp2 = self._fig.add_subplot(4,1,3)
self._plot_taps = self._sp0.plot([], [], 'k', linewidth=2)
self._plot_psd = self._sp1.plot([], [], 'k', linewidth=2)
self._plot_data = self._sp2.plot([], [], 'ok', linewidth=2, markersize=4, alpha=0.05)
self._ax2 = self._fig.add_subplot(4,1,4)
self._table = self._ax2.table(cellText=[len(table_col_labels)*['0']],
colLabels=table_col_labels,
loc='center')
self._ax2.axis('off')
cells = self._table.properties()['child_artists']
for c in cells:
            c.set_lw(0.1)  # sets line width
c.set_ls('solid')
c.set_height(0.2)
ani = animation.FuncAnimation(self._fig, self.update_data, frames=200,
fargs=(self._plot_taps[0], self._plot_psd[0],
self._plot_data[0], self._table),
init_func=self.init_function,
blit=True)
plt.show()
def update_data(self, x, taps, psd, syms, table):
try:
eqdata_key = 'dtv_atsc_equalizer0::taps'
symdata_key = 'dtv_atsc_equalizer0::data'
rs_nump_key = 'dtv_atsc_rs_decoder0::num_packets'
rs_numbp_key = 'dtv_atsc_rs_decoder0::num_bad_packets'
rs_numerrs_key = 'dtv_atsc_rs_decoder0::num_errors_corrected'
vt_metrics_key = 'dtv_atsc_viterbi_decoder0::decoder_metrics'
snr_key = 'probe2_f0::SNR'
data = self.radio.getKnobs([])
eqdata = data[eqdata_key]
symdata = data[symdata_key]
rs_num_packets = data[rs_nump_key]
rs_num_bad_packets = data[rs_numbp_key]
rs_num_errors_corrected = data[rs_numerrs_key]
vt_decoder_metrics = data[vt_metrics_key]
snr_est = data[snr_key]
vt_decoder_metrics = scipy.mean(vt_decoder_metrics.value)
self._viterbi_metric.pop()
self._viterbi_metric.insert(0, vt_decoder_metrics)
except:
sys.stderr.write("Lost connection, exiting")
sys.exit(1)
ntaps = len(eqdata.value)
taps.set_ydata(eqdata.value)
taps.set_xdata(xrange(ntaps))
self._sp0.set_xlim(0, ntaps)
self._sp0.set_ylim(min(eqdata.value), max(eqdata.value))
fs = 6.25e6
freq = scipy.linspace(-fs/2, fs/2, 10000)
H = fftpack.fftshift(fftpack.fft(eqdata.value, 10000))
HdB = 20.0*scipy.log10(abs(H))
psd.set_ydata(HdB)
psd.set_xdata(freq)
self._sp1.set_xlim(0, fs/2)
self._sp1.set_ylim([min(HdB), max(HdB)])
self._sp1.set_yticks([min(HdB), max(HdB)])
self._sp1.set_yticklabels(["min", "max"])
nsyms = len(symdata.value)
syms.set_ydata(symdata.value)
syms.set_xdata(nsyms*[0,])
self._sp2.set_xlim([-1, 1])
self._sp2.set_ylim([-10, 10])
per = float(rs_num_bad_packets.value) / float(rs_num_packets.value)
ber = float(rs_num_errors_corrected.value) / float(187*rs_num_packets.value)
table._cells[(1,0)]._text.set_text("{0}".format(rs_num_packets.value))
table._cells[(1,1)]._text.set_text("{0:.2g}".format(ber))
table._cells[(1,2)]._text.set_text("{0:.2g}".format(per))
table._cells[(1,3)]._text.set_text("{0:.1f}".format(scipy.mean(self._viterbi_metric)))
table._cells[(1,4)]._text.set_text("{0:.4f}".format(snr_est.value[0]))
return (taps, psd, syms, table)
def init_function(self):
return self._plot_taps + self._plot_psd + self._plot_data
if __name__ == "__main__":
host = sys.argv[1]
port = sys.argv[2]
m = atsc_ctrlport_monitor(host, port)
| gpl-3.0 |
yunfanz/ReionBub | Choud14/TestESP.py | 1 | 9152 | import numpy as np, matplotlib.pyplot as p, scipy.special
import cosmolopy.perturbation as pb
import cosmolopy.density as cd
from scipy.integrate import quad, tplquad
import itertools
from scipy.interpolate import interp1d
from scipy.interpolate import RectBivariateSpline as RBS
import optparse, sys
from scipy.optimize import brenth, brentq
from sigmas import *
from scipy.ndimage.morphology import binary_dilation
from joblib import Parallel, delayed
import multiprocessing
num_cores = multiprocessing.cpu_count()
o = optparse.OptionParser()
o.add_option('-d','--del0', dest='del0', default=5.)
o.add_option('-m','--mul', dest='mul', default=1.)
o.add_option('-z','--red', dest='red', default=12.)
opts,args = o.parse_args(sys.argv[1:])
print opts, args
rhobar = cd.cosmo_densities(**cosmo)[1] #msun/Mpc
def m2R(m):
RL = (3*m/4/np.pi/rhobar)**(1./3)
return RL
def m2V(m):
return m/rhobar
def R2m(RL):
m = 4*np.pi/3*rhobar*RL**3
return m
dmS = np.load('sig0.npz')
RLtemp, MLtemp,SLtemp = dmS['radius'], dmS['mass'],dmS['sig0']
fs2m = interp1d(SLtemp,MLtemp)
# fsig0 = interp1d(RLtemp,SLtemp)
# def sig0(RL):
# return fsig0(RL)
print 'generated fs2m'
def S2M(S):
return fs2m(S)
def m2S(m):
return sig0(m2R(m))
def mmin(z,Tvir=1.E4):
return pb.virial_mass(Tvir,z,**cosmo)
def gam(RL):
return sig1m(RL)/np.sqrt(sig0(RL)*sigG(RL,2))
def Vstar(RL):
return (6*np.pi)**1.5*np.sqrt(sigG(RL,1)/sigG(RL,2))**3.
def erf(x):
return scipy.special.erf(x)
def prob(x,av=0.5,var=0.25):
return 1/np.sqrt(2*np.pi*var)/x*np.exp(-(np.log(x)-av)**2/2/var)
def F(x):
return (x**3-3*x)/2*(erf(x*np.sqrt(5./2))+erf(x*np.sqrt(5./8)))+np.sqrt(2./5/np.pi)*((31.*x**2/4+8./5)*np.exp(-5.*x**2/8)+(x**2/2-8./5)*np.exp(-5.*x**2/2))
def Deltac(z):
fgrowth = pb.fgrowth(z, cosmo['omega_M_0']) # = D(z)/D(0)
return 1.686/fgrowth
#return 1.686*fgrowth #?????
def pG(y,av,var):
return 1/np.sqrt(2*np.pi*var)*np.exp(-(y-av)**2/2/var)
def B(z,beta,s):
#return Deltac(z)+beta*np.sqrt(s)
return 1.686#+beta*np.sqrt(s)
def Q(m,M0, eps=1.e-6):
r,R0 = m2R(m), m2R(M0)
s,s0 = sig0(r), sig0(R0)
sx = SX(r,R0)
Q = 1-sx**2/s/s0
if Q <= 0.:
print 'Q {}<0, recompute with quad'.format(Q)
s,s0 = sig0(r, method='quad'), sig0(R0, method='quad')
sx = SX(r,R0, method='quad')
Q = 1-sx**2/s/s0
print 'quad Q=', Q
if Q <= 0: raise(Exception)
#print m, M0, Q
return Q
def epX(m,M0):
r,R0 = m2R(m), m2R(M0)
s,s0 = sig0(r), sig0(R0)
sx = SX(r,R0)
sg1m = sig1m(r)
sg1mX = sig1mX(r,R0)
return s*sg1mX/sx/sg1m
def testqua1(m,M0):
return (1-epX(m, M0))/Q(m, M0)
def testqua2(m,M0):
return (1-epX(m, M0))**2/Q(m, M0)
#def trapz(x,y):
# return (x[-1]*y[-1]-x[0]*y[0]+np.sum(x[1:]*y[:-1]-y[1:]*x[:-1]))/2
def trapz(x,y):
return np.trapz(y,x=x)
# def subgrand_trapz_log(b,del0,s,s0,sx,epx,q,meanmu,varmu,varx,gamm,R0,V,z,err=False):
# # EqA8, log intervaled integration axis
# Bb = B(z,b,s)
# #print 'gamm,epx,q =',gamm,epx,q
# meanx = gamm*((Bb-del0*sx/s0)*(1-epx)/q/np.sqrt(s)+Bb*epx/np.sqrt(s))
# fact = V/Vstar(R0)*pG(Bb/np.sqrt(s),meanmu, varmu)
# #print b, Bb/np.sqrt(s),meanmu,varmu,pG(Bb/np.sqrt(s),meanmu, varmu)
# #print b
# lxmin,lxmax = np.log(b*gamm), np.log(100.)
# lx = np.linspace(lxmin,lxmax,100)
# x = np.exp(lx)
# y = (x/gamm-b)*F(x)*pG(x,meanx,varx)*x
# factint = trapz(x,y)
# #print y
# #print factint
# #factint = quad(lambda x: (x/gamm-b)*F(x)*pG(x,meanx,varx),b*gamm,100)[0]
# #print fact, factint
# return fact*factint
def test1(s):
# EqA8, non-log intervaled integration axis
Bb = 1.686
nu = Bb/np.sqrt(s)
#print 'gamm,epx,q =',gamm,epx,q
r = m2R(S2M(s))
gamm = gam(r)
meanx = gamm*nu
varx = 1-gamm**2
V = 4*np.pi*r**3./3.
fact = V/Vstar(r)*pG(nu,0., 1.)
#print b, Bb/np.sqrt(s),meanmu,varmu,pG(Bb/np.sqrt(s),meanmu, varmu)
#print b
#x = np.linspace(b*gamm,100.,200)
#TUNE
x = np.logspace(-5,5,200)
y = (x/gamm)*F(x)*pG(x,meanx,varx)
factint = np.trapz(y,x)
#print np.log10(b*gamm), fact, factint
return fact*factint/2
def _blims(b, y, factor=1.e-6):
"""Integration limits used internally by the sigma_r functionp."""
maxintegrand = np.max(np.abs(y))
highmask = np.abs(y) > maxintegrand * factor
highmask = binary_dilation(highmask)
minb = np.min(b[highmask])
maxb = np.max(b[highmask])
return minb, maxb
def _integrand_trapz_y(b,del0,s,s0,sx,epx,q,meanmu,varmu,varx,gamm,R0,V,z):
y = []
for bx in b:
newy = prob(bx)*subgrand_trapz(bx,del0,s,s0,sx,epx,q,meanmu,varmu,varx,gamm,R0,V,z)/2/s
if np.isnan(newy):
print 'NAN detected, breaking at: '
print bx,prob(bx),del0,s,s0,sx,epx,q,meanmu,varmu,varx,gamm,R0,V
break
else:
y.append(newy)
return np.asarray(y)
def integrand_trapz(del0,m,M0,R0,z): #2s*f_ESP
# of A7, divided by 2s; this IS f_ESP
s = sig0(m2R(m))
V,r,dmdr = pb.volume_radius_dmdr(m,**cosmo)
s,s0,sx = sig0(r), sig0(R0),SX(r,R0)
gamm = gam(r)
epx,q = epX(m,M0), Q(m,M0)
meanmu = del0/np.sqrt(s)*sx/s0
varmu = q
varx = 1-gamm**2
#print varmu, varx
if varx<0:
print "varx<0, breaking at varx, gamm, epx, q,m,M0="
print varx, gamm, epx, q, m, M0
#b = np.arange(0.00001,30.,0.03) #TUNE
b = np.logspace(-6,3,100)
y = _integrand_trapz_y(b,del0,s,s0,sx,epx,q,meanmu,varmu,varx,gamm,R0,V,z)
if (y==0.).all(): return 0.
blims = _blims(b, y)
while blims[0] == blims[1]:
b = np.logspace(np.log10(blims[0]*0.99),np.log10(blims[1]*1.01),100)
y = _integrand_trapz_y(b,del0,s,s0,sx,epx,q,meanmu,varmu,varx,gamm,R0,V,z)
blims = _blims(b, y)
b = np.logspace(np.log10(blims[0]),np.log10(blims[1]),100)
y = _integrand_trapz_y(b,del0,s,s0,sx,epx,q,meanmu,varmu,varx,gamm,R0,V,z)
if y[-1]/np.max(y)>1.E-3:
print "Warning: choice of bmax too small"
print y
print blims
import IPython; IPython.embed()
raise(Exception)
if y[0]/np.max(y)>1.E-3:
print "Warning: choice of bmin too big"
return np.trapz(y,b)
#return quad(lambda b: prob(b)*subgrand_trapz(b,del0,m,M0,z),0,4.)[0]/2/s
def dsdm(m):
return np.abs(sig0(m2R(m+1))-sig0(m2R(m-1)))/2
# def fcoll(del0,M0,z):
# mm = mmin(z)
# R0 = m2R(M0)
# return quad(lambda m: integrand_trapz(del0,m,M0,R0,z)*dsdm(m),mm,M0)
# def fcoll_trapz(del0,M0,z):
# mm = mmin(z)
# R0 = m2R(M0)
# mx = np.arange(mm,M0,mm)
# y = []
# for m in mx:
# y.append(integrand_trapz(del0,m,M0,R0,z)*dsdm(m))
# print m, y[-1]
# return np.trapz(y,mx,dx=mm)
# #eturn trapz(mx,y)
# def fcoll_trapz_log(del0,M0,z,debug=False):
# # Eq. (6)
# print del0
# mm = mmin(z)
# R0 = m2R(M0)
# lmx = np.linspace(np.log(mm),np.log(M0),200)
# y = []
# for lm in lmx:
# m = np.exp(lm)
# y.append(integrand_trapz(del0,m,M0,R0,z)*dsdm(m)*m) #dsdm*m=ds/dln(m)
# if debug:
# return trapz(lmx,y),np.exp(lmx),y
# else:
# return trapz(lmx,y)
def fcoll_trapz_log(del0,M0,z,debug=False):
# Eq. (6)
print del0
mm = mmin(z)
R0 = m2R(M0)
mx = np.logspace(np.log10(mm),np.log10(M0),200)
ls = sig0(m2R(mx))
y = []
for m in mx:
y.append(integrand_trapz(del0,m,M0,R0,z))
if debug:
		return trapz(ls[::-1],y[::-1]), mx, y
else:
#print ls[::-1],y[::-1]
return trapz(ls[::-1],y[::-1])
#
def resinterp(x1,x2,y1,y2):
if y1*y2>0: raise ValueError('resinterp: root not in range')
else:
return (y2*x1-y1*x2)/(y2-y1)
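# Worked example (sketch): resinterp(1., 2., -1., 1.) = 1.5, i.e. the x where the straight line
# through (x1,y1)=(1,-1) and (x2,y2)=(2,1) crosses zero; used below to refine the barrier-height root.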
if __name__ == "__main__":
zeta = 40.
# Z = float(opts.red)
# M0 = zeta*mmin(Z)*float(opts.mul)
# del0 = float(opts.del0)
Z = 12.
#M0 = zeta*mmin(Z)
#Mlist = np.exp(np.linspace(np.log(M0),np.log(1000*M0),10))
Slist = np.arange(5.,6.,1.)
Mlist = S2M(Slist)
#dlist = np.linspace(8,10,16)
# for del0 in dlist:
# res = fcoll_trapz_log(del0,M0,Z)
# print m2S(M0), res[0]
#Bracks = (())
# def parafunc(S0,Z):
# M0 = S2M(S0)
# def newfunc(del0):
# return fcoll_trapz_log(del0,M0,Z)*40-1
# return brentq(newfunc,11,14.5,xtol=1.E-3,maxiter=100)
if False:
reslist = Parallel(n_jobs=num_cores)(delayed(parafunc)(S0,Z) for S0 in Slist)
print reslist
p.figure()
p.plot(Slist,reslist)
p.show()
elif True:
try:
rootlist = []
for M0 in Mlist:
def newfunc(del0):
res = fcoll_trapz_log(del0,M0,Z)*40-1
return res
Dlist = np.linspace(3.,20.,4)
NJOBS = min(Dlist.size, num_cores)
reslist = Parallel(n_jobs=NJOBS)(delayed(newfunc)(d0) for d0 in Dlist)
print reslist
if reslist[0]*reslist[-1]>0:
print "root not in range"
break
else:
print "enter second round of process"
i = 0
while reslist[i]*reslist[-1]<0: i+=1
Dlist2 = np.linspace(Dlist[i-1],Dlist[i],8)
reslist = Parallel(n_jobs=NJOBS)(delayed(newfunc)(d0) for d0 in Dlist2)
print reslist
i = 0
while reslist[i]*reslist[-1]<0: i+=1
resroot = resinterp(Dlist2[i-1],Dlist2[i],reslist[i-1],reslist[i])
print 'Barrier height:', resroot
rootlist.append(resroot)
print rootlist
p.figure()
p.plot(Slist,rootlist)
p.savefig('barrier_z{}.png'.format(Z))
except:
e = sys.exc_info()
print e, '\n'
finally:
memory.clear()
else:
print 'doing nothing'
#tplquad(All,mmin,M0,lambda x: 0, lambda x: 5., lambda x,y: gam(m2R(x))*y,lambda x,y: 10.,args=(del0,M0,z))
| mit |
luminescence/PolyLibScan | Analysis/job.py | 1 | 9716 | #import parser
import numpy as np
import pathlib2 as pl
import pandas as pd
import warnings
import plotting
import bayesModels as bayes
import itertools as it
import pymol_visualisation
import sim_run
import numerics as num_
import PolyLibScan.Database.db as DB
import PolyLibScan.Tools.config as cfg
warnings.filterwarnings("ignore")
class Job(bayes.Job):
def __init__(self, project, db_path, with_pymol=True):
self.project = project
self.poly_type = None
self.db_path = pl.Path(db_path)
if with_pymol:
self.pymol = pymol_visualisation.PymolVisJob(self)
self._parse = DB.JobDataBase(self.db_path, 'r')
self.Id = None
self.meta = self._parse.misc
self.lmp_parameters = {key:val for key,val in self._parse.parameter}
self.trajectory_meta = self._parse.traj_info
self.trajectory_order = self._parse.traj_type_order
self.particle_list = self._parse.particle_list
self.sequence = self._parse.sequence
self.weights = dict(self._parse.weights)
# set self.active_site if there's a protein present
self.config = cfg.JobConfig(self.db_path.parent.joinpath('config_with_setup.yml').as_posix())
if 'stoichiometry' in self.config.sim_parameter:
if self.config.sim_parameter['stoichiometry'][0] > 0:
self.active_site = self._parse.active_site
else:
self.active_site = self._parse.active_site
self._runs = self._read_runs(with_pymol=with_pymol)
self.particle_ids = self._get_particle_ids()
self._charge = None
self._parse.close()
self._distance_frequency = None
self._energy_distance_distribution = None
super(Job, self).__init__()
def __len__(self):
return len(self._runs)
def __getitem__(self, key):
return self._runs[key]
def __iter__(self):
return iter(self._runs)
def __repr__(self):
info = (self.meta['poly_name'], self.meta['protein'], len(self))
return 'Lammps Run: %s-%s | Runs: %d' % info
def __str__(self):
info = (self.meta['poly_name'], self.meta['protein'], self.Id, len(self))
weights = zip(*self.weights.items())
out = []
out += ['Lammps Run: %s-%s | ID: %d | Runs: %d' % info]
out += ['Monomers: %s' % '-'.join(weights[0])]
out += ['Weights: %s' % '-'.join(map(str, weights[1]))]
return '\n'.join(out)
def _get_particle_ids(self):
# in case there are no sim runs
if len(self) == 0:
raise Exception('No sim data available.')
# this approach works if:
# 1. there is only one protein and one polymer molecule
# 2. the protein was created first, the polymer second
p_type_ids = {}
p_type_ids['polymer'] = np.unique(self.sequence['ID'])
polymer_length = len(self.sequence)
p_type_ids['protein'] = np.unique(self.trajectory_order[:-polymer_length])
return p_type_ids
def charge():
doc = "The charge property."
def fget(self):
if self.project.parameters:
if not self._charge:
self._charge = self._calculate_polymer_charge()
return self._charge
else:
raise AttributeError('Set parameters by path or dict.')
def fset(self, value):
self._charge = value
def fdel(self):
del self._charge
return locals()
charge = property(**charge())
def _calculate_polymer_charge(self):
total_charge = 0
for monomer in self.sequence['monomer']:
p_name, sub_name = monomer.split('_')
total_charge += self.project.parameters['Atoms'][p_name][sub_name]['charge']
return total_charge
def _read_runs(self, with_pymol=True):
'''Reads in all runs from the database and
creates a run object. All runs are sorted by id.
'''
runs = []
# get inputs
for data in self._parse.end_states:
if 'Distance' in data.dtype.names:
                runs.append(sim_run.Run(self, data['ID'], data['Energy'], data["Distance"], with_pymol=with_pymol))
            else:
                runs.append(sim_run.Run(self, data['ID'], data['Energy'], with_pymol=with_pymol))
return sorted(runs, key=lambda x:x.Id)
def distance_frequency():
doc = "The distance_frequency property."
def fget(self):
            if self._distance_frequency is None:
try:
results = self._parse.histogramm
except DB.tb.NoSuchNodeError:
results = self._calc_distance_density(self)
hist_array = self._parse_hist_data(results[['distance', 'frequency']],
results['energy'])
self._parse.histogram = hist_array
self._distance_frequency = results[['distance', 'frequency']]
self._energy_distance_distribution = results[['distance', 'energy']]
return self._distance_frequency
def fset(self, value):
self._distance_frequency = value
def fdel(self):
self._distance_frequency = None
return locals()
distance_frequency = property(**distance_frequency())
def energy_distance_distribution():
doc = "The energy_distance_distribution property."
def fget(self):
            if self._energy_distance_distribution is None:
try:
results = self._parse.hist_data()
                except DB.tb.NoSuchNodeError:
results = self._calc_distance_density(self)
hist_array = self._parse_hist_data(results[['distance', 'frequency']],
results[ 'energy'])
self._parse.histogram = hist_array
self._distance_frequency = results[['distance', 'frequency']]
self._energy_distance_distribution = results[['distance', 'energy']]
return self._energy_distance_distribution
def fset(self, value):
self._energy_distance_distribution = value
def fdel(self):
self._energy_distance_distribution = None
return locals()
energy_distance_distribution = property(**energy_distance_distribution())
def _parse_hist_data(self, distance, energy):
'''Save the observables of distance and energy to the database.
Since this involves writing to the database, it opens the
database in write mode and writes to the /histogram table.
The timestep information is in the distance argument.
'''
results_dtype = [('distance', np.float), ('frequency', np.int),
('energy', np.float)]
data = np.empty(distance.shape[0], results_dtype)
data['distance'] = distance['distance']
data['frequency'] = distance['frequency']
data['energy'] = energy
return data
def _calc_distance_density(self, runs):
dist_v = np.zeros(2000, dtype=[('count', np.int), ('energy', np.float)])
for run in runs:
energy = run.binding_energy()[:,0]
distance = run.distance_time_series()['distance'][-len(energy):]
            # discretize the energy and distance time series into bins for the histogram
distance_density = num_.binning(distance, energy, dist_v)
reduced_hist = num_.discard_tailing_zeros(dist_v['count'])
reduced_len = reduced_hist.shape[0]
energy_mean = dist_v['energy'][:reduced_len] / reduced_hist
results_dtype = [('distance', np.float), ('frequency', np.int),
('energy', np.float)]
results = np.empty(reduced_len, results_dtype)
results['distance'] = np.arange(reduced_len, dtype=np.float)/10
results['frequency'] = reduced_hist
results['energy'] = energy_mean
return results
def to_dataFrame(self):
        if not self._parse.db.is_open():
self._parse.open()
data = pd.DataFrame(self._parse.end_states)
data = data.set_index('ID')
data.set_index([[self.Id for i in xrange(data.shape[0])], data.index], inplace=True)
data.index.names = ['PolymerId', 'RunId']
self._parse.close()
return data
def _calc_protein_box(self, margin=20):
"""calculate the minimal box of the protein coordinates, add margin
and round box_coordinates to the next integer.
The coordinates are taken from the first timestep; this assumes that
        the protein stays fixed during the simulation.
Arguments:
margin: margin added to each side of the box [float/integer]
Output:
box -- 2x3 numpy array
"""
protein_mask = np.in1d(self.trajectory_order, self.particle_ids['protein'])
trajectory_iterator = self._parse.trajectory_load(0)
protein_coords = np.array(list(it.islice(trajectory_iterator, len(protein_mask))))[protein_mask]
box = num_.calc_box(protein_coords, margin=margin)
discrete_box = np.zeros((2,3))
discrete_box[0] = np.floor(box[0])
discrete_box[1] = np.ceil(box[1])
return discrete_box
def energies(self):
return self.project.endstate_matrix.loc[self.Id ,(self.meta['poly_name'], 'Energy')]
def distances(self):
return self.project.endstate_matrix.loc[self.Id ,(self.meta['poly_name'], 'Distance')]
def mean_energy(self):
return np.mean(self.energies())
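# Minimal usage sketch (assumptions: `project` is an existing project object and the job directory
# holds a finished simulation database; the actual file name may differ):
#   job = Job(project, 'path/to/job_dir/jobdata.h5', with_pymol=False)
#   df = job.to_dataFrame()          # end states indexed by (PolymerId, RunId)
#   print job.mean_energy()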
| mit |
shaneknapp/spark | python/pyspark/sql/tests/test_pandas_cogrouped_map.py | 20 | 9306 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.sql.functions import array, explode, col, lit, udf, pandas_udf
from pyspark.sql.types import DoubleType, StructType, StructField, Row
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.testing import assert_frame_equal
if have_pyarrow:
import pyarrow as pa # noqa: F401
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore[arg-type]
class CogroupedMapInPandasTests(ReusedSQLTestCase):
@property
def data1(self):
return self.spark.range(10).toDF('id') \
.withColumn("ks", array([lit(i) for i in range(20, 30)])) \
.withColumn("k", explode(col('ks')))\
.withColumn("v", col('k') * 10)\
.drop('ks')
@property
def data2(self):
return self.spark.range(10).toDF('id') \
.withColumn("ks", array([lit(i) for i in range(20, 30)])) \
.withColumn("k", explode(col('ks'))) \
.withColumn("v2", col('k') * 100) \
.drop('ks')
def test_simple(self):
self._test_merge(self.data1, self.data2)
def test_left_group_empty(self):
left = self.data1.where(col("id") % 2 == 0)
self._test_merge(left, self.data2)
def test_right_group_empty(self):
right = self.data2.where(col("id") % 2 == 0)
self._test_merge(self.data1, right)
def test_different_schemas(self):
right = self.data2.withColumn('v3', lit('a'))
self._test_merge(self.data1, right, 'id long, k int, v int, v2 int, v3 string')
def test_complex_group_by(self):
left = pd.DataFrame.from_dict({
'id': [1, 2, 3],
'k': [5, 6, 7],
'v': [9, 10, 11]
})
right = pd.DataFrame.from_dict({
'id': [11, 12, 13],
'k': [5, 6, 7],
'v2': [90, 100, 110]
})
left_gdf = self.spark\
.createDataFrame(left)\
.groupby(col('id') % 2 == 0)
right_gdf = self.spark \
.createDataFrame(right) \
.groupby(col('id') % 2 == 0)
def merge_pandas(l, r):
return pd.merge(l[['k', 'v']], r[['k', 'v2']], on=['k'])
result = left_gdf \
.cogroup(right_gdf) \
.applyInPandas(merge_pandas, 'k long, v long, v2 long') \
.sort(['k']) \
.toPandas()
expected = pd.DataFrame.from_dict({
'k': [5, 6, 7],
'v': [9, 10, 11],
'v2': [90, 100, 110]
})
assert_frame_equal(expected, result)
def test_empty_group_by(self):
left = self.data1
right = self.data2
def merge_pandas(l, r):
return pd.merge(l, r, on=['id', 'k'])
result = left.groupby().cogroup(right.groupby())\
.applyInPandas(merge_pandas, 'id long, k int, v int, v2 int') \
.sort(['id', 'k']) \
.toPandas()
left = left.toPandas()
right = right.toPandas()
expected = pd \
.merge(left, right, on=['id', 'k']) \
.sort_values(by=['id', 'k'])
assert_frame_equal(expected, result)
    def test_mixed_scalar_udfs_followed_by_cogroupby_apply(self):
df = self.spark.range(0, 10).toDF('v1')
df = df.withColumn('v2', udf(lambda x: x + 1, 'int')(df['v1'])) \
.withColumn('v3', pandas_udf(lambda x: x + 2, 'int')(df['v1']))
result = df.groupby().cogroup(df.groupby()) \
.applyInPandas(lambda x, y: pd.DataFrame([(x.sum().sum(), y.sum().sum())]),
'sum1 int, sum2 int').collect()
self.assertEqual(result[0]['sum1'], 165)
self.assertEqual(result[0]['sum2'], 165)
def test_with_key_left(self):
self._test_with_key(self.data1, self.data1, isLeft=True)
def test_with_key_right(self):
self._test_with_key(self.data1, self.data1, isLeft=False)
def test_with_key_left_group_empty(self):
left = self.data1.where(col("id") % 2 == 0)
self._test_with_key(left, self.data1, isLeft=True)
def test_with_key_right_group_empty(self):
right = self.data1.where(col("id") % 2 == 0)
self._test_with_key(self.data1, right, isLeft=False)
def test_with_key_complex(self):
def left_assign_key(key, l, _):
return l.assign(key=key[0])
result = self.data1 \
.groupby(col('id') % 2 == 0)\
.cogroup(self.data2.groupby(col('id') % 2 == 0)) \
.applyInPandas(left_assign_key, 'id long, k int, v int, key boolean') \
.sort(['id', 'k']) \
.toPandas()
expected = self.data1.toPandas()
expected = expected.assign(key=expected.id % 2 == 0)
assert_frame_equal(expected, result)
def test_wrong_return_type(self):
        # Test that we get a sensible exception when invalid values are passed to apply
left = self.data1
right = self.data2
with QuietTest(self.sc):
with self.assertRaisesRegex(
NotImplementedError,
'Invalid return type.*ArrayType.*TimestampType'):
left.groupby('id').cogroup(right.groupby('id')).applyInPandas(
lambda l, r: l, 'id long, v array<timestamp>')
def test_wrong_args(self):
left = self.data1
right = self.data2
with self.assertRaisesRegex(ValueError, 'Invalid function'):
left.groupby('id').cogroup(right.groupby('id')) \
.applyInPandas(lambda: 1, StructType([StructField("d", DoubleType())]))
def test_case_insensitive_grouping_column(self):
# SPARK-31915: case-insensitive grouping column should work.
df1 = self.spark.createDataFrame([(1, 1)], ("column", "value"))
row = df1.groupby("ColUmn").cogroup(
df1.groupby("COLUMN")
).applyInPandas(lambda r, l: r + l, "column long, value long").first()
self.assertEqual(row.asDict(), Row(column=2, value=2).asDict())
df2 = self.spark.createDataFrame([(1, 1)], ("column", "value"))
row = df1.groupby("ColUmn").cogroup(
df2.groupby("COLUMN")
).applyInPandas(lambda r, l: r + l, "column long, value long").first()
self.assertEqual(row.asDict(), Row(column=2, value=2).asDict())
def test_self_join(self):
# SPARK-34319: self-join with FlatMapCoGroupsInPandas
df = self.spark.createDataFrame([(1, 1)], ("column", "value"))
row = df.groupby("ColUmn").cogroup(
df.groupby("COLUMN")
).applyInPandas(lambda r, l: r + l, "column long, value long")
row = row.join(row).first()
self.assertEqual(row.asDict(), Row(column=2, value=2).asDict())
@staticmethod
def _test_with_key(left, right, isLeft):
def right_assign_key(key, l, r):
return l.assign(key=key[0]) if isLeft else r.assign(key=key[0])
result = left \
.groupby('id') \
.cogroup(right.groupby('id')) \
.applyInPandas(right_assign_key, 'id long, k int, v int, key long') \
.toPandas()
expected = left.toPandas() if isLeft else right.toPandas()
expected = expected.assign(key=expected.id)
assert_frame_equal(expected, result)
@staticmethod
def _test_merge(left, right, output_schema='id long, k int, v int, v2 int'):
def merge_pandas(l, r):
return pd.merge(l, r, on=['id', 'k'])
result = left \
.groupby('id') \
.cogroup(right.groupby('id')) \
.applyInPandas(merge_pandas, output_schema)\
.sort(['id', 'k']) \
.toPandas()
left = left.toPandas()
right = right.toPandas()
expected = pd \
.merge(left, right, on=['id', 'k']) \
.sort_values(by=['id', 'k'])
assert_frame_equal(expected, result)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_cogrouped_map import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
keflavich/scikit-image | doc/examples/plot_tinting_grayscale_images.py | 14 | 5336 | """
=========================
Tinting gray-scale images
=========================
It can be useful to artificially tint an image with some color, either to
highlight particular regions of an image or maybe just to liven up a grayscale
image. This example demonstrates image-tinting by scaling RGB values and by
adjusting colors in the HSV color-space.
In 2D, color images are often represented in RGB---3 layers of 2D arrays, where
the 3 layers represent (R)ed, (G)reen and (B)lue channels of the image. The
simplest way of getting a tinted image is to set each RGB channel to the
grayscale image scaled by a different multiplier for each channel. For example,
multiplying the green and blue channels by 0 leaves only the red channel and
produces a bright red image. Similarly, zeroing-out the blue channel leaves
only the red and green channels, which combine to form yellow.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage import color
from skimage import img_as_float
grayscale_image = img_as_float(data.camera()[::2, ::2])
image = color.gray2rgb(grayscale_image)
red_multiplier = [1, 0, 0]
yellow_multiplier = [1, 1, 0]
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4))
ax1.imshow(red_multiplier * image)
ax2.imshow(yellow_multiplier * image)
"""
.. image:: PLOT2RST.current_figure
In many cases, dealing with RGB values may not be ideal. Because of that, there
are many other `color spaces`_ in which you can represent a color image. One
popular color space is called HSV, which represents hue (~the color),
saturation (~colorfulness), and value (~brightness). For example, a color
(hue) might be green, but its saturation is how intense that green is---where
olive is on the low end and neon on the high end.
In some implementations, the hue in HSV goes from 0 to 360, since hues wrap
around in a circle. In scikit-image, however, hues are float values from 0 to
1, so that hue, saturation, and value all share the same scale.
.. _color spaces:
http://en.wikipedia.org/wiki/List_of_color_spaces_and_their_uses
Below, we plot a linear gradient in the hue, with the saturation and value
turned all the way up:
"""
import numpy as np
hue_gradient = np.linspace(0, 1)
hsv = np.ones(shape=(1, len(hue_gradient), 3), dtype=float)
hsv[:, :, 0] = hue_gradient
all_hues = color.hsv2rgb(hsv)
fig, ax = plt.subplots(figsize=(5, 2))
# Set image extent so hues go from 0 to 1 and the image is a nice aspect ratio.
ax.imshow(all_hues, extent=(0, 1, 0, 0.2))
ax.set_axis_off()
"""
.. image:: PLOT2RST.current_figure
Notice how the colors at the far left and far right are the same. That reflects
the fact that the hues wrap around like the color wheel (see HSV_ for more
info).
.. _HSV: http://en.wikipedia.org/wiki/HSL_and_HSV
Now, let's create a little utility function to take an RGB image and:
1. Transform the RGB image to HSV
2. Set the hue and saturation
3. Transform the HSV image back to RGB
"""
def colorize(image, hue, saturation=1):
""" Add color of the given hue to an RGB image.
By default, set the saturation to 1 so that the colors pop!
"""
hsv = color.rgb2hsv(image)
hsv[:, :, 1] = saturation
hsv[:, :, 0] = hue
return color.hsv2rgb(hsv)
"""
Notice that we need to bump up the saturation; images with zero saturation are
grayscale, so we need a non-zero saturation to actually see the color we've set.
Using the function above, we plot six images with a linear gradient in the hue
and a non-zero saturation:
"""
hue_rotations = np.linspace(0, 1, 6)
fig, axes = plt.subplots(nrows=2, ncols=3)
for ax, hue in zip(axes.flat, hue_rotations):
# Turn down the saturation to give it that vintage look.
tinted_image = colorize(image, hue, saturation=0.3)
ax.imshow(tinted_image, vmin=0, vmax=1)
ax.set_axis_off()
fig.tight_layout()
"""
.. image:: PLOT2RST.current_figure
You can combine this tinting effect with numpy slicing and fancy-indexing to
selectively tint your images. In the example below, we set the hue of some
rectangles using slicing and scale the RGB values of some pixels found by
thresholding. In practice, you might want to define a region for tinting based
on segmentation results or blob detection methods.
"""
from skimage.filters import rank
# Square regions defined as slices over the first two dimensions.
top_left = (slice(100),) * 2
bottom_right = (slice(-100, None),) * 2
sliced_image = image.copy()
sliced_image[top_left] = colorize(image[top_left], 0.82, saturation=0.5)
sliced_image[bottom_right] = colorize(image[bottom_right], 0.5, saturation=0.5)
# Create a mask selecting regions with interesting texture.
noisy = rank.entropy(grayscale_image, np.ones((9, 9)))
textured_regions = noisy > 4
# Note that using `colorize` here is a bit more difficult, since `rgb2hsv`
# expects an RGB image (height x width x channel), but fancy-indexing returns
# a set of RGB pixels (# pixels x channel).
masked_image = image.copy()
masked_image[textured_regions, :] *= red_multiplier
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4))
ax1.imshow(sliced_image)
ax2.imshow(masked_image)
plt.show()
"""
.. image:: PLOT2RST.current_figure
For coloring multiple regions, you may also be interested in
`skimage.color.label2rgb <http://scikit-image.org/docs/0.9.x/api/skimage.color.html#label2rgb>`_.
"""
| bsd-3-clause |
intersense/cudaDMA-gw | src/examples/deprecated/saxpy_ratio/replot.py | 2 | 1096 |
import sys
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
def main():
import pickle
output = open('data.pkl', 'r')
data = pickle.load(output)
print data
lbl_sz = 12
styles = ['-','--',':']
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
for name, vals, style in zip(data.keys(),data.values(),styles):
xs, ys_gbs, ys_gflops = vals
ax1.plot(xs,ys_gbs,'b'+style, label=name)
ax2.plot(xs, ys_gflops, 'r'+style, label=name)
ax1.set_xlabel('B/FLOP', size=lbl_sz)
ax1.set_ylabel('GB/s', size=lbl_sz, color='b')
for t1 in ax1.get_yticklabels():
t1.set_color('b')
ax2.set_ylabel('GFLOP/s', size=lbl_sz, color='r')
for t1 in ax2.get_yticklabels():
t1.set_color('r')
ax1.set_xscale('log', basex=2)
ax2.set_xscale('log', basex=2)
ax1.invert_xaxis()
ax2.invert_xaxis()
ax1.set_ylim(0)
ax2.set_ylim(0)
leg = plt.legend(loc=7)
for t in leg.get_texts():
t.set_fontsize('small')
for l in leg.get_lines():
l.set_color('k')
plt.show()
if __name__ == "__main__":
main()
| apache-2.0 |
dgellis90/nipype | nipype/utils/config.py | 9 | 5668 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
'''
Created on 20 Apr 2010
logging options : INFO, DEBUG
hash_method : content, timestamp
@author: Chris Filo Gorgolewski
'''
from future import standard_library
standard_library.install_aliases()
from builtins import object
import configparser
from json import load, dump
import os
import shutil
import errno
from warnings import warn
from ..external import portalocker
from ..external.six import StringIO
# Get home directory in platform-agnostic way
homedir = os.path.expanduser('~')
default_cfg = """
[logging]
workflow_level = INFO
filemanip_level = INFO
interface_level = INFO
log_to_file = false
log_directory = %s
log_size = 16384000
log_rotate = 4
[execution]
create_report = true
crashdump_dir = %s
display_variable = :1
hash_method = timestamp
job_finished_timeout = 5
keep_inputs = false
local_hash_check = true
matplotlib_backend = Agg
plugin = Linear
remove_node_directories = false
remove_unnecessary_outputs = true
try_hard_link_datasink = true
single_thread_matlab = true
stop_on_first_crash = false
stop_on_first_rerun = false
use_relative_paths = false
stop_on_unknown_version = false
write_provenance = false
parameterize_dirs = true
poll_sleep_duration = 60
xvfb_max_wait = 10
[check]
interval = 1209600
""" % (homedir, os.getcwd())
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class NipypeConfig(object):
"""Base nipype config class
"""
def __init__(self, *args, **kwargs):
self._config = configparser.ConfigParser()
config_dir = os.path.expanduser('~/.nipype')
mkdir_p(config_dir)
old_config_file = os.path.expanduser('~/.nipype.cfg')
new_config_file = os.path.join(config_dir, 'nipype.cfg')
# To be deprecated in two releases
if os.path.exists(old_config_file):
if os.path.exists(new_config_file):
msg = ("Detected presence of both old (%s, used by versions "
"< 0.5.2) and new (%s) config files. This version will "
"proceed with the new one. We advise to merge settings "
"and remove old config file if you are not planning to "
"use previous releases of nipype.") % (old_config_file,
new_config_file)
warn(msg)
else:
warn("Moving old config file from: %s to %s" % (old_config_file,
new_config_file))
shutil.move(old_config_file, new_config_file)
self.data_file = os.path.join(config_dir, 'nipype.json')
self._config.readfp(StringIO(default_cfg))
self._config.read([new_config_file, old_config_file, 'nipype.cfg'])
def set_default_config(self):
self._config.readfp(StringIO(default_cfg))
def enable_debug_mode(self):
"""Enables debug configuration
"""
self._config.set('execution', 'stop_on_first_crash', 'true')
self._config.set('execution', 'remove_unnecessary_outputs', 'false')
self._config.set('execution', 'keep_inputs', 'true')
self._config.set('logging', 'workflow_level', 'DEBUG')
self._config.set('logging', 'interface_level', 'DEBUG')
def set_log_dir(self, log_dir):
"""Sets logging directory
This should be the first thing that is done before any nipype class
with logging is imported.
"""
self._config.set('logging', 'log_directory', log_dir)
def get(self, section, option):
return self._config.get(section, option)
def set(self, section, option, value):
if isinstance(value, bool):
value = str(value)
return self._config.set(section, option, value)
def getboolean(self, section, option):
return self._config.getboolean(section, option)
def has_option(self, section, option):
return self._config.has_option(section, option)
@property
def _sections(self):
return self._config._sections
def get_data(self, key):
if not os.path.exists(self.data_file):
return None
with open(self.data_file, 'rt') as file:
portalocker.lock(file, portalocker.LOCK_EX)
datadict = load(file)
if key in datadict:
return datadict[key]
return None
def save_data(self, key, value):
datadict = {}
if os.path.exists(self.data_file):
with open(self.data_file, 'rt') as file:
portalocker.lock(file, portalocker.LOCK_EX)
datadict = load(file)
with open(self.data_file, 'wt') as file:
portalocker.lock(file, portalocker.LOCK_EX)
datadict[key] = value
dump(datadict, file)
def update_config(self, config_dict):
for section in ['execution', 'logging', 'check']:
if section in config_dict:
for key, val in list(config_dict[section].items()):
if not key.startswith('__'):
self._config.set(section, key, str(val))
def update_matplotlib(self):
import matplotlib
matplotlib.use(self.get('execution', 'matplotlib_backend'))
def enable_provenance(self):
self._config.set('execution', 'write_provenance', 'true')
self._config.set('execution', 'hash_method', 'content')
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/user_interfaces/embedding_in_tk.py | 3 | 1095 | #!/usr/bin/env python
import matplotlib
matplotlib.use('TkAgg')
from numpy import arange, sin, pi
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import Tkinter as Tk
import sys
root = Tk.Tk()
root.wm_title("Embedding in TK")
f = Figure(figsize=(5,4), dpi=100)
a = f.add_subplot(111)
t = arange(0.0,3.0,0.01)
s = sin(2*pi*t)
a.plot(t,s)
# a tk.DrawingArea
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
toolbar = NavigationToolbar2TkAgg( canvas, root )
toolbar.update()
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
def _quit():
root.quit() # stops mainloop
root.destroy() # this is necessary on Windows to prevent
# Fatal Python Error: PyEval_RestoreThread: NULL tstate
button = Tk.Button(master=root, text='Quit', command=_quit)
button.pack(side=Tk.BOTTOM)
Tk.mainloop()
# If you put root.destroy() here, it will cause an error if
# the window is closed with the window manager.
| gpl-2.0 |
LeiDai/meep_metamaterials | meep_utils.py | 1 | 40785 | #!/usr/bin/env python
#coding:utf8
"""
Here you can find various functions and classes that facilitate the work with python-meep.
I believe some of these functions ought to be implemented in the meep module.
Filip Dominec 2012-2013
"""
import numpy as np
import os, os.path, sys, subprocess, time
from scipy.constants import c, epsilon_0, mu_0
#import meep
import meep_mpi as meep
## XXX
import _meep_mpi
## Define the simulated models as a class (much simpler and more flexible than handling callbacks)
class AbstractMeepModel(meep.Callback): #{{{
def __init__(self):
meep.Callback.__init__(self)
self.double_vec = None # (callback function to be redirected to the desired function)
self.return_value = True
def register_local(self, param, val):
""" Adds a parameter as an attribute of the model (either number or float), adds it also to the simulation name"""
setattr(self, param, val)
## prepare the parameter to be added into name (if not conversible to float, add it as a string)
try:
self.simulation_name += ("_%s=%.2e") % (param, float(val))
self.parameterstring += "#param %s,%.4e\n" % (param, val)
except:
self.simulation_name += ("_%s=%s") % (param, val)
self.parameterstring += "#param %s,%s\n" % (param, val)
def register_locals(self, params):
""" Scans through the parameters and calls register_local() for each """
self.parameterstring = ""
## First look up for the "VIP" parameters that should come first in the name:
preferred_params = ['resolution', 'comment', 'frequency', 'simtime']
for param in preferred_params:
if params.get(param):
val = params.get(param)
self.register_local(param, val)
## Then add all remaining parameters of the model
for (param, val) in params.iteritems():
if param != 'self' and param not in preferred_params:
self.register_local(param, val)
def eps(self, r):
""" Scans through materials and adds the high-frequency part of permittivity for each of them.
This is why materials should never overlap. """
for mat in self.materials:
if mat.where(r): return mat.eps
else: return 1.
def build_polarizabilities(self, structure):
"""
This is a helper to define the susceptibilities using the callback.
It goes through all polarizabilities for all materials.
Applicable for time-domain simulation only, because dispersive model is not implemented for
frequency-domain simulation yet.
"""
avail_cbs = [meep.DBL5, meep.DBL4, meep.DBL3, meep.DBL2, meep.DBL1,]
avail_cb_setters = [meep.set_DBL5_Callback, meep.set_DBL4_Callback, meep.set_DBL3_Callback,
meep.set_DBL2_Callback, meep.set_DBL1_Callback,]
for material in self.materials:
meep.master_printf("\tAdding material: %s with epsilon: %s at frequency %.4g Hz\n" %
(material.name, analytic_eps(material, self.srcFreq).__str__(), self.srcFreq))
for polariz in material.pol:
if avail_cbs == []:
meep.master_printf("Error: too many polarizabilities defined (5) Reduce their number.")
exit()
next_cb, next_cb_setter = avail_cbs.pop(), avail_cb_setters.pop()
self.return_value = polariz['sigma']
self.double_vec = material.where ## redirect the double_vec() function callback
next_cb_setter(self.__disown__())
if "lorentzian_susceptibility" in dir(meep):
## for meep 1.2 or newer
structure.add_susceptibility(next_cb, meep.E_stuff,
meep.lorentzian_susceptibility(polariz['omega']/c, polariz['gamma']/c))
#else:todo: fix in python-meep
#print dir(meep)
#structure.add_susceptibility(next_cb, meep.E_stuff,
#meep.drude_susceptibility(polariz['omega']/c, polariz['gamma']/c))
else:
## for meep 1.1.1 or older
structure.add_polarizability(next_cb, polariz['omega']/c, polariz['gamma']/c)
def TestMaterials(self):
""" Call the where() function for each material, in order to make sure there are no errors
(SWIG callback does not report where the error occured, it just crashes) """
for material in self.materials:
for x in np.linspace(-self.size_x/2, self.size_x/2, 1):
for y in np.linspace(-self.size_y/2, self.size_y/2, 1):
for z in np.linspace(-self.size_z/2, self.size_z/2, 1):
                        material.where(meep.vec(x, y, z))   ## we only need the call to succeed without crashing
#}}}
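## Illustrative sketch (not part of the original module): a minimal model subclass showing how
## register_locals() and the materials list are typically wired together. Material objects
## (each providing .eps, .pol and .where(r)) are assumed to be defined elsewhere.
class _ExampleEmptyModel(AbstractMeepModel):
    def __init__(self, comment="", simtime=50e-12, resolution=4e-6,
            size_x=100e-6, size_y=100e-6, size_z=100e-6):
        AbstractMeepModel.__init__(self)
        self.simulation_name = "ExampleEmpty"   ## register_locals() appends each parameter to this name
        self.register_locals(locals())          ## stores every constructor argument as an attribute
        self.materials = []                     ## with no materials, eps() returns vacuum everywhere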
def phys_to_float(s):#{{{
"""
Float() that also recognizes the short SI prefixes. Returns string if value is not a number.
    >>> phys_to_float('0.0121')
    0.0121
    >>> phys_to_float('12.1m')
    0.0121
    >>> phys_to_float('121e-4')
    0.0121
>>> phys_to_float('abcd')
'abcd'
"""
m = {'z':1e-21, 'a':1e-18, 'f':1e-15, 'p':1e-12, 'n':1e-9, 'u':1e-6, 'm':1e-3,
            'c':1e-2, 'd':1e-1, 'k':1e3, 'M':1e6, 'G':1e9, 'T':1e12, 'P':1e15, 'E':1e18, 'Z':1e21, 'Y':1e24}
try:
if s[-1] in '.0123456789':
return float(s)
elif s[-1] in m.keys():
return float(s[:-1]) * m[s[-1]]
else:
return s
except ValueError:
return s#}}}
def process_param(args):#{{{
""" Parse command-line parameters
Some of them control the simulation (`sim_param'), but all remaining will be passed to
the model (`model_param')
"""
sim_param = { 'frequency_domain':False,
'frequency': None,
'MaxIter': 5000,
'MaxTol': 1e-2,
'BiCGStab': 8 } ## BiCGStab order of 8 proved to have best performance
model_param = {}
for namevalue in args:
name, value = namevalue.split("=")
if name == "frequency":
sim_param['frequency'] = phys_to_float(value)
sim_param['frequency_domain'] = True
elif name == "maxtol": sim_param['MaxTol'] = phys_to_float(value)
elif name == "maxiter": sim_param['MaxIter'] = int(value)
elif name == "bicgstab": sim_param['BiCGStab'] = int(value)
else: ## all other parameters will be passed to the model:
model_param[name] = phys_to_float(value)
return sim_param, model_param
#}}}
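## Typical use (sketch): pass everything after the script name, e.g. running
##     python simulation.py resolution=2u simtime=50p frequency=1T comment=test
## and calling
##     sim_param, model_param = process_param(sys.argv[1:])
## yields sim_param['frequency_domain']==True, sim_param['frequency']==1e12, and puts all
## unrecognized name=value pairs (resolution, simtime, comment, ...) into model_param.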
## Geometrical primitives to help defining the geometry
def in_xslab(r,cx,d):#{{{
return (abs(r.x()-cx) < d/2)
def in_yslab(r,cy,d):
return (abs(r.y()-cy) < d/2)
def in_zslab(r,cz,d):
return (abs(r.z()-cz) < d/2)
def in_xcyl(r,cy,cz,rad):
return ((r.y()-cy)**2+(r.z()-cz)**2) < rad**2
def in_ycyl(r,cx,cz,rad):
return ((r.x()-cx)**2+(r.z()-cz)**2) < rad**2
def in_zcyl(r,cx,cy,rad):
return ((r.x()-cx)**2+(r.y()-cy)**2) < rad**2
def in_sphere(r,cx,cy,cz,rad):
return ((cx-r.x())**2 + (cy-r.y())**2 + (cz-r.z())**2)**.5 < rad
def in_ellipsoid(r,cx,cy,cz,rad,ex):
xd, yd, zd = (cx-r.x()), (cy-r.y()), (cz-r.z())
#return ((xd+yd)**2/2*ex + (xd-yd)**2/2/ex + zd**2)**.5 < rad
return ((xd+yd)**2/2.*ex**2 + (xd-yd)**2/2./ex**2 + zd**2)**.5 < rad
#def in_xcone(r,cy,cz,cx,d):
#return (abs(r.x()-cx) < d/2)
#}}}
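def in_perforated_slab_example(r, thickness=20e-6, hole_radius=5e-6):
    """ Illustrative sketch (not used by the module): the primitives above are meant to be combined
    with boolean logic inside a material's where() callback, here a slab with a cylindrical hole
    drilled along the z-axis. """
    return in_zslab(r, cz=0, d=thickness) and not in_zcyl(r, cx=0, cy=0, rad=hole_radius)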
## Use the same dispersive materials for time- and frequency-domain simulation
def permittivity2conductivity(complex_eps, freq):#{{{
"""
    The complex permittivity can also express the conductivity of the sample (in the same
    manner as dielectric losses) through the relation:
        complex_eps = real_eps - 1j * conductivity / (frequency * 2*pi * epsilon_0)
    Therefore it can be inverted to recover the classic D-conductivity:
        conductivity = -Im(complex_eps) * frequency * 2*pi * epsilon_0
In order to simulate any lossy medium with the freq-domain solver, we invert this relation
to obtain a (nondispersive) conductivity for one frequency. But it does not give the same results
as time-domain simulation.
What we know:
function of c, f, 2pi, eps0, eps.im/eps.r
should give dimension 1 to feed unitless meep
should be proportional to eps.im/eps.r
should give ca. 50000 for omega = 2pi * 800 GHz and eps.im/eps.r=0.02
=> should give 2.5e6 for (eps.im/eps.r=1)
should be proportional to frequency omega
=> should give 5e-7 for omega = 1 and (eps.im/eps.r=1)
already was pre-divided for meep by c = 3e8 (which counts here)
=> in real life it shall be 3e-6 * 3e8 = 148
should be proportional to epsilon0 [C/Vsm], which is similar in units to conductivity
=> if epsilon0 was 1, it would be 1.7e13 -> roughly c**2
"""
# return complex_eps.imag * freq * 2*np.pi * epsilon_0 * complex_eps.real ## orig. idea
# return complex_eps.imag * freq * 2*np.pi * epsilon_0 * complex_eps.real ## also wrong
# return complex_eps.imag / complex_eps.real * 2*np.pi * c
#return complex_eps.imag / complex_eps.real * 6.28*freq * 8.85e-12 * c
magic_constant = 1.65e13 ## A. K. A. bulgarian constant...
return complex_eps.imag / complex_eps.real * 6.28 * freq / c * 8.85e-12 * magic_constant
#}}}
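## Sanity check (sketch): for a weakly lossy dielectric with eps = 2+0.04j (i.e. eps.im/eps.r = 0.02)
## at 800 GHz, permittivity2conductivity(2+.04j, 800e9) evaluates to roughly 5e4, which matches the
## calibration target quoted in the docstring above.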
def analytic_eps(mat, freq):#{{{
complex_eps = mat.eps
for polariz in mat.pol:
complex_eps += polariz['sigma'] * polariz['omega']**2 / (polariz['omega']**2 - freq**2 - 1j*freq*polariz['gamma'])
return complex_eps # + sum(0)
#}}}
class MyHiFreqPermittivity(meep.Callback):#{{{
def __init__(self, model, frequency):
meep.Callback.__init__(self)
self.model = model
self.frequency = frequency
def double_vec(self, r):
for material in self.model.materials:
if material.where(r):
return analytic_eps(material, self.frequency).real
else: return 1
#}}}
class MyConductivity(meep.Callback):#{{{
def __init__(self, model, frequency):
meep.Callback.__init__(self)
self.model = model
self.frequency = frequency
def double_vec(self, r):
for material in self.model.materials:
if material.where(r):
return permittivity2conductivity(analytic_eps(material, self.frequency), self.frequency)
else: return 0
#}}}
def plot_eps(to_plot, filename="epsilon.png", plot_conductivity=False, freq_range=(1e9, 1e17), mark_freq=[]):#{{{
""" Plots complex permittivity of the materials to a PNG file
Accepts list of materials
"""
from scipy.constants import epsilon_0
import matplotlib
matplotlib.use('Agg') ## Enable plotting even in the GNU screen session
import matplotlib.pyplot as plt
plt.figure(figsize=(8,6))
frequency = 10**np.arange(np.log10(freq_range[0]), np.log10(freq_range[1]), .001)
colors = ['#554400', '#004400', '#003366', '#000088', '#440077',
'#661100', '#AA8800', '#00AA00', '#0099DD', '#2200FF',
'#8800DD', '#BB3300']
subplotnumber = 2 if plot_conductivity else 1
for material in list(to_plot):
plt.subplot(subplotnumber,1,1)
if colors: color = colors.pop()
else: color = 'black'
label = getattr(material, 'shortname', material.name)
#print material
eps = analytic_eps(material, frequency)
#print eps, frequency
print len(eps), len(frequency)
#R = abs((1-eps**.5)/(1+eps**.5))**2 ## Intensity reflectivity
plt.plot(frequency, np.real(eps), color=color, label=label, ls='-')
plt.plot(frequency, np.imag(eps), color=color, label="", ls='--')
plt.ylabel(u"solid: Re($\\varepsilon_r$), dashed: Im($\\varepsilon_r$) ")
#plt.ylabel(u"Intensity reflectivity")
for mfreq in mark_freq: plt.plot([mfreq,mfreq], [-1,1])
plt.yscale('symlog')
plt.xscale('log')
plt.legend(loc='lower left', prop={'size':7});
#plt.ylim(ymin=1e-2);
plt.grid(True)
if plot_conductivity:
plt.subplot(subplotnumber,1,2)
label = ""
cond = eps * frequency * epsilon_0 / 1j
plt.plot(frequency, np.real(cond), color=color, label=label, ls='-')
plt.plot(frequency, np.imag(cond), color=color, label="", ls='--')
plt.ylabel(u"$Re\\sigma$ (solid), Im$\\sigma$ (dashed)")
plt.yscale('symlog'); plt.xscale('log'); plt.legend(); plt.grid(True)
## Finish the graph
plt.xlabel(u"Frequency [Hz]")
#plt.xlim((, ))
plt.savefig(filename, bbox_inches='tight')
#}}}
def lorentzian_unstable_check_new(model, dt, quit_on_warning=True): #{{{
for mat in model.materials:
eps_ts = analytic_eps(mat, 1/dt/np.pi)
if np.real(eps_ts)<0:
meep.master_printf("Warning: for material '%s', the permittivity is negative at timestepping frequency eps(1/pi/dt)=eps(%g)=%s.\n" % \
(mat.name, 1/dt/np.pi, eps_ts.__str__()));
if quit_on_warning: quit()
for pol in mat.pol:
omega_0, gamma = pol['omega'], pol['gamma']
if (omega_0 > gamma/2):
z2 = np.sqrt(gamma*gamma + 4*omega_0*omega_0)/2
else:
z2 = gamma/2 + np.sqrt(gamma*gamma - 4*omega_0*omega_0)/2
if z2 > 1/dt/np.pi:
meep.master_printf("Warning: for material '%s', the oscillator pole having magnitude |z|=%g will be probably unstable when 1/pi/dt=%g.\n" % \
(mat.name, z2, 1/dt/np.pi));
if quit_on_warning: quit()
#}}}
## Useful to make 3D snapshots of the fields
def run_bash(cmd, anyprocess=False): #{{{
if meep.my_rank() == 0 or anyprocess:
#meep.master_printf("CMD: " + cmd+ "\n")
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
out = p.stdout.read().strip()
return out
#}}}
class SnapshotMaker(): #{{{
"""
Saves the field vectors to a HDF5/VTK file at a given time.
This function unfortunately requires command-line tools from Linux system.
"""
def __init__(self, field=None, snapshot_times=[], outputdir=None, volume=None):
""" Remember the time when to take a snapshot. A list of two or more numbers may be provided. """
self.field = field
self.snapshot_times = snapshot_times
self.outputdir = outputdir
self.volume = volume
if not os.path.exists(self.outputdir): run_bash("mkdir -p %s" % self.outputdir)
meep.master_printf("Registered %d time points(s) to take snapshot\n" % len(self.snapshot_times))
def poll(self, now=None):
""" Check if the time has come to make a snapshot. """
if len(self.snapshot_times)>0 and now > self.snapshot_times[0]:
self.take_snapshot(now)
self.snapshot_times[0:1] = [] ## Remove the current snapshot now from the list of remaining times
def take_snapshot(self, now):
"""
This function creates a directory, exports the dielectric structure, then exports the electric
and magnetic field vectors to separate HDF5 files. Finally it converts the fields to VTK format
(to make it readable e.g. by Mayavi2 or Paraview). Note that it is not only called by self.poll(),
but it may be also called at will anywhere in your code.
"""
meep.master_printf(" * Saving field snapshot at time=%.4e... \n" % now)
## Export the dielectric structure so that we can visually verify it
structure_filename = os.path.join(self.outputdir, 'structure') ## name of the file to save eps (without extension)
#if meep.my_rank() == 0 and not os.path.exists(self.outputdir): os.mkdir(self.outputdir)
if not os.path.exists(self.outputdir): run_bash("mkdir -p %s" % self.outputdir, anyprocess=True) ## NO DATA
#if not os.path.exists(structure_filename):
#if meep.my_rank() ==0 :
#if not "outputfile" in locals():
outputfile = meep.prepareHDF5File(structure_filename+'.h5')
self.field.output_hdf5(meep.Dielectric, self.volume, outputfile)
del(outputfile)
## Export the fields snapshot
snapshotfiles = []
for (component, compname) in [(meep.Ex, "Ex"), (meep.Ey, "Ey"), (meep.Ez, "Ez"),
(meep.Hx, "Hx"), (meep.Hy, "Hy"), (meep.Hz, "Hz")]:
snapshotfiles.append("%s/snapshot_%s_t%e.h5" % (self.outputdir, compname, now))
snapshotfile = meep.prepareHDF5File(snapshotfiles[-1])
self.field.output_hdf5(component, self.volume, snapshotfile, 1)
del(snapshotfile)
## Convert the files for Mayavi2; join E-fields and H-fields components into vector fields
run_bash("h5tovtk %s.h5 -o %s.vtk" % (structure_filename, structure_filename))
run_bash("h5tovtk %s -t 0 -o %s/t%0.3e_Evec.vtk" % (" ".join(snapshotfiles[:3]), self.outputdir, now)) ## todo: use path.join
run_bash("h5tovtk %s -t 0 -o %s/t%0.3e_Hvec.vtk" % (" ".join(snapshotfiles[3:]), self.outputdir, now))
# (Note: the -t parameter selects real part only)
#}}}
class SliceMaker(): #{{{
"""
Saves the field vectors from a slice to a HDF5 file during the simulation. The slice may be specified either
by two-dimensional meep.volume() object, or by a normal ("x", "y" or "z") and the slice position on this normal.
After simulation ends, the data are exported as GIF animation and 3D VTK file.
Optionally, you may disable these output formats and/or enable output to many many PNG files or to a 3D HDF file.
This function unfortunately requires command-line tools from Linux system.
"""
def __init__(self, field=None, component=meep.Ex, timebounds=(0, np.inf), timestep=0,
volume=None, normal=None, position=None, model=None, pad=0,
outputdir="", name=None, outputPNGs=False, outputGIF=True, outputHDF=False, outputVTK=False):
""" """
self.field = field
self.outputdir = outputdir
self.component = component
self.timebounds = timebounds
self.timestep = timestep
self.outputPNGs = outputPNGs
self.outputGIF = outputGIF
self.outputHDF = outputHDF
self.outputVTK = outputVTK
if volume:
self.volume = volume
meep.master_printf("Will record slices at times %.3g, %.3g ... %.3g s \n" % (timebounds[0], timebounds[0]+timestep, timebounds[1]))
else:
#if not position:
#raise RuntimeError("Specify the position of the cut plane (on the axis perpendicular to it)")
if normal=="x":
self.volume = meep.volume(
meep.vec(position, -model.size_y/2+pad, -model.size_z/2+pad),
meep.vec(position, model.size_y/2-pad, model.size_z/2-pad))
elif normal=="y":
self.volume = meep.volume(
meep.vec(-model.size_x/2+pad, position, -model.size_z/2+pad),
meep.vec(model.size_x/2-pad, position, model.size_z/2-pad))
elif normal=="z":
self.volume = meep.volume(
meep.vec(-model.size_x/2+pad, -model.size_y/2+pad, position),
meep.vec(model.size_x/2-pad, model.size_y/2-pad, position))
#else:
#print normal
#raise RuntimeError("Specify the normal parameter as 'x', 'y' or 'z'")
meep.master_printf("Will record slices at %s=%.3g m, at times %g, %g ... %g s \n" \
% (normal, position, timebounds[0], timebounds[0]+timestep, timebounds[1]))
self.outputdir = outputdir
if not name:
if not position or not normal:
self.name = "SliceByVolume"
else:
self.name = "Slice_%s%.3e" % (normal, position)
else: self.name = name
self.images_number = 0
self.last_slice_time = 0.
#slices=[]
#slices.append({"name":"Ex_xz_slice", "component":meep.Ex, "geom":
if not os.path.exists(outputdir): run_bash("mkdir -p %s" % outputdir, anyprocess=True)
self.openfile = meep.prepareHDF5File("%s.h5" % (os.path.join(self.outputdir, self.name)))
def poll(self, now):
""" Check if the now has come to add a new slice """
if (now-self.last_slice_time > self.timestep) and (now > self.timebounds[0]) and (now < self.timebounds[1]):
self.images_number += 1
self.field.output_hdf5(self.component, self.volume, self.openfile, 1)
self.last_slice_time = now
def finalize(self):
#run_bash("cd %s; rm *png" % outputdir)
meep.master_printf("\n\nImages to gif\n")
del(self.openfile)
if self.outputGIF or self.outputPNGs:
run_bash("cd %s; h5topng -t 0:%d -R -Zc dkbluered -a yarg %s.h5 -S 1" % (self.outputdir, self.images_number-1, self.name))
if self.outputGIF:
run_bash("cd %s; convert -compress None -delay 10 *png %s.gif" % (self.outputdir, self.name))
if self.outputGIF and not self.outputPNGs:
run_bash("cd %s; rm %s*.png" % (self.outputdir, self.name))
if self.outputVTK: run_bash("cd %s; h5tovtk %s.h5 -o %s.vtk" % (self.outputdir, self.name, self.name))
if not self.outputHDF: run_bash("cd %s; rm %s.h5" % (self.outputdir, self.name))
#}}}
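## Typical wiring (sketch, with assumed names): monitors are created once the field object exists and
## are polled from the main timestepping loop; finalize() converts the HDF5 output when the run ends.
## Times are assumed to be passed in seconds, consistent with poll() above:
##     slices = [SliceMaker(field=f, component=meep.Ex, normal="z", position=0., model=model,
##                          timebounds=(0, model.simtime), timestep=model.simtime/50., outputdir="slices")]
##     while f.time()/c < model.simtime:
##         f.step()
##         for sl in slices: sl.poll(now=f.time()/c)
##     for sl in slices: sl.finalize()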
## Print the progress and estimated time
class Timer():#{{{
def __init__(self, simtime):
self.starttime = time.time()
self.simtime = simtime
meep.master_printf("\tSimulation time: %e [s] = %e time units\n" % (simtime, simtime*c))
self.reporttimes = [.001, .01, 0.03] + [t/10. for t in range(1,9)] + [2.]
def get_time(self):
return time.time()-self.starttime
def print_progress(self, now):
if now/self.simtime > self.reporttimes[0]:
meep.master_printf("Progress %.2f of expected total %d s\n" % \
(now / self.simtime, (self.simtime / now * self.get_time())))
self.reporttimes[0:1] = []
#}}}
def notify(title, run_time=None):#{{{
"""
Shows a bubble with notification that your results are about to be ready!
Requires python-notify installed, otherwise just quits
Note: you may also use similar call with libnotify-bin from your bash scripts:
run_bash('notify-send -t 3000 -i "face-glasses" "MEEP simulation finished %s" "%s"' % (timestring, title))
"""
if meep.my_rank() != 0: return
try:
if run_time: timestring = "in %d s" % int(run_time)
else: timestring = ""
import pynotify
pynotify.init("image")
n = pynotify.Notification("MEEP simulation finished %s" % (timestring), title, "face-glasses")
n.show()
except:
pass
#}}}
## Obtain and process the s-parameters of the structure (works for both time- and freq-domain)
def get_s_parameters(monitor1_Ex, monitor1_Hy, monitor2_Ex, monitor2_Hy,
frequency_domain=False, frequency=None, pad_zeros=0.0, maxf=np.inf, minf=0, Kx=0, Ky=0):
""" Returns the frequency, s11 (reflection) and s12 (transmission) in frequency domain """#{{{
## TODO allow omitting second monitor (-> returns s12=None)
t, Ex1 = monitor1_Ex.get_waveforms()
t, Hy1 = monitor1_Hy.get_waveforms()
t, Ex2 = monitor2_Ex.get_waveforms()
t, Hy2 = monitor2_Hy.get_waveforms()
## Hann-window fade-out to suppress spectral leakage
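    ## (the last 20 % of each record is multiplied by a raised-cosine ramp falling from 1 to 0)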
if not frequency_domain:
for field in (Ex1, Hy1, Ex2, Hy2 ):
field[t>max(t)*.8] = field[t>max(t)*.8]*(.5 + .5*np.cos(np.pi * (t[t>max(t)*.8]/max(t)-.8)/(1-.8)))
try:
import matplotlib
#matplotlib.use('Agg') ## Enable plotting even in the GNU screen session?
from matplotlib import pyplot as plt
plt.figure(figsize=(6,5))
#matplotlib.rc('text', usetex=True)
#matplotlib.rc('font', size=8)
#matplotlib.rc('text.latex', preamble = \
#'\usepackage{amsmath}, \usepackage{yfonts}, \usepackage{txfonts}, \usepackage{lmodern},')
plt.plot(t, abs(Ex1), label="Ex1")
plt.plot(t, abs(Hy1), label="Hy1")
plt.plot(t, abs(Ex2), label="Ex2")
plt.plot(t, abs(Hy2), label="Hy2")
np.savetxt("timedomain_Ex1_Kx%f.dat"%Kx, zip(t, Ex1), fmt="%.8e")
np.savetxt("timedomain_Ex2_Kx%f.dat"%Kx, zip(t, Ex2), fmt="%.8e")
#plt.ylim(1, 1e6)
plt.gca().set_ylim(ymin=1e-10)
plt.legend(prop={'size':10}, loc='upper right')
plt.xlabel('Time'); plt.ylabel('Field amplitudes, $|E|$, $|H|$')
#plt.title('Time-domain field amplitudes')
plt.yscale("log")
plt.savefig("sim_timedomain_debug.png", bbox_inches='tight')
except:
print "Timedomain plot failed"
## Obtain the electric and magnetic fields spectra
if frequency_domain: ## No need for FFT in frequency domain, just copy the value
freq = np.array([frequency])
(Ex1f, Hy1f, Ex2f, Hy2f) = (Ex1, Hy1, Ex2, Hy2)
else:
## Optionally extend the data range by zeros (for stable eff param retrieval)
target_len = 2**np.ceil(np.log(len(Ex1)*(1+pad_zeros))/np.log(2)) ## must be power of two for efficient FFT!
append_len = target_len - len(Ex1)
print 'target_len,append_len',target_len,append_len
#def pad(arr, length):
#appended = np.zeros(int(length*len(arr))) # * np.exp(- np.arange(0, len(,
#return np.append(arr, appended)
if pad_zeros:
#Ex1, Hy1, Ex2, Hy2 = map(lambda x: pad(x, pad_zeros), (Ex1, Hy1, Ex2, Hy2))
Ex1, Hy1, Ex2, Hy2 = map(lambda x: np.append(x, np.zeros(append_len)), (Ex1, Hy1, Ex2, Hy2))
print "app"
## Calculate the Fourier transform of the recorded time-domain waves
numpoints = len(Ex1)
    # np.fft.fftfreq(n, d): returns the Discrete Fourier Transform sample frequencies for n samples with spacing d
freq = np.arange(0., int(numpoints/2)) / numpoints / (t[1]-t[0]) # take positive frequency range only
## TODO use np.fft.fftfreq() instead
## TODO Positive frequency range should be separated just by truncation below, as 'minf => 0'
print "freq x"
#fftshift(x[, axes]) Shift the zero-frequency component to the center of the spectrum.
(Ex1f, Hy1f, Ex2f, Hy2f) = map(lambda x: np.fft.fft(np.real(x))[0:int(numpoints/2)], (Ex1, Hy1, Ex2, Hy2))
print "fft"
## Truncate the data ranges to allowed radiating angles, and possibly to minf<freq<maxf
    ## np.logical_and() combines only two conditions at a time (a third argument would be taken as `out`)
    truncated = np.logical_and(np.logical_and((Ky**2+Kx**2)<(2*np.pi*freq/c)**2, freq>minf), freq<maxf)
(Ex1f, Hy1f, Ex2f, Hy2f, freq) = map(lambda x: x[truncated], (Ex1f, Hy1f, Ex2f, Hy2f, freq))
print "trunc"
## Prepare the angles at which the wave propagates (dependent on frequency, Kx and Ky)
#rho = np.arcsin(Kx / (2*np.pi*freq/c)) TODO
beta0 = np.arcsin((Kx**2+Ky**2)**.5 / (2*np.pi*freq/c))
#print 'beta0', beta0
## Separate the forward and backward wave in frequency domain
## (Efield+Hfield)/2 -> forward wave amplitude,
## (Efield-Hfield)/2 -> backward wave amplitude
#in1, out1 = (Ex1f+Hy1f)/2, (Ex1f-Hy1f)/2 ## old: works only for perp. incidence beta0=0
#in2, out2 = (Ex2f-Hy2f)/2, (Ex2f+Hy2f)/2
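    ## at oblique incidence the recorded Hy amplitude is reduced by cos(beta0),
    ## so it is divided out below before separating forward and backward waves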
in1, out1 = (Ex1f+Hy1f/np.cos(beta0))/2, (Ex1f-Hy1f/np.cos(beta0))/2
in2, out2 = (Ex2f-Hy2f/np.cos(beta0))/2, (Ex2f+Hy2f/np.cos(beta0))/2
## Todo optimize cos(arcsin x ) = sqrt(1-x**2)
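    ##   e.g. (untested sketch): cos_beta0 = np.sqrt(1 - (Kx**2 + Ky**2) * (c/(2*np.pi*freq))**2)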
print "sep"
## TEMPORARY Plot spectral profile
try:
plt.figure(figsize=(4,3))
plt.plot(freq, abs(in1), label="in1")
plt.plot(freq, abs(out1), label="out1")
plt.plot(freq, abs(in2), label="in2")
plt.plot(freq, abs(out2), label="out2")
plt.xlim(0, 1e12)
plt.legend(prop={'size':10}, loc='lower left')
plt.xlabel('Frequency'); plt.ylabel('Transmitted amplitude')
#plt.title('Frequency-domain wave amplitudes')
plt.yscale("log")
plt.savefig("sim_ampli_debug_band.png", bbox_inches='tight')
except:
print "Frequency-domain plot failed"
## Check if PML works well (optional)
#meep.master_printf("PML reflection max: %.4e" % max(in2 / in1))
## Get the s-parameters
s11 = out1 / in1
s12 = out2 / in1
## Return the S-parameters (i. e. complex reflection and transmission)
return freq, s11, s12
#}}}
def get_phase(complex_data):#{{{
""" Unwraps and shifts the phase from Fourier transformation """
if len(complex_data) <= 1: return np.angle(complex_data)
phase = np.unwrap(np.angle(complex_data))
center_phase = phase[min(5, len(phase)-1)] ## 5 is chosen to avoid zero freq.
return phase-(round(center_phase/2/np.pi)*2*np.pi)
#}}}
class AmplitudeMonitorPlane():#{{{
""" Calculates an average of electric field and perpendicular magnetic field.
I asked for a similar field-averaging function built in MEEP, but it seems not to be implemented yet.
http://www.mail-archive.com/[email protected]/msg04447.html
    Note this implementation requires that the monitor planes lie in vacuum (where the impedance equals 1.0)
"""
def __init__(self, comp=None, size_x=None, size_y=None, z_position=None, Kx=0, Ky=0):
self.comp=comp
self.size_x = size_x
self.size_y = size_y
self.z_position = z_position
self.Kx = Kx
self.Ky = Ky
self.t = []
self.waveform = []
def average_field(self, field):
"""
Average field component in some plane, return amplitudes
This function is ineffective - it should be implemented in C++ in meep itself
5x5 grid is usually optimal (no visible difference between 10x10 grid and 5x5 grid)
TODO: This class implements a workaround for unavailable amplitude averaging in python-meep.
This implementation is ineffective and inflexible, but one would have to edit the MEEP source otherwise.
"""
xcount, ycount = (2, 1)
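        ## NOTE: only a 2 x 1 grid of sample points is averaged here; the
        ## docstring above suggests a 5 x 5 grid for smoother averaging (at a speed cost)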
field_sum = 0
# The mode function has the form of an oblique plane wave
#for x in [x0*self.size_x/xcount+(self.size_x/2/xcount)-self.size_x/2 for x0 in range(xcount)]:
#for y in [y0*self.size_y/ycount+(self.size_y/2/ycount)-self.size_y/2 for y0 in range(ycount)]:
#field_sum += (field.get_field(self.comp, meep.vec(x, y, self.z_position)) *
#np.exp(1j*(self.Kx*x + self.Ky*y)) )
#return field_sum/(xcount*ycount)
## New way (removes explicit cycle, few percent faster)
xr = [x0*self.size_x/xcount+(self.size_x/2/xcount)-self.size_x/2 for x0 in range(xcount)]
yr = [y0*self.size_y/ycount+(self.size_y/2/ycount)-self.size_y/2 for y0 in range(ycount)]
xm, ym = np.meshgrid(xr,yr)
points = zip(xm.flatten(), ym.flatten())
sum_ = sum(map(lambda pos: field.get_field(self.comp, meep.vec(pos[0], pos[1], self.z_position)), points))
return sum_/(xcount*ycount)
## Yet newer way
#xr = [x0*self.size_x/xcount+(self.size_x/2/xcount)-self.size_x/2 for x0 in range(xcount)]
#yr = [x0*self.size_x/xcount+(self.size_x/2/xcount)-self.size_x/2 for x0 in range(xcount)]
#xm, ym = np.meshgrid(xr,yr)
#v = meep.vec(0,0, self.z_position)
#points = zip(xm.flatten(), ym.flatten())
#sum_ = sum(map(lambda pos: _meep_mpi.fields_get_field(field, self.comp, v), points))
#sum_ = sum(map(lambda pos: _meep_mpi.fields_get_field(field, self.comp, _meep_mpi.new_vec(0,0,0)), points))
#cp =self.comp
#zp = self.z_position
#sum_ = sum(map(lambda pos: _meep_mpi.fields_get_field(field, cp, _meep_mpi.new_vec(pos[0], pos[1], zp)), points))
#return sum_/(xcount*ycount)
#def NEW_average_field_xy_plane(field, component, zpos, model): ## TODO use the internal integration of fields by MEEP
# TODO rewrite:
# (fields &f, linear_integrand_data &d, const volume &v, component cgrid)
# f.integrate(0, 0, linear_integrand, (void *) &d, v)
#integrate(meep::fields *,int,meep::component const *,meep::field_function,void *,meep::volume const &,double *)
#v = meep.volume(
#meep.vec(-model.size_x/2, -model.size_y/2, -model.size_z/2+model.pml_thickness),
#meep.vec(model.size_x/2, model.size_y/2, -model.size_z/2+model.pml_thickness))
#return field.integrate(1, [component], meep.one_vec, [], v)
#Possible C/C++ prototypes are:
#meep::fields::integrate(int,meep::component const *,meep::field_function,void *,meep::volume const &,double *)
#meep::fields::integrate(int,meep::component const *,meep::field_function,void *,meep::volume const &)
#meep::fields::integrate(int,meep::component const *,meep::field_rfunction,void *,meep::volume const &,double *)
#meep::fields::integrate(int,meep::component const *,meep::field_rfunction,void *,meep::volume const &)
#fields::integrate(int num_fvals, const component *components,
#field_function integrand,
#void *integrand_data_,
#const volume &where,
#double *maxabs
def record(self, field=None):
"""
Useful for time-domain simulation only
"""
self.t.append(field.time()/c)
self.waveform.append(self.average_field(field))
def get_waveforms(self):
""" Return the recorded waveform (in time domain) """
if len(self.t) <= 1:
t, result_wform = np.array(self.t), np.array(self.waveform)
else:
t = np.array(self.t[:-1])
## The FDTD calculation introduces half-step time shift between Ex and Hy. Compensated by averaging the Hy field
            ## with its value in the next timestep. The error is thereby reduced from first order to second order in the timestep.
## See http://ab-initio.mit.edu/wiki/index.php/Synchronizing_the_magnetic_and_electric_fields
if meep.is_magnetic(self.comp) or meep.is_B(self.comp):
result_wform = np.array(self.waveform[:-1])/2. + np.array(self.waveform[1:])/2.
else:
result_wform = np.array(self.waveform[:-1])
return t, result_wform
## time,
## TODO this will have to be modified in order to account for oblique incidence
## TODO take into account the medium impedance (... divide the Hfield)
#}}}
class AmplitudeMonitorPoint(AmplitudeMonitorPlane):#{{{
""" Calculates an average of electric field and perpendicular magnetic field.
I asked for a similar field-averaging function built in MEEP, but it seems not to be implemented yet.
http://www.mail-archive.com/[email protected]/msg04447.html
Note this implementation requires the planes are in vacuum (where impedance = 1.0)
"""
def __init__(self, Ecomp=None, Hcomp=None, pos=None):
self.Ecomp=Ecomp
self.Hcomp=Hcomp
self.pos = pos ## where to record the field
self.t = []
self.Efield = []
self.Hfield = []
def get_amplitude(self, field, component):
""" Record field in some point. No averaging here, but (inherited) time recording is pretty useful. """
#count = 5
#field_sum = 0
#for x in [x0*self.size_x/count+(self.size_x/2/count)-self.size_x/2 for x0 in range(count)]:
#for y in [y0*self.size_y/count+(self.size_y/2/count)-self.size_y/2 for y0 in range(count)]:
#field_sum += field.get_field(component, meep.vec(x, y, self.z_position))
return field.get_field(component, self.pos)
#}}}
def savetxt(name="output.dat", freq=None, s11=None, s12=None, model=None, polar_notation=True): #{{{
""" Saves the s-parameters to a file including comments """
with open(model.simulation_name+".dat", "w") as outfile:
outfile.write("#Parameters Parameters\n")
outfile.write("#param layer_thickness[m],%.6e\n" % (model.monitor_z2 - model.monitor_z1 - 2*model.padding))
if "interesting_frequencies" in dir(model): interest_freq = model.interesting_frequencies
else: interest_freq = (0, model.srcFreq+model.srcWidth)
outfile.write("#param plot_freq_min[Hz],%.3e\n" % interest_freq[0])
outfile.write("#param plot_freq_max[Hz],%.3e\n" % interest_freq[1])
outfile.write("#param simulation_orig_name,%s\n" % model.simulation_name)
outfile.write(model.parameterstring)
if polar_notation:
## Convert to polar notation
s11amp, s12amp, s11phase, s12phase = abs(s11), abs(s12), get_phase(s11), get_phase(s12)
## Save polar
outfile.write("#x-column Frequency [Hz]\n#Column Reflection amplitude\n#Column Reflection phase\n" + \
"#Column Transmission amplitude\n#Column Transmission phase\n")
np.savetxt(outfile, zip(freq, s11amp, s11phase, s12amp, s12phase), fmt="%.8e")
else:
## Save cartesian
# TODO should save in such format that PKGraph understands complex values
outfile.write("#x-column Frequency [Hz]\n#Column Reflection Re\n#Column Reflection Im\n" + \
"#Column Transmission Re\n#Column Transmission Im\n")
np.savetxt(outfile, zip(freq, s11.real, s11.imag, s12.real, s12.imag), fmt="%.8e")
#}}}
def get_simulation_name(argindex=1): #{{{
"""Get the name of the last simulation run.
Priority: 1) parameter, 2) last_simulation_name.txt, 3) working directory"""
cwd = os.getcwd()
if len(sys.argv)>argindex and sys.argv[argindex] != "-" and __name__ == "__main__":
print "Parameter passed:", sys.argv[argindex]
last_simulation_name = sys.argv[argindex]
elif os.path.exists(os.path.join(cwd, 'last_simulation_name.txt')):
print "Loading from", os.path.join(cwd, 'last_simulation_name.txt')
last_simulation_name = os.path.join(cwd, open(os.path.join(cwd, 'last_simulation_name.txt'),'r').read().strip())
else:
print "Error: No input file provided and 'last_simulation_name.txt' not found!"
last_simulation_name = cwd
if (last_simulation_name[-4:] == ".dat"): last_simulation_name = last_simulation_name[:-4] # strip the .dat extension
return last_simulation_name
#}}}
def load_rt(filename, layer_thickness=None, plot_freq_min=None, plot_freq_max=None, truncate=True): #{{{
""" Loads the reflection and transmission spectra and simulation settings
    Returns:
        * frequency axis
        * reflection s11 and transmission s12, each split into amplitude and phase np arrays
        * the frequency limits for plotting and selected header parameters
Compatible with the PKGraph text data file with polar data:
* parameters in header like: #param name,value
* column identification like: #column Ydata
* data columns in ascii separated by space
Expects polar data with columns: frequency, s11 ampli, s11 phase, s12 ampli, s12 phase
"""
with open(filename+'.dat') as datafile:
for line in datafile:
if line[0:1] in "0123456789": break # end of file header
value = line.replace(",", " ").split()[-1] # the value of the parameter will be separated by space or comma
if ("layer_thickness" in line) and (layer_thickness == None): d = float(value)
if ("plot_freq_min" in line) and (plot_freq_min == None): plot_freq_min = float(value)
if ("plot_freq_max" in line) and (plot_freq_max == None): plot_freq_max = float(value)
xlim = (plot_freq_min, plot_freq_max)
(freq, s11amp, s11phase, s12amp, s12phase) = \
map(lambda a: np.array(a, ndmin=1), np.loadtxt(filename+".dat", unpack=True))
## Limit the frequency range to what will be plotted (recommended)
#TODO better:
#truncated = np.logical_and(freq>minf, freq<maxf)
#(a, b, c, freq) = map(lambda x: x[truncated], (a, b, c, freq))
if truncate:
(d0,d1) = np.interp((plot_freq_min, plot_freq_max), freq, range(len(freq)))
(freq, s11amp, s11phase, s12amp, s12phase) = \
map(lambda a: a[int(d0):int(d1)], (freq, s11amp, s11phase, s12amp, s12phase))
    return freq, s11amp, s11phase, s12amp, s12phase, xlim, (d, plot_freq_min, plot_freq_max)
#}}}
| mit |
mmaelicke/scikit-gstat | skgstat/DirectionalVariogram.py | 1 | 29843 | """
Directional Variogram
"""
import numpy as np
from scipy.spatial.distance import pdist
from .Variogram import Variogram
from skgstat import plotting
from .MetricSpace import MetricSpace, MetricSpacePair
class DirectionalVariogram(Variogram):
"""DirectionalVariogram Class
Calculates a variogram of the separating distances in the given
coordinates and relates them to one of the semi-variance measures of the
given dependent values.
    The directional version of a Variogram will only form pairs of points
that share a specified spatial relationship.
"""
def __init__(self,
coordinates=None,
values=None,
estimator='matheron',
model='spherical',
dist_func='euclidean',
bin_func='even',
normalize=False,
fit_method='trf',
fit_sigma=None,
directional_model='triangle',
azimuth=0,
tolerance=45.0,
bandwidth='q33',
use_nugget=False,
maxlag=None,
n_lags=10,
verbose=False,
**kwargs
):
r"""Variogram Class
Directional Variogram. The calculation is not performant and not
tested yet.
Parameters
----------
coordinates : numpy.ndarray
Array of shape (m, n). Will be used as m observation points of
n-dimensions. This variogram can be calculated on 1 - n
dimensional coordinates. In case a 1-dimensional array is passed,
a second array of same length containing only zeros will be
stacked to the passed one.
values : numpy.ndarray
Array of values observed at the given coordinates. The length of
the values array has to match the m dimension of the coordinates
array. Will be used to calculate the dependent variable of the
variogram.
estimator : str, callable
String identifying the semi-variance estimator to be used.
Defaults to the Matheron estimator. Possible values are:
* matheron [Matheron, default]
* cressie [Cressie-Hawkins]
* dowd [Dowd-Estimator]
* genton [Genton]
* minmax [MinMax Scaler]
* entropy [Shannon Entropy]
            If a callable is passed, it has to accept an array of absolute
differences, aligned to the 1D distance matrix (flattened upper
triangle) and return a scalar, that converges towards small
values for similarity (high covariance).
model : str
String identifying the theoretical variogram function to be used
to describe the experimental variogram. Can be one of:
* spherical [Spherical, default]
* exponential [Exponential]
* gaussian [Gaussian]
* cubic [Cubic]
* stable [Stable model]
* matern [Matérn model]
* nugget [nugget effect variogram]
dist_func : str
String identifying the distance function. Defaults to
'euclidean'. Can be any metric accepted by
scipy.spatial.distance.pdist. Additional parameters are not (yet)
passed through to pdist. These are accepted by pdist for some of
the metrics. In these cases the default values are used.
bin_func : str
.. versionchanged:: 0.3.8
added 'fd', 'sturges', 'scott', 'sqrt', 'doane'
String identifying the binning function used to find lag class
edges. All methods calculate bin edges on the interval [0, maxlag[.
Possible values are:
* `'even'` (default) finds `n_lags` same width bins
* `'uniform'` forms `n_lags` bins of same data count
* `'fd'` applies Freedman-Diaconis estimator to find `n_lags`
* `'sturges'` applies Sturge's rule to find `n_lags`.
* `'scott'` applies Scott's rule to find `n_lags`
* `'doane'` applies Doane's extension to Sturge's rule to find `n_lags`
* `'sqrt'` uses the square-root of :func:`distance <skgstat.Variogram.distance>`. as `n_lags`.
More details are given in the documentation for :func:`set_bin_func <skgstat.Variogram.set_bin_func>`.
normalize : bool
Defaults to False. If True, the independent and dependent
variable will be normalized to the range [0,1].
fit_method : str
String identifying the method to be used for fitting the
theoretical variogram function to the experimental. More info is
given in the Variogram.fit docs. Can be one of:
* 'lm': Levenberg-Marquardt algorithm for unconstrained
              problems. This is the faster algorithm, but the fitting of a
              variogram is not an unconstrained problem.
* 'trf': Trust Region Reflective function for non-linear
constrained problems. The class will set the boundaries
itself. This is the default function.
fit_sigma : numpy.ndarray, str
Defaults to None. The sigma is used as measure of uncertainty
during variogram fit. If fit_sigma is an array, it has to hold
n_lags elements, giving the uncertainty for all lags classes. If
fit_sigma is None (default), it will give no weight to any lag.
Higher values indicate higher uncertainty and will lower the
            influence of the corresponding lag class for the fit.
If fit_sigma is a string, a pre-defined function of separating
distance will be used to fill the array. Can be one of:
* 'linear': Linear loss with distance. Small bins will have
higher impact.
* 'exp': The weights decrease by a e-function of distance
            * 'sqrt': The weights decrease by the square root of distance
* 'sq': The weights decrease by the squared distance.
More info is given in the Variogram.fit_sigma documentation.
directional_model : string, function
The model used for selecting all points fulfilling the
directional constraint of the Variogram. A predefined
model can be selected by passing the model name as string.
Optionally a callable accepting the difference vectors
between points in polar form as angles and distances and
returning a mask array can be passed. In this case, the
azimuth, tolerance and bandwidth has to be incorporated by
hand into the model.
* 'compass': includes points in the direction of the
azimuth at given tolerance. The bandwidth parameter will be
ignored.
* 'triangle': constructs a triangle with an angle of
              tolerance at the point of interest, unioned with a rectangle
              parallel to the azimuth once the hypotenuse length reaches
              the bandwidth.
* 'circle': constructs a half circle touching the point of
interest, dislocating the center at the distance of
              bandwidth in the direction of azimuth. The half circle is
              unioned with a rectangle parallel to the azimuth.
Visual representations, usage hints and implementation specifics
are given in the documentation.
azimuth : float
The azimuth of the directional dependence for this Variogram,
given as an angle in **degree**. The East of the coordinate
plane is set to be at 0° and is counted clockwise to 180° and
counter-clockwise to -180°. Only Points lying in the azimuth of a
specific point will be used for forming point pairs.
tolerance : float
            The tolerance is given as an angle in **degree**. Points being
dislocated from the exact azimuth by half the tolerance will be
accepted as well. It's half the tolerance as the point may be
dislocated in the positive and negative direction from the azimuth.
bandwidth : float
Maximum tolerance acceptable in **coordinate units**, which is
usually meter. Points at higher distances may be far dislocated
from the azimuth in terms of coordinate distance, as the
            tolerance is defined as an angle. The bandwidth defines a maximum
width for the search window. It will be perpendicular to and
bisected by the azimuth.
use_nugget : bool
            Defaults to False. If True, a nugget effect will be added to all
Variogram.models as a third (or fourth) fitting parameter. A
nugget is essentially the y-axis interception of the theoretical
variogram function.
maxlag : float, str
Can specify the maximum lag distance directly by giving a value
larger than 1. The binning function will not find any lag class
with an edge larger than maxlag. If 0 < maxlag < 1, then maxlag
is relative and maxlag * max(Variogram.distance) will be used.
In case maxlag is a string it has to be one of 'median', 'mean'.
Then the median or mean of all Variogram.distance will be used.
Note maxlag=0.5 will use half the maximum separating distance,
this is not the same as 'median', which is the median of all
separating distances
n_lags : int
Specify the number of lag classes to be defined by the binning
function.
verbose : bool
Set the Verbosity of the class. Not Implemented yet.
Keyword Arguments
-----------------
entropy_bins : int, str
.. versionadded:: 0.3.7
If the `estimator <skgstat.Variogram.estimator>` is set to
`'entropy'` this argument sets the number of bins, that should be
used for histogram calculation.
percentile : int
.. versionadded:: 0.3.7
If the `estimator <skgstat.Variogram.estimator>` is set to
`'entropy'` this argument sets the percentile to be used.
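        Examples
        --------
        A minimal, illustrative instantiation on synthetic data (a sketch
        only; the resulting variogram depends entirely on the random values)::
            import numpy as np
            from skgstat import DirectionalVariogram
            coords = np.random.RandomState(42).rand(50, 2) * 100.
            vals = np.random.RandomState(21).normal(10, 2, size=50)
            DV = DirectionalVariogram(coords, vals, azimuth=90, tolerance=30)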
"""
# Before we do anything else, make kwargs available
self._kwargs = self._validate_kwargs(**kwargs)
# FIXME: Call __init__ of baseclass?
# No, because the sequence at which the arguments get initialized
# does matter. There is way too much transitive dependence, thus
# it was easiest to copy the init over.
self._direction_mask_cache = None
if not isinstance(coordinates, MetricSpace):
coordinates = np.asarray(coordinates)
coordinates = MetricSpace(coordinates.copy(), dist_func)
# FIXME: Currently _direction_mask / _angles / _euclidean_dist don't get correctly calculated for sparse dspaces
# coordinates = MetricSpace(coordinates.copy(), dist_func, maxlag if maxlag and not isinstance(maxlag, str) and maxlag >= 1 else None)
else:
assert self.dist_func == coordinates.dist_metric, "Distance metric of variogram differs from distance metric of coordinates"
assert coordinates.max_dist is None
# Set coordinates
self._X = coordinates
# pairwise difference
self._diff = None
# set verbosity
self.verbose = verbose
# set values
self._values = None
# calc_diff = False here, because it will be calculated by fit() later
self.set_values(values=values, calc_diff=False)
# distance matrix
self._dist = None
# set distance calculation function
self._dist_func_name = None
self.set_dist_function(func=dist_func)
# Angles and euclidean distances used for direction mask calculation
self._angles = None
self._euclidean_dist = None
# lags and max lag
self.n_lags = n_lags
self._maxlag = None
self.maxlag = maxlag
# estimator can be function or a string
self._estimator = None
self.set_estimator(estimator_name=estimator)
# model can be function or a string
self._model = None
self.set_model(model_name=model)
# azimuth direction
self._azimuth = None
self.azimuth = azimuth
# azimuth tolerance
self._tolerance = None
self.tolerance = tolerance
# tolerance bandwidth
self._bandwidth = None
self.bandwidth = bandwidth
# set the directional model
self._directional_model = None
self.set_directional_model(model_name=directional_model)
# the binning settings
self._bin_func = None
self._groups = None
self._bins = None
self.set_bin_func(bin_func=bin_func)
# specify if the lag should be given absolute or relative to the maxlag
self._normalized = normalize
# set the fitting method and sigma array
self.fit_method = fit_method
self._fit_sigma = None
self.fit_sigma = fit_sigma
# set if nugget effect shall be used
self.use_nugget = use_nugget
# set attributes to be filled during calculation
self.cov = None
self.cof = None
# settings, not reachable by init (not yet)
self._cache_experimental = False
# do the preprocessing and fitting upon initialization
# Note that fit() calls preprocessing
self.fit(force=True)
def preprocessing(self, force=False):
self._calc_direction_mask_data(force)
self._calc_diff(force=force)
self._calc_groups(force=force)
def _calc_direction_mask_data(self, force=False):
r"""
Calculate directional mask data.
For this, the angle between the vector between the two
points, and east (see comment about self.azimuth) is calculated.
The result is stored in self._angles and contains the angle of each
point pair vector to the x-axis in radians.
Parameters
----------
force : bool
If True, a new calculation of all angles is forced, even if they
are already in the cache.
Notes
-----
        The masked data is in radians, while azimuth is given in degrees.
For the Vector between a point pair A,B :math:`\overrightarrow{AB}=u` and the
x-axis, represented by vector :math:`\overrightarrow{e} = [1,0]`, the angle
:math:`\Theta` is calculated like:
.. math::
            cos(\Theta) = \frac{u \circ e}{|u| \cdot |e|}
See Also
--------
`azimuth <skgstat.DirectionalVariogram.azimuth>`_
"""
# FIXME: This should be optimized for the sparse case (range << bbox(coordinates)),
# i.e. use the MetricSpace in self._X
# check if already calculated
if self._angles is not None and not force:
return
# if self.coordinates is of just one dimension, concat zeros.
if self.coordinates.ndim == 1:
_x = np.vstack(zip(self.coordinates, np.zeros(len(self.coordinates))))
elif self.coordinates.ndim == 2:
_x = self.coordinates
else:
raise NotImplementedError('N-dimensional coordinates cannot be handled')
        # for angles, we need Euclidean distance,
# no matter which distance function is used
# if self._dist_func_name == "euclidean":
# self._euclidean_dist = scipy.spatial.distance.squareform(self.distance_matrix)
# else:
self._euclidean_dist = pdist(_x, "euclidean")
# Calculate the angles
# (a - b).[1,0] = ||a - b|| * ||[1,0]|| * cos(v)
# cos(v) = (a - b).[1,0] / ||a - b||
# cos(v) = (a.[1,0] - b.[1,0]) / ||a - b||
scalar = pdist(np.array([np.dot(_x, [1, 0])]).T, np.subtract)
pos_angles = np.arccos(scalar / self._euclidean_dist)
# cos(v) for [2,1] and [2, -1] is the same,
# but v is not (v vs -v), fix that.
ydiff = pdist(np.array([np.dot(_x, [0, 1])]).T, np.subtract)
# store the angle or negative angle, depending on the
        # sign of the y difference
self._angles = np.where(ydiff >= 0, pos_angles, -pos_angles)
@property
def azimuth(self):
"""Direction azimuth
        Main direction for the selection of points in the formation of point
        pairs. East of the coordinate plane is defined to be 0° and the
        azimuth is counted clockwise up to 180° and counter-clockwise to -180°.
Parameters
----------
angle : float
New azimuth angle in **degree**.
Raises
------
        ValueError : in case angle < -180° or angle > 180°
"""
return self._azimuth
@azimuth.setter
def azimuth(self, angle):
if angle < -180 or angle > 180:
raise ValueError('The azimuth is an angle in degree and has to '
'meet -180 <= angle <= 180')
else:
self._azimuth = angle
# reset groups and mask cache on azimuth change
self._direction_mask_cache = None
self._groups = None
@property
def tolerance(self):
"""Azimuth tolerance
Tolerance angle of how far a point can be off the azimuth for being
still counted as directional. A tolerance angle will be applied to
the azimuth angle symmetrically.
Parameters
----------
angle : float
New tolerance angle in **degree**. Has to meet 0 <= angle <= 360.
Raises
------
ValueError : in case angle < 0 or angle > 360
"""
return self._tolerance
@tolerance.setter
def tolerance(self, angle):
if angle < 0 or angle > 360:
raise ValueError('The tolerance is an angle in degree and has to '
'meet 0 <= angle <= 360')
else:
self._tolerance = angle
# reset groups and mask on tolerance change
self._direction_mask_cache = None
self._groups = None
@property
def bandwidth(self):
"""Tolerance bandwidth
New bandwidth parameter. As the tolerance from azimuth is given as an
angle, point pairs at high distances can be far off the azimuth in
coordinate distance. The bandwidth limits this distance and has the
        unit of the coordinate system.
Parameters
----------
width : float
Positive coordinate distance.
Raises
------
ValueError : in case width is negative
"""
if self._bandwidth is None:
return 0
else:
return self._bandwidth
@bandwidth.setter
def bandwidth(self, width):
# check if quantiles is given
if isinstance(width, str):
# TODO document and handle more exceptions
q = int(width[1:])
self._bandwidth = np.percentile(self.distance, q)
elif width < 0:
raise ValueError('The bandwidth cannot be negative.')
elif width > np.max(self.distance):
print('The bandwidth is larger than the maximum separating '
'distance. Thus it will have no effect.')
else:
self._bandwidth = width
# reset groups and direction mask cache on bandwidth change
self._direction_mask_cache = None
self._groups = None
def set_directional_model(self, model_name):
"""Set new directional model
The model used for selecting all points fulfilling the
directional constraint of the Variogram. A predefined model
can be selected by passing the model name as string.
Optionally a callable accepting the difference vectors between
points in polar form as angles and distances and returning a
mask array can be passed. In this case, the azimuth, tolerance
and bandwidth has to be incorporated by hand into the model.
The predefined options are:
* 'compass': includes points in the direction of the azimuth at given
tolerance. The bandwidth parameter will be ignored.
* 'triangle': constructs a triangle with an angle of tolerance at the
          point of interest, unioned with a rectangle parallel to the azimuth,
          once the hypotenuse length reaches the bandwidth.
* 'circle': constructs a half circle touching the point of interest,
dislocating the center at the distance of bandwidth in the
          direction of azimuth. The half circle is unioned with a rectangle
          parallel to the azimuth.
Visual representations, usage hints and implementation specifics
are given in the documentation.
Parameters
----------
model_name : string, callable
The name of the predefined model (string) or a function
that accepts angle and distance arrays and returns a mask
array.
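        Examples
        --------
        An illustrative sketch of a custom model, assuming ``DV`` is an
        already instantiated DirectionalVariogram; it accepts only point
        pairs within ±5° of the x-axis (a 10° cone)::
            import numpy as np
            def eastward_only(angles, dists):
                return np.abs(angles) <= np.radians(5)
            DV.set_directional_model(eastward_only)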
"""
# handle predefined models
if isinstance(model_name, str):
if model_name.lower() == 'compass':
self._directional_model = self._compass
elif model_name.lower() == 'triangle':
self._directional_model = self._triangle
elif model_name.lower() == 'circle':
self._directional_model = self._circle
else:
raise ValueError('%s is not a valid model.' % model_name)
# handle callable
elif callable(model_name):
self._directional_model = model_name
else:
raise ValueError('The directional model has to be identified by a '
'model name, or it has to be the search area '
'itself')
# reset the groups as the directional model changed
self._groups = None
@property
def bins(self):
if self._bins is None:
# get the distances
d = self.distance.copy()
d[np.where(~self._direction_mask())] = np.nan
self._bins, n = self.bin_func(d, self._n_lags, self.maxlag)
# if the binning function returned an N, the n_lags need
# to be adjusted directly (not through the setter)
if n is not None:
self._n_lags = n
return self._bins.copy()
def to_gstools(self, *args, **kwargs):
raise NotImplementedError(
"Exporting DirectinalVariogram is currently not supported."
)
def _calc_groups(self, force=False):
super(DirectionalVariogram, self)._calc_groups(force=force)
# set to outside maxlag group
self._groups[np.where(~self._direction_mask())] = -1
# @jit
def _direction_mask(self, force=False):
"""Directional Mask
Array aligned to self.distance masking all point pairs which shall be
ignored for binning and grouping. The one dimensional array contains
all row-wise point pair combinations from the upper or lower triangle
of the distance matrix in case either of both is directional.
Returns
-------
mask : numpy.array
Array aligned to self.distance giving for each point pair
            combination a boolean value whether the point pair is directional or
not.
"""
if force or self._direction_mask_cache is None:
self._direction_mask_cache = self._directional_model(self._angles, self._euclidean_dist)
return self._direction_mask_cache
def pair_field(self, ax=None, cmap="gist_rainbow", points='all', add_points=True, alpha=0.3, **kwargs): # pragma: no cover
"""
Plot a pair field.
Plot a network graph for all point pairs that fulfill the direction
filter and lie within each others search area.
Parameters
----------
ax : matplotlib.Subplot
A matplotlib Axes object to plot the pair field onto.
If ``None``, a new new matplotlib figure will be created.
cmap : string
Any color-map name that is supported by matplotlib
points : 'all', int, list
If not ``'all'``, only the given coordinate (int) or
list of coordinates (list) will be plotted. Recommended, if
the input data is quite large.
add_points : bool
If True (default) The coordinates will be added as black points.
alpha : float
Alpha value for the colors to make overlapping vertices
visualize better. Defaults to ``0.3``.
"""
# get the backend
used_backend = plotting.backend()
if used_backend == 'matplotlib':
return plotting.matplotlib_pair_field(self, ax=ax, cmap=cmap, points=points, add_points=add_points, alpha=alpha, **kwargs)
elif used_backend == 'plotly':
return plotting.plotly_pair_field(self, fig=ax, points=points, add_points=add_points, alpha=alpha, **kwargs)
def _triangle(self, angles, dists):
r"""Triangular Search Area
Construct a triangular bounded search area for building directional
dependent point pairs. The Search Area will be located onto the
current point of interest and the local x-axis is rotated onto the
azimuth angle.
Parameters
----------
angles, dists : numpy.array
Vectors between point pairs in polar form (angle relative
to east in radians, length in coordinate space units)
Returns
-------
mask : numpy.array(bool)
Point pair mask, indexed as the results of
scipy.spatial.distance.pdist are.
Notes
-----
.. code-block:: text
C
/|\
a / | \ a
/__|h_\
A c B
The point of interest is C and c is the bandwidth. The angle at C
(gamma) is the tolerance. From this, a and then h can be calculated.
When rotated into the local coordinate system, the two points needed
to build the search area A,B are A := (h, 1/2 c) and B:= (h, -1/2 c)
a can be calculated like:
.. math::
a = \frac{c}{2 * sin\left(\frac{\gamma}{2}\right)}
See Also
--------
DirectionalVariogram._compass
DirectionalVariogram._circle
"""
absdiff = np.abs(angles + np.radians(self.azimuth))
absdiff = np.where(absdiff > np.pi, absdiff - np.pi, absdiff)
absdiff = np.where(absdiff > np.pi / 2, np.pi - absdiff, absdiff)
in_tol = absdiff <= np.radians(self.tolerance / 2)
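        # the perpendicular offset of the pair vector from the azimuth axis
        # (dists * sin(angular difference)) must stay within half the bandwidth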
in_band = self.bandwidth / 2 >= np.abs(dists * np.sin(np.abs(angles + np.radians(self.azimuth))))
return in_tol & in_band
def _circle(self, angles, dists):
r"""Circular Search Area
Construct a half-circled bounded search area for building directional
dependent point pairs. The Search Area will be located onto the
current point of interest and the local x-axis is rotated onto the
azimuth angle.
The radius of the half-circle is set to half the bandwidth.
Parameters
----------
angles, dists : numpy.array
Vectors between point pairs in polar form (angle relative
to east in radians, length in coordinate space units)
Returns
-------
mask : numpy.array(bool)
Point pair mask, indexed as the results of
scipy.spatial.distance.pdist are.
Raises
------
ValueError : In case the DirectionalVariogram.bandwidth is None or 0.
See Also
--------
DirectionalVariogram._triangle
DirectionalVariogram._compass
"""
raise NotImplementedError
def _compass(self, angles, dists):
r"""Compass direction direction mask
Construct a search area for building directional dependent point
pairs. The compass search area will **not** be bounded by the
bandwidth. It will include all point pairs at the azimuth direction
with a given tolerance. The Search Area will be located onto the
current point of interest and the local x-axis is rotated onto the
azimuth angle.
Parameters
----------
angles, dists : numpy.array
Vectors between point pairs in polar form (angle relative
to east in radians, length in coordinate space units)
Returns
-------
mask : numpy.array(bool)
Point pair mask, indexed as the results of
scipy.spatial.distance.pdist are.
See Also
--------
DirectionalVariogram._triangle
DirectionalVariogram._circle
"""
absdiff = np.abs(angles + np.radians(self.azimuth))
absdiff = np.where(absdiff > np.pi, absdiff - np.pi, absdiff)
absdiff = np.where(absdiff > np.pi / 2, np.pi - absdiff, absdiff)
return absdiff <= np.radians(self.tolerance / 2)
| mit |
pavanvidem/tools-iuc | tools/vsnp/vsnp_build_tables.py | 12 | 14719 | #!/usr/bin/env python
import argparse
import os
import re
import pandas
import pandas.io.formats.excel
from Bio import SeqIO
# Maximum columns allowed in a LibreOffice
# spreadsheet is 1024. Excel allows for
# 16,384 columns, but we'll set the lower
# number as the maximum. Some browsers
# (e.g., Firefox on Linux) are configured
# to use LibreOffice for Excel spreadsheets.
MAXCOLS = 1024
OUTPUT_EXCEL_DIR = 'output_excel_dir'
def annotate_table(table_df, group, annotation_dict):
for gbk_chrome, pro in list(annotation_dict.items()):
ref_pos = list(table_df)
ref_series = pandas.Series(ref_pos)
ref_df = pandas.DataFrame(ref_series.str.split(':', expand=True).values, columns=['reference', 'position'])
all_ref = ref_df[ref_df['reference'] == gbk_chrome]
positions = all_ref.position.to_frame()
# Create an annotation file.
annotation_file = "%s_annotations.csv" % group
with open(annotation_file, "a") as fh:
for _, row in positions.iterrows():
pos = row.position
try:
aaa = pro.iloc[pro.index.get_loc(int(pos))][['chrom', 'locus', 'product', 'gene']]
try:
chrom, name, locus, tag = aaa.values[0]
print("{}:{}\t{}, {}, {}".format(chrom, pos, locus, tag, name), file=fh)
except ValueError:
# If only one annotation for the entire
# chromosome (e.g., flu) then having [0] fails
chrom, name, locus, tag = aaa.values
print("{}:{}\t{}, {}, {}".format(chrom, pos, locus, tag, name), file=fh)
except KeyError:
print("{}:{}\tNo annotated product".format(gbk_chrome, pos), file=fh)
# Read the annotation file into a data frame.
annotations_df = pandas.read_csv(annotation_file, sep='\t', header=None, names=['index', 'annotations'], index_col='index')
# Remove the annotation_file from disk since both
# cascade and sort tables are built using the file,
# and it is opened for writing in append mode.
os.remove(annotation_file)
# Process the data.
table_df_transposed = table_df.T
table_df_transposed.index = table_df_transposed.index.rename('index')
table_df_transposed = table_df_transposed.merge(annotations_df, left_index=True, right_index=True)
table_df = table_df_transposed.T
return table_df
def excel_formatter(json_file_name, excel_file_name, group, annotation_dict):
pandas.io.formats.excel.header_style = None
table_df = pandas.read_json(json_file_name, orient='split')
if annotation_dict is not None:
table_df = annotate_table(table_df, group, annotation_dict)
else:
table_df = table_df.append(pandas.Series(name='no annotations'))
writer = pandas.ExcelWriter(excel_file_name, engine='xlsxwriter')
table_df.to_excel(writer, sheet_name='Sheet1')
writer_book = writer.book
ws = writer.sheets['Sheet1']
format_a = writer_book.add_format({'bg_color': '#58FA82'})
format_g = writer_book.add_format({'bg_color': '#F7FE2E'})
format_c = writer_book.add_format({'bg_color': '#0000FF'})
format_t = writer_book.add_format({'bg_color': '#FF0000'})
format_normal = writer_book.add_format({'bg_color': '#FDFEFE'})
formatlowqual = writer_book.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'})
format_ambigous = writer_book.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'})
format_n = writer_book.add_format({'bg_color': '#E2CFDD'})
rows, cols = table_df.shape
ws.set_column(0, 0, 30)
ws.set_column(1, cols, 2.1)
ws.freeze_panes(2, 1)
format_annotation = writer_book.add_format({'font_color': '#0A028C', 'rotation': '-90', 'align': 'top'})
# Set last row.
ws.set_row(rows + 1, cols + 1, format_annotation)
# Make sure that row/column locations don't overlap.
ws.conditional_format(rows - 2, 1, rows - 1, cols, {'type': 'cell', 'criteria': '<', 'value': 55, 'format': formatlowqual})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'cell', 'criteria': '==', 'value': 'B$2', 'format': format_normal})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'A', 'format': format_a})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'G', 'format': format_g})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'C', 'format': format_c})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'T', 'format': format_t})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'S', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'Y', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'R', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'W', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'K', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'M', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'N', 'format': format_n})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': '-', 'format': format_n})
format_rotation = writer_book.add_format({})
format_rotation.set_rotation(90)
for column_num, column_name in enumerate(list(table_df.columns)):
ws.write(0, column_num + 1, column_name, format_rotation)
format_annotation = writer_book.add_format({'font_color': '#0A028C', 'rotation': '-90', 'align': 'top'})
# Set last row.
ws.set_row(rows, 400, format_annotation)
writer.save()
def get_annotation_dict(gbk_file):
gbk_dict = SeqIO.to_dict(SeqIO.parse(gbk_file, "genbank"))
annotation_dict = {}
tmp_file = "features.csv"
# Create a file of chromosomes and features.
for chromosome in list(gbk_dict.keys()):
with open(tmp_file, 'w+') as fh:
for feature in gbk_dict[chromosome].features:
if "CDS" in feature.type or "rRNA" in feature.type:
try:
product = feature.qualifiers['product'][0]
except KeyError:
product = None
try:
locus = feature.qualifiers['locus_tag'][0]
except KeyError:
locus = None
try:
gene = feature.qualifiers['gene'][0]
except KeyError:
gene = None
fh.write("%s\t%d\t%d\t%s\t%s\t%s\n" % (chromosome, int(feature.location.start), int(feature.location.end), locus, product, gene))
# Read the chromosomes and features file into a data frame.
df = pandas.read_csv(tmp_file, sep='\t', names=["chrom", "start", "stop", "locus", "product", "gene"])
# Process the data.
df = df.sort_values(['start', 'gene'], ascending=[True, False])
df = df.drop_duplicates('start')
pro = df.reset_index(drop=True)
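        # Index the features by closed intervals [start, stop] so that
        # annotate_table() can look up a SNP position via pro.index.get_loc(pos).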
pro.index = pandas.IntervalIndex.from_arrays(pro['start'], pro['stop'], closed='both')
annotation_dict[chromosome] = pro
return annotation_dict
def get_sample_name(file_path):
base_file_name = os.path.basename(file_path)
if base_file_name.find(".") > 0:
# Eliminate the extension.
return os.path.splitext(base_file_name)[0]
return base_file_name
def output_cascade_table(cascade_order, mqdf, group, annotation_dict):
cascade_order_mq = pandas.concat([cascade_order, mqdf], join='inner')
output_table(cascade_order_mq, "cascade", group, annotation_dict)
def output_excel(df, type_str, group, annotation_dict, count=None):
# Output the temporary json file that
# is used by the excel_formatter.
if count is None:
if group is None:
json_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_order_mq.json" % type_str)
excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_table.xlsx" % type_str)
else:
json_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_%s_order_mq.json" % (group, type_str))
excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_%s_table.xlsx" % (group, type_str))
else:
# The table has more columns than is allowed by the
# MAXCOLS setting, so multiple files will be produced
# as an output collection.
if group is None:
json_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_order_mq_%d.json" % (type_str, count))
excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_table_%d.xlsx" % (type_str, count))
else:
json_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_%s_order_mq_%d.json" % (group, type_str, count))
excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_%s_table_%d.xlsx" % (group, type_str, count))
df.to_json(json_file_name, orient='split')
# Output the Excel file.
excel_formatter(json_file_name, excel_file_name, group, annotation_dict)
def output_sort_table(cascade_order, mqdf, group, annotation_dict):
sort_df = cascade_order.T
sort_df['abs_value'] = sort_df.index
sort_df[['chrom', 'pos']] = sort_df['abs_value'].str.split(':', expand=True)
sort_df = sort_df.drop(['abs_value', 'chrom'], axis=1)
sort_df.pos = sort_df.pos.astype(int)
sort_df = sort_df.sort_values(by=['pos'])
sort_df = sort_df.drop(['pos'], axis=1)
sort_df = sort_df.T
sort_order_mq = pandas.concat([sort_df, mqdf], join='inner')
output_table(sort_order_mq, "sort", group, annotation_dict)
def output_table(df, type_str, group, annotation_dict):
if isinstance(group, str) and group.startswith("dataset"):
# Inputs are single files, not collections,
# so input file names are not useful for naming
# output files.
group_str = None
else:
group_str = group
count = 0
chunk_start = 0
chunk_end = 0
column_count = df.shape[1]
if column_count >= MAXCOLS:
# Here the number of columns is greater than
# the maximum allowed by Excel, so multiple
# outputs will be produced.
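        # For example, 2,500 columns with MAXCOLS=1024 would yield chunks of 1024, 1024 and 452 columns.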
while column_count >= MAXCOLS:
count += 1
chunk_end += MAXCOLS
df_of_type = df.iloc[:, chunk_start:chunk_end]
output_excel(df_of_type, type_str, group_str, annotation_dict, count=count)
chunk_start += MAXCOLS
column_count -= MAXCOLS
count += 1
df_of_type = df.iloc[:, chunk_start:]
output_excel(df_of_type, type_str, group_str, annotation_dict, count=count)
else:
output_excel(df, type_str, group_str, annotation_dict)
def preprocess_tables(newick_file, json_file, json_avg_mq_file, annotation_dict):
avg_mq_series = pandas.read_json(json_avg_mq_file, typ='series', orient='split')
# Map quality to dataframe.
mqdf = avg_mq_series.to_frame(name='MQ')
mqdf = mqdf.T
# Get the group.
group = get_sample_name(newick_file)
snps_df = pandas.read_json(json_file, orient='split')
with open(newick_file, 'r') as fh:
for line in fh:
line = re.sub('[:,]', '\n', line)
line = re.sub('[)(]', '', line)
line = re.sub(r'[0-9].*\.[0-9].*\n', '', line)
line = re.sub('root\n', '', line)
sample_order = line.split('\n')
sample_order = list([_f for _f in sample_order if _f])
sample_order.insert(0, 'root')
tree_order = snps_df.loc[sample_order]
# Count number of SNPs in each column.
snp_per_column = []
for column_header in tree_order:
count = 0
column = tree_order[column_header]
for element in column:
if element != column[0]:
count = count + 1
snp_per_column.append(count)
row1 = pandas.Series(snp_per_column, tree_order.columns, name="snp_per_column")
# Count number of SNPS from the
# top of each column in the table.
snp_from_top = []
for column_header in tree_order:
count = 0
column = tree_order[column_header]
# for each element in the column
# skip the first element
for element in column[1:]:
if element == column[0]:
count = count + 1
else:
break
snp_from_top.append(count)
row2 = pandas.Series(snp_from_top, tree_order.columns, name="snp_from_top")
tree_order = tree_order.append([row1])
tree_order = tree_order.append([row2])
# In pandas=0.18.1 even this does not work:
# abc = row1.to_frame()
# abc = abc.T --> tree_order.shape (5, 18), abc.shape (1, 18)
# tree_order.append(abc)
# Continue to get error: "*** ValueError: all the input arrays must have same number of dimensions"
tree_order = tree_order.T
tree_order = tree_order.sort_values(['snp_from_top', 'snp_per_column'], ascending=[True, False])
tree_order = tree_order.T
# Remove snp_per_column and snp_from_top rows.
cascade_order = tree_order[:-2]
# Output the cascade table.
output_cascade_table(cascade_order, mqdf, group, annotation_dict)
# Output the sorted table.
output_sort_table(cascade_order, mqdf, group, annotation_dict)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gbk_file', action='store', dest='gbk_file', required=False, default=None, help='Optional gbk file'),
parser.add_argument('--input_avg_mq_json', action='store', dest='input_avg_mq_json', help='Average MQ json file')
parser.add_argument('--input_newick', action='store', dest='input_newick', help='Newick file')
parser.add_argument('--input_snps_json', action='store', dest='input_snps_json', help='SNPs json file')
args = parser.parse_args()
if args.gbk_file is not None:
# Create the annotation_dict for annotating
# the Excel tables.
annotation_dict = get_annotation_dict(args.gbk_file)
else:
annotation_dict = None
preprocess_tables(args.input_newick, args.input_snps_json, args.input_avg_mq_json, annotation_dict)
| mit |
yipenggao/moose | modules/tensor_mechanics/doc/tests/cosserat_glide.py | 13 | 1867 | #!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected(y):
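    # Analytic Cosserat glide solution evaluated below, with
    #   we   = sqrt(2*mu*mu_c / (be*(mu + mu_c)))
    #   phi  = bbb*sinh(we*y)
    #   u    = 2*mu_c*bbb*(1 - cosh(we*y)) / (we*(mu + mu_c))
    #   m32  = 2*bbb*be*we*cosh(we*y)
    #   si21 = -4*mu*mu_c*bbb*sinh(we*y) / (mu + mu_c)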
mu = 2.0
mu_c = 3.0
be = 0.6
bbb = 1.0
we = np.sqrt(2 * mu * mu_c / be / (mu + mu_c))
phi = bbb * np.sinh(we * y)
u = 2 * mu_c * bbb * (1 - np.cosh(we * y)) / we / (mu + mu_c)
m32 = 2 * bbb * be * we * np.cosh(we * y)
si21 = -4 * mu * mu_c / (mu + mu_c) * bbb * np.sinh(we * y)
return (phi, u, m32, si21)
def glide():
f = open("../../tests/static_deformations/gold/cosserat_glide_out_soln_0001.csv")
data = [map(float, line.strip().split(",")) for line in f.readlines()[1:12]]
f.close()
return data
ypoints = np.arange(0, 1.05, 0.01)
moosex = [0.1 * i for i in range(11)]
moose = glide()
plt.figure()
plt.plot(ypoints, expected(ypoints)[0], 'k-', linewidth = 1.0, label = 'expected Cosserat rot')
plt.plot(ypoints, expected(ypoints)[1], 'r-', linewidth = 1.0, label = 'expected displacement')
plt.plot(moosex, [d[4] for d in moose], 'ks', markersize = 10.0, label = 'MOOSE Cosserat rot')
plt.plot(moosex, [d[1] for d in moose], 'r^', markersize = 10.0, label = 'MOOSE displacement')
plt.legend(loc = 'center right')
plt.xlabel("y (m)")
plt.ylabel("displacement (m)")
plt.title("Cosserat glide")
plt.savefig("cosserat_glide_disp.pdf")
plt.figure()
plt.plot(ypoints, expected(ypoints)[2], 'k-', linewidth = 1.0, label = 'expected couple stress')
plt.plot(ypoints, expected(ypoints)[3], 'r-', linewidth = 1.0, label = 'expected shear stress')
plt.plot(moosex, [d[0] for d in moose], 'ks', markersize = 10.0, label = 'MOOSE couple stress')
plt.plot(moosex, [d[3] for d in moose], 'r^', markersize = 10.0, label = 'MOOSE shear stress')
plt.legend(loc = 'center right')
plt.xlabel("y (m)")
plt.ylabel("stress (Pa)")
plt.title("Cosserat glide")
plt.savefig("cosserat_glide_stress.pdf")
sys.exit(0)
| lgpl-2.1 |
moutai/scikit-learn | sklearn/cluster/spectral.py | 25 | 18522 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
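    Examples
    --------
    A minimal, illustrative call on a random embedding (the exact labels
    depend on ``random_state``; shown only as a sketch)::
        import numpy as np
        from sklearn.cluster.spectral import discretize
        vectors = np.random.RandomState(0).rand(20, 3)
        labels = discretize(vectors, random_state=0)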
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
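# -----------------------------------------------------------------------------
# Illustrative helper (added for this annotated copy; not part of the original
# scikit-learn module).  It exercises ``discretize`` on a synthetic embedding so
# the rotation/partition search described in the docstring can be run in
# isolation.  The embedding below is random and only meant to have the right
# shape.
def _example_discretize(n_samples=30, n_clusters=3, seed=0):
    """Return discrete cluster labels for a random spectral-style embedding."""
    rng = np.random.RandomState(seed)
    # Rows play the role of the eigenvector embedding of n_samples points.
    embedding = rng.rand(n_samples, n_clusters)
    return discretize(embedding, random_state=rng)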
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
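# -----------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original scikit-learn
# module): ``spectral_clustering`` accepts a precomputed, symmetric affinity
# matrix directly.  Here one is built from two well-separated groups of 1-D
# points via an RBF of their pairwise distances, so two clusters are expected.
def _example_spectral_clustering_precomputed(seed=0):
    """Cluster a small hand-built affinity matrix into two groups."""
    rng = np.random.RandomState(seed)
    X = np.r_[rng.randn(5, 1), rng.randn(5, 1) + 4.0]   # two separated groups
    d2 = (X - X.T) ** 2                                  # squared distances
    affinity = np.exp(-0.5 * d2)                         # symmetric RBF kernel
    return spectral_clustering(affinity, n_clusters=2, random_state=seed)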
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either a
    kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values mean
    very dissimilar elements, it can be transformed into a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- dist_matrix ** 2 / (2. * delta ** 2))
Where ``delta`` is a free parameter representing the width of the Gaussian
kernel.
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None, n_jobs=1):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True,
n_jobs=self.n_jobs)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
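# -----------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original scikit-learn
# module): the estimator interface on a toy two-ring dataset, the kind of
# non-convex geometry the class docstring refers to.  Parameter choices below
# are only reasonable defaults, not recommendations from the original authors.
def _example_spectral_clustering_rings(n_points=200, seed=0):
    """Fit SpectralClustering on two noisy concentric rings; return labels."""
    rng = np.random.RandomState(seed)
    theta = rng.uniform(0.0, 2.0 * np.pi, n_points)
    radius = np.where(rng.rand(n_points) > 0.5, 1.0, 3.0)
    X = np.c_[radius * np.cos(theta), radius * np.sin(theta)]
    X += 0.05 * rng.randn(n_points, 2)
    model = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                               n_neighbors=10, assign_labels='discretize',
                               random_state=seed)
    return model.fit(X).labels_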
| bsd-3-clause |
abimannans/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
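# Quick numerical check (added for illustration; not in the original example):
# with essentially no nugget the GP interpolates, so predictions at the
# training inputs should match the observations to within numerical precision.
print("noise-free case, max |y_pred - y| at training points: %.2e"
      % np.max(np.abs(gp.predict(X) - y)))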
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
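# Quick numerical check (added for illustration; not in the original example):
# fraction of the dense grid on which the true function falls inside the
# plotted 95% confidence band.
inside = np.abs(f(x).ravel() - y_pred) <= 1.9600 * sigma
print("noisy case, empirical coverage of the 95%% band: %.1f%%"
      % (100.0 * inside.mean()))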
pl.show()
| bsd-3-clause |
FBRTMaka/ooi-ui-services | ooiservices/app/uframe/windrose.py | 5 | 20440 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = '1.4'
__author__ = 'Lionel Roubeyrie'
__mail__ = '[email protected]'
__license__ = 'CeCILL-B'
import matplotlib
import matplotlib.cm as cm
import numpy as np
from matplotlib.patches import Rectangle, Polygon
from matplotlib.ticker import ScalarFormatter, AutoLocator
from matplotlib.text import Text, FontProperties
from matplotlib.projections.polar import PolarAxes
from numpy.lib.twodim_base import histogram2d
import matplotlib.pyplot as plt
from pylab import poly_between
RESOLUTION = 100
ZBASE = -1000 #The starting zorder for all drawing, negative to have the grid on top
class WindroseAxes(PolarAxes):
"""
Create a windrose axes
"""
def __init__(self, *args, **kwargs):
"""
See Axes base class for args and kwargs documentation
"""
#Uncomment to have the possibility to change the resolution directly
#when the instance is created
#self.RESOLUTION = kwargs.pop('resolution', 100)
PolarAxes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.radii_angle = 67.5
self.cla()
def cla(self):
"""
Clear the current axes
"""
PolarAxes.cla(self)
self.theta_angles = np.arange(0, 360, 45)
self.theta_labels = ['E', 'N-E', 'N', 'N-W', 'W', 'S-W', 'S', 'S-E']
self.set_thetagrids(angles=self.theta_angles, labels=self.theta_labels)
self._info = {'dir' : list(),
'bins' : list(),
'table' : list()}
self.patches_list = list()
def _colors(self, cmap, n):
'''
Returns a list of n colors based on the colormap cmap
'''
return [cmap(i) for i in np.linspace(0.2, 1.0, n)]
def set_radii_angle(self, **kwargs):
"""
Set the radii labels angle
"""
null = kwargs.pop('labels', None)
angle = kwargs.pop('angle', None)
if angle is None:
angle = self.radii_angle
self.radii_angle = angle
radii = np.linspace(0.1, self.get_rmax(), 6)
radii_labels = ["%.1f%%" % r for r in radii]
radii_labels[0] = "" #Removing label 0
null = self.set_rgrids(radii=radii, labels=radii_labels,
angle=self.radii_angle, **kwargs)
def _update(self):
self.set_rmax(rmax=np.max(np.sum(self._info['table'], axis=0)))
self.set_radii_angle(angle=self.radii_angle)
def legend(self, loc='lower left', **kwargs):
"""
        Sets the legend location and its properties.
The location codes are
'best' : 0,
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
If none of these are suitable, loc can be a 2-tuple giving x,y
in axes coords, ie,
loc = (0, 1) is left top
loc = (0.5, 0.5) is center, center
and so on. The following kwargs are supported:
isaxes=True # whether this is an axes legend
prop = FontProperties(size='smaller') # the font property
pad = 0.2 # the fractional whitespace inside the legend border
shadow # if True, draw a shadow behind legend
labelsep = 0.005 # the vertical space between the legend entries
handlelen = 0.05 # the length of the legend lines
handletextsep = 0.02 # the space between the legend line and legend text
axespad = 0.02 # the border between the axes and legend edge
"""
def get_handles():
handles = list()
for p in self.patches_list:
if isinstance(p, matplotlib.patches.Polygon) or \
isinstance(p, matplotlib.patches.Rectangle):
color = p.get_facecolor()
elif isinstance(p, matplotlib.lines.Line2D):
color = p.get_color()
else:
raise AttributeError("Can't handle patches")
handles.append(Rectangle((0, 0), 0.2, 0.2,
facecolor=color, edgecolor='black'))
return handles
def get_labels():
labels = np.copy(self._info['bins'])
labels = ["[%.1f : %0.1f]" %(labels[i], labels[i+1]) \
for i in range(len(labels)-1)]
return labels
null = kwargs.pop('labels', None)
null = kwargs.pop('handles', None)
handles = get_handles()
labels = get_labels()
self.legend_ = matplotlib.legend.Legend(self, handles, labels,
loc, **kwargs)
return self.legend_
def _init_plot(self, dir, var, **kwargs):
"""
Internal method used by all plotting commands
"""
#self.cla()
null = kwargs.pop('zorder', None)
#Init of the bins array if not set
bins = kwargs.pop('bins', None)
if bins is None:
bins = np.linspace(np.min(var), np.max(var), 6)
if isinstance(bins, int):
bins = np.linspace(np.min(var), np.max(var), bins)
bins = np.asarray(bins)
nbins = len(bins)
#Number of sectors
nsector = kwargs.pop('nsector', None)
if nsector is None:
nsector = 16
#Sets the colors table based on the colormap or the "colors" argument
colors = kwargs.pop('colors', None)
cmap = kwargs.pop('cmap', None)
if colors is not None:
if isinstance(colors, str):
colors = [colors]*nbins
if isinstance(colors, (tuple, list)):
if len(colors) != nbins:
raise ValueError("colors and bins must have same length")
else:
if cmap is None:
cmap = cm.jet
colors = self._colors(cmap, nbins)
#Building the angles list
angles = np.arange(0, -2*np.pi, -2*np.pi/nsector) + np.pi/2
normed = kwargs.pop('normed', False)
blowto = kwargs.pop('blowto', False)
        #Set the global information dictionary
self._info['dir'], self._info['bins'], self._info['table'] = histogram(dir, var, bins, nsector, normed, blowto)
return bins, nbins, nsector, colors, angles, kwargs
def contour(self, dir, var, **kwargs):
"""
        Plot a windrose in linear mode. For each var bin, a line will be
        drawn on the axes, a segment between each sector (center to center).
        Each line can be formatted (color, width, ...) like with the standard
        pylab plot command.
Mandatory:
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
Optional:
* nsector: integer - number of sectors used to compute the windrose
table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
and the resulting computed table will be aligned with the cardinals
points.
* bins : 1D array or integer- number of bins, or a sequence of
bins variable. If not set, bins=6, then
bins=linspace(min(var), max(var), 6)
* blowto : bool. If True, the windrose will be pi rotated,
        to show where the wind blows to (useful for a pollutant rose).
* colors : string or tuple - one string color ('k' or 'black'), in this
case all bins will be plotted in this color; a tuple of matplotlib
color args (string, float, rgb, etc), different levels will be plotted
in different colors in the order specified.
* cmap : a cm Colormap instance from matplotlib.cm.
- if cmap == None and colors == None, a default Colormap is used.
others kwargs : see help(pylab.plot)
"""
bins, nbins, nsector, colors, angles, kwargs = self._init_plot(dir, var,
**kwargs)
#closing lines
angles = np.hstack((angles, angles[-1]-2*np.pi/nsector))
vals = np.hstack((self._info['table'],
np.reshape(self._info['table'][:,0],
(self._info['table'].shape[0], 1))))
offset = 0
for i in range(nbins):
val = vals[i,:] + offset
offset += vals[i, :]
zorder = ZBASE + nbins - i
patch = self.plot(angles, val, color=colors[i], zorder=zorder,
**kwargs)
self.patches_list.extend(patch)
self._update()
def contourf2(self, dir, var, **kwargs):
"""
        Plot a windrose in filled mode. For each var bin, a line will be
        drawn on the axes, a segment between each sector (center to center).
        Each line can be formatted (color, width, ...) like with the standard
        pylab plot command.
Mandatory:
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
Optional:
* nsector: integer - number of sectors used to compute the windrose
table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
and the resulting computed table will be aligned with the cardinals
points.
* bins : 1D array or integer- number of bins, or a sequence of
bins variable. If not set, bins=6, then
bins=linspace(min(var), max(var), 6)
* blowto : bool. If True, the windrose will be pi rotated,
        to show where the wind blows to (useful for a pollutant rose).
* colors : string or tuple - one string color ('k' or 'black'), in this
case all bins will be plotted in this color; a tuple of matplotlib
color args (string, float, rgb, etc), different levels will be plotted
in different colors in the order specified.
* cmap : a cm Colormap instance from matplotlib.cm.
- if cmap == None and colors == None, a default Colormap is used.
others kwargs : see help(pylab.plot)
"""
bins, nbins, nsector, colors, angles, kwargs = self._init_plot(dir, var,
**kwargs)
null = kwargs.pop('facecolor', None)
null = kwargs.pop('edgecolor', None)
#closing lines
angles = np.hstack((angles, angles[-1]-2*np.pi/nsector))
vals = np.hstack((self._info['table'],
np.reshape(self._info['table'][:,0],
(self._info['table'].shape[0], 1))))
offset = 0
for i in range(nbins):
val = vals[i,:] + offset
offset += vals[i, :]
zorder = ZBASE + nbins - i
xs, ys = poly_between(angles, 0, val)
patch = self.fill(xs, ys, facecolor=colors[i],
edgecolor=colors[i], zorder=zorder, **kwargs)
self.patches_list.extend(patch)
def bar(self, dir, var, **kwargs):
"""
        Plot a windrose in bar mode. For each var bin and for each sector,
        a colored bar will be drawn on the axes.
Mandatory:
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
Optional:
* nsector: integer - number of sectors used to compute the windrose
table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
and the resulting computed table will be aligned with the cardinals
points.
* bins : 1D array or integer- number of bins, or a sequence of
bins variable. If not set, bins=6 between min(var) and max(var).
* blowto : bool. If True, the windrose will be pi rotated,
        to show where the wind blows to (useful for a pollutant rose).
* colors : string or tuple - one string color ('k' or 'black'), in this
case all bins will be plotted in this color; a tuple of matplotlib
color args (string, float, rgb, etc), different levels will be plotted
in different colors in the order specified.
* cmap : a cm Colormap instance from matplotlib.cm.
- if cmap == None and colors == None, a default Colormap is used.
edgecolor : string - The string color each edge bar will be plotted.
Default : no edgecolor
* opening : float - between 0.0 and 1.0, to control the space between
each sector (1.0 for no space)
"""
bins, nbins, nsector, colors, angles, kwargs = self._init_plot(dir, var,
**kwargs)
null = kwargs.pop('facecolor', None)
edgecolor = kwargs.pop('edgecolor', None)
if edgecolor is not None:
if not isinstance(edgecolor, str):
raise ValueError('edgecolor must be a string color')
opening = kwargs.pop('opening', None)
if opening is None:
opening = 0.8
dtheta = 2*np.pi/nsector
opening = dtheta*opening
for j in range(nsector):
offset = 0
for i in range(nbins):
if i > 0:
offset += self._info['table'][i-1, j]
val = self._info['table'][i, j]
zorder = ZBASE + nbins - i
patch = Rectangle((angles[j]-opening/2, offset), opening, val,
facecolor=colors[i], edgecolor=edgecolor, zorder=zorder,
**kwargs)
self.add_patch(patch)
if j == 0:
self.patches_list.append(patch)
self._update()
def box(self, dir, var, **kwargs):
"""
        Plot a windrose in proportional bar mode. For each var bin and for
        each sector, a colored bar will be drawn on the axes.
Mandatory:
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
Optional:
* nsector: integer - number of sectors used to compute the windrose
table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
and the resulting computed table will be aligned with the cardinals
points.
* bins : 1D array or integer- number of bins, or a sequence of
bins variable. If not set, bins=6 between min(var) and max(var).
* blowto : bool. If True, the windrose will be pi rotated,
        to show where the wind blows to (useful for a pollutant rose).
* colors : string or tuple - one string color ('k' or 'black'), in this
case all bins will be plotted in this color; a tuple of matplotlib
color args (string, float, rgb, etc), different levels will be plotted
in different colors in the order specified.
* cmap : a cm Colormap instance from matplotlib.cm.
- if cmap == None and colors == None, a default Colormap is used.
edgecolor : string - The string color each edge bar will be plotted.
Default : no edgecolor
"""
bins, nbins, nsector, colors, angles, kwargs = self._init_plot(dir, var,
**kwargs)
null = kwargs.pop('facecolor', None)
edgecolor = kwargs.pop('edgecolor', None)
if edgecolor is not None:
if not isinstance(edgecolor, str):
raise ValueError('edgecolor must be a string color')
opening = np.linspace(0.0, np.pi/16, nbins)
for j in range(nsector):
offset = 0
for i in range(nbins):
if i > 0:
offset += self._info['table'][i-1, j]
val = self._info['table'][i, j]
zorder = ZBASE + nbins - i
patch = Rectangle((angles[j]-opening[i]/2, offset), opening[i],
val, facecolor=colors[i], edgecolor=edgecolor,
zorder=zorder, **kwargs)
self.add_patch(patch)
if j == 0:
self.patches_list.append(patch)
self._update()
def histogram(dir, var, bins, nsector, normed=False, blowto=False):
"""
    Returns an array where, for each sector of wind
    (centred on the north), we have the number of times the wind comes with a
    particular var (speed, pollutant concentration, ...).
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
* bins : list - list of var category against we're going to compute the table
* nsector : integer - number of sectors
* normed : boolean - The resulting table is normed in percent or not.
    * blowto : boolean - Normally a windrose is computed with directions
    as wind blows from. If True, the table will be reversed (useful for
    a pollutant rose)
"""
if len(var) != len(dir):
        raise ValueError("var and dir must have same length")
angle = 360./nsector
dir_bins = np.arange(-angle/2 ,360.+angle, angle, dtype=np.float)
dir_edges = dir_bins.tolist()
dir_edges.pop(-1)
dir_edges[0] = dir_edges.pop(-1)
dir_bins[0] = 0.
var_bins = bins.tolist()
var_bins.append(np.inf)
if blowto:
dir = dir + 180.
dir[dir>=360.] = dir[dir>=360.] - 360
table = histogram2d(x=var, y=dir, bins=[var_bins, dir_bins],
normed=False)[0]
# add the last value to the first to have the table of North winds
table[:,0] = table[:,0] + table[:,-1]
# and remove the last col
table = table[:, :-1]
if normed:
table = table*100/table.sum()
return dir_edges, var_bins, table
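# -----------------------------------------------------------------------------
# Illustrative helper (added; not part of the original windrose module): build
# the sector/speed frequency table described above for synthetic wind data.
# The Rayleigh speed distribution is just a plausible stand-in for real winds.
def _example_histogram(n=500, seed=0):
    """Return (dir_edges, var_bins, table) for random directions and speeds."""
    rng = np.random.RandomState(seed)
    wind_dir = rng.uniform(0.0, 360.0, n)   # directions in degrees
    wind_spd = rng.rayleigh(3.0, n)         # speeds
    bins = np.linspace(0.0, wind_spd.max(), 6)
    return histogram(wind_dir, wind_spd, bins, nsector=16, normed=True)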
def wrcontour(dir, var, **kwargs):
fig = plt.figure()
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect)
fig.add_axes(ax)
ax.contour(dir, var, **kwargs)
l = ax.legend(axespad=-0.10)
plt.setp(l.get_texts(), fontsize=8)
plt.draw()
plt.show()
return ax
def wrcontourf(dir, var, **kwargs):
fig = plt.figure()
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect)
fig.add_axes(ax)
ax.contourf(dir, var, **kwargs)
l = ax.legend(axespad=-0.10)
plt.setp(l.get_texts(), fontsize=8)
plt.draw()
plt.show()
return ax
def wrbox(dir, var, **kwargs):
fig = plt.figure()
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect)
fig.add_axes(ax)
ax.box(dir, var, **kwargs)
l = ax.legend(axespad=-0.10)
plt.setp(l.get_texts(), fontsize=8)
plt.draw()
plt.show()
return ax
def wrbar(dir, var, **kwargs):
fig = plt.figure()
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect)
fig.add_axes(ax)
ax.bar(dir, var, **kwargs)
l = ax.legend(axespad=-0.10)
plt.setp(l.get_texts(), fontsize=8)
plt.draw()
plt.show()
return ax
def clean(dir, var):
'''
    Remove masked values in the two arrays: if a direction value is masked,
    the corresponding var value is also removed in the cleaning process
    (and vice-versa)
'''
dirmask = dir.mask==False
varmask = var.mask==False
ind = dirmask*varmask
return dir[ind], var[ind]
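# Small illustration (added; not part of the original module): ``clean``
# expects masked arrays and drops every entry that is masked in either input.
def _example_clean():
    """Return cleaned (dir, var) from two partially masked arrays."""
    d = np.ma.masked_invalid(np.array([0.0, 90.0, np.nan, 270.0]))
    v = np.ma.masked_invalid(np.array([1.0, np.nan, 3.0, 4.0]))
    return clean(d, v)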
if __name__=='__main__':
from pylab import figure, show, setp, random, grid, draw
vv=random(500)*6
dv=random(500)*360
fig = figure(figsize=(8, 8), dpi=80, facecolor='w', edgecolor='w')
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect, axisbg='w')
fig.add_axes(ax)
# ax.contourf(dv, vv, bins=np.arange(0,8,1), cmap=cm.hot)
# ax.contour(dv, vv, bins=np.arange(0,8,1), colors='k')
# ax.bar(dv, vv, normed=True, opening=0.8, edgecolor='white')
ax.box(dv, vv, normed=True)
l = ax.legend(axespad=-0.10)
setp(l.get_texts(), fontsize=8)
draw()
#print ax._info
show()
| apache-2.0 |
wanghaven/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/rcsetup.py | 69 | 23344 | """
The rcsetup module contains the default values and the validation code for
customization using matplotlib's rc settings.
Each rc setting is assigned a default value and a function used to validate any
attempted changes to that setting. The default values and validation functions
are defined in the rcsetup module, and are used to construct the rcParams global
object which stores the settings and is referenced throughout matplotlib.
These default values should be consistent with the default matplotlibrc file
that actually reflects the values given here. Any additions or deletions to the
parameter set listed here should also be reflected in the
:file:`matplotlibrc.template` in matplotlib's root source directory.
"""
import os
import warnings
from matplotlib.fontconfig_pattern import parse_fontconfig_pattern
from matplotlib.colors import is_color_like
#interactive_bk = ['gtk', 'gtkagg', 'gtkcairo', 'fltkagg', 'qtagg', 'qt4agg',
# 'tkagg', 'wx', 'wxagg', 'cocoaagg']
# The capitalized forms are needed for ipython at present; this may
# change for later versions.
interactive_bk = ['GTK', 'GTKAgg', 'GTKCairo', 'FltkAgg', 'MacOSX',
'QtAgg', 'Qt4Agg', 'TkAgg', 'WX', 'WXAgg', 'CocoaAgg']
non_interactive_bk = ['agg', 'cairo', 'emf', 'gdk',
'pdf', 'ps', 'svg', 'template']
all_backends = interactive_bk + non_interactive_bk
class ValidateInStrings:
def __init__(self, key, valid, ignorecase=False):
'valid is a list of legal strings'
self.key = key
self.ignorecase = ignorecase
def func(s):
if ignorecase: return s.lower()
else: return s
self.valid = dict([(func(k),k) for k in valid])
def __call__(self, s):
if self.ignorecase: s = s.lower()
if s in self.valid: return self.valid[s]
raise ValueError('Unrecognized %s string "%s": valid strings are %s'
% (self.key, s, self.valid.values()))
def validate_path_exists(s):
'If s is a path, return s, else False'
if os.path.exists(s): return s
else:
raise RuntimeError('"%s" should be a path but it does not exist'%s)
def validate_bool(b):
'Convert b to a boolean or raise'
if type(b) is str:
b = b.lower()
if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): return True
elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): return False
else:
raise ValueError('Could not convert "%s" to boolean' % b)
def validate_bool_maybe_none(b):
'Convert b to a boolean or raise'
if type(b) is str:
b = b.lower()
if b=='none': return None
if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): return True
elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): return False
else:
raise ValueError('Could not convert "%s" to boolean' % b)
def validate_float(s):
'convert s to float or raise'
try: return float(s)
except ValueError:
raise ValueError('Could not convert "%s" to float' % s)
def validate_int(s):
'convert s to int or raise'
try: return int(s)
except ValueError:
raise ValueError('Could not convert "%s" to int' % s)
def validate_fonttype(s):
    'confirm that this is a Postscript or PDF font type that we know how to convert to'
fonttypes = { 'type3': 3,
'truetype': 42 }
try:
fonttype = validate_int(s)
except ValueError:
if s.lower() in fonttypes.keys():
return fonttypes[s.lower()]
raise ValueError('Supported Postscript/PDF font types are %s' % fonttypes.keys())
else:
if fonttype not in fonttypes.values():
raise ValueError('Supported Postscript/PDF font types are %s' % fonttypes.values())
return fonttype
#validate_backend = ValidateInStrings('backend', all_backends, ignorecase=True)
_validate_standard_backends = ValidateInStrings('backend', all_backends, ignorecase=True)
def validate_backend(s):
if s.startswith('module://'): return s
else: return _validate_standard_backends(s)
validate_numerix = ValidateInStrings('numerix',[
'Numeric','numarray','numpy',
], ignorecase=True)
validate_toolbar = ValidateInStrings('toolbar',[
'None','classic','toolbar2',
], ignorecase=True)
def validate_autolayout(v):
if v:
warnings.warn("figure.autolayout is not currently supported")
class validate_nseq_float:
def __init__(self, n):
self.n = n
def __call__(self, s):
'return a seq of n floats or raise'
if type(s) is str:
ss = s.split(',')
if len(ss) != self.n:
raise ValueError('You must supply exactly %d comma separated values'%self.n)
try:
return [float(val) for val in ss]
except ValueError:
raise ValueError('Could not convert all entries to floats')
else:
assert type(s) in (list,tuple)
if len(s) != self.n:
raise ValueError('You must supply exactly %d values'%self.n)
return [float(val) for val in s]
class validate_nseq_int:
def __init__(self, n):
self.n = n
def __call__(self, s):
'return a seq of n ints or raise'
if type(s) is str:
ss = s.split(',')
if len(ss) != self.n:
raise ValueError('You must supply exactly %d comma separated values'%self.n)
try:
return [int(val) for val in ss]
except ValueError:
raise ValueError('Could not convert all entries to ints')
else:
assert type(s) in (list,tuple)
if len(s) != self.n:
raise ValueError('You must supply exactly %d values'%self.n)
return [int(val) for val in s]
def validate_color(s):
'return a valid color arg'
if s.lower() == 'none':
return 'None'
if is_color_like(s):
return s
stmp = '#' + s
if is_color_like(stmp):
return stmp
# If it is still valid, it must be a tuple.
colorarg = s
msg = ''
if s.find(',')>=0:
# get rid of grouping symbols
stmp = ''.join([ c for c in s if c.isdigit() or c=='.' or c==','])
vals = stmp.split(',')
if len(vals)!=3:
msg = '\nColor tuples must be length 3'
else:
try:
colorarg = [float(val) for val in vals]
except ValueError:
msg = '\nCould not convert all entries to floats'
if not msg and is_color_like(colorarg):
return colorarg
raise ValueError('%s does not look like a color arg%s'%(s, msg))
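# Illustration (added; not part of the original module): the color forms the
# validator above accepts -- a named color, a gray-level string, hex digits
# without the leading '#', and a comma-separated RGB tuple.
def _example_validate_color():
    """Return a few validated color arguments in their accepted forms."""
    return (validate_color('k'),
            validate_color('0.5'),
            validate_color('00ff00'),
            validate_color('0.1, 0.2, 0.3'))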
def validate_stringlist(s):
'return a list'
if type(s) is str:
return [ v.strip() for v in s.split(',') ]
else:
assert type(s) in [list,tuple]
return [ str(v) for v in s ]
validate_orientation = ValidateInStrings('orientation',[
'landscape', 'portrait',
])
def validate_aspect(s):
if s in ('auto', 'equal'):
return s
try:
return float(s)
except ValueError:
raise ValueError('not a valid aspect specification')
def validate_fontsize(s):
if type(s) is str:
s = s.lower()
if s in ['xx-small', 'x-small', 'small', 'medium', 'large', 'x-large',
'xx-large', 'smaller', 'larger']:
return s
try:
return float(s)
except ValueError:
raise ValueError('not a valid font size')
def validate_font_properties(s):
parse_fontconfig_pattern(s)
return s
validate_fontset = ValidateInStrings('fontset', ['cm', 'stix', 'stixsans', 'custom'])
validate_verbose = ValidateInStrings('verbose',[
'silent', 'helpful', 'debug', 'debug-annoying',
])
validate_cairo_format = ValidateInStrings('cairo_format',
['png', 'ps', 'pdf', 'svg'],
ignorecase=True)
validate_ps_papersize = ValidateInStrings('ps_papersize',[
'auto', 'letter', 'legal', 'ledger',
'a0', 'a1', 'a2','a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10',
'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'b10',
], ignorecase=True)
def validate_ps_distiller(s):
if type(s) is str:
s = s.lower()
if s in ('none',None):
return None
elif s in ('false', False):
return False
elif s in ('ghostscript', 'xpdf'):
return s
else:
raise ValueError('matplotlibrc ps.usedistiller must either be none, ghostscript or xpdf')
validate_joinstyle = ValidateInStrings('joinstyle',['miter', 'round', 'bevel'], ignorecase=True)
validate_capstyle = ValidateInStrings('capstyle',['butt', 'round', 'projecting'], ignorecase=True)
validate_negative_linestyle = ValidateInStrings('negative_linestyle',['solid', 'dashed'], ignorecase=True)
def validate_negative_linestyle_legacy(s):
try:
res = validate_negative_linestyle(s)
return res
except ValueError:
dashes = validate_nseq_float(2)(s)
warnings.warn("Deprecated negative_linestyle specification; use 'solid' or 'dashed'")
return (0, dashes) # (offset, (solid, blank))
validate_legend_loc = ValidateInStrings('legend_loc',[
'best',
'upper right',
'upper left',
'lower left',
'lower right',
'right',
'center left',
'center right',
'lower center',
'upper center',
'center',
], ignorecase=True)
class ValidateInterval:
"""
Value must be in interval
"""
def __init__(self, vmin, vmax, closedmin=True, closedmax=True):
self.vmin = vmin
self.vmax = vmax
self.cmin = closedmin
self.cmax = closedmax
def __call__(self, s):
try: s = float(s)
except: raise RuntimeError('Value must be a float; found "%s"'%s)
if self.cmin and s<self.vmin:
raise RuntimeError('Value must be >= %f; found "%f"'%(self.vmin, s))
elif not self.cmin and s<=self.vmin:
raise RuntimeError('Value must be > %f; found "%f"'%(self.vmin, s))
if self.cmax and s>self.vmax:
raise RuntimeError('Value must be <= %f; found "%f"'%(self.vmax, s))
elif not self.cmax and s>=self.vmax:
raise RuntimeError('Value must be < %f; found "%f"'%(self.vmax, s))
return s
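# Illustration (added; not part of the original module): how the validators
# defined above are exercised when an rc value is assigned.
def _example_validators():
    """Return a few rc-style values run through their validators."""
    return (validate_bool('yes'),                 # -> True
            validate_nseq_float(2)('8.0, 6.0'),   # -> [8.0, 6.0]
            validate_fontsize('medium'),          # -> 'medium'
            ValidateInterval(0, 1)(0.125))        # -> 0.125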
# a map from key -> value, converter
defaultParams = {
'backend' : ['Agg', validate_backend], # agg is certainly present
'backend_fallback' : [True, validate_bool], # agg is certainly present
'numerix' : ['numpy', validate_numerix],
'maskedarray' : [False, validate_bool],
'toolbar' : ['toolbar2', validate_toolbar],
'datapath' : [None, validate_path_exists], # handled by _get_data_path_cached
'units' : [False, validate_bool],
'interactive' : [False, validate_bool],
'timezone' : ['UTC', str],
# the verbosity setting
'verbose.level' : ['silent', validate_verbose],
'verbose.fileo' : ['sys.stdout', str],
# line props
'lines.linewidth' : [1.0, validate_float], # line width in points
'lines.linestyle' : ['-', str], # solid line
'lines.color' : ['b', validate_color], # blue
'lines.marker' : ['None', str], # black
'lines.markeredgewidth' : [0.5, validate_float],
'lines.markersize' : [6, validate_float], # markersize, in points
'lines.antialiased' : [True, validate_bool], # antialised (no jaggies)
'lines.dash_joinstyle' : ['miter', validate_joinstyle],
'lines.solid_joinstyle' : ['miter', validate_joinstyle],
'lines.dash_capstyle' : ['butt', validate_capstyle],
'lines.solid_capstyle' : ['projecting', validate_capstyle],
# patch props
'patch.linewidth' : [1.0, validate_float], # line width in points
'patch.edgecolor' : ['k', validate_color], # black
'patch.facecolor' : ['b', validate_color], # blue
'patch.antialiased' : [True, validate_bool], # antialised (no jaggies)
# font props
'font.family' : ['sans-serif', str], # used by text object
'font.style' : ['normal', str], #
'font.variant' : ['normal', str], #
'font.stretch' : ['normal', str], #
'font.weight' : ['normal', str], #
'font.size' : [12.0, validate_float], #
'font.serif' : [['Bitstream Vera Serif', 'DejaVu Serif',
'New Century Schoolbook', 'Century Schoolbook L',
'Utopia', 'ITC Bookman', 'Bookman',
'Nimbus Roman No9 L','Times New Roman',
'Times','Palatino','Charter','serif'],
validate_stringlist],
'font.sans-serif' : [['Bitstream Vera Sans', 'DejaVu Sans',
'Lucida Grande', 'Verdana', 'Geneva', 'Lucid',
'Arial', 'Helvetica', 'Avant Garde', 'sans-serif'],
validate_stringlist],
'font.cursive' : [['Apple Chancery','Textile','Zapf Chancery',
'Sand','cursive'], validate_stringlist],
    'font.fantasy' : [['Comic Sans MS','Chicago','Charcoal','Impact',
'Western','fantasy'], validate_stringlist],
'font.monospace' : [['Bitstream Vera Sans Mono', 'DejaVu Sans Mono',
'Andale Mono', 'Nimbus Mono L', 'Courier New',
'Courier','Fixed', 'Terminal','monospace'],
validate_stringlist],
# text props
'text.color' : ['k', validate_color], # black
'text.usetex' : [False, validate_bool],
'text.latex.unicode' : [False, validate_bool],
'text.latex.preamble' : [[''], validate_stringlist],
'text.dvipnghack' : [None, validate_bool_maybe_none],
'text.fontstyle' : ['normal', str],
'text.fontangle' : ['normal', str],
'text.fontvariant' : ['normal', str],
'text.fontweight' : ['normal', str],
'text.fontsize' : ['medium', validate_fontsize],
'mathtext.cal' : ['cursive', validate_font_properties],
'mathtext.rm' : ['serif', validate_font_properties],
'mathtext.tt' : ['monospace', validate_font_properties],
'mathtext.it' : ['serif:italic', validate_font_properties],
'mathtext.bf' : ['serif:bold', validate_font_properties],
'mathtext.sf' : ['sans\-serif', validate_font_properties],
'mathtext.fontset' : ['cm', validate_fontset],
'mathtext.fallback_to_cm' : [True, validate_bool],
'image.aspect' : ['equal', validate_aspect], # equal, auto, a number
'image.interpolation' : ['bilinear', str],
'image.cmap' : ['jet', str], # one of gray, jet, etc
'image.lut' : [256, validate_int], # lookup table
'image.origin' : ['upper', str], # lookup table
'image.resample' : [False, validate_bool],
'contour.negative_linestyle' : ['dashed', validate_negative_linestyle_legacy],
# axes props
'axes.axisbelow' : [False, validate_bool],
'axes.hold' : [True, validate_bool],
'axes.facecolor' : ['w', validate_color], # background color; white
'axes.edgecolor' : ['k', validate_color], # edge color; black
'axes.linewidth' : [1.0, validate_float], # edge linewidth
'axes.titlesize' : ['large', validate_fontsize], # fontsize of the axes title
'axes.grid' : [False, validate_bool], # display grid or not
'axes.labelsize' : ['medium', validate_fontsize], # fontsize of the x any y labels
'axes.labelcolor' : ['k', validate_color], # color of axis label
'axes.formatter.limits' : [[-7, 7], validate_nseq_int(2)],
# use scientific notation if log10
# of the axis range is smaller than the
# first or larger than the second
'axes.unicode_minus' : [True, validate_bool],
'polaraxes.grid' : [True, validate_bool], # display polar grid or not
#legend properties
'legend.fancybox' : [False,validate_bool],
'legend.loc' : ['upper right',validate_legend_loc], # at some point, this should be changed to 'best'
'legend.isaxes' : [True,validate_bool], # this option is internally ignored - it never served any useful purpose
'legend.numpoints' : [2, validate_int], # the number of points in the legend line
'legend.fontsize' : ['large', validate_fontsize],
'legend.pad' : [0, validate_float], # was 0.2, deprecated; the fractional whitespace inside the legend border
'legend.borderpad' : [0.4, validate_float], # units are fontsize
'legend.markerscale' : [1.0, validate_float], # the relative size of legend markers vs. original
# the following dimensions are in axes coords
'legend.labelsep' : [0.010, validate_float], # the vertical space between the legend entries
'legend.handlelen' : [0.05, validate_float], # the length of the legend lines
'legend.handletextsep' : [0.02, validate_float], # the space between the legend line and legend text
'legend.axespad' : [0.02, validate_float], # the border between the axes and legend edge
'legend.shadow' : [False, validate_bool],
'legend.labelspacing' : [0.5, validate_float], # the vertical space between the legend entries
'legend.handlelength' : [2., validate_float], # the length of the legend lines
'legend.handletextpad' : [.8, validate_float], # the space between the legend line and legend text
'legend.borderaxespad' : [0.5, validate_float], # the border between the axes and legend edge
'legend.columnspacing' : [2., validate_float], # the border between the axes and legend edge
'legend.markerscale' : [1.0, validate_float], # the relative size of legend markers vs. original
# the following dimensions are in axes coords
'legend.labelsep' : [0.010, validate_float], # the vertical space between the legend entries
'legend.handlelen' : [0.05, validate_float], # the length of the legend lines
'legend.handletextsep' : [0.02, validate_float], # the space between the legend line and legend text
'legend.axespad' : [0.5, validate_float], # the border between the axes and legend edge
'legend.shadow' : [False, validate_bool],
# tick properties
'xtick.major.size' : [4, validate_float], # major xtick size in points
'xtick.minor.size' : [2, validate_float], # minor xtick size in points
'xtick.major.pad' : [4, validate_float], # distance to label in points
'xtick.minor.pad' : [4, validate_float], # distance to label in points
'xtick.color' : ['k', validate_color], # color of the xtick labels
'xtick.labelsize' : ['medium', validate_fontsize], # fontsize of the xtick labels
'xtick.direction' : ['in', str], # direction of xticks
'ytick.major.size' : [4, validate_float], # major ytick size in points
'ytick.minor.size' : [2, validate_float], # minor ytick size in points
'ytick.major.pad' : [4, validate_float], # distance to label in points
'ytick.minor.pad' : [4, validate_float], # distance to label in points
'ytick.color' : ['k', validate_color], # color of the ytick labels
'ytick.labelsize' : ['medium', validate_fontsize], # fontsize of the ytick labels
'ytick.direction' : ['in', str], # direction of yticks
'grid.color' : ['k', validate_color], # grid color
'grid.linestyle' : [':', str], # dotted
'grid.linewidth' : [0.5, validate_float], # in points
# figure props
# figure size in inches: width by height
'figure.figsize' : [ [8.0,6.0], validate_nseq_float(2)],
'figure.dpi' : [ 80, validate_float], # DPI
'figure.facecolor' : [ '0.75', validate_color], # facecolor; scalar gray
'figure.edgecolor' : [ 'w', validate_color], # edgecolor; white
'figure.autolayout' : [ False, validate_autolayout],
'figure.subplot.left' : [0.125, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.right' : [0.9, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.bottom' : [0.1, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.top' : [0.9, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.wspace' : [0.2, ValidateInterval(0, 1, closedmin=True, closedmax=False)],
'figure.subplot.hspace' : [0.2, ValidateInterval(0, 1, closedmin=True, closedmax=False)],
'savefig.dpi' : [100, validate_float], # DPI
'savefig.facecolor' : ['w', validate_color], # facecolor; white
'savefig.edgecolor' : ['w', validate_color], # edgecolor; white
'savefig.orientation' : ['portrait', validate_orientation], # edgecolor; white
'cairo.format' : ['png', validate_cairo_format],
'tk.window_focus' : [False, validate_bool], # Maintain shell focus for TkAgg
'tk.pythoninspect' : [False, validate_bool], # Set PYTHONINSPECT
'ps.papersize' : ['letter', validate_ps_papersize], # Set the papersize/type
'ps.useafm' : [False, validate_bool], # Set PYTHONINSPECT
'ps.usedistiller' : [False, validate_ps_distiller], # use ghostscript or xpdf to distill ps output
'ps.distiller.res' : [6000, validate_int], # dpi
'ps.fonttype' : [3, validate_fonttype], # 3 (Type3) or 42 (Truetype)
'pdf.compression' : [6, validate_int], # compression level from 0 to 9; 0 to disable
'pdf.inheritcolor' : [False, validate_bool], # ignore any color-setting commands from the frontend
'pdf.use14corefonts' : [False, validate_bool], # use only the 14 PDF core fonts
# embedded in every PDF viewing application
'pdf.fonttype' : [3, validate_fonttype], # 3 (Type3) or 42 (Truetype)
'svg.image_inline' : [True, validate_bool], # write raster image data directly into the svg file
'svg.image_noscale' : [False, validate_bool], # suppress scaling of raster data embedded in SVG
'svg.embed_char_paths' : [True, validate_bool], # True to save all characters as paths in the SVG
'docstring.hardcopy' : [False, validate_bool], # set this when you want to generate hardcopy docstring
'plugins.directory' : ['.matplotlib_plugins', str], # where plugin directory is locate
'path.simplify' : [False, validate_bool],
'agg.path.chunksize' : [0, validate_int] # 0 to disable chunking;
# recommend about 20000 to
# enable. Experimental.
}
if __name__ == '__main__':
rc = defaultParams
rc['datapath'][0] = '/'
for key in rc:
if not rc[key][1](rc[key][0]) == rc[key][0]:
print "%s: %s != %s"%(key, rc[key][1](rc[key][0]), rc[key][0])
| agpl-3.0 |
KarlTDebiec/Moldynplot | moldynplot/dataset/IREDTimeSeriesDataset.py | 2 | 7363 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# moldynplot.dataset.IREDTimeSeriesDataset.py
#
# Copyright (C) 2015-2017 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Represents iRED NMR relaxation data as a function of time and residue number
"""
################################### MODULES ###################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
if __name__ == "__main__":
__package__ = str("moldynplot.dataset")
import moldynplot.dataset
from IPython import embed
import h5py
import numpy as np
import pandas as pd
import six
from .IREDDataset import IREDDataset
from .TimeSeriesDataset import TimeSeriesDataset
from ..myplotspec.Dataset import Dataset
from ..myplotspec import sformat, wiprint
################################### CLASSES ###################################
class IREDTimeSeriesDataset(TimeSeriesDataset, IREDDataset):
"""
Represents iRED NMR relaxation data as a function of time and
residue number
"""
@staticmethod
def construct_argparser(parser_or_subparsers=None, **kwargs):
"""
Adds arguments to an existing argument parser, constructs a
subparser, or constructs a new parser
Arguments:
parser_or_subparsers (ArgumentParser, _SubParsersAction,
optional): If ArgumentParser, existing parser to which
arguments will be added; if _SubParsersAction, collection of
subparsers to which a new argument parser will be added; if
None, a new argument parser will be generated
kwargs (dict): Additional keyword arguments
Returns:
ArgumentParser: Argument parser or subparser
"""
import argparse
# Process arguments
help_message = """Process NMR relaxation data calculated from MD
simulation using the iRED method as implemented in cpptraj"""
if isinstance(parser_or_subparsers, argparse.ArgumentParser):
parser = parser_or_subparsers
elif isinstance(parser_or_subparsers, argparse._SubParsersAction):
parser = parser_or_subparsers.add_parser(name="ired",
description=help_message, help=help_message)
elif parser_or_subparsers is None:
parser = argparse.ArgumentParser(description=help_message)
# Defaults
if parser.get_default("cls") is None:
parser.set_defaults(cls=IREDTimeSeriesDataset)
# Arguments unique to this class
# Arguments inherited from superclass
IREDDataset.construct_argparser(parser)
TimeSeriesDataset.construct_argparser(parser)
return parser
@staticmethod
def concatenate_timeseries(timeseries_dfs=None, relax_dfs=None,
order_dfs=None, **kwargs):
"""
Concatenates a series of iRED datasets.
Arguments:
timeseries_dfs (list): DataFrames containing data from
timeseries infiles
relax_dfs (list): DataFrames containing data from relax infiles
order_dfs (list): DataFrames containing data from order infiles
verbose (int): Level of verbose output
kwargs (dict): Additional keyword arguments
Returns:
DataFrame: Concatenated DataFrame including relax and order
"""
# Process arguments
verbose = kwargs.get("verbose", 1)
# Process timeseries
if len(timeseries_dfs) >= 1:
if verbose >= 1:
wiprint("""Concatenating timeseries from {0} timeseries
infiles""".format(len(timeseries_dfs)))
timeseries_df = pd.concat(timeseries_dfs)
else:
timeseries_df = None
# Process relaxation
if len(relax_dfs) >= 1:
if verbose >= 1:
wiprint("""Concatenating timeseries from {0} relaxation
infiles""".format(len(relax_dfs)))
relax_df = pd.concat([rdf.stack() for rdf in relax_dfs],
axis=1).transpose()
else:
relax_df = None
# Process order parameters
if len(order_dfs) >= 1:
if verbose >= 1:
wiprint("""Concatenating timeseries from {0} order parameter
infiles""".format(len(order_dfs)))
order_df = pd.concat([odf.stack() for odf in order_dfs],
axis=1).transpose()
else:
order_df = None
# Merge and sort relaxation and order parameters
if relax_df is not None and order_df is not None:
df = pd.merge(relax_df, order_df, how="outer", left_index=True,
right_index=True)
df = df[sorted(list(set(df.columns.get_level_values(0))),
key=lambda x: int(x.split(":")[1]))]
elif relax_df is None and order_df is not None:
df = order_df
elif order_df is None and relax_df is not None:
df = relax_df
else:
df = None
# Append to existing timeseries_df
if timeseries_df is not None and df is not None:
df = pd.concat([timeseries_df, df])
elif df is None:
df = timeseries_df
return df
def read(self, **kwargs):
"""
Reads iRED time series data from one or more *infiles* into a
DataFrame.
"""
import re
from ..myplotspec import multi_pop_merged
# Process arguments
infile_args = multi_pop_merged(["infile", "infiles"], kwargs)
infiles = self.infiles = self.process_infiles(infiles=infile_args)
if len(infiles) == 0:
raise Exception(sformat("""No infiles found matching
'{0}'""".format(infile_args)))
re_h5 = re.compile(
r"^(?P<path>(.+)\.(h5|hdf5))((:)?(/)?(?P<address>.+))?$",
flags=re.UNICODE)
# Load data
timeseries_dfs = []
relax_dfs = []
order_dfs = []
for infile in infiles:
if re_h5.match(infile):
df = self._read_hdf5(infile, **kwargs)
else:
df = self._read_text(infile, **kwargs)
if df.columns.nlevels == 2:
timeseries_dfs.append(df)
else:
columns = df.columns.values
if "r1" in columns and "r2" in columns and "noe" in columns:
relax_dfs.append(df)
if "s2" in columns:
order_dfs.append(df)
if not ((
"r1" in columns and "r2" in columns and "noe"
in columns) or (
"s2" in columns)):
raise Exception(sformat("""DataFrame loaded from '{0}' does
not appear to contain either relaxation ('r1', 'r2',
'noe') or order parameter ('s2')
columns""".format(infile)))
# Concatenate into timeseries
df = self.concatenate_timeseries(timeseries_dfs, relax_dfs, order_dfs)
return df
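# -----------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original moldynplot module):
# exercises the static concatenation step on two tiny synthetic DataFrames.
# The residue labels ("15N:2", ...) and the relaxation/order column names are
# hypothetical but follow the conventions assumed by the methods above.
def _example_concatenate():
    """Concatenate one fake relaxation frame and one fake order frame."""
    residues = ["15N:2", "15N:3"]
    relax = pd.DataFrame([[1.2, 10.5, 0.78], [1.3, 11.0, 0.80]],
                         index=residues, columns=["r1", "r2", "noe"])
    order = pd.DataFrame([[0.85], [0.87]], index=residues, columns=["s2"])
    return IREDTimeSeriesDataset.concatenate_timeseries(
        timeseries_dfs=[], relax_dfs=[relax], order_dfs=[order])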
#################################### MAIN #####################################
if __name__ == "__main__":
IREDTimeSeriesDataset.main()
| bsd-3-clause |