| input | context | answers | dataset | instruction_template |
|---|---|---|---|---|
| stringlengths 11-655 | stringlengths 134-2.03M | listlengths 1-438 | stringclasses 3 values | stringclasses 3 values |
from __future__ import unicode_literals
import itsdangerous
import mock
import pytz
from api.base.settings.defaults import API_BASE
from api_tests import utils as api_utils
from framework.auth.core import Auth
from framework.sessions.model import Session
from nose.tools import * # flake8: noqa
from osf_tests.factories import (AuthUserFactory, CommentFactory,
ProjectFactory, UserFactory)
from tests.base import ApiTestCase, capture_signals
from website import settings as website_settings
from addons.osfstorage import settings as osfstorage_settings
from website.project.model import NodeLog
from website.project.signals import contributor_removed
# adapted from (inspired by) DRF rest_framework.fields.DateTimeField.to_representation
def _dt_to_iso8601(value):
iso8601 = value.isoformat()
if iso8601.endswith('+00:00'):
iso8601 = iso8601[:-9] + 'Z'  # drop the '+00:00' offset plus the last three microsecond digits (assumes microsecond precision)
return iso8601
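# Hedged example, not part of the original tests (timestamp value is illustrative):
#     _dt_to_iso8601(datetime.datetime(2017, 1, 2, 3, 4, 5, 678000, tzinfo=pytz.utc))
#     # -> '2017-01-02T03:04:05.678Z'
# isoformat() yields '2017-01-02T03:04:05.678000+00:00'; slicing off the last nine
# characters removes the '+00:00' offset and the final three microsecond digits,
# leaving millisecond precision with a trailing 'Z', matching DRF's rendering.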
class TestFileView(ApiTestCase):
def setUp(self):
super(TestFileView, self).setUp()
self.user = AuthUserFactory()
self.node = ProjectFactory(creator=self.user, comment_level='public')
self.file = api_utils.create_test_file(self.node, self.user, create_guid=False)
self.file_url = '/{}files/{}/'.format(API_BASE, self.file._id)
def test_must_have_auth(self):
res = self.app.get(self.file_url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_must_be_contributor(self):
user = AuthUserFactory()
res = self.app.get(self.file_url, auth=user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_unvisited_file_has_no_guid(self):
res = self.app.get(self.file_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['attributes']['guid'], None)
def test_visited_file_has_guid(self):
guid = self.file.get_guid(create=True)
res = self.app.get(self.file_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_is_not_none(guid)
assert_equal(res.json['data']['attributes']['guid'], guid._id)
@mock.patch('api.base.throttling.CreateGuidThrottle.allow_request')
def test_file_guid_not_created_with_basic_auth(self, mock_allow):
res = self.app.get(self.file_url + '?create_guid=1', auth=self.user.auth)
guid = res.json['data']['attributes'].get('guid', None)
assert_equal(res.status_code, 200)
assert_equal(mock_allow.call_count, 1)
assert guid is None
@mock.patch('api.base.throttling.CreateGuidThrottle.allow_request')
def test_file_guid_created_with_cookie(self, mock_allow):
session = Session(data={'auth_user_id': self.user._id})
session.save()
cookie = itsdangerous.Signer(website_settings.SECRET_KEY).sign(session._id)
self.app.set_cookie(website_settings.COOKIE_NAME, str(cookie))
res = self.app.get(self.file_url + '?create_guid=1', auth=self.user.auth)
self.app.reset() # clear cookie
assert_equal(res.status_code, 200)
guid = res.json['data']['attributes'].get('guid', None)
assert_is_not_none(guid)
assert_equal(guid, self.file.get_guid()._id)
assert_equal(mock_allow.call_count, 1)
def test_get_file(self):
res = self.app.get(self.file_url, auth=self.user.auth)
self.file.versions.last().reload()
assert_equal(res.status_code, 200)
assert_equal(res.json.keys(), ['data'])
attributes = res.json['data']['attributes']
assert_equal(attributes['path'], self.file.path)
assert_equal(attributes['kind'], self.file.kind)
assert_equal(attributes['name'], self.file.name)
assert_equal(attributes['materialized_path'], self.file.materialized_path)
assert_equal(attributes['last_touched'], None)
assert_equal(attributes['provider'], self.file.provider)
assert_equal(attributes['size'], self.file.versions.last().size)
assert_equal(attributes['current_version'], len(self.file.history))
assert_equal(attributes['date_modified'], _dt_to_iso8601(self.file.versions.last().date_created.replace(tzinfo=pytz.utc)))
assert_equal(attributes['date_created'], _dt_to_iso8601(self.file.versions.first().date_created.replace(tzinfo=pytz.utc)))
assert_equal(attributes['extra']['hashes']['md5'], None)
assert_equal(attributes['extra']['hashes']['sha256'], None)
assert_equal(attributes['tags'], [])
def test_file_has_rel_link_to_owning_project(self):
res = self.app.get(self.file_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_in('node', res.json['data']['relationships'].keys())
expected_url = self.node.api_v2_url
actual_url = res.json['data']['relationships']['node']['links']['related']['href']
assert_in(expected_url, actual_url)
def test_file_has_comments_link(self):
self.file.get_guid(create=True)
res = self.app.get(self.file_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_in('comments', res.json['data']['relationships'].keys())
url = res.json['data']['relationships']['comments']['links']['related']['href']
assert_equal(self.app.get(url, auth=self.user.auth).status_code, 200)
assert_equal(res.json['data']['type'], 'files')
def test_file_has_correct_unread_comments_count(self):
contributor = AuthUserFactory()
self.node.add_contributor(contributor, auth=Auth(self.user), save=True)
comment = CommentFactory(node=self.node, target=self.file.get_guid(create=True), user=contributor, page='files')
res = self.app.get('/{}files/{}/?related_counts=True'.format(API_BASE, self.file._id), auth=self.user.auth)
assert_equal(res.status_code, 200)
unread_comments = res.json['data']['relationships']['comments']['links']['related']['meta']['unread']
assert_equal(unread_comments, 1)
def test_only_project_contrib_can_comment_on_closed_project(self):
self.node.comment_level = 'private'
self.node.is_public = True
self.node.save()
res = self.app.get(self.file_url, auth=self.user.auth)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, True)
non_contributor = AuthUserFactory()
res = self.app.get(self.file_url, auth=non_contributor.auth)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, False)
def test_any_loggedin_user_can_comment_on_open_project(self):
self.node.is_public = True
self.node.save()
non_contributor = AuthUserFactory()
res = self.app.get(self.file_url, auth=non_contributor.auth)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, True)
def test_non_logged_in_user_cant_comment(self):
self.node.is_public = True
self.node.save()
res = self.app.get(self.file_url)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, False)
def test_checkout(self):
assert_equal(self.file.checkout, None)
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth
)
self.file.reload()
self.file.save()
self.node.reload()
assert_equal(res.status_code, 200)
assert_equal(self.file.checkout, self.user)
res = self.app.get(
self.file_url,
auth=self.user.auth
)
assert_equal(self.node.logs.count(), 2)
assert_equal(self.node.logs.latest().action, NodeLog.CHECKED_OUT)
assert_equal(self.node.logs.latest().user, self.user)
assert_equal(
self.user._id,
res.json['data']['relationships']['checkout']['links']['related']['meta']['id']
)
assert_in(
'/{}users/{}/'.format(API_BASE, self.user._id),
res.json['data']['relationships']['checkout']['links']['related']['href']
)
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': None}}},
auth=self.user.auth
)
self.file.reload()
assert_equal(self.file.checkout, None)
assert_equal(res.status_code, 200)
def test_checkout_file_no_type(self):
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth, expect_errors=True
)
assert_equal(res.status_code, 400)
def test_checkout_file_no_id(self):
res = self.app.put_json_api(
self.file_url,
{'data': {'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth, expect_errors=True
)
assert_equal(res.status_code, 400)
def test_checkout_file_incorrect_type(self):
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'Wrong type.', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth, expect_errors=True
)
assert_equal(res.status_code, 409)
def test_checkout_file_incorrect_id(self):
res = self.app.put_json_api(
self.file_url,
{'data': {'id': '12345', 'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth, expect_errors=True
)
assert_equal(res.status_code, 409)
def test_checkout_file_no_attributes(self):
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files'}},
auth=self.user.auth, expect_errors=True
)
assert_equal(res.status_code, 400)
def test_must_set_self(self):
user = UserFactory()
assert_equal(self.file.checkout, None)
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': user._id}}},
auth=self.user.auth,
expect_errors=True,
)
self.file.reload()
assert_equal(res.status_code, 400)
assert_equal(self.file.checkout, None)
def test_must_be_self(self):
user = AuthUserFactory()
self.file.checkout = self.user
self.file.save()
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': user._id}}},
auth=user.auth,
expect_errors=True,
)
self.file.reload()
assert_equal(res.status_code, 403)
assert_equal(self.file.checkout, self.user)
def test_admin_can_checkin(self):
user = UserFactory()
self.node.add_contributor(user)
self.file.checkout = user
self.file.save()
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': None}}},
auth=self.user.auth,
expect_errors=True,
)
self.file.reload()
self.node.reload()
assert_equal(res.status_code, 200)
assert_equal(self.file.checkout, None)
assert_equal(self.node.logs.latest().action, NodeLog.CHECKED_IN)
assert_equal(self.node.logs.latest().user, self.user)
def test_admin_can_checkout(self):
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth,
expect_errors=True,
)
self.file.reload()
self.node.reload()
assert_equal(res.status_code, 200)
assert_equal(self.file.checkout, self.user)
assert_equal(self.node.logs.latest().action, NodeLog.CHECKED_OUT)
assert_equal(self.node.logs.latest().user, self.user)
def test_cannot_checkin_when_already_checked_in(self):
count = self.node.logs.count()
assert_false(self.file.is_checked_out)
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': None}}},
auth=self.user.auth,
expect_errors=True,
)
self.file.reload()
self.node.reload()
assert_equal(res.status_code, 200)
assert_equal(self.node.logs.count(), count)
assert_equal(self.file.checkout, None)
def test_cannot_checkout_when_checked_out(self):
user = UserFactory()
self.node.add_contributor(user)
self.file.checkout = user
self.file.save()
count = self.node.logs.count()
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth,
expect_errors=True,
)
self.file.reload()
self.node.reload()
assert_equal(res.status_code, 200)
assert_equal(self.file.checkout, user)
assert_equal(self.node.logs.count(), count)
def test_noncontrib_cannot_checkout(self):
user = AuthUserFactory()
assert_equal(self.file.checkout, None)
assert_false(self.node.has_permission(user, 'read'))
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=user.auth,
expect_errors=True,
)
self.file.reload()
self.node.reload()
assert_equal(res.status_code, 403)
assert_equal(self.file.checkout, None)
assert self.node.logs.latest().action != NodeLog.CHECKED_OUT
def test_read_contrib_cannot_checkout(self):
user = AuthUserFactory()
self.node.add_contributor(user, permissions=['read'])
self.node.save()
assert_false(self.node.can_edit(user=user))
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': None}}},
auth=user.auth,
expect_errors=True
)
self.file.reload()
assert_equal(res.status_code, 403)
assert_equal(self.file.checkout, None)
assert self.node.logs.latest().action != NodeLog.CHECKED_OUT
def test_user_can_checkin(self):
user = AuthUserFactory()
self.node.add_contributor(user, permissions=['read', 'write'])
self.node.save()
assert_true(self.node.can_edit(user=user))
self.file.checkout = user
self.file.save()
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': None}}},
auth=user.auth,
)
self.file.reload()
assert_equal(res.status_code, 200)
assert_equal(self.file.checkout, None)
def test_removed_contrib_files_checked_in(self):
user = AuthUserFactory()
self.node.add_contributor(user, permissions=['read', 'write'])
self.node.save()
assert_true(self.node.can_edit(user=user))
self.file.checkout = user
self.file.save()
assert_true(self.file.is_checked_out)
with capture_signals() as mock_signals:
self.node.remove_contributor(user, auth=Auth(user))
assert_equal(mock_signals.signals_sent(), set([contributor_removed]))
self.file.reload()
assert_false(self.file.is_checked_out)
def test_must_be_osfstorage(self):
self.file.provider = 'github'
self.file.save()
res = self.app.put_json_api(
self.file_url,
{'data': {'id': self.file._id, 'type': 'files', 'attributes': {'checkout': self.user._id}}},
auth=self.user.auth,
expect_errors=True,
)
assert_equal(res.status_code, 403)
def test_get_file_resolves_guids(self):
guid = self.file.get_guid(create=True)
url = '/{}files/{}/'.format(API_BASE, guid._id)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json.keys(), ['data'])
assert_equal(res.json['data']['attributes']['path'], self.file.path)
def test_get_file_invalid_guid_gives_404(self):
url = '/{}files/{}/'.format(API_BASE, 'asdasasd')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_get_file_non_file_guid_gives_404(self):
url = '/{}files/{}/'.format(API_BASE, self.node._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_current_version_is_equal_to_length_of_history(self):
res = self.app.get(self.file_url, auth=self.user.auth)
assert_equal(res.json['data']['attributes']['current_version'], 1)
for version in range(2, 4):
self.file.create_version(self.user, {
'object': '06d80e' + str(version),
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {'size': 1337,
'contentType': 'img/png'
}).save()
res = self.app.get(self.file_url, auth=self.user.auth)
assert_equal(res.json['data']['attributes']['current_version'], version)
class TestFileVersionView(ApiTestCase):
def setUp(self):
super(TestFileVersionView, self).setUp()
self.user = AuthUserFactory()
self.node = ProjectFactory(creator=self.user)
self.osfstorage = self.node.get_addon('osfstorage')
self.root_node = self.osfstorage.get_root()
self.file = self.root_node.append_file('test_file')
self.file.create_version(self.user, {
'object': '06d80e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1337,
'contentType': 'img/png'
}).save()
def test_listing(self):
self.file.create_version(self.user, {
'object': '0683m38e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1347,
'contentType': 'img/png'
}).save()
res = self.app.get(
'/{}files/{}/versions/'.format(API_BASE, self.file._id),
auth=self.user.auth,
)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 2)
assert_equal(res.json['data'][0]['id'], '1')
assert_equal(res.json['data'][1]['id'], '2')
def test_by_id(self):
res = self.app.get(
'/{}files/{}/versions/1/'.format(API_BASE, self.file._id),
auth=self.user.auth,
)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], '1')
def test_read_only(self):
assert_equal(self.app.put(
'/{}files/{}/versions/1/'.format(API_BASE, self.file._id),
expect_errors=True,
auth=self.user.auth,
).status_code, 405)
assert_equal(self.app.post(
'/{}files/{}/versions/1/'.format(API_BASE, self.file._id),
expect_errors=True,
auth=self.user.auth,
).status_code, 405)
| answers: [ "        assert_equal(self.app.delete(" ] | dataset: lcc | instruction_template: Please complete the code given below. {context} Next line of code: |
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetches album art.
"""
from __future__ import division, absolute_import, print_function
from contextlib import closing
import os
import re
from tempfile import NamedTemporaryFile
import requests
from beets import plugins
from beets import importer
from beets import ui
from beets import util
from beets import config
from mediafile import image_mime_type
from beets.util.artresizer import ArtResizer
from beets.util import sorted_walk
from beets.util import syspath, bytestring_path, py3_path
import confuse
import six
CONTENT_TYPES = {
'image/jpeg': [b'jpg', b'jpeg'],
'image/png': [b'png']
}
IMAGE_EXTENSIONS = [ext for exts in CONTENT_TYPES.values() for ext in exts]
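# For illustration: with the mapping above, IMAGE_EXTENSIONS contains
# [b'jpg', b'jpeg', b'png'] (exact ordering depends on dict iteration order on
# older Python versions).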
class Candidate(object):
"""Holds information about a matching artwork, deals with validation of
dimension restrictions and resizing.
"""
CANDIDATE_BAD = 0
CANDIDATE_EXACT = 1
CANDIDATE_DOWNSCALE = 2
MATCH_EXACT = 0
MATCH_FALLBACK = 1
def __init__(self, log, path=None, url=None, source=u'',
match=None, size=None):
self._log = log
self.path = path
self.url = url
self.source = source
self.check = None
self.match = match
self.size = size
def _validate(self, plugin):
"""Determine whether the candidate artwork is valid based on
its dimensions (width and ratio).
Return `CANDIDATE_BAD` if the file is unusable.
Return `CANDIDATE_EXACT` if the file is usable as-is.
Return `CANDIDATE_DOWNSCALE` if the file must be resized.
"""
if not self.path:
return self.CANDIDATE_BAD
if not (plugin.enforce_ratio or plugin.minwidth or plugin.maxwidth):
return self.CANDIDATE_EXACT
# get_size returns None if no local imaging backend is available
if not self.size:
self.size = ArtResizer.shared.get_size(self.path)
self._log.debug(u'image size: {}', self.size)
if not self.size:
self._log.warning(u'Could not get size of image (please see '
u'documentation for dependencies). '
u'The configuration options `minwidth` and '
u'`enforce_ratio` may be violated.')
return self.CANDIDATE_EXACT
short_edge = min(self.size)
long_edge = max(self.size)
# Check minimum size.
if plugin.minwidth and self.size[0] < plugin.minwidth:
self._log.debug(u'image too small ({} < {})',
self.size[0], plugin.minwidth)
return self.CANDIDATE_BAD
# Check aspect ratio.
edge_diff = long_edge - short_edge
if plugin.enforce_ratio:
if plugin.margin_px:
if edge_diff > plugin.margin_px:
self._log.debug(u'image is not close enough to being '
u'square, ({} - {} > {})',
long_edge, short_edge, plugin.margin_px)
return self.CANDIDATE_BAD
elif plugin.margin_percent:
margin_px = plugin.margin_percent * long_edge
if edge_diff > margin_px:
self._log.debug(u'image is not close enough to being '
u'square, ({} - {} > {})',
long_edge, short_edge, margin_px)
return self.CANDIDATE_BAD
elif edge_diff:
# also reached for margin_px == 0 and margin_percent == 0.0
self._log.debug(u'image is not square ({} != {})',
self.size[0], self.size[1])
return self.CANDIDATE_BAD
# Check maximum size.
if plugin.maxwidth and self.size[0] > plugin.maxwidth:
self._log.debug(u'image needs resizing ({} > {})',
self.size[0], plugin.maxwidth)
return self.CANDIDATE_DOWNSCALE
return self.CANDIDATE_EXACT
def validate(self, plugin):
self.check = self._validate(plugin)
return self.check
def resize(self, plugin):
if plugin.maxwidth and self.check == self.CANDIDATE_DOWNSCALE:
self.path = ArtResizer.shared.resize(plugin.maxwidth, self.path)
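# Hedged usage sketch, not part of the original module (`plugin` stands for the
# fetchart plugin instance that carries the minwidth/maxwidth/enforce_ratio options):
#     cand = Candidate(log, path=b'/tmp/cover.jpg')
#     if cand.validate(plugin) != Candidate.CANDIDATE_BAD:
#         cand.resize(plugin)  # only downscales when validate() returned CANDIDATE_DOWNSCALE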
def _logged_get(log, *args, **kwargs):
"""Like `requests.get`, but logs the effective URL to the specified
`log` at the `DEBUG` level.
Use the optional `message` parameter to specify what to log before
the URL. By default, the string is "getting URL".
Also sets the User-Agent header to indicate beets.
"""
# Use some arguments with the `send` call but most with the
# `Request` construction. This is a cheap, magic-filled way to
# emulate `requests.get` or, more pertinently,
# `requests.Session.request`.
req_kwargs = kwargs
send_kwargs = {}
for arg in ('stream', 'verify', 'proxies', 'cert', 'timeout'):
if arg in kwargs:
send_kwargs[arg] = req_kwargs.pop(arg)
# Our special logging message parameter.
if 'message' in kwargs:
message = kwargs.pop('message')
else:
message = 'getting URL'
req = requests.Request('GET', *args, **req_kwargs)
with requests.Session() as s:
s.headers = {'User-Agent': 'beets'}
prepped = s.prepare_request(req)
log.debug('{}: {}', message, prepped.url)
return s.send(prepped, **send_kwargs)
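# Hedged usage sketch (the URL and keyword arguments are illustrative, not from
# the original code):
#     resp = _logged_get(log, 'https://example.com/cover.jpg',
#                        stream=True, timeout=10, message=u'downloading image')
# This logs "downloading image: https://example.com/cover.jpg" at DEBUG, sends the
# request with a 'beets' User-Agent, and returns a streaming requests.Response.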
class RequestMixin(object):
"""Adds a Requests wrapper to the class that uses the logger, which
must be named `self._log`.
"""
def request(self, *args, **kwargs):
"""Like `requests.get`, but uses the logger `self._log`.
See also `_logged_get`.
"""
return _logged_get(self._log, *args, **kwargs)
# ART SOURCES ################################################################
class ArtSource(RequestMixin):
VALID_MATCHING_CRITERIA = ['default']
def __init__(self, log, config, match_by=None):
self._log = log
self._config = config
self.match_by = match_by or self.VALID_MATCHING_CRITERIA
def get(self, album, plugin, paths):
raise NotImplementedError()
def _candidate(self, **kwargs):
return Candidate(source=self, log=self._log, **kwargs)
def fetch_image(self, candidate, plugin):
raise NotImplementedError()
class LocalArtSource(ArtSource):
IS_LOCAL = True
LOC_STR = u'local'
def fetch_image(self, candidate, plugin):
pass
class RemoteArtSource(ArtSource):
IS_LOCAL = False
LOC_STR = u'remote'
def fetch_image(self, candidate, plugin):
"""Downloads an image from a URL and checks whether it seems to
actually be an image. If so, returns a path to the downloaded image.
Otherwise, returns None.
"""
if plugin.maxwidth:
candidate.url = ArtResizer.shared.proxy_url(plugin.maxwidth,
candidate.url)
try:
with closing(self.request(candidate.url, stream=True,
message=u'downloading image')) as resp:
ct = resp.headers.get('Content-Type', None)
# Download the image to a temporary file. As some servers
# (notably fanart.tv) have proven to return wrong Content-Types
# when images were uploaded with a bad file extension, do not
# rely on it. Instead validate the type using the file magic
# and only then determine the extension.
data = resp.iter_content(chunk_size=1024)
header = b''
for chunk in data:
header += chunk
if len(header) >= 32:
# The imghdr module will only read 32 bytes, and our
# own additions in mediafile even less.
break
else:
# server didn't return enough data, i.e. corrupt image
return
real_ct = image_mime_type(header)
if real_ct is None:
# detection by file magic failed, fall back to the
# server-supplied Content-Type
# Is our type detection failsafe enough to drop this?
real_ct = ct
if real_ct not in CONTENT_TYPES:
self._log.debug(u'not a supported image: {}',
real_ct or u'unknown content type')
return
ext = b'.' + CONTENT_TYPES[real_ct][0]
if real_ct != ct:
self._log.warning(u'Server specified {}, but returned a '
u'{} image. Correcting the extension '
u'to {}',
ct, real_ct, ext)
suffix = py3_path(ext)
with NamedTemporaryFile(suffix=suffix, delete=False) as fh:
# write the first already loaded part of the image
fh.write(header)
# download the remaining part of the image
for chunk in data:
fh.write(chunk)
self._log.debug(u'downloaded art to: {0}',
util.displayable_path(fh.name))
candidate.path = util.bytestring_path(fh.name)
return
except (IOError, requests.RequestException, TypeError) as exc:
# Handling TypeError works around a urllib3 bug:
# https://github.com/shazow/urllib3/issues/556
self._log.debug(u'error fetching art: {}', exc)
return
class CoverArtArchive(RemoteArtSource):
NAME = u"Cover Art Archive"
VALID_MATCHING_CRITERIA = ['release', 'releasegroup']
if util.SNI_SUPPORTED:
URL = 'https://coverartarchive.org/release/{mbid}/front'
GROUP_URL = 'https://coverartarchive.org/release-group/{mbid}/front'
else:
URL = 'http://coverartarchive.org/release/{mbid}/front'
GROUP_URL = 'http://coverartarchive.org/release-group/{mbid}/front'
def get(self, album, plugin, paths):
"""Return the Cover Art Archive and Cover Art Archive release group URLs
using album MusicBrainz release ID and release group ID.
"""
if 'release' in self.match_by and album.mb_albumid:
yield self._candidate(url=self.URL.format(mbid=album.mb_albumid),
match=Candidate.MATCH_EXACT)
if 'releasegroup' in self.match_by and album.mb_releasegroupid:
yield self._candidate(
url=self.GROUP_URL.format(mbid=album.mb_releasegroupid),
match=Candidate.MATCH_FALLBACK)
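# For illustration (the MBID below is made up): a release id of
# '76df3287-6cda-33eb-8e9a-044b5e15ffdd' yields one MATCH_EXACT candidate whose URL is
# 'https://coverartarchive.org/release/76df3287-6cda-33eb-8e9a-044b5e15ffdd/front'
# (or the http:// variant when SNI is unavailable).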
class Amazon(RemoteArtSource):
NAME = u"Amazon"
if util.SNI_SUPPORTED:
URL = 'https://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg'
else:
URL = 'http://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg'
INDICES = (1, 2)
def get(self, album, plugin, paths):
"""Generate URLs using Amazon ID (ASIN) string. | [
" \"\"\""
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines classes that represent parts of the common wave model.
Defines the core data structures for the common wave model. At this level,
models are read-only but can be modified through operations.
"""
__author__ = '[email protected] (David Byttow)'
import logging
import document
import util
ROOT_WAVELET_ID_SUFFIX = '!conv+root'
class Wave(object):
"""Models a single wave instance.
A single wave is composed of its id and any wavelet ids that belong to it.
Attributes:
waveId: This wave's id.
waveletIds: Set of wavelet id's on this wave.
"""
def __init__(self, json):
"""Inits this wave with JSON data.
Args:
json: JSON data from Wave server.
Attributes:
raw_data: Dictionary of incoming raw JSON data.
waveId: Wave id of this wave.
waveletIds: Set of wavelet ids on this wave.
"""
self.waveId = json.get('waveId')
self.waveletIds = set(json.get('waveletIds', []))
self.raw_data = json
def GetId(self):
"""Returns this wave's id."""
return self.waveId
def GetWaveletIds(self):
"""Returns a set of wavelet ids."""
return self.waveletIds
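# Hedged example of the JSON shape this model consumes (field values are illustrative):
#     wave = Wave({'waveId': 'example.com!w+abc123',
#                  'waveletIds': ['example.com!conv+root']})
#     wave.GetId()          # -> 'example.com!w+abc123'
#     wave.GetWaveletIds()  # -> set(['example.com!conv+root'])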
class Wavelet(object):
"""Models a single wavelet instance.
A single wavelet is composed of metadata, participants and the blips it
contains.
Attributes:
creator: Participant id of the creator of this wavelet.
creationTime: Time this wavelet was created on the server.
dataDocuments: Dictionary of data documents.
lastModifiedTime: Time this wavelet was last modified.
participants: Set of participants on this wavelet.
raw_data: Dictionary of incoming raw JSON data.
rootBlipId: Id of the root blip.
waveId: Id of the parent wave.
waveletId: This wavelet's id.
"""
def __init__(self, json):
"""Inits this wavelet with JSON data.
Args:
json: JSON data from Wave server.
"""
self.creator = json.get('creator')
self.creationTime = json.get('creationTime', 0)
self.dataDocuments = json.get('dataDocuments', {})
self.lastModifiedTime = json.get('lastModifiedTime')
self.participants = set(json.get('participants', []))
self.rootBlipId = json.get('rootBlipId')
self.title = json.get('title', '')
self.waveId = json.get('waveId')
self.waveletId = json.get('waveletId')
self.raw_data = json
def GetCreator(self):
"""Returns the participant id of the creator of this wavelet."""
return self.creator
def GetCreationTime(self):
"""Returns the time that this wavelet was first created in milliseconds."""
return self.creationTime
def GetDataDocument(self, name, default=None):
"""Returns a data document for this wavelet based on key name."""
if self.dataDocuments:
return self.dataDocuments.get(name, default)
return default
def GetId(self):
"""Returns this wavelet's id."""
return self.waveletId
def GetLastModifiedTime(self):
"""Returns the time that this wavelet was last modified in ms."""
return self.lastModifiedTime
def GetParticipants(self):
"""Returns a set of participants on this wavelet."""
return self.participants
def GetRootBlipId(self):
"""Returns this wavelet's root blip id."""
return self.rootBlipId
def GetTitle(self):
| answers: [ "    \"\"\"Returns the title of this wavelet.\"\"\"" ] | dataset: lcc | instruction_template: Please complete the code given below. {context} Next line of code: |
# testing/requirements.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Global database feature support policy.
Provides decorators to mark tests requiring specific feature support from the
target database.
External dialect test suites should subclass SuiteRequirements
to provide specific inclusion/exclusions.
"""
import sys
from . import exclusions
from .. import util
class Requirements(object):
pass
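# Hedged sketch of how an external dialect's test suite typically overrides these
# rules, per the module docstring above (the property chosen and its behavior are
# illustrative, not prescriptive):
#
#     class Requirements(SuiteRequirements):
#         @property
#         def sequences(self):
#             # this hypothetical backend has no SEQUENCE support
#             return exclusions.closed()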
class SuiteRequirements(Requirements):
@property
def create_table(self):
"""target platform can emit basic CreateTable DDL."""
return exclusions.open()
@property
def drop_table(self):
"""target platform can emit basic DropTable DDL."""
return exclusions.open()
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return exclusions.open()
@property
def on_update_cascade(self):
""""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.open()
@property
def non_updating_cascade(self):
"""target database must *not* support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.closed()
@property
def deferrable_fks(self):
return exclusions.closed()
@property
def on_update_or_deferrable_fks(self):
# TODO: exclusions should be composable,
# somehow only_if([x, y]) isn't working here, negation/conjunctions
# getting confused.
return exclusions.only_if(
lambda: self.on_update_cascade.enabled or
self.deferrable_fks.enabled
)
@property
def self_referential_foreign_keys(self):
"""Target database must support self-referential foreign keys."""
return exclusions.open()
@property
def foreign_key_ddl(self):
"""Target database must support the DDL phrases for FOREIGN KEY."""
return exclusions.open()
@property
def named_constraints(self):
"""target database must support names for constraints."""
return exclusions.open()
@property
def subqueries(self):
"""Target database must support subqueries."""
return exclusions.open()
@property
def offset(self):
"""target database can render OFFSET, or an equivalent, in a
SELECT.
"""
return exclusions.open()
@property
def bound_limit_offset(self):
"""target database can render LIMIT and/or OFFSET using a bound
parameter
"""
return exclusions.open()
@property
def parens_in_union_contained_select_w_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when LIMIT/OFFSET is specifically present.
E.g. (SELECT ...) UNION (SELECT ..)
This is known to fail on SQLite.
"""
return exclusions.open()
@property
def parens_in_union_contained_select_wo_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when OFFSET/LIMIT is specifically not present.
E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..)
This is known to fail on SQLite. It also fails on Oracle
because without LIMIT/OFFSET, there is currently no step that
creates an additional subquery.
"""
return exclusions.open()
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return exclusions.closed()
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return exclusions.closed()
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column expressions
without being in the context of a typed column.
"""
return exclusions.closed()
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return exclusions.closed()
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return exclusions.closed()
@property
def window_functions(self):
"""Target database must support window functions."""
return exclusions.closed()
@property
def autoincrement_insert(self):
"""target platform generates new surrogate integer primary key values
when insert() is executed, excluding the pk column."""
return exclusions.open()
@property
def fetch_rows_post_commit(self):
"""target platform will allow cursor.fetchone() to proceed after a
COMMIT.
Typically this refers to an INSERT statement with RETURNING which
is invoked within "autocommit". If the row can be returned
after the autocommit, then this rule can be open.
"""
return exclusions.open()
@property
def empty_inserts(self):
"""target platform supports INSERT with no values, i.e.
INSERT DEFAULT VALUES or equivalent."""
return exclusions.only_if(
lambda config: config.db.dialect.supports_empty_insert or
config.db.dialect.supports_default_values,
"empty inserts not supported"
)
@property
def insert_from_select(self):
"""target platform supports INSERT from a SELECT."""
return exclusions.open()
@property
def returning(self):
"""target platform supports RETURNING."""
return exclusions.only_if(
lambda config: config.db.dialect.implicit_returning,
"%(database)s %(does_support)s 'returning'"
)
@property
def duplicate_names_in_cursor_description(self):
"""target platform supports a SELECT statement that has
the same name repeated more than once in the columns list."""
return exclusions.open()
@property
def denormalized_names(self):
"""Target database must have 'denormalized', i.e.
UPPERCASE as case insensitive names."""
return exclusions.skip_if(
lambda config: not config.db.dialect.requires_name_normalize,
"Backend does not require denormalized names."
)
@property
def multivalues_inserts(self):
"""target database must support multiple VALUES clauses in an
INSERT statement."""
return exclusions.skip_if(
lambda config: not config.db.dialect.supports_multivalues_insert,
"Backend does not support multirow inserts."
)
@property
def implements_get_lastrowid(self):
""""target dialect implements the executioncontext.get_lastrowid()
method without reliance on RETURNING.
"""
return exclusions.open()
@property
def emulated_lastrowid(self):
""""target dialect retrieves cursor.lastrowid, or fetches
from a database-side function after an insert() construct executes,
within the get_lastrowid() method.
Only dialects that "pre-execute", or need RETURNING to get last
inserted id, would return closed/fail/skip for this.
"""
return exclusions.closed()
@property
def dbapi_lastrowid(self):
""""target platform includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return exclusions.closed()
@property
def views(self):
"""Target database must support VIEWs."""
return exclusions.closed()
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return exclusions.closed()
@property
def sequences(self):
"""Target database must support SEQUENCEs."""
return exclusions.only_if([
lambda config: config.db.dialect.supports_sequences
], "no sequence support")
@property
def sequences_optional(self):
"""Target database supports sequences, but also optionally
as a means of generating new PK values."""
return exclusions.only_if([
lambda config: config.db.dialect.supports_sequences and
config.db.dialect.sequences_optional
], "no sequence support, or sequences not optional")
@property
def reflects_pk_names(self):
return exclusions.closed()
@property
def table_reflection(self):
return exclusions.open()
@property
def view_column_reflection(self):
"""target database must support retrieval of the columns in a view,
similarly to how a table is inspected.
This does not include the full CREATE VIEW definition.
"""
return self.views
@property
def view_reflection(self):
"""target database must support inspection of the full CREATE VIEW definition.
"""
return self.views
@property
def schema_reflection(self):
return self.schemas
@property
def primary_key_constraint_reflection(self):
return exclusions.open()
@property
def foreign_key_constraint_reflection(self):
return exclusions.open()
@property
def foreign_key_constraint_option_reflection(self):
return exclusions.closed()
@property
def temp_table_reflection(self):
return exclusions.open()
@property
def temp_table_names(self):
"""target dialect supports listing of temporary table names"""
return exclusions.closed()
@property
def temporary_tables(self):
"""target database supports temporary tables"""
return exclusions.open()
@property
def temporary_views(self):
"""target database supports temporary views"""
return exclusions.closed()
@property
def index_reflection(self):
return exclusions.open()
@property
def unique_constraint_reflection(self):
"""target dialect supports reflection of unique constraints"""
return exclusions.open()
@property
def duplicate_key_raises_integrity_error(self):
"""target dialect raises IntegrityError when reporting an INSERT
with a primary key violation. (hint: it should)
"""
return exclusions.open()
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return exclusions.open()
@property
def unicode_data(self):
"""Target database/dialect must support Python unicode objects with
non-ASCII characters represented, delivered as bound parameters
as well as in result rows.
"""
return exclusions.open()
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol
names.
"""
return exclusions.closed()
@property
def datetime_literals(self):
"""target dialect supports rendering of a date, time, or datetime as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return exclusions.closed()
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return exclusions.open()
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_coerces_from_datetime(self):
"""target dialect accepts a datetime object as the target
of a date column."""
return exclusions.open()
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return exclusions.open()
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return exclusions.open()
@property
def binary_comparisons(self):
"""target database/driver can allow BLOB/BINARY fields to be compared
against a bound parameter value.
"""
return exclusions.open()
@property
def binary_literals(self):
"""target backend supports simple binary literals, e.g. an
expression like::
SELECT CAST('foo' AS BINARY)
Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
e.g. it could be ``BLOB`` or similar.
Basically fails on Oracle.
"""
return exclusions.open()
@property
def json_type(self):
"""target platform implements a native JSON type."""
return exclusions.closed()
@property
def json_array_indexes(self):
""""target platform supports numeric array indexes
within a JSON structure"""
return self.json_type
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
return exclusions.closed()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return exclusions.closed()
@property
def precision_numerics_many_significant_digits(self):
"""target backend supports values with many digits on both sides,
such as 319438950232418390.273596, 87673.594069654243
"""
return exclusions.closed()
@property
def precision_numerics_retains_significant_digits(self):
"""A precision numeric type will return empty significant digits,
i.e. a value such as 10.000 will come back in Decimal form with
the .000 maintained."""
return exclusions.closed()
@property
def precision_generic_float_type(self):
"""target backend will return native floating point numbers with at
least seven decimal places when using the generic Float type.
"""
return exclusions.open()
@property
def floats_to_four_decimals(self):
"""target backend can return a floating-point number with four
significant digits (such as 15.7563) accurately
(i.e. without FP inaccuracies, such as 15.75629997253418).
"""
return exclusions.open()
@property
def fetch_null_from_numeric(self):
"""target backend doesn't crash when you try to select a NUMERIC
value that has a value of NULL.
Added to support Pyodbc bug #351.
"""
return exclusions.open()
| answers: [ "    @property" ] | dataset: lcc | instruction_template: Please complete the code given below. {context} Next line of code: |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF Lattice premade models implement typical monotonic model architectures.
You can use TFL premade models to easily construct commonly used monotonic model
architectures. To construct a TFL premade model, construct a model configuration
from `tfl.configs` and pass it to the premade model constructor. No fields in
the model config will be automatically filled in, so the config must be fully
specified. Note that the inputs to the model should match the order in which
they are defined in the feature configs.
```python
model_config = tfl.configs.CalibratedLatticeConfig(...)
calibrated_lattice_model = tfl.premade.CalibratedLattice(
model_config=model_config)
calibrated_lattice_model.compile(...)
calibrated_lattice_model.fit(...)
```
Supported models are defined in `tfl.configs`. Each model architecture can be
used the same as any other `tf.keras.Model`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import aggregation_layer
from . import categorical_calibration_layer
from . import configs
from . import kronecker_factored_lattice_layer as kfll
from . import lattice_layer
from . import linear_layer
from . import parallel_combination_layer
from . import premade_lib
from . import pwl_calibration_layer
from . import rtl_layer
from absl import logging
import tensorflow as tf
# TODO: add support for serialization and object scoping or annotations.
class CalibratedLatticeEnsemble(tf.keras.Model):
"""Premade model for Tensorflow calibrated lattice ensemble models.
Creates a `tf.keras.Model` for the model architecture specified by the
`model_config`, which should be a
`tfl.configs.CalibratedLatticeEnsembleConfig`. No fields in the model config
will be automatically filled in, so the config must be fully specified. Note
that the inputs to the model should match the order in which they are defined
in the feature configs.
Example:
```python
model_config = tfl.configs.CalibratedLatticeEnsembleConfig(...)
calibrated_lattice_ensemble_model = tfl.premade.CalibratedLatticeEnsemble(
model_config=model_config)
calibrated_lattice_ensemble_model.compile(...)
calibrated_lattice_ensemble_model.fit(...)
```
Attributes:
model_config: Model configuration object describing model architecture.
Should be a `tfl.configs.CalibratedLatticeEnsembleConfig` instance.
"""
def __init__(self, model_config=None, dtype=tf.float32, **kwargs):
"""Initializes a `CalibratedLatticeEnsemble` instance.
Args:
model_config: Model configuration object describing model architecture.
Should be one of the model configs in `tfl.configs`.
dtype: dtype of layers used in the model.
**kwargs: Any additional `tf.keras.Model` arguments
"""
# Set our model_config
self.model_config = model_config
# Check if we are constructing with already provided inputs/outputs, e.g.
# when we are loading a model.
if 'inputs' in kwargs and 'outputs' in kwargs:
super(CalibratedLatticeEnsemble, self).__init__(**kwargs)
return
if model_config is None:
raise ValueError('Must provide a model_config.')
# Check that proper config has been given.
if not isinstance(model_config, configs.CalibratedLatticeEnsembleConfig):
raise ValueError('Invalid config type: {}'.format(type(model_config)))
# Verify that the config is fully specified.
premade_lib.verify_config(model_config)
# Get feature configs and construct model.
input_layer = premade_lib.build_input_layer(
feature_configs=model_config.feature_configs, dtype=dtype)
lattice_outputs = premade_lib.build_calibrated_lattice_ensemble_layer(
calibration_input_layer=input_layer,
model_config=model_config,
average_outputs=(not model_config.use_linear_combination),
dtype=dtype)
if model_config.use_linear_combination:
averaged_lattice_output = premade_lib.build_linear_combination_layer(
ensemble_outputs=lattice_outputs,
model_config=model_config,
dtype=dtype)
else:
averaged_lattice_output = lattice_outputs
if model_config.output_calibration:
model_output = premade_lib.build_output_calibration_layer(
output_calibration_input=averaged_lattice_output,
model_config=model_config,
dtype=dtype)
else:
model_output = averaged_lattice_output
# Define inputs and initialize model.
inputs = [
input_layer[feature_config.name]
for feature_config in model_config.feature_configs
]
kwargs['inputs'] = inputs
kwargs['outputs'] = model_output
super(CalibratedLatticeEnsemble, self).__init__(**kwargs)
def get_config(self):
"""Returns a configuration dictionary."""
config = super(CalibratedLatticeEnsemble, self).get_config()
config['model_config'] = tf.keras.utils.serialize_keras_object(
self.model_config)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
model = super(CalibratedLatticeEnsemble, cls).from_config(
config, custom_objects=custom_objects)
try:
model_config = tf.keras.utils.deserialize_keras_object(
config.get('model_config'), custom_objects=custom_objects)
premade_lib.verify_config(model_config)
model.model_config = model_config
except ValueError:
logging.warning(
'Could not load model_config. Constructing model without it: %s',
str(config.get('model_config')))
return model
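# Hedged sketch of round-tripping a premade model through the Keras saving APIs
# (the path is illustrative; depending on the TFL version you may also need to pass
# additional TFL layers/initializers via custom_objects):
#     model.save('/tmp/tfl_ensemble')
#     loaded = tf.keras.models.load_model(
#         '/tmp/tfl_ensemble',
#         custom_objects={'CalibratedLatticeEnsemble': CalibratedLatticeEnsemble})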
class CalibratedLattice(tf.keras.Model):
"""Premade model for Tensorflow calibrated lattice models.
Creates a `tf.keras.Model` for the model architecture specified by the
`model_config`, which should be a `tfl.configs.CalibratedLatticeConfig`. No
fields in the model config will be automatically filled in, so the config
must be fully specified. Note that the inputs to the model should match the
order in which they are defined in the feature configs.
Example:
```python
model_config = tfl.configs.CalibratedLatticeConfig(...)
calibrated_lattice_model = tfl.premade.CalibratedLattice(
model_config=model_config)
calibrated_lattice_model.compile(...)
calibrated_lattice_model.fit(...)
```
Attributes:
model_config: Model configuration object describing model architecture.
Should be a `tfl.configs.CalibratedLatticeConfig` instance.
"""
def __init__(self, model_config=None, dtype=tf.float32, **kwargs):
"""Initializes a `CalibratedLattice` instance.
Args:
model_config: Model configuration object describing model architecture.
Should be one of the model configs in `tfl.configs`.
dtype: dtype of layers used in the model.
**kwargs: Any additional `tf.keras.Model` arguments.
"""
# Set our model_config
self.model_config = model_config
# Check if we are constructing with already provided inputs/outputs, e.g.
# when we are loading a model.
if 'inputs' in kwargs and 'outputs' in kwargs:
super(CalibratedLattice, self).__init__(**kwargs)
return
if model_config is None:
raise ValueError('Must provide a model_config.')
# Check that proper config has been given.
if not isinstance(model_config, configs.CalibratedLatticeConfig):
raise ValueError('Invalid config type: {}'.format(type(model_config)))
# Verify that the config is fully specified.
premade_lib.verify_config(model_config)
# Get feature configs and construct model.
input_layer = premade_lib.build_input_layer(
feature_configs=model_config.feature_configs, dtype=dtype)
submodels_inputs = premade_lib.build_calibration_layers(
calibration_input_layer=input_layer,
model_config=model_config,
layer_output_range=premade_lib.LayerOutputRange.INPUT_TO_LATTICE,
submodels=[[
feature_config.name
for feature_config in model_config.feature_configs
]],
separate_calibrators=False,
dtype=dtype)
lattice_layer_output_range = (
premade_lib.LayerOutputRange.INPUT_TO_FINAL_CALIBRATION
if model_config.output_calibration else
premade_lib.LayerOutputRange.MODEL_OUTPUT)
lattice_output = premade_lib.build_lattice_layer(
lattice_input=submodels_inputs[0],
feature_configs=model_config.feature_configs,
model_config=model_config,
layer_output_range=lattice_layer_output_range,
submodel_index=0,
is_inside_ensemble=False,
dtype=dtype)
if model_config.output_calibration:
model_output = premade_lib.build_output_calibration_layer(
output_calibration_input=lattice_output,
model_config=model_config,
dtype=dtype)
else:
model_output = lattice_output
# Define inputs and initialize model.
inputs = [
input_layer[feature_config.name]
for feature_config in model_config.feature_configs
]
kwargs['inputs'] = inputs
kwargs['outputs'] = model_output
super(CalibratedLattice, self).__init__(**kwargs)
def get_config(self):
"""Returns a configuration dictionary."""
config = super(CalibratedLattice, self).get_config()
config['model_config'] = tf.keras.utils.serialize_keras_object(
self.model_config)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
model = super(CalibratedLattice, cls).from_config(
config, custom_objects=custom_objects)
try:
model_config = tf.keras.utils.deserialize_keras_object(
config.get('model_config'), custom_objects=custom_objects)
premade_lib.verify_config(model_config)
model.model_config = model_config
except ValueError:
logging.warning(
'Could not load model_config. Constructing model without it: %s',
str(config.get('model_config')))
return model
class CalibratedLinear(tf.keras.Model):
"""Premade model for Tensorflow calibrated linear models.
Creates a `tf.keras.Model` for the model architecture specified by the
`model_config`, which should be a `tfl.configs.CalibratedLinearConfig`. No
fields in the model config will be automatically filled in, so the config
must be fully specified. Note that the inputs to the model should match the
order in which they are defined in the feature configs.
Example:
```python
model_config = tfl.configs.CalibratedLinearConfig(...)
calibrated_linear_model = tfl.premade.CalibratedLinear(
model_config=model_config)
calibrated_linear_model.compile(...)
calibrated_linear_model.fit(...)
```
Attributes:
model_config: Model configuration object describing model architecture.
Should be a `tfl.configs.CalibratedLinearConfig` instance.
"""
def __init__(self, model_config=None, dtype=tf.float32, **kwargs):
"""Initializes a `CalibratedLinear` instance.
Args:
model_config: Model configuration object describing model architecture.
Should be one of the model configs in `tfl.configs`.
dtype: dtype of layers used in the model.
**kwargs: Any additional `tf.keras.Model` arguments.
"""
# Set our model_config
self.model_config = model_config
# Check if we are constructing with already provided inputs/outputs, e.g.
# when we are loading a model.
if 'inputs' in kwargs and 'outputs' in kwargs:
super(CalibratedLinear, self).__init__(**kwargs)
return
if model_config is None:
raise ValueError('Must provide a model_config.')
# Check that proper config has been given.
if not isinstance(model_config, configs.CalibratedLinearConfig):
raise ValueError('Invalid config type: {}'.format(type(model_config)))
# Verify that the config is fully specified.
premade_lib.verify_config(model_config)
# Get feature configs and construct model.
input_layer = premade_lib.build_input_layer(
feature_configs=model_config.feature_configs, dtype=dtype)
calibration_layer_output_range = (
premade_lib.LayerOutputRange.INPUT_TO_FINAL_CALIBRATION
if model_config.output_calibration else
premade_lib.LayerOutputRange.MODEL_OUTPUT)
submodels_inputs = premade_lib.build_calibration_layers(
calibration_input_layer=input_layer,
model_config=model_config,
layer_output_range=calibration_layer_output_range,
submodels=[[
feature_config.name
for feature_config in model_config.feature_configs
]],
separate_calibrators=False,
dtype=dtype)
weighted_average = (
model_config.output_min is not None or
model_config.output_max is not None or model_config.output_calibration)
linear_output = premade_lib.build_linear_layer(
linear_input=submodels_inputs[0],
feature_configs=model_config.feature_configs,
model_config=model_config,
weighted_average=weighted_average,
submodel_index=0,
dtype=dtype)
if model_config.output_calibration:
model_output = premade_lib.build_output_calibration_layer(
output_calibration_input=linear_output,
model_config=model_config,
dtype=dtype)
else:
model_output = linear_output
# Define inputs and initialize model.
inputs = [
input_layer[feature_config.name]
for feature_config in model_config.feature_configs
]
kwargs['inputs'] = inputs
kwargs['outputs'] = model_output
super(CalibratedLinear, self).__init__(**kwargs)
def get_config(self):
"""Returns a configuration dictionary."""
config = super(CalibratedLinear, self).get_config()
config['model_config'] = tf.keras.utils.serialize_keras_object(
self.model_config)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
model = super(CalibratedLinear, cls).from_config(
config, custom_objects=custom_objects)
try:
model_config = tf.keras.utils.deserialize_keras_object(
config.get('model_config'), custom_objects=custom_objects)
premade_lib.verify_config(model_config)
model.model_config = model_config
except ValueError:
logging.warning(
'Could not load model_config. Constructing model without it: %s',
str(config.get('model_config')))
return model
# TODO: add support for tf.map_fn and inputs of shape (B, ?, input_dim)
# as well as non-ragged inputs using padding/mask.
class AggregateFunction(tf.keras.Model):
"""Premade model for Tensorflow aggregate function learning models.
Creates a `tf.keras.Model` for the model architecture specified by the
`model_config`, which should be a
`tfl.configs.AggregateFunctionConfig`. No
fields in the model config will be automatically filled in, so the config
must be fully specified. Note that the inputs to the model should match the
order in which they are defined in the feature configs. Features will be
considered ragged, so inputs to this model must be `tf.ragged` instances.
Example:
```python
model_config = tfl.configs.AggregateFunctionConfig(...)
agg_model = tfl.premade.AggregateFunction(
model_config=model_config)
agg_model.compile(...)
agg_model.fit(...)
```
"""
def __init__(self, model_config=None, dtype=tf.float32, **kwargs):
"""Initializes an `AggregateFunction` instance.
Args:
model_config: Model configuration object describing model architecture.
Should be a `tfl.configs.AggregateFunctionConfig` instance.
dtype: dtype of layers used in the model.
**kwargs: Any additional `tf.keras.Model` arguments.
"""
# Set our model_config
self.model_config = model_config
# Check if we are constructing with already provided inputs/outputs, e.g.
# when we are loading a model.
if 'inputs' in kwargs and 'outputs' in kwargs:
super(AggregateFunction, self).__init__(**kwargs)
return
if model_config is None:
raise ValueError('Must provide a model_config.')
# Check that proper config has been given.
if not isinstance(model_config, configs.AggregateFunctionConfig):
raise ValueError('Invalid config type: {}'.format(type(model_config)))
# Verify that the config is fully specified.
premade_lib.verify_config(model_config)
# Get feature configs and construct model.
input_layer = premade_lib.build_input_layer(
feature_configs=model_config.feature_configs, dtype=dtype, ragged=True)
# We need to construct middle_dimension calibrated_lattices for the
# aggregation layer. Note that we cannot do this in premade_lib because
# importing premade in premade_lib would cause a dependency cycle. Also
# note that we only need to set the output initialization to the min and
# max since we are not using output calibration at this step of the
# aggregation.
calibrated_lattice_config = configs.CalibratedLatticeConfig(
feature_configs=model_config.feature_configs,
interpolation=model_config.aggregation_lattice_interpolation,
regularizer_configs=model_config.regularizer_configs,
output_min=-1.0,
output_max=1.0,
output_initialization=[-1.0, 1.0])
calibrated_lattice_models = [
CalibratedLattice(calibrated_lattice_config)
for _ in range(model_config.middle_dimension)
]
aggregation_layer_output_range = (
premade_lib.LayerOutputRange.INPUT_TO_FINAL_CALIBRATION
if model_config.output_calibration else
premade_lib.LayerOutputRange.MODEL_OUTPUT)
# Change input layer into a list based on model_config.feature_configs.
# This is the order of inputs expected by calibrated_lattice_models.
inputs = [
input_layer[feature_config.name]
for feature_config in model_config.feature_configs
]
aggregation_output = premade_lib.build_aggregation_layer(
aggregation_input_layer=inputs,
model_config=model_config,
calibrated_lattice_models=calibrated_lattice_models,
layer_output_range=aggregation_layer_output_range,
submodel_index=0,
dtype=dtype)
if model_config.output_calibration:
model_output = premade_lib.build_output_calibration_layer(
output_calibration_input=aggregation_output,
model_config=model_config,
dtype=dtype)
else:
model_output = aggregation_output
# Define inputs and initialize model.
kwargs['inputs'] = inputs
kwargs['outputs'] = model_output
super(AggregateFunction, self).__init__(**kwargs)
def get_config(self):
"""Returns a configuration dictionary."""
config = super(AggregateFunction, self).get_config()
config['model_config'] = tf.keras.utils.serialize_keras_object(
self.model_config)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
model = super(AggregateFunction, cls).from_config(
config, custom_objects=custom_objects)
try:
model_config = tf.keras.utils.deserialize_keras_object(
config.get('model_config'), custom_objects=custom_objects)
premade_lib.verify_config(model_config)
model.model_config = model_config
except ValueError:
logging.warning(
'Could not load model_config. Constructing model without it: %s',
str(config.get('model_config')))
return model
def get_custom_objects(custom_objects=None):
"""Creates and returns a dictionary mapping names to custom objects.
Args:
custom_objects: Optional dictionary mapping names (strings) to custom
classes or functions to be considered during deserialization. If provided,
the returned mapping will be extended to contain this one.
Returns:
A dictionary mapping names (strings) to tensorflow lattice custom objects.
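Example (illustrative usage; the saved-model path is a placeholder and not
part of this module):
  loaded_model = tf.keras.models.load_model(
      '/path/to/saved_model', custom_objects=get_custom_objects())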
"""
tfl_custom_objects = {
'AggregateFunction':
AggregateFunction,
'AggregateFunctionConfig':
configs.AggregateFunctionConfig,
'Aggregation':
aggregation_layer.Aggregation,
'BiasInitializer':
kfll.BiasInitializer,
'CalibratedLatticeEnsemble':
CalibratedLatticeEnsemble,
'CalibratedLattice':
CalibratedLattice,
'CalibratedLatticeConfig':
configs.CalibratedLatticeConfig,
'CalibratedLatticeEnsembleConfig':
configs.CalibratedLatticeEnsembleConfig,
'CalibratedLinear':
CalibratedLinear,
'CalibratedLinearConfig':
configs.CalibratedLinearConfig,
'CategoricalCalibration':
categorical_calibration_layer.CategoricalCalibration,
'CategoricalCalibrationConstraints':
categorical_calibration_layer.CategoricalCalibrationConstraints,
'DominanceConfig':
configs.DominanceConfig,
'FeatureConfig':
configs.FeatureConfig,
'KFLRandomMonotonicInitializer':
kfll.KFLRandomMonotonicInitializer,
'KroneckerFactoredLattice':
kfll.KroneckerFactoredLattice,
'KroneckerFactoredLatticeConstraints':
kfll.KroneckerFactoredLatticeConstraints,
'LaplacianRegularizer':
lattice_layer.LaplacianRegularizer,
'Lattice':
lattice_layer.Lattice,
'LatticeConstraints':
lattice_layer.LatticeConstraints,
'Linear':
linear_layer.Linear,
'LinearConstraints':
linear_layer.LinearConstraints,
'LinearInitializer':
lattice_layer.LinearInitializer,
'NaiveBoundsConstraints': | [
" pwl_calibration_layer.NaiveBoundsConstraints,"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# Copyright 2015 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver for IBM FlashSystem storage systems with FC protocol.
Limitations:
1. Cinder driver only works when open_access_enabled=off.
"""
import random
import threading
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import utils
import cinder.volume.driver
from cinder.volume.drivers.ibm import flashsystem_common as fscommon
from cinder.volume.drivers.san import san
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
flashsystem_fc_opts = [
cfg.BoolOpt('flashsystem_multipath_enabled',
default=False,
help='Connect with multipath (FC only). '
'(Default is false.)')
]
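# Illustrative only: this option would typically be set in cinder.conf; the
# section name below is an example and depends on how the backend is
# configured.
#   [flashsystem_fc_backend]
#   flashsystem_multipath_enabled = True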
CONF = cfg.CONF
CONF.register_opts(flashsystem_fc_opts)
class FlashSystemFCDriver(fscommon.FlashSystemDriver,
cinder.volume.driver.FibreChannelDriver):
"""IBM FlashSystem FC volume driver.
Version history:
1.0.0 - Initial driver
1.0.1 - Code clean up
1.0.2 - Add lock into vdisk map/unmap, connection
initialize/terminate
1.0.3 - Initial driver for iSCSI
1.0.4 - Split Flashsystem driver into common and FC
1.0.5 - Report capability of volume multiattach
1.0.6 - Fix bug #1469581, add I/T mapping check in
terminate_connection
"""
VERSION = "1.0.6"
def __init__(self, *args, **kwargs):
super(FlashSystemFCDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(fscommon.flashsystem_opts)
self.configuration.append_config_values(flashsystem_fc_opts)
self.configuration.append_config_values(san.san_opts)
def _check_vdisk_params(self, params):
# Check that the requested protocol is enabled
if params['protocol'] != self._protocol:
msg = (_("Illegal value '%(prot)s' specified for "
"flashsystem_connection_protocol: "
"valid value(s) are %(enabled)s.")
% {'prot': params['protocol'],
'enabled': self._protocol})
raise exception.InvalidInput(reason=msg)
def _create_host(self, connector):
"""Create a new host on the storage system.
We create a host and associate it with the given connection
information.
"""
LOG.debug('enter: _create_host: host %s.', connector['host'])
rand_id = six.text_type(random.randint(0, 99999999)).zfill(8)
host_name = '%s-%s' % (self._connector_to_hostname_prefix(connector),
rand_id)
ports = []
if 'FC' == self._protocol and 'wwpns' in connector:
for wwpn in connector['wwpns']:
ports.append('-hbawwpn %s' % wwpn)
self._driver_assert(ports,
(_('_create_host: No connector ports.')))
port1 = ports.pop(0)
arg_name, arg_val = port1.split()
ssh_cmd = ['svctask', 'mkhost', '-force', arg_name, arg_val, '-name',
'"%s"' % host_name]
out, err = self._ssh(ssh_cmd)
self._assert_ssh_return('successfully created' in out,
'_create_host', ssh_cmd, out, err)
for port in ports:
arg_name, arg_val = port.split()
ssh_cmd = ['svctask', 'addhostport', '-force',
arg_name, arg_val, host_name]
out, err = self._ssh(ssh_cmd)
self._assert_ssh_return(
(not out.strip()),
'_create_host', ssh_cmd, out, err)
LOG.debug(
'leave: _create_host: host %(host)s - %(host_name)s.', | [
" {'host': connector['host'], 'host_name': host_name})"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# Copyright 2017 Balazs Nemeth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generates increasingly bigger and more numerous Service Chain requirements for a
network topology and reports how well the algorithm performed.
"""
import getopt
import logging
import math
import os
import random
import string
import sys
import traceback
from collections import OrderedDict
from generator import NFFG, NFFGToolBox
import CarrierTopoBuilder
from alg1 import MappingAlgorithms
import alg1.UnifyExceptionTypes as uet
log = logging.getLogger("StressTest")
log.setLevel(logging.WARN)
logging.basicConfig(format='%(levelname)s:%(name)s:%(message)s')
nf_types = list(string.ascii_uppercase)[:10]
rnd = random.Random()
def gen_seq ():
while True:
yield int(math.floor(rnd.random() * 999999999))
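# Illustrative invocation (option names are taken from the help message below;
# values are placeholders):
#   python StressTest.py -o results.out --request_seed=42 --bt_limit=6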
helpmsg = """StressTest.py options are:
-h Print this message help message.
-o The output file where the result shall be printed.
--loops All Service Chains will be loops.
--fullremap Ignores all VNF mappings in the substrate network.
--vnf_sharing=p Sets the ratio of shared and not shared VNF-s.
--request_seed=i Provides seed for the random generator.
--bw_factor=f Controls the importance between bandwidth, infra resources
--res_factor=f and distance in latency during the mapping process. The
--lat_factor=f factors are advised to be summed to 3, if any is given the
others shall be given too!
--bt_limit=i Backtracking depth limit of the mapping algorithm (def.:
6).
--bt_br_factor=i Branching factor of the backtracking procedure of the
mapping algorithm (default is 3).
| [
" --multiple_scs One request will contain at least 2 chains with"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
from __future__ import print_function
import gc
import sys
import time
import unittest
from nose.plugins.skip import SkipTest
import numpy
from six import itervalues
from theano import function
from theano.gof import vm
from theano.gof import OpWiseCLinker
from six.moves import xrange
from theano.compile import Mode
from theano import tensor
from theano.ifelse import ifelse
import theano
class TestCallbacks(unittest.TestCase):
"""
Test the VM_Linker's callback argument, which can be useful for debugging.
"""
def setUp(self):
self.n_callbacks = {}
def callback(self, node, thunk, storage_map, compute_map):
key = node.op.__class__.__name__
self.n_callbacks.setdefault(key, 0)
self.n_callbacks[key] += 1
def test_callback(self):
a, b, c = tensor.scalars('abc')
f = function([a, b, c], (a + b) + c,
mode=Mode(
optimizer=None,
linker=vm.VM_Linker(callback=self.callback)))
f(1, 2, 3)
assert sum(self.n_callbacks.values()) == len(f.maker.fgraph.toposort())
f(1, 2, 3)
assert (sum(self.n_callbacks.values()) ==
len(f.maker.fgraph.toposort()) * 2)
def test_callback_with_ifelse(self):
a, b, c = tensor.scalars('abc')
f = function([a, b, c], ifelse(a, 2 * b, 2 * c),
mode=Mode(
optimizer=None,
linker=vm.VM_Linker(callback=self.callback)))
f(1, 2, 3)
assert self.n_callbacks['IfElse'] == 2
def test_c_thunks():
a = tensor.scalars('a')
b, c = tensor.vectors('bc')
cases = [False]
if theano.config.cxx:
cases.append(True)
for c_thunks in cases:
f = function([a, b, c], ifelse(a, a * b, b * c),
mode=Mode(
optimizer=None,
linker=vm.VM_Linker(c_thunks=c_thunks,
use_cloop=False)))
f(1, [2], [3, 2])
from nose.tools import assert_raises
assert_raises(ValueError, f, 0, [2], [3, 4])
assert any([hasattr(t, 'cthunk') for t in f.fn.thunks]) == c_thunks
def test_speed():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
def build_graph(x, depth=5):
z = x
for d in range(depth):
z = (z + z)
return z
def numpy_version(x, depth):
z = x
for d in xrange(depth):
z = (z + z)
return z
def time_numpy():
steps_a = 5
steps_b = 100
x = numpy.asarray([2.0, 3.0], dtype=theano.config.floatX)
numpy_version(x, steps_a)
t0 = time.time()
# print numpy_version(x, steps_a)
t1 = time.time()
t2 = time.time()
# print numpy_version(x, steps_b)
t3 = time.time()
t_a = t1 - t0
t_b = t3 - t2
print("%s takes %f s/Kop" % (
'numpy',
(1000 * (t_b - t_a) / (steps_b - steps_a))))
def time_linker(name, linker):
steps_a = 5
steps_b = 100
x = tensor.vector()
a = build_graph(x, steps_a)
b = build_graph(x, steps_b)
f_a = function([x], a,
mode=Mode(optimizer=None, linker=linker()))
f_b = function([x], b,
mode=Mode(optimizer=None, linker=linker()))
f_a([2.0, 3.0])
t0 = time.time()
f_a([2.0, 3.0])
t1 = time.time()
f_b([2.0, 3.0])
t2 = time.time()
f_b([2.0, 3.0])
t3 = time.time()
t_a = t1 - t0
t_b = t3 - t2
print("%s takes %f s/Kop" % (
name,
(1000 * (t_b - t_a) / (steps_b - steps_a))))
time_linker('c|py', OpWiseCLinker)
time_linker('vmLinker', vm.VM_Linker)
time_linker('vmLinker_nogc', lambda: vm.VM_Linker(allow_gc=False))
if theano.config.cxx:
time_linker('vmLinker_CLOOP', lambda: vm.VM_Linker(allow_gc=False,
use_cloop=True))
time_numpy()
def test_speed_lazy():
def build_graph(x, depth=5):
z = x
for d in range(depth):
z = ifelse(z[0] > 0, -z, z)
return z
def time_linker(name, linker):
steps_a = 10
steps_b = 100
x = tensor.vector()
a = build_graph(x, steps_a)
b = build_graph(x, steps_b)
f_a = function([x], a,
mode=Mode(optimizer=None,
linker=linker()))
f_b = function([x], b,
mode=Mode(optimizer=None,
linker=linker()))
f_a([2.0])
t0 = time.time()
f_a([2.0])
t1 = time.time()
f_b([2.0])
t2 = time.time()
f_b([2.0])
t3 = time.time()
t_a = t1 - t0
t_b = t3 - t2
print("%s takes %f s/Kop" % (
name,
(1000 * (t_b - t_a) / (steps_b - steps_a))))
time_linker('vmLinker', vm.VM_Linker)
time_linker('vmLinker_nogc', lambda: vm.VM_Linker(allow_gc=False))
if theano.config.cxx:
time_linker('vmLinker_C', lambda: vm.VM_Linker(allow_gc=False,
use_cloop=True))
def test_allow_gc_cvm():
mode = theano.config.mode
if mode in ['DEBUG_MODE', 'DebugMode']:
mode = "FAST_RUN"
v = theano.tensor.vector()
f = theano.function([v], v + 1, mode=mode)
f([1])
n = list(f.maker.fgraph.apply_nodes)[0].outputs[0]
assert f.fn.storage_map[n][0] is None
assert f.fn.allow_gc is True
f.fn.allow_gc = False
assert f.fn.allow_gc is False
f([1])
assert f.fn.storage_map[n][0] is not None
f.fn.allow_gc = True
assert f.fn.allow_gc is True
f([1])
assert f.fn.storage_map[n][0] is None
run_memory_usage_tests = False
if run_memory_usage_tests:
# these are not normal unit tests, do not run them as part of standard
# suite. I ran them while looking at top, and stopped when memory usage
# was stable.
def test_leak2():
import theano.sandbox.cuda as cuda
for i in xrange(1000000):
n = numpy.asarray([2.3, 4.5], dtype='f')
c = sys.getrefcount(n)
a = cuda.CudaNdarray(n)
a.sum()
assert c == sys.getrefcount(n)
# This is to confuse flake8
a = a
del a
if not i % 1000:
print('.', end=' ')
print(gc.collect(), end=' ')
print(gc.collect())
sys.stdout.flush()
def test_no_leak_many_graphs():
# Verify no memory leaks when creating and deleting a lot of functions
# This isn't really a unit test; you have to run it and look at top to
# see if there's a leak
for i in xrange(10000):
x = tensor.vector()
z = x
for d in range(10):
z = tensor.sin(-z + 1)
f = function([x], z, mode=Mode(optimizer=None, linker='cvm'))
if not i % 100:
print(gc.collect())
sys.stdout.flush()
gc.collect()
if 1:
f([2.0])
f([3.0])
f([4.0])
f([5.0])
def test_no_leak_many_call_lazy():
# Verify no memory leaks when calling a function a lot of times
# This isn't really a unit test; you have to run it and look at top to
# see if there's a leak
def build_graph(x, depth=5):
z = x
for d in range(depth):
z = ifelse(z.mean() > 0.5, -z, z)
return z
def time_linker(name, linker):
steps_a = 10
x = tensor.dvector()
a = build_graph(x, steps_a)
f_a = function([x], a,
mode=Mode(optimizer=None,
linker=linker()))
inp = numpy.random.rand(1000000)
for i in xrange(100):
f_a(inp)
if 0: # this doesn't seem to work, prints 0 for everything
import resource
pre = resource.getrusage(resource.RUSAGE_SELF)
post = resource.getrusage(resource.RUSAGE_SELF)
print(pre.ru_ixrss, post.ru_ixrss)
print(pre.ru_idrss, post.ru_idrss)
print(pre.ru_maxrss, post.ru_maxrss)
print(1)
time_linker('vmLinker_C',
lambda: vm.VM_Linker(allow_gc=False, use_cloop=True))
print(2)
time_linker('vmLinker',
lambda: vm.VM_Linker(allow_gc=False, use_cloop=False))
def test_no_leak_many_call_nonlazy():
# Verify no memory leaks when calling a function a lot of times
# This isn't really a unit test; you have to run it and look at top to
# see if there's a leak.
def build_graph(x, depth=5):
z = x
for d in range(depth):
z = tensor.sin(-z + 1)
return z
def time_linker(name, linker):
steps_a = 10
x = tensor.dvector()
a = build_graph(x, steps_a)
f_a = function([x], a,
mode=Mode(optimizer=None,
linker=linker()))
inp = numpy.random.rand(1000000)
for i in xrange(500):
f_a(inp)
print(1)
time_linker('vmLinker_C',
lambda: vm.VM_Linker(allow_gc=False, use_cloop=True))
print(2)
time_linker('vmLinker',
lambda: vm.VM_Linker(allow_gc=False, use_cloop=False))
class RunOnce(theano.Op):
__props__ = ("nb_run",)
def __init__(self):
self.nb_run = 0
def make_node(self, x):
return theano.Apply(self, [x], [x.type()])
def perform(self, node, inputs, outputs):
assert self.nb_run == 0
self.nb_run += 1
outputs[0][0] = inputs[0].copy()
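# Note: RunOnce.perform() asserts it is executed at most once; the test below
# appears to rely on this to check that the VM evaluates a shared apply node a
# single time, even with multiple outputs and garbage collection enabled.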
def test_vm_gc():
"""This already caused a bug in the trunk of Theano.
The bug was introduced in the trunk on July 5th, 2012 and fixed on
July 30th.
"""
x = theano.tensor.vector()
p = RunOnce()(x)
mode = theano.Mode(linker=theano.gof.vm.VM_Linker(lazy=True))
f = theano.function([theano.In(x, mutable=True)], [p + 1, p + 2],
mode=mode)
f([1, 2, 3])
p = RunOnce()(x)
pp = p + p
f = theano.function([x], [pp + pp],
mode=mode)
f([1, 2, 3])
def test_reallocation():
x = tensor.scalar('x')
y = tensor.scalar('y')
z = tensor.tanh(3 * x + y) + tensor.cosh(x + 5 * y)
# The functionality is currently implemented for the non-lazy and non-C VM only.
for l in [vm.VM_Linker(allow_gc=False, lazy=False, use_cloop=False),
vm.VM_Linker(allow_gc=True, lazy=False, use_cloop=False)]:
m = theano.compile.get_mode(theano.Mode(linker=l))
m = m.excluding('fusion', 'inplace')
f = theano.function([x, y], z, name="test_reduce_memory",
mode=m)
output = f(1, 2)
assert output
storage_map = f.fn.storage_map
| [
" def check_storage(storage_map):"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
"""Adds support for generic thermostat units."""
import asyncio
import logging
import math
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
ATTR_PRESET_MODE,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_TEMPERATURE,
CONF_NAME,
CONF_UNIQUE_ID,
EVENT_HOMEASSISTANT_START,
PRECISION_HALVES,
PRECISION_TENTHS,
PRECISION_WHOLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import DOMAIN as HA_DOMAIN, CoreState, callback
from homeassistant.exceptions import ConditionError
from homeassistant.helpers import condition
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import (
async_track_state_change_event,
async_track_time_interval,
)
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.restore_state import RestoreEntity
from . import DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
DEFAULT_TOLERANCE = 0.3
DEFAULT_NAME = "Generic Thermostat"
CONF_HEATER = "heater"
CONF_SENSOR = "target_sensor"
CONF_MIN_TEMP = "min_temp"
CONF_MAX_TEMP = "max_temp"
CONF_TARGET_TEMP = "target_temp"
CONF_AC_MODE = "ac_mode"
CONF_MIN_DUR = "min_cycle_duration"
CONF_COLD_TOLERANCE = "cold_tolerance"
CONF_HOT_TOLERANCE = "hot_tolerance"
CONF_KEEP_ALIVE = "keep_alive"
CONF_INITIAL_HVAC_MODE = "initial_hvac_mode"
CONF_AWAY_TEMP = "away_temp"
CONF_PRECISION = "precision"
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE
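# Illustrative YAML configuration matching the schema below (entity ids are
# placeholders; the platform key is assumed to be the integration's documented
# name):
#   climate:
#     - platform: generic_thermostat
#       name: Study
#       heater: switch.study_heater
#       target_sensor: sensor.study_temperature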
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HEATER): cv.entity_id,
vol.Required(CONF_SENSOR): cv.entity_id,
vol.Optional(CONF_AC_MODE): cv.boolean,
vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
vol.Optional(CONF_MIN_DUR): cv.positive_time_period,
vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_COLD_TOLERANCE, default=DEFAULT_TOLERANCE): vol.Coerce(float), | [
" vol.Optional(CONF_HOT_TOLERANCE, default=DEFAULT_TOLERANCE): vol.Coerce(float),"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
import base64
import json
import logging
import six
from . import credentials
from . import errors
from .utils import config
INDEX_NAME = 'docker.io'
INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
TOKEN_USERNAME = '<token>'
log = logging.getLogger(__name__)
def resolve_repository_name(repo_name):
if '://' in repo_name:
raise errors.InvalidRepository(
'Repository name cannot contain a scheme ({0})'.format(repo_name)
)
index_name, remote_name = split_repo_name(repo_name)
if index_name[0] == '-' or index_name[-1] == '-':
raise errors.InvalidRepository(
'Invalid index name ({0}). Cannot begin or end with a'
' hyphen.'.format(index_name)
)
return resolve_index_name(index_name), remote_name
def resolve_index_name(index_name):
index_name = convert_to_hostname(index_name)
if index_name == 'index.' + INDEX_NAME:
index_name = INDEX_NAME
return index_name
def get_config_header(client, registry):
log.debug('Looking for auth config')
if not client._auth_configs or client._auth_configs.is_empty:
log.debug(
"No auth config in memory - loading from filesystem"
)
client._auth_configs = load_config(credstore_env=client.credstore_env)
authcfg = resolve_authconfig(
client._auth_configs, registry, credstore_env=client.credstore_env
)
# Do not fail here if no authentication exists for this
# specific registry as we can have a readonly pull. Just
# put the header if we can.
if authcfg:
log.debug('Found auth config')
# auth_config needs to be a dict in the format used by
# auth.py username , password, serveraddress, email
return encode_header(authcfg)
log.debug('No auth config found')
return None
def split_repo_name(repo_name):
parts = repo_name.split('/', 1)
if len(parts) == 1 or (
'.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
):
# This is a docker index repo (ex: username/foobar or ubuntu)
return INDEX_NAME, repo_name
return tuple(parts)
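# Illustrative behaviour, derived directly from the checks above:
#   split_repo_name('ubuntu') -> ('docker.io', 'ubuntu')
#   split_repo_name('registry.example.com:5000/app')
#       -> ('registry.example.com:5000', 'app')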
def get_credential_store(authconfig, registry):
if not isinstance(authconfig, AuthConfig):
authconfig = AuthConfig(authconfig)
return authconfig.get_credential_store(registry)
class AuthConfig(dict):
def __init__(self, dct, credstore_env=None):
if 'auths' not in dct:
dct['auths'] = {}
self.update(dct)
self._credstore_env = credstore_env
self._stores = {}
@classmethod
def parse_auth(cls, entries, raise_on_error=False):
"""
Parses authentication entries
Args:
entries: Dict of authentication entries.
raise_on_error: If set to true, an invalid format will raise
InvalidConfigFile
Returns:
Authentication registry.
"""
conf = {}
for registry, entry in six.iteritems(entries):
if not isinstance(entry, dict):
log.debug(
'Config entry for key {0} is not auth config'.format(
registry
)
)
# We sometimes fall back to parsing the whole config as if it
# was the auth config by itself, for legacy purposes. In that
# case, we fail silently and return an empty conf if any of the
# keys is not formatted properly.
if raise_on_error:
raise errors.InvalidConfigFile(
'Invalid configuration for registry {0}'.format(
registry
)
)
return {}
if 'identitytoken' in entry:
log.debug(
'Found an IdentityToken entry for registry {0}'.format(
registry
)
)
conf[registry] = {
'IdentityToken': entry['identitytoken']
}
continue # Other values are irrelevant if we have a token
if 'auth' not in entry:
# Starting with engine v1.11 (API 1.23), an empty dictionary is
# a valid value in the auths config.
# https://github.com/docker/compose/issues/3265
log.debug(
'Auth data for {0} is absent. Client might be using a '
'credentials store instead.'.format(registry)
)
conf[registry] = {}
continue
username, password = decode_auth(entry['auth'])
log.debug(
'Found entry (registry={0}, username={1})'
.format(repr(registry), repr(username))
)
conf[registry] = {
'username': username,
'password': password,
'email': entry.get('email'),
'serveraddress': registry,
}
return conf
@classmethod
def load_config(cls, config_path, config_dict, credstore_env=None):
"""
Loads authentication data from a Docker configuration file in the given
root directory or if config_path is passed use given path.
Lookup priority:
explicit config_path parameter > DOCKER_CONFIG environment
variable > ~/.docker/config.json > ~/.dockercfg
"""
if not config_dict:
config_file = config.find_config_file(config_path)
if not config_file:
return cls({}, credstore_env)
try:
with open(config_file) as f:
config_dict = json.load(f)
except (IOError, KeyError, ValueError) as e:
# Likely missing new Docker config file, or it's in an
# unknown format; continue and attempt to read the old location
# and format.
log.debug(e)
return cls(_load_legacy_config(config_file), credstore_env)
res = {}
if config_dict.get('auths'):
log.debug("Found 'auths' section")
res.update({
'auths': cls.parse_auth(
config_dict.pop('auths'), raise_on_error=True
) | [
" })"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import argparse
import copy
import os
import sys
import time
from cinderclient import exceptions
from cinderclient import utils
from cinderclient.v1 import availability_zones
def _poll_for_status(poll_fn, obj_id, action, final_ok_states,
poll_period=5, show_progress=True):
"""Block while an action is being performed, periodically printing
progress.
"""
def print_progress(progress):
if show_progress:
msg = ('\rInstance %(action)s... %(progress)s%% complete'
% dict(action=action, progress=progress))
else:
msg = '\rInstance %(action)s...' % dict(action=action)
sys.stdout.write(msg)
sys.stdout.flush()
print()
while True:
obj = poll_fn(obj_id)
status = obj.status.lower()
progress = getattr(obj, 'progress', None) or 0
if status in final_ok_states:
print_progress(100)
print("\nFinished")
break
elif status == "error":
print("\nError %(action)s instance" % {'action': action})
break
else:
print_progress(progress)
time.sleep(poll_period)
def _find_volume(cs, volume):
"""Get a volume by ID."""
return utils.find_resource(cs.volumes, volume)
def _find_volume_snapshot(cs, snapshot):
"""Get a volume snapshot by ID."""
return utils.find_resource(cs.volume_snapshots, snapshot)
def _find_backup(cs, backup):
"""Get a backup by ID."""
return utils.find_resource(cs.backups, backup)
def _find_transfer(cs, transfer):
"""Get a transfer by ID."""
return utils.find_resource(cs.transfers, transfer)
def _print_volume(volume):
utils.print_dict(volume._info)
def _print_volume_snapshot(snapshot):
utils.print_dict(snapshot._info)
def _print_volume_image(image):
utils.print_dict(image[1]['os-volume_upload_image'])
def _translate_keys(collection, convert):
for item in collection:
keys = list(item.__dict__.keys())
for from_key, to_key in convert:
if from_key in keys and to_key not in keys:
setattr(item, to_key, item._info[from_key])
def _translate_volume_keys(collection):
convert = [('displayName', 'display_name'), ('volumeType', 'volume_type')]
_translate_keys(collection, convert)
def _translate_volume_snapshot_keys(collection):
convert = [('displayName', 'display_name'), ('volumeId', 'volume_id')]
_translate_keys(collection, convert)
def _translate_availability_zone_keys(collection):
convert = [('zoneName', 'name'), ('zoneState', 'status')]
_translate_keys(collection, convert)
def _extract_metadata(args):
metadata = {}
for metadatum in args.metadata:
# unset doesn't require a val, so we have the if/else
if '=' in metadatum:
(key, value) = metadatum.split('=', 1)
else:
key = metadatum
value = None
metadata[key] = value
return metadata
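# Illustrative behaviour, derived from the loop above:
#   args.metadata == ['purpose=test', 'scratch'] -> {'purpose': 'test', 'scratch': None}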
@utils.arg(
'--all-tenants',
dest='all_tenants',
metavar='<0|1>',
nargs='?',
type=int,
const=1,
default=0,
help='Display information from all tenants (Admin only).')
@utils.arg(
'--all_tenants',
nargs='?',
type=int,
const=1,
help=argparse.SUPPRESS)
@utils.arg(
'--display-name',
metavar='<display-name>',
default=None,
help='Filter results by display-name')
@utils.arg(
'--status',
metavar='<status>',
default=None,
help='Filter results by status')
@utils.arg(
'--metadata',
type=str,
nargs='*',
metavar='<key=value>',
help='Filter results by metadata',
default=None)
@utils.service_type('volume')
def do_list(cs, args):
"""List all the volumes."""
all_tenants = int(os.environ.get("ALL_TENANTS", args.all_tenants))
search_opts = {
'all_tenants': all_tenants,
'display_name': args.display_name,
'status': args.status,
'metadata': _extract_metadata(args) if args.metadata else None,
}
volumes = cs.volumes.list(search_opts=search_opts)
_translate_volume_keys(volumes)
# Create a list of servers to which the volume is attached
for vol in volumes:
servers = [s.get('server_id') for s in vol.attachments]
setattr(vol, 'attached_to', ','.join(map(str, servers)))
utils.print_list(volumes, ['ID', 'Status', 'Display Name',
'Size', 'Volume Type', 'Bootable', 'Attached to'])
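# Hypothetical CLI usage for the listing above (flag names come from the
# decorators; the executable name assumes the standard cinder shell):
#   cinder list --status available --display-name my-volume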
@utils.arg('volume', metavar='<volume>', help='ID of the volume.')
@utils.service_type('volume')
def do_show(cs, args):
"""Show details about a volume."""
volume = _find_volume(cs, args.volume)
_print_volume(volume)
@utils.arg('size',
metavar='<size>',
type=int,
help='Size of volume in GB')
@utils.arg(
'--snapshot-id',
metavar='<snapshot-id>',
default=None,
help='Create volume from snapshot id (Optional, Default=None)')
@utils.arg(
'--snapshot_id',
help=argparse.SUPPRESS)
@utils.arg(
'--source-volid',
metavar='<source-volid>',
default=None,
help='Create volume from volume id (Optional, Default=None)')
@utils.arg(
'--source_volid',
help=argparse.SUPPRESS)
@utils.arg(
'--image-id',
metavar='<image-id>',
default=None,
help='Create volume from image id (Optional, Default=None)')
@utils.arg(
'--image_id',
help=argparse.SUPPRESS)
@utils.arg(
'--display-name',
metavar='<display-name>',
default=None,
help='Volume name (Optional, Default=None)')
@utils.arg(
'--display_name',
help=argparse.SUPPRESS)
@utils.arg(
'--display-description',
metavar='<display-description>',
default=None,
help='Volume description (Optional, Default=None)')
@utils.arg(
'--display_description',
help=argparse.SUPPRESS)
@utils.arg(
'--volume-type',
metavar='<volume-type>',
default=None,
help='Volume type (Optional, Default=None)')
@utils.arg(
'--volume_type',
help=argparse.SUPPRESS)
@utils.arg(
'--availability-zone',
metavar='<availability-zone>',
default=None,
help='Availability zone for volume (Optional, Default=None)')
@utils.arg(
'--availability_zone',
help=argparse.SUPPRESS)
@utils.arg('--metadata',
type=str,
nargs='*',
metavar='<key=value>',
help='Metadata key=value pairs (Optional, Default=None)',
default=None)
@utils.service_type('volume')
def do_create(cs, args):
"""Add a new volume."""
volume_metadata = None
if args.metadata is not None:
volume_metadata = _extract_metadata(args)
volume = cs.volumes.create(args.size,
args.snapshot_id,
args.source_volid,
args.display_name,
args.display_description,
args.volume_type,
availability_zone=args.availability_zone,
imageRef=args.image_id,
metadata=volume_metadata)
_print_volume(volume)
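# Hypothetical CLI usage (values are placeholders; flag names come from the
# decorators above):
#   cinder create 10 --display-name my-volume --volume-type lvm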
@utils.arg('volume', metavar='<volume>', help='ID of the volume to delete.')
@utils.service_type('volume')
def do_delete(cs, args):
"""Remove a volume."""
volume = _find_volume(cs, args.volume)
volume.delete()
@utils.arg('volume', metavar='<volume>', help='ID of the volume to delete.')
@utils.service_type('volume')
def do_force_delete(cs, args):
"""Attempt forced removal of a volume, regardless of its state."""
volume = _find_volume(cs, args.volume)
volume.force_delete()
@utils.arg('volume', metavar='<volume>', help='ID of the volume to modify.')
@utils.arg('--state', metavar='<state>', default='available',
help=('Indicate which state to assign the volume. Options include '
'available, error, creating, deleting, error_deleting. If no '
'state is provided, available will be used.'))
@utils.service_type('volume')
def do_reset_state(cs, args):
"""Explicitly update the state of a volume."""
volume = _find_volume(cs, args.volume)
volume.reset_state(args.state)
@utils.arg('volume', metavar='<volume>', help='ID of the volume to rename.')
@utils.arg('display_name', nargs='?', metavar='<display-name>',
help='New display-name for the volume.')
@utils.arg('--display-description', metavar='<display-description>',
help='Optional volume description. (Default=None)',
default=None)
@utils.service_type('volume')
def do_rename(cs, args):
"""Rename a volume."""
kwargs = {}
if args.display_name is not None:
kwargs['display_name'] = args.display_name
if args.display_description is not None:
kwargs['display_description'] = args.display_description
_find_volume(cs, args.volume).update(**kwargs)
@utils.arg('volume',
metavar='<volume>',
help='ID of the volume to update metadata on.')
@utils.arg('action',
metavar='<action>',
choices=['set', 'unset'],
help="Actions: 'set' or 'unset'")
@utils.arg('metadata',
metavar='<key=value>',
nargs='+',
default=[],
help='Metadata to set/unset (only key is necessary on unset)')
@utils.service_type('volume')
def do_metadata(cs, args):
"""Set or Delete metadata on a volume."""
volume = _find_volume(cs, args.volume)
metadata = _extract_metadata(args)
if args.action == 'set':
cs.volumes.set_metadata(volume, metadata)
elif args.action == 'unset':
cs.volumes.delete_metadata(volume, list(metadata.keys()))
@utils.arg(
'--all-tenants',
dest='all_tenants',
metavar='<0|1>',
nargs='?',
type=int,
const=1,
default=0,
help='Display information from all tenants (Admin only).')
@utils.arg(
'--all_tenants',
nargs='?',
type=int,
const=1,
help=argparse.SUPPRESS)
@utils.arg(
'--display-name',
metavar='<display-name>',
default=None,
help='Filter results by display-name')
@utils.arg(
'--status',
metavar='<status>',
default=None,
help='Filter results by status')
@utils.arg(
'--volume-id',
metavar='<volume-id>',
default=None,
help='Filter results by volume-id')
@utils.service_type('volume')
def do_snapshot_list(cs, args):
"""List all the snapshots."""
all_tenants = int(os.environ.get("ALL_TENANTS", args.all_tenants))
search_opts = {
'all_tenants': all_tenants,
'display_name': args.display_name,
'status': args.status,
'volume_id': args.volume_id,
}
snapshots = cs.volume_snapshots.list(search_opts=search_opts)
_translate_volume_snapshot_keys(snapshots)
utils.print_list(snapshots,
['ID', 'Volume ID', 'Status', 'Display Name', 'Size'])
@utils.arg('snapshot', metavar='<snapshot>', help='ID of the snapshot.')
@utils.service_type('volume')
def do_snapshot_show(cs, args):
"""Show details about a snapshot."""
snapshot = _find_volume_snapshot(cs, args.snapshot)
_print_volume_snapshot(snapshot)
@utils.arg('volume_id',
metavar='<volume-id>',
help='ID of the volume to snapshot')
@utils.arg('--force',
metavar='<True|False>',
help='Optional flag to indicate whether '
'to snapshot a volume even if it\'s '
'attached to an instance. (Default=False)',
default=False)
@utils.arg(
'--display-name',
metavar='<display-name>',
default=None,
help='Optional snapshot name. (Default=None)')
@utils.arg(
'--display_name',
help=argparse.SUPPRESS)
@utils.arg(
'--display-description',
metavar='<display-description>',
default=None,
help='Optional snapshot description. (Default=None)')
@utils.arg(
'--display_description',
help=argparse.SUPPRESS)
@utils.service_type('volume')
def do_snapshot_create(cs, args):
"""Add a new snapshot."""
snapshot = cs.volume_snapshots.create(args.volume_id,
args.force,
args.display_name,
args.display_description)
_print_volume_snapshot(snapshot)
@utils.arg('snapshot_id',
metavar='<snapshot-id>',
help='ID of the snapshot to delete.')
@utils.service_type('volume')
def do_snapshot_delete(cs, args):
"""Remove a snapshot."""
snapshot = _find_volume_snapshot(cs, args.snapshot_id)
snapshot.delete()
@utils.arg('snapshot', metavar='<snapshot>', help='ID of the snapshot.')
@utils.arg('display_name', nargs='?', metavar='<display-name>',
help='New display-name for the snapshot.')
@utils.arg('--display-description', metavar='<display-description>',
help='Optional snapshot description. (Default=None)',
default=None)
@utils.service_type('volume')
def do_snapshot_rename(cs, args):
"""Rename a snapshot."""
kwargs = {}
if args.display_name is not None:
kwargs['display_name'] = args.display_name
if args.display_description is not None:
kwargs['display_description'] = args.display_description
_find_volume_snapshot(cs, args.snapshot).update(**kwargs)
@utils.arg('snapshot', metavar='<snapshot>',
help='ID of the snapshot to modify.')
@utils.arg('--state', metavar='<state>',
default='available',
help=('Indicate which state to assign the snapshot. '
'Options include available, error, creating, deleting, '
'error_deleting. If no state is provided, '
'available will be used.'))
@utils.service_type('volume')
def do_snapshot_reset_state(cs, args):
"""Explicitly update the state of a snapshot."""
snapshot = _find_volume_snapshot(cs, args.snapshot)
snapshot.reset_state(args.state)
def _print_volume_type_list(vtypes):
utils.print_list(vtypes, ['ID', 'Name'])
def _print_type_and_extra_specs_list(vtypes):
formatters = {'extra_specs': _print_type_extra_specs}
utils.print_list(vtypes, ['ID', 'Name', 'extra_specs'], formatters)
@utils.service_type('volume')
def do_type_list(cs, args):
"""Print a list of available 'volume types'."""
vtypes = cs.volume_types.list()
_print_volume_type_list(vtypes)
@utils.service_type('volume')
def do_extra_specs_list(cs, args):
"""Print a list of current 'volume types and extra specs' (Admin Only)."""
vtypes = cs.volume_types.list()
_print_type_and_extra_specs_list(vtypes)
@utils.arg('name',
metavar='<name>',
help="Name of the new volume type")
@utils.service_type('volume')
def do_type_create(cs, args):
"""Create a new volume type."""
vtype = cs.volume_types.create(args.name)
_print_volume_type_list([vtype])
@utils.arg('id',
metavar='<id>',
help="Unique ID of the volume type to delete")
@utils.service_type('volume')
def do_type_delete(cs, args):
"""Delete a specific volume type."""
cs.volume_types.delete(args.id)
@utils.arg('vtype',
metavar='<vtype>',
help="Name or ID of the volume type")
@utils.arg('action',
metavar='<action>',
choices=['set', 'unset'],
help="Actions: 'set' or 'unset'")
@utils.arg('metadata',
metavar='<key=value>',
nargs='*',
default=None,
help='Extra_specs to set/unset (only key is necessary on unset)')
@utils.service_type('volume')
def do_type_key(cs, args):
"""Set or unset extra_spec for a volume type."""
vtype = _find_volume_type(cs, args.vtype)
if args.metadata is not None:
keypair = _extract_metadata(args)
if args.action == 'set':
vtype.set_keys(keypair)
elif args.action == 'unset':
vtype.unset_keys(list(keypair.keys()))
def do_endpoints(cs, args):
"""Discover endpoints that get returned from the authenticate services."""
catalog = cs.client.service_catalog.catalog
for e in catalog['access']['serviceCatalog']:
utils.print_dict(e['endpoints'][0], e['name'])
def do_credentials(cs, args):
"""Show user credentials returned from auth."""
catalog = cs.client.service_catalog.catalog
utils.print_dict(catalog['access']['user'], "User Credentials")
utils.print_dict(catalog['access']['token'], "Token")
_quota_resources = ['volumes', 'snapshots', 'gigabytes']
def _quota_show(quotas):
quota_dict = {}
for resource in quotas._info.keys():
good_name = False
for name in _quota_resources:
if resource.startswith(name):
good_name = True
if not good_name:
continue
quota_dict[resource] = getattr(quotas, resource, None)
utils.print_dict(quota_dict)
def _quota_update(manager, identifier, args):
updates = {}
for resource in _quota_resources:
val = getattr(args, resource, None)
if val is not None:
if args.volume_type:
resource = resource + '_%s' % args.volume_type
updates[resource] = val
if updates:
manager.update(identifier, **updates)
@utils.arg('tenant', metavar='<tenant_id>',
help='UUID of tenant to list the quotas for.')
@utils.service_type('volume')
def do_quota_show(cs, args):
"""List the quotas for a tenant."""
_quota_show(cs.quotas.get(args.tenant))
@utils.arg('tenant', metavar='<tenant_id>',
help='UUID of tenant to list the default quotas for.')
@utils.service_type('volume')
def do_quota_defaults(cs, args):
"""List the default quotas for a tenant."""
_quota_show(cs.quotas.defaults(args.tenant))
@utils.arg('tenant', metavar='<tenant_id>',
help='UUID of tenant to set the quotas for.')
@utils.arg('--volumes',
metavar='<volumes>',
type=int, default=None,
help='New value for the "volumes" quota.')
@utils.arg('--snapshots',
metavar='<snapshots>',
type=int, default=None,
help='New value for the "snapshots" quota.')
@utils.arg('--gigabytes',
metavar='<gigabytes>',
type=int, default=None,
help='New value for the "gigabytes" quota.')
@utils.arg('--volume-type',
metavar='<volume_type_name>',
default=None,
help='Volume type (Optional, Default=None)')
@utils.service_type('volume')
def do_quota_update(cs, args):
"""Update the quotas for a tenant."""
_quota_update(cs.quotas, args.tenant, args)
@utils.arg('class_name', metavar='<class>',
help='Name of quota class to list the quotas for.')
@utils.service_type('volume')
def do_quota_class_show(cs, args):
"""List the quotas for a quota class."""
_quota_show(cs.quota_classes.get(args.class_name))
@utils.arg('class_name', metavar='<class>',
help='Name of quota class to set the quotas for.')
@utils.arg('--volumes',
metavar='<volumes>',
type=int, default=None,
help='New value for the "volumes" quota.')
@utils.arg('--snapshots',
metavar='<snapshots>',
type=int, default=None,
help='New value for the "snapshots" quota.')
@utils.arg('--gigabytes',
metavar='<gigabytes>',
type=int, default=None,
help='New value for the "gigabytes" quota.')
@utils.arg('--volume-type',
metavar='<volume_type_name>',
default=None,
help='Volume type (Optional, Default=None)')
@utils.service_type('volume')
def do_quota_class_update(cs, args):
"""Update the quotas for a quota class."""
_quota_update(cs.quota_classes, args.class_name, args)
@utils.service_type('volume')
def do_absolute_limits(cs, args):
"""Print a list of absolute limits for a user"""
limits = cs.limits.get().absolute
columns = ['Name', 'Value']
utils.print_list(limits, columns)
@utils.service_type('volume')
def do_rate_limits(cs, args):
"""Print a list of rate limits for a user"""
limits = cs.limits.get().rate
columns = ['Verb', 'URI', 'Value', 'Remain', 'Unit', 'Next_Available']
utils.print_list(limits, columns)
def _print_type_extra_specs(vol_type):
try:
return vol_type.get_keys()
except exceptions.NotFound:
return "N/A"
def _find_volume_type(cs, vtype):
"""Get a volume type by name or ID."""
return utils.find_resource(cs.volume_types, vtype)
@utils.arg('volume_id',
metavar='<volume-id>',
help='ID of the volume to upload to an image')
@utils.arg('--force',
metavar='<True|False>',
help='Optional flag to indicate whether '
'to upload a volume even if it\'s '
'attached to an instance. (Default=False)',
default=False)
@utils.arg('--container-format',
metavar='<container-format>',
help='Optional type for container format '
'(Default=bare)',
default='bare')
@utils.arg('--disk-format',
metavar='<disk-format>',
help='Optional type for disk format '
'(Default=raw)',
default='raw')
@utils.arg('image_name',
metavar='<image-name>',
help='Name for created image')
@utils.service_type('volume')
def do_upload_to_image(cs, args):
"""Upload volume to image service as image."""
volume = _find_volume(cs, args.volume_id)
_print_volume_image(volume.upload_to_image(args.force,
args.image_name,
args.container_format,
args.disk_format))
@utils.arg('volume', metavar='<volume>',
help='ID of the volume to backup.')
@utils.arg('--container', metavar='<container>',
help='Optional Backup container name. (Default=None)',
default=None)
@utils.arg('--display-name', metavar='<display-name>',
help='Optional backup name. (Default=None)',
default=None)
@utils.arg('--display-description', metavar='<display-description>',
help='Optional backup description. (Default=None)',
default=None)
@utils.service_type('volume')
def do_backup_create(cs, args):
"""Creates a backup."""
backup = cs.backups.create(args.volume,
args.container,
args.display_name,
args.display_description)
info = {"volume_id": args.volume}
info.update(backup._info)
if 'links' in info:
info.pop('links')
utils.print_dict(info)
@utils.arg('backup', metavar='<backup>', help='ID of the backup.')
@utils.service_type('volume')
def do_backup_show(cs, args):
"""Show details about a backup."""
backup = _find_backup(cs, args.backup)
info = dict()
info.update(backup._info)
if 'links' in info:
info.pop('links')
utils.print_dict(info)
@utils.service_type('volume')
def do_backup_list(cs, args):
"""List all the backups."""
backups = cs.backups.list()
columns = ['ID', 'Volume ID', 'Status', 'Name', 'Size', 'Object Count',
'Container']
utils.print_list(backups, columns)
@utils.arg('backup', metavar='<backup>',
help='ID of the backup to delete.')
@utils.service_type('volume')
def do_backup_delete(cs, args):
"""Remove a backup."""
backup = _find_backup(cs, args.backup)
backup.delete()
@utils.arg('backup', metavar='<backup>',
help='ID of the backup to restore.')
@utils.arg('--volume-id', metavar='<volume-id>',
help='Optional ID of the volume to restore to.',
default=None)
@utils.service_type('volume')
def do_backup_restore(cs, args):
"""Restore a backup."""
cs.restores.restore(args.backup,
args.volume_id)
@utils.arg('volume', metavar='<volume>',
help='ID of the volume to transfer.')
@utils.arg('--display-name', metavar='<display-name>',
help='Optional transfer name. (Default=None)',
default=None)
@utils.service_type('volume')
def do_transfer_create(cs, args):
"""Creates a volume transfer."""
transfer = cs.transfers.create(args.volume,
args.display_name)
info = dict()
info.update(transfer._info)
if 'links' in info:
info.pop('links')
utils.print_dict(info)
@utils.arg('transfer', metavar='<transfer>',
help='ID of the transfer to delete.')
@utils.service_type('volume')
def do_transfer_delete(cs, args):
"""Undo a transfer."""
transfer = _find_transfer(cs, args.transfer)
transfer.delete()
@utils.arg('transfer', metavar='<transfer>',
help='ID of the transfer to accept.')
@utils.arg('auth_key', metavar='<auth_key>',
help='Auth key of the transfer to accept.')
@utils.service_type('volume')
def do_transfer_accept(cs, args):
"""Accepts a volume transfer."""
transfer = cs.transfers.accept(args.transfer, args.auth_key)
info = dict()
info.update(transfer._info)
if 'links' in info:
info.pop('links')
utils.print_dict(info)
@utils.service_type('volume')
def do_transfer_list(cs, args):
"""List all the transfers."""
transfers = cs.transfers.list()
columns = ['ID', 'Volume ID', 'Name']
utils.print_list(transfers, columns)
@utils.arg('transfer', metavar='<transfer>',
help='ID of the transfer to accept.')
@utils.service_type('volume')
def do_transfer_show(cs, args):
"""Show details about a transfer."""
transfer = _find_transfer(cs, args.transfer)
info = dict()
info.update(transfer._info)
if 'links' in info:
info.pop('links')
utils.print_dict(info)
@utils.arg('volume', metavar='<volume>', help='ID of the volume to extend.')
@utils.arg('new_size',
metavar='<new-size>',
type=int,
help='New size of volume in GB')
@utils.service_type('volume')
def do_extend(cs, args):
"""Attempt to extend the size of an existing volume."""
volume = _find_volume(cs, args.volume)
cs.volumes.extend(volume, args.new_size)
@utils.arg('--host', metavar='<hostname>', default=None,
help='Name of host.')
@utils.arg('--binary', metavar='<binary>', default=None,
help='Service binary.')
@utils.service_type('volume')
def do_service_list(cs, args):
"""List all the services. Filter by host & service binary."""
result = cs.services.list(host=args.host, binary=args.binary)
columns = ["Binary", "Host", "Zone", "Status", "State", "Updated_at"]
utils.print_list(result, columns)
@utils.arg('host', metavar='<hostname>', help='Name of host.')
@utils.arg('binary', metavar='<binary>', help='Service binary.')
@utils.service_type('volume')
def do_service_enable(cs, args):
"""Enable the service.""" | [
" cs.services.enable(args.host, args.binary)"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r'''
Simple script that launches GKE pods to act as client workers for a TPU pod.
This script strongly assumes that it is running in the context of another GKE
pod that has a TPU attached. As such, this script expects that you will provide
the current pod's name, UID, and TPU pod addresses via the downward API (the
TPU addresses are automatically given in $KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS).
This script does not clean up created resources. Instead, it sets
`metadata.ownerReferences` such that GKE's garbage collector will clean up
the created pods and services when the invoking pod is deleted.
Example:
```
python3 launch_k8s_workers.py \
--name=pytorch-xla-pods \
--image=gcr.io/xl-ml-test/pytorch-xla:nightly \
--owner_name=$POD_NAME \
--owner_uid=$POD_UID \
--tpu=$KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS \
--cpu=4 \
--memory=4Gi \
-- \
python3 /pytorch/xla/test/test_train_mp_imagenet.py --fake_data
```
'''
import concurrent.futures
import os
import random
import re
import string
import time
from absl import app
from absl import flags
from absl import logging
import kubernetes
FLAGS = flags.FLAGS
flags.DEFINE_string('name', None,
'Name of worker StatefulSet. Must be unique in `namespace`.')
flags.DEFINE_string('command', None, 'Command to run on each worker.')
flags.DEFINE_string('namespace', 'default',
'The namespace of the created StatefulSet.')
flags.DEFINE_string('tpu', os.getenv('KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS', None),
'List of grpc:// addresses for the TPU. Defaults to '
'$KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS.')
flags.DEFINE_string('owner_name', None, 'Name of Pod that owns workers, if any.')
flags.DEFINE_string('owner_uid', None, 'UUID of Pod that owns workers, if any.')
flags.DEFINE_string('image', 'gcr.io/tpu-pytorch/xla:nightly',
'Docker image used for workers in created StatefulSet.')
flags.DEFINE_string('cpu', '4', 'CPU request for each worker.')
flags.DEFINE_string('memory', '4Gi', 'Memory request for each worker.')
flags.DEFINE_list('volumes', None,
'Comma-separated list of [PVC_NAME]:[MOUNT_PATH], where '
'[PVC_NAME] is the name of a Kubernetes PersistentVolumeClaim '
'and [MOUNT_PATH] is the directory where the PVC will be '
'mounted.')
def _format_env(envs):
return [{'name': k, 'value': v} for k, v in envs.items()]
# Name must consist of lower case alphanumeric characters or '-'.
def _sanitize_job_name(name):
return re.sub(r'[^a-z0-9\-]', '-', name.lower())
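# Illustrative behaviour of the helpers above (values are examples only):
#   _format_env({'FOO': 'bar'}) -> [{'name': 'FOO', 'value': 'bar'}]
#   _sanitize_job_name('PyTorch_XLA.Workers') -> 'pytorch-xla-workers'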
def main(argv):
if FLAGS.command and len(argv) > 1:
logging.warning('`--command` defined. Ignoring positional arguments.')
elif not FLAGS.command and len(argv) > 1:
FLAGS.command = ' '.join(argv[1:])
elif not FLAGS.command:
logging.error(
'Must define `--command` or give command as a positional argument.')
return 1
logging.info('Command to distribute: `%s`', FLAGS.command)
try:
kubernetes.config.load_incluster_config()
except:
logging.warning('No Kubernetes cluster config. Using local kube config.')
kubernetes.config.load_kube_config()
k8s_client = kubernetes.client.CoreV1Api()
random_suffix = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6))
job_name = '{}-{}'.format(FLAGS.name, random_suffix)
if FLAGS.owner_name:
ownerReferences = [{
'apiVersion': 'v1',
'controller': True,
'blockOwnerDeletion': True,
'kind': 'Pod',
'name': FLAGS.owner_name,
'uid': FLAGS.owner_uid
}]
else:
ownerReferences = None
service_request = kubernetes.client.V1Service(**{
'metadata': {
'name': _sanitize_job_name(job_name),
'ownerReferences': ownerReferences,
},
'spec': {
'ports': [{
'name': 'xrt-mesh',
'port': 8477,
'protocol': 'UDP',
}],
# Use headless service -- a load balancer is unnecessary for one pod.
'clusterIP': 'None',
# Bind to the master pod (i.e. index 0).
'selector': {
'app': 'pytorch-xla',
'group': job_name,
'role': 'xrt-worker',
'index': '0'
}
}
})
service = k8s_client.create_namespaced_service(FLAGS.namespace, service_request)
service_name = service.metadata.name
tpu_hosts = FLAGS.tpu.split(',')
num_workers = len(tpu_hosts)
master_envs = {
'XRT_TPU_CONFIG': '|'.join(
'c_tpu_worker;{};{}'.format(i, host.replace('grpc://', ''))
for i, host in enumerate(tpu_hosts)
)
}
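# Illustrative result (addresses are placeholders): with two TPU hosts the
# join above yields something like
#   'c_tpu_worker;0;10.0.0.2:8470|c_tpu_worker;1;10.0.0.3:8470'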
common_envs = {
'XRT_LOCAL_WORKER': 'c_tpu_worker:$(INDEX)',
'XRT_SHARD_ORDINAL': '$(INDEX)',
'XRT_MESH_SERVICE_ADDRESS': '{}.{}.svc.cluster.local:8477'.format(
service_name, FLAGS.namespace),
'XRT_SHARD_WORLD_SIZE': str(num_workers),
'TPU_NUM_DEVICES': '8',
}
if FLAGS.volumes:
volumes = {
name: mount_path for name, mount_path in
[v.split(':') for v in FLAGS.volumes]
}
else:
volumes = {}
pods = []
for i in range(num_workers):
body = kubernetes.client.V1Pod(**{
'metadata': { | [
" 'name': f'{job_name}-{i}',"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile southwest of the island of Tavarua, Fiji. Cloudbreak is a cloud-agnostic Hadoop as a Service API. It abstracts the provisioning and eases the management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing you to spin up Hadoop clusters of arbitrary sizes on different cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers' APIs (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ClusterViewResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'name': 'str',
'description': 'str',
'status': 'str',
'secure': 'bool',
'ambari_server_ip': 'str',
'blueprint': 'BlueprintViewResponse',
'host_groups': 'list[HostGroupViewResponse]',
'shared_service_response': 'SharedServiceResponse'
}
attribute_map = {
'id': 'id',
'name': 'name',
'description': 'description',
'status': 'status',
'secure': 'secure',
'ambari_server_ip': 'ambariServerIp',
'blueprint': 'blueprint',
'host_groups': 'hostGroups',
'shared_service_response': 'sharedServiceResponse'
}
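    # Illustrative note (not part of the generated code): `attribute_map` links
    # each Python attribute to its JSON key, e.g. the attribute
    # `ambari_server_ip` is (de)serialized as `ambariServerIp`.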
def __init__(self, id=None, name=None, description=None, status=None, secure=False, ambari_server_ip=None, blueprint=None, host_groups=None, shared_service_response=None):
"""
ClusterViewResponse - a model defined in Swagger
"""
self._id = None
self._name = None
self._description = None
self._status = None
self._secure = None
self._ambari_server_ip = None
self._blueprint = None
self._host_groups = None
self._shared_service_response = None
if id is not None:
self.id = id
self.name = name
if description is not None:
self.description = description
if status is not None:
self.status = status
if secure is not None:
self.secure = secure
if ambari_server_ip is not None:
self.ambari_server_ip = ambari_server_ip
if blueprint is not None:
self.blueprint = blueprint
if host_groups is not None:
self.host_groups = host_groups
if shared_service_response is not None:
self.shared_service_response = shared_service_response
@property
def id(self):
"""
Gets the id of this ClusterViewResponse.
id of the resource
:return: The id of this ClusterViewResponse.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this ClusterViewResponse.
id of the resource
:param id: The id of this ClusterViewResponse.
:type: int
"""
self._id = id
@property
def name(self):
"""
Gets the name of this ClusterViewResponse.
name of the resource
:return: The name of this ClusterViewResponse.
:rtype: str | [
" \"\"\""
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
#
# Test script for the curses module
#
# This script doesn't actually display anything very coherent, but it
# does call (nearly) every method and function.
#
# Functions not tested: {def,reset}_{shell,prog}_mode, getch(), getstr(),
# init_color()
# Only called, not tested: getmouse(), ungetmouse()
#
import os
import string
import sys
import tempfile
import unittest
from test.support import requires, import_module, verbose
# Optionally test curses module. This currently requires that the
# 'curses' resource be given on the regrtest command line using the -u
# option. If not available, nothing after this line will be executed.
import inspect
requires('curses')
# If either of these don't exist, skip the tests.
curses = import_module('curses')
import_module('curses.panel')
import_module('curses.ascii')
import_module('curses.textpad')
def requires_curses_func(name):
return unittest.skipUnless(hasattr(curses, name),
'requires curses.%s' % name)
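# Usage sketch (mirrors how the decorator is applied further below, e.g. on
# test_getmouse):
#   @requires_curses_func('getmouse')
#   def test_getmouse(self): ...
# The test is skipped when the curses build lacks that function.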
term = os.environ.get('TERM')
# If newterm was supported we could use it instead of initscr and not exit
@unittest.skipIf(not term or term == 'unknown',
"$TERM=%r, calling initscr() may cause exit" % term)
@unittest.skipIf(sys.platform == "cygwin",
"cygwin's curses mostly just hangs")
class TestCurses(unittest.TestCase):
@classmethod
def setUpClass(cls):
if not sys.__stdout__.isatty():
            # Temporarily skip tests on non-tty
raise unittest.SkipTest('sys.__stdout__ is not a tty')
cls.tmp = tempfile.TemporaryFile()
fd = cls.tmp.fileno()
else:
cls.tmp = None
fd = sys.__stdout__.fileno()
# testing setupterm() inside initscr/endwin
# causes terminal breakage
curses.setupterm(fd=fd)
@classmethod
def tearDownClass(cls):
if cls.tmp:
cls.tmp.close()
del cls.tmp
def setUp(self):
if verbose:
# just to make the test output a little more readable
print()
self.stdscr = curses.initscr()
curses.savetty()
def tearDown(self):
curses.resetty()
curses.endwin()
def test_window_funcs(self):
"Test the methods of windows"
stdscr = self.stdscr
win = curses.newwin(10,10)
win = curses.newwin(5,5, 5,5)
win2 = curses.newwin(15,15, 5,5)
for meth in [stdscr.addch, stdscr.addstr]:
for args in [('a'), ('a', curses.A_BOLD),
(4,4, 'a'), (5,5, 'a', curses.A_BOLD)]:
with self.subTest(meth=meth.__qualname__, args=args):
meth(*args)
for meth in [stdscr.box, stdscr.clear, stdscr.clrtobot,
stdscr.clrtoeol, stdscr.cursyncup, stdscr.delch,
stdscr.deleteln, stdscr.erase, stdscr.getbegyx,
stdscr.getbkgd, stdscr.getkey, stdscr.getmaxyx,
stdscr.getparyx, stdscr.getyx, stdscr.inch,
stdscr.insertln, stdscr.instr, stdscr.is_wintouched,
win.noutrefresh, stdscr.redrawwin, stdscr.refresh,
stdscr.standout, stdscr.standend, stdscr.syncdown,
stdscr.syncup, stdscr.touchwin, stdscr.untouchwin]:
with self.subTest(meth=meth.__qualname__):
meth()
stdscr.addnstr('1234', 3)
stdscr.addnstr('1234', 3, curses.A_BOLD)
stdscr.addnstr(4,4, '1234', 3)
stdscr.addnstr(5,5, '1234', 3, curses.A_BOLD)
stdscr.attron(curses.A_BOLD)
stdscr.attroff(curses.A_BOLD)
stdscr.attrset(curses.A_BOLD)
stdscr.bkgd(' ')
stdscr.bkgd(' ', curses.A_REVERSE)
stdscr.bkgdset(' ')
stdscr.bkgdset(' ', curses.A_REVERSE)
win.border(65, 66, 67, 68,
69, 70, 71, 72)
win.border('|', '!', '-', '_',
'+', '\\', '#', '/')
with self.assertRaises(TypeError,
msg="Expected win.border() to raise TypeError"):
win.border(65, 66, 67, 68,
69, [], 71, 72)
stdscr.clearok(1)
win4 = stdscr.derwin(2,2)
win4 = stdscr.derwin(1,1, 5,5)
win4.mvderwin(9,9)
stdscr.echochar('a')
stdscr.echochar('a', curses.A_BOLD)
stdscr.hline('-', 5)
stdscr.hline('-', 5, curses.A_BOLD)
stdscr.hline(1,1,'-', 5)
stdscr.hline(1,1,'-', 5, curses.A_BOLD)
stdscr.idcok(1)
stdscr.idlok(1)
stdscr.immedok(1)
stdscr.insch('c')
stdscr.insdelln(1)
stdscr.insnstr('abc', 3)
stdscr.insnstr('abc', 3, curses.A_BOLD)
stdscr.insnstr(5, 5, 'abc', 3)
stdscr.insnstr(5, 5, 'abc', 3, curses.A_BOLD)
stdscr.insstr('def')
stdscr.insstr('def', curses.A_BOLD)
stdscr.insstr(5, 5, 'def')
stdscr.insstr(5, 5, 'def', curses.A_BOLD)
stdscr.is_linetouched(0)
stdscr.keypad(1)
stdscr.leaveok(1)
stdscr.move(3,3)
win.mvwin(2,2)
stdscr.nodelay(1)
stdscr.notimeout(1)
win2.overlay(win)
win2.overwrite(win)
win2.overlay(win, 1, 2, 2, 1, 3, 3)
win2.overwrite(win, 1, 2, 2, 1, 3, 3)
stdscr.redrawln(1,2)
stdscr.scrollok(1)
stdscr.scroll()
stdscr.scroll(2)
stdscr.scroll(-3)
stdscr.move(12, 2)
stdscr.setscrreg(10,15)
win3 = stdscr.subwin(10,10)
win3 = stdscr.subwin(10,10, 5,5)
stdscr.syncok(1)
stdscr.timeout(5)
stdscr.touchline(5,5)
stdscr.touchline(5,5,0)
stdscr.vline('a', 3)
stdscr.vline('a', 3, curses.A_STANDOUT)
stdscr.chgat(5, 2, 3, curses.A_BLINK)
stdscr.chgat(3, curses.A_BOLD)
stdscr.chgat(5, 8, curses.A_UNDERLINE)
stdscr.chgat(curses.A_BLINK)
stdscr.refresh()
stdscr.vline(1,1, 'a', 3)
stdscr.vline(1,1, 'a', 3, curses.A_STANDOUT)
if hasattr(curses, 'resize'):
stdscr.resize()
if hasattr(curses, 'enclose'):
stdscr.enclose()
self.assertRaises(ValueError, stdscr.getstr, -400)
self.assertRaises(ValueError, stdscr.getstr, 2, 3, -400)
self.assertRaises(ValueError, stdscr.instr, -2)
self.assertRaises(ValueError, stdscr.instr, 2, 3, -2)
def test_module_funcs(self):
"Test module-level functions"
for func in [curses.baudrate, curses.beep, curses.can_change_color,
curses.cbreak, curses.def_prog_mode, curses.doupdate,
curses.filter, curses.flash, curses.flushinp,
curses.has_colors, curses.has_ic, curses.has_il,
curses.isendwin, curses.killchar, curses.longname,
curses.nocbreak, curses.noecho, curses.nonl,
curses.noqiflush, curses.noraw,
curses.reset_prog_mode, curses.termattrs,
curses.termname, curses.erasechar, curses.getsyx]:
with self.subTest(func=func.__qualname__):
func()
# Functions that actually need arguments
if curses.tigetstr("cnorm"):
curses.curs_set(1)
curses.delay_output(1)
curses.echo() ; curses.echo(1)
with tempfile.TemporaryFile() as f:
self.stdscr.putwin(f)
f.seek(0)
curses.getwin(f)
curses.halfdelay(1)
curses.intrflush(1)
curses.meta(1)
curses.napms(100)
curses.newpad(50,50)
win = curses.newwin(5,5)
win = curses.newwin(5,5, 1,1)
curses.nl() ; curses.nl(1)
curses.putp(b'abc')
curses.qiflush()
curses.raw() ; curses.raw(1)
curses.setsyx(5,5)
curses.tigetflag('hc')
curses.tigetnum('co')
curses.tigetstr('cr')
curses.tparm(b'cr')
curses.typeahead(sys.__stdin__.fileno())
curses.unctrl('a')
curses.ungetch('a')
curses.use_env(1)
# Functions only available on a few platforms
def test_colors_funcs(self):
if not curses.has_colors():
self.skipTest('requires colors support')
curses.start_color()
curses.init_pair(2, 1,1)
curses.color_content(1)
curses.color_pair(2)
curses.pair_content(curses.COLOR_PAIRS - 1)
curses.pair_number(0)
if hasattr(curses, 'use_default_colors'):
curses.use_default_colors()
@requires_curses_func('keyname')
def test_keyname(self):
curses.keyname(13)
@requires_curses_func('has_key')
def test_has_key(self):
curses.has_key(13)
@requires_curses_func('getmouse') | [
" def test_getmouse(self):"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for Iterators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import warnings
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util.tf_export import tf_export
# NOTE(mrry): It is legitimate to call `Iterator.get_next()` multiple
# times, e.g. when you are distributing different elements to multiple
# devices in a single step. However, a common pitfall arises when
# users call `Iterator.get_next()` in each iteration of their training
# loop. `Iterator.get_next()` adds ops to the graph, and executing
# each op allocates resources (including threads); as a consequence,
# invoking it in every iteration of a training loop causes slowdown
# and eventual resource exhaustion. To guard against this outcome, we
# log a warning when the number of uses crosses a threshold of suspicion.
GET_NEXT_CALL_WARNING_THRESHOLD = 32
GET_NEXT_CALL_WARNING_MESSAGE = (
"An unusually high number of `Iterator.get_next()` calls was detected. "
"This often indicates that `Iterator.get_next()` is being called inside "
"a training loop, which will cause gradual slowdown and eventual resource "
"exhaustion. If this is the case, restructure your code to call "
"`next_element = iterator.get_next()` once outside the loop, and use "
"`next_element` as the input to some computation that is invoked inside "
"the loop.")
@tf_export("data.Iterator")
class Iterator(object):
"""Represents the state of iterating through a `Dataset`."""
def __init__(self, iterator_resource, initializer, output_types,
output_shapes, output_classes):
"""Creates a new iterator from the given iterator resource.
Note: Most users will not call this initializer directly, and will
instead use `Dataset.make_initializable_iterator()` or
`Dataset.make_one_shot_iterator()`.
Args:
iterator_resource: A `tf.resource` scalar `tf.Tensor` representing the
iterator.
initializer: A `tf.Operation` that should be run to initialize this
iterator.
output_types: A nested structure of `tf.DType` objects corresponding to
each component of an element of this dataset.
output_shapes: A nested structure of `tf.TensorShape` objects
corresponding to each component of an element of this dataset.
      output_classes: A nested structure of Python `type` objects corresponding
        to each component of an element of this iterator.
"""
self._iterator_resource = iterator_resource
self._initializer = initializer
self._output_classes = output_classes
self._output_types = output_types
self._output_shapes = output_shapes
self._string_handle = gen_dataset_ops.iterator_to_string_handle(
self._iterator_resource)
self._get_next_call_count = 0
@staticmethod
def from_structure(output_types,
output_shapes=None,
shared_name=None,
output_classes=None):
"""Creates a new, uninitialized `Iterator` with the given structure.
This iterator-constructing method can be used to create an iterator that
is reusable with many different datasets.
The returned iterator is not bound to a particular dataset, and it has
no `initializer`. To initialize the iterator, run the operation returned by
`Iterator.make_initializer(dataset)`.
The following is an example
```python
iterator = Iterator.from_structure(tf.int64, tf.TensorShape([]))
dataset_range = Dataset.range(10)
range_initializer = iterator.make_initializer(dataset_range)
dataset_evens = dataset_range.filter(lambda x: x % 2 == 0)
evens_initializer = iterator.make_initializer(dataset_evens)
# Define a model based on the iterator; in this example, the model_fn
# is expected to take scalar tf.int64 Tensors as input (see
# the definition of 'iterator' above).
prediction, loss = model_fn(iterator.get_next())
# Train for `num_epochs`, where for each epoch, we first iterate over
# dataset_range, and then iterate over dataset_evens.
for _ in range(num_epochs):
# Initialize the iterator to `dataset_range`
sess.run(range_initializer)
while True:
try:
pred, loss_val = sess.run([prediction, loss])
except tf.errors.OutOfRangeError:
break
# Initialize the iterator to `dataset_evens`
sess.run(evens_initializer)
while True:
try:
pred, loss_val = sess.run([prediction, loss])
except tf.errors.OutOfRangeError:
break
```
Args:
output_types: A nested structure of `tf.DType` objects corresponding to
each component of an element of this dataset.
output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
corresponding to each component of an element of this dataset. If
        omitted, each component will have an unconstrained shape.
shared_name: (Optional.) If non-empty, this iterator will be shared under
the given name across multiple sessions that share the same devices
(e.g. when using a remote server).
output_classes: (Optional.) A nested structure of Python `type` objects
corresponding to each component of an element of this iterator. If
omitted, each component is assumed to be of type `tf.Tensor`.
Returns:
An `Iterator`.
Raises:
TypeError: If the structures of `output_shapes` and `output_types` are
not the same.
"""
output_types = nest.map_structure(dtypes.as_dtype, output_types)
if output_shapes is None:
output_shapes = nest.map_structure(
lambda _: tensor_shape.TensorShape(None), output_types)
else:
output_shapes = nest.map_structure_up_to(
output_types, tensor_shape.as_shape, output_shapes)
if output_classes is None:
output_classes = nest.map_structure(lambda _: ops.Tensor, output_types)
nest.assert_same_structure(output_types, output_shapes)
if shared_name is None:
shared_name = ""
iterator_resource = gen_dataset_ops.iterator(
container="",
shared_name=shared_name,
output_types=nest.flatten(
sparse.as_dense_types(output_types, output_classes)),
output_shapes=nest.flatten(
sparse.as_dense_shapes(output_shapes, output_classes)))
return Iterator(iterator_resource, None, output_types, output_shapes,
output_classes)
@staticmethod
def from_string_handle(string_handle,
output_types,
output_shapes=None,
output_classes=None):
"""Creates a new, uninitialized `Iterator` based on the given handle.
This method allows you to define a "feedable" iterator where you can choose
between concrete iterators by feeding a value in a @{tf.Session.run} call.
    In that case, `string_handle` would be a @{tf.placeholder}, and you would feed
it with the value of @{tf.data.Iterator.string_handle} in each step.
For example, if you had two iterators that marked the current position in
a training dataset and a test dataset, you could choose which to use in
each step as follows:
```python
train_iterator = tf.data.Dataset(...).make_one_shot_iterator()
train_iterator_handle = sess.run(train_iterator.string_handle())
test_iterator = tf.data.Dataset(...).make_one_shot_iterator()
test_iterator_handle = sess.run(test_iterator.string_handle())
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
handle, train_iterator.output_types)
next_element = iterator.get_next()
loss = f(next_element)
train_loss = sess.run(loss, feed_dict={handle: train_iterator_handle})
test_loss = sess.run(loss, feed_dict={handle: test_iterator_handle})
```
Args:
string_handle: A scalar `tf.Tensor` of type `tf.string` that evaluates
to a handle produced by the `Iterator.string_handle()` method.
output_types: A nested structure of `tf.DType` objects corresponding to
each component of an element of this dataset.
output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
corresponding to each component of an element of this dataset. If
        omitted, each component will have an unconstrained shape.
output_classes: (Optional.) A nested structure of Python `type` objects
corresponding to each component of an element of this iterator. If
omitted, each component is assumed to be of type `tf.Tensor`.
Returns:
An `Iterator`.
"""
output_types = nest.map_structure(dtypes.as_dtype, output_types)
if output_shapes is None:
output_shapes = nest.map_structure(
lambda _: tensor_shape.TensorShape(None), output_types)
else:
output_shapes = nest.map_structure_up_to(
output_types, tensor_shape.as_shape, output_shapes)
if output_classes is None:
output_classes = nest.map_structure(lambda _: ops.Tensor, output_types)
nest.assert_same_structure(output_types, output_shapes)
string_handle = ops.convert_to_tensor(string_handle, dtype=dtypes.string)
iterator_resource = gen_dataset_ops.iterator_from_string_handle(
string_handle,
output_types=nest.flatten(
sparse.as_dense_types(output_types, output_classes)),
output_shapes=nest.flatten(
sparse.as_dense_shapes(output_shapes, output_classes)))
return Iterator(iterator_resource, None, output_types, output_shapes,
output_classes)
@property
def initializer(self):
"""A `tf.Operation` that should be run to initialize this iterator.
Returns:
A `tf.Operation` that should be run to initialize this iterator
Raises:
ValueError: If this iterator initializes itself automatically.
"""
if self._initializer is not None:
return self._initializer
else:
# TODO(mrry): Consider whether one-shot iterators should have
# initializers that simply reset their state to the beginning.
raise ValueError("Iterator does not have an initializer.")
def make_initializer(self, dataset, name=None):
"""Returns a `tf.Operation` that initializes this iterator on `dataset`.
Args:
dataset: A `Dataset` with compatible structure to this iterator.
name: (Optional.) A name for the created operation.
Returns:
A `tf.Operation` that can be run to initialize this iterator on the given
`dataset`.
Raises:
TypeError: If `dataset` and this iterator do not have a compatible
element structure.
"""
with ops.name_scope(name, "make_initializer") as name:
nest.assert_same_structure(self._output_types, dataset.output_types)
nest.assert_same_structure(self._output_shapes, dataset.output_shapes)
for iterator_class, dataset_class in zip(
nest.flatten(self._output_classes),
nest.flatten(dataset.output_classes)):
if iterator_class is not dataset_class:
raise TypeError(
"Expected output classes %r but got dataset with output class %r."
% (self._output_classes, dataset.output_classes))
for iterator_dtype, dataset_dtype in zip(
nest.flatten(self._output_types), nest.flatten(dataset.output_types)):
if iterator_dtype != dataset_dtype:
raise TypeError(
"Expected output types %r but got dataset with output types %r." %
(self._output_types, dataset.output_types))
for iterator_shape, dataset_shape in zip(
nest.flatten(self._output_shapes), nest.flatten(
dataset.output_shapes)):
if not iterator_shape.is_compatible_with(dataset_shape):
raise TypeError("Expected output shapes compatible with %r but got "
"dataset with output shapes %r." %
(self._output_shapes, dataset.output_shapes))
with ops.colocate_with(self._iterator_resource):
return gen_dataset_ops.make_iterator(
dataset._as_variant_tensor(), self._iterator_resource, name=name) # pylint: disable=protected-access
def get_next(self, name=None):
"""Returns a nested structure of `tf.Tensor`s representing the next element.
In graph mode, you should typically call this method *once* and use its
result as the input to another computation. A typical loop will then call
@{tf.Session.run} on the result of that computation. The loop will terminate
when the `Iterator.get_next()` operation raises
@{tf.errors.OutOfRangeError}. The following skeleton shows how to use
this method when building a training loop:
```python
dataset = ... # A `tf.data.Dataset` object.
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
# Build a TensorFlow graph that does something with each element.
loss = model_function(next_element)
optimizer = ... # A `tf.train.Optimizer` object.
train_op = optimizer.minimize(loss)
with tf.Session() as sess:
try:
while True:
sess.run(train_op)
except tf.errors.OutOfRangeError:
pass
```
NOTE: It is legitimate to call `Iterator.get_next()` multiple times, e.g.
when you are distributing different elements to multiple devices in a single
step. However, a common pitfall arises when users call `Iterator.get_next()`
in each iteration of their training loop. `Iterator.get_next()` adds ops to
the graph, and executing each op allocates resources (including threads); as
a consequence, invoking it in every iteration of a training loop causes
slowdown and eventual resource exhaustion. To guard against this outcome, we
log a warning when the number of uses crosses a fixed threshold of
suspiciousness.
Args:
name: (Optional.) A name for the created operation.
Returns:
A nested structure of `tf.Tensor` objects.
"""
self._get_next_call_count += 1
if self._get_next_call_count > GET_NEXT_CALL_WARNING_THRESHOLD:
warnings.warn(GET_NEXT_CALL_WARNING_MESSAGE)
return sparse.deserialize_sparse_tensors(
nest.pack_sequence_as(self._output_types,
gen_dataset_ops.iterator_get_next(
self._iterator_resource,
output_types=nest.flatten(
sparse.as_dense_types(
self._output_types,
self._output_classes)),
output_shapes=nest.flatten(
sparse.as_dense_shapes(
self._output_shapes,
self._output_classes)),
name=name)), self._output_types,
self._output_shapes, self._output_classes)
def string_handle(self, name=None):
"""Returns a string-valued `tf.Tensor` that represents this iterator.
Args: | [
" name: (Optional.) A name for the created operation."
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
"""Support for the Philips Hue sensors as a platform."""
import asyncio
from datetime import timedelta
import logging
from time import monotonic
import async_timeout
from homeassistant.components import hue
from homeassistant.exceptions import NoEntitySpecifiedError
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
CURRENT_SENSORS = 'current_sensors'
SENSOR_MANAGER_FORMAT = '{}_sensor_manager'
_LOGGER = logging.getLogger(__name__)
def _device_id(aiohue_sensor):
    # Work out the device ID shared by all sensor resources of one physical device
device_id = aiohue_sensor.uniqueid
if device_id and len(device_id) > 23:
device_id = device_id[:23]
return device_id
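# Illustration (the uniqueid format shown here is an assumption): a Hue motion
# sensor's resources report unique IDs such as '00:17:88:01:02:aa:bb:cc-02-0406';
# keeping the first 23 characters ('00:17:88:01:02:aa:bb:cc') gives the ID
# shared by its presence, temperature and light-level entities.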
async def async_setup_entry(hass, config_entry, async_add_entities,
binary=False):
"""Set up the Hue sensors from a config entry."""
bridge = hass.data[hue.DOMAIN][config_entry.data['host']]
hass.data[hue.DOMAIN].setdefault(CURRENT_SENSORS, {})
sm_key = SENSOR_MANAGER_FORMAT.format(config_entry.data['host'])
manager = hass.data[hue.DOMAIN].get(sm_key)
if manager is None:
manager = SensorManager(hass, bridge)
hass.data[hue.DOMAIN][sm_key] = manager
manager.register_component(binary, async_add_entities)
await manager.start()
class SensorManager:
"""Class that handles registering and updating Hue sensor entities.
Intended to be a singleton.
"""
SCAN_INTERVAL = timedelta(seconds=5)
sensor_config_map = {}
def __init__(self, hass, bridge):
"""Initialize the sensor manager."""
import aiohue
from .binary_sensor import HuePresence, PRESENCE_NAME_FORMAT
from .sensor import (
HueLightLevel, HueTemperature, LIGHT_LEVEL_NAME_FORMAT,
TEMPERATURE_NAME_FORMAT)
self.hass = hass
self.bridge = bridge
self._component_add_entities = {}
self._started = False
self.sensor_config_map.update({
aiohue.sensors.TYPE_ZLL_LIGHTLEVEL: {
"binary": False,
"name_format": LIGHT_LEVEL_NAME_FORMAT,
"class": HueLightLevel,
},
aiohue.sensors.TYPE_ZLL_TEMPERATURE: {
"binary": False,
"name_format": TEMPERATURE_NAME_FORMAT,
"class": HueTemperature,
},
aiohue.sensors.TYPE_ZLL_PRESENCE: {
"binary": True,
"name_format": PRESENCE_NAME_FORMAT,
"class": HuePresence,
},
})
def register_component(self, binary, async_add_entities):
"""Register async_add_entities methods for components."""
self._component_add_entities[binary] = async_add_entities
async def start(self):
"""Start updating sensors from the bridge on a schedule."""
# but only if it's not already started, and when we've got both
# async_add_entities methods
if self._started or len(self._component_add_entities) < 2:
return
self._started = True
_LOGGER.info('Starting sensor polling loop with %s second interval',
self.SCAN_INTERVAL.total_seconds())
async def async_update_bridge(now):
"""Will update sensors from the bridge."""
await self.async_update_items()
async_track_point_in_utc_time(
self.hass, async_update_bridge, utcnow() + self.SCAN_INTERVAL)
await async_update_bridge(None)
async def async_update_items(self):
"""Update sensors from the bridge."""
import aiohue
api = self.bridge.api.sensors
try:
start = monotonic() | [
" with async_timeout.timeout(4):"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from chaco.abstract_overlay import AbstractOverlay
from chaco.array_data_source import ArrayDataSource
from chaco.label import Label
from chaco.plot_label import PlotLabel
from enable.enable_traits import LineStyle
from kiva import FILL
from kiva.trait_defs.kiva_font_trait import KivaFont
from numpy import linspace, pi
from six.moves import zip
from traits.api import Float, Str, Instance
from uncertainties import std_dev, nominal_value
from pychron.core.helpers.formatting import floatfmt, calc_percent_error, format_percent_error
from pychron.core.stats import validate_mswd
from pychron.graph.error_ellipse_overlay import ErrorEllipseOverlay
from pychron.graph.error_envelope_overlay import ErrorEnvelopeOverlay
from pychron.graph.ml_label import tokenize
from pychron.pipeline.plot.overlays.isochron_inset import InverseIsochronPointsInset, InverseIsochronLineInset
from pychron.pipeline.plot.plotter.arar_figure import BaseArArFigure
from pychron.processing.analyses.analysis_group import StepHeatAnalysisGroup
from pychron.pychron_constants import PLUSMINUS, SIGMA, MSEM, SEM, SE, MSE
class MLTextLabel(Label):
def draw(self, gc):
""" Draws the label.
This method assumes the graphics context has been translated to the
correct position such that the origin is at the lower left-hand corner
of this text label's box.
"""
# Make sure `max_width` is respected
self._fit_text_to_max_width(gc)
# For this version we're not supporting rotated text.
self._calc_line_positions(gc)
with gc:
bb_width, bb_height = self.get_bounding_box(gc)
# Rotate label about center of bounding box
width, height = self._bounding_box
gc.translate_ctm(bb_width / 2.0, bb_height / 2.0)
gc.rotate_ctm(pi / 180.0 * self.rotate_angle)
gc.translate_ctm(-width / 2.0, -height / 2.0)
# Draw border and fill background
if self.bgcolor != "transparent":
gc.set_fill_color(self.bgcolor_)
gc.draw_rect((0, 0, width, height), FILL)
if self.border_visible and self.border_width > 0:
gc.set_stroke_color(self.border_color_)
gc.set_line_width(self.border_width)
border_offset = (self.border_width - 1) / 2.0
gc.rect(border_offset, border_offset,
width - 2 * border_offset, height - 2 * border_offset)
gc.stroke_path()
gc.set_fill_color(self.color_)
gc.set_stroke_color(self.color_)
gc.set_font(self.font)
if self.font.size <= 8.0:
gc.set_antialias(0)
else:
gc.set_antialias(1)
lines = self.text.split("\n")
if self.border_visible:
gc.translate_ctm(self.border_width, self.border_width)
# width, height = self.get_width_height(gc)
for i, line in enumerate(lines):
if line == "":
continue
x_offset = round(self._line_xpos[i])
y_offset = round(self._line_ypos[i])
with gc:
gc.translate_ctm(x_offset, y_offset)
self._draw_line(gc, line)
def _draw_line(self, gc, txt):
def gen():
offset = 0
for ti in tokenize(txt):
if ti == 'sup':
offset = 1
elif ti == 'sub':
offset = -1
elif ti in ('/sup', '/sub'):
offset = 0
else:
yield offset, ti
ofont = self.font
sfont = self.font.copy()
sfont.size = int(sfont.size * 0.80)
suph = int(ofont.size * 0.4)
subh = -int(ofont.size * 0.3)
x = 0
for offset, text in gen():
with gc:
if offset == 1:
gc.translate_ctm(0, suph)
gc.set_font(sfont)
elif offset == -1:
gc.set_font(sfont)
gc.translate_ctm(0, subh)
else:
gc.set_font(ofont)
w, h, _, _ = gc.get_full_text_extent(text)
gc.set_text_position(x, 0)
gc.show_text(text)
x += w
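# Illustrative note (an assumption about what `tokenize` yields): for a label
# such as '<sup>39</sup>Ar/<sup>40</sup>Ar' the generator above produces '39'
# (raised), 'Ar/', '40' (raised), 'Ar', drawing the raised tokens with the
# smaller `sfont` shifted up by `suph` pixels.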
class OffsetPlotLabel(PlotLabel):
offset = None
_label = Instance(MLTextLabel, args=())
def overlay(self, component, gc, view_bounds=None, mode="normal"):
with gc:
if self.offset:
gc.translate_ctm(*self.offset)
super(OffsetPlotLabel, self).overlay(component, gc, view_bounds, mode)
class AtmInterceptOverlay(AbstractOverlay):
line_width = Float(1.5)
font = KivaFont("modern 10")
line_style = LineStyle('dash')
label = Str
value = Float
def overlay(self, component, gc, view_bounds=None, mode="normal"):
x, y = component.map_screen((0, self.value))
xo = component.x
if x < xo:
x = xo + 5
with gc:
txt = self.label
gc.set_font(self.font)
w, h = gc.get_full_text_extent(txt)[:2]
gc.clip_to_rect(component.x - w - 5, component.y, component.width, component.height)
gc.set_line_width(self.line_width)
gc.set_line_dash(self.line_style_)
gc.move_to(xo, y)
gc.line_to(x, y)
gc.draw_path()
gc.set_text_position(xo - w - 2, y)
gc.show_text(txt)
class Isochron(BaseArArFigure):
pass
class InverseIsochron(Isochron):
_plot_label = None
xpad = None
_analysis_group_klass = StepHeatAnalysisGroup
def post_make(self):
g = self.graph
for i, p in enumerate(g.plots):
l, h = self.ymis[i], self.ymas[i]
g.set_y_limits(max(0, l), h, pad='0.1', pad_style='upper', plotid=i)
g.set_x_limits(0, self.xma, pad='0.1')
self._fix_log_axes()
def plot(self, plots, legend=None):
"""
plot data on plots
"""
graph = self.graph
if self.options.omit_non_plateau:
self.analysis_group.do_omit_non_plateau()
for pid, (plotobj, po) in enumerate(zip(graph.plots, plots)):
getattr(self, '_plot_{}'.format(po.plot_name))(po, plotobj, pid)
# ===============================================================================
# plotters
# ===============================================================================
def _plot_aux(self, title, vk, po, pid):
ys, es = self._get_aux_plot_data(vk, po.scalar)
self._add_aux_plot(ys, title, vk, pid)
def _add_plot(self, xs, ys, es, plotid, value_scale='linear'):
pass
def _plot_inverse_isochron(self, po, plot, pid):
opt = self.options
self.analysis_group.isochron_age_error_kind = opt.error_calc_method
self.analysis_group.isochron_method = opt.regressor_kind
_, _, reg = self.analysis_group.get_isochron_data(exclude_non_plateau=opt.exclude_non_plateau)
graph = self.graph
xtitle = '<sup>39</sup>Ar/<sup>40</sup>Ar'
ytitle = '<sup>36</sup>Ar/<sup>40</sup>Ar'
# self._set_ml_title(ytitle, pid, 'y')
# self._set_ml_title(xtitle, pid, 'x')
graph.set_y_title(ytitle, plotid=pid)
graph.set_x_title(xtitle, plotid=pid)
p = graph.plots[pid]
p.y_axis.title_spacing = 50
graph.set_grid_traits(visible=False)
graph.set_grid_traits(visible=False, grid='y')
group = opt.get_group(self.group_id)
color = group.color
marker = opt.marker
marker_size = opt.marker_size
scatter, _p = graph.new_series(reg.xs, reg.ys,
xerror=ArrayDataSource(data=reg.xserr),
yerror=ArrayDataSource(data=reg.yserr),
type='scatter',
marker=marker,
selection_marker=marker,
selection_marker_size=marker_size,
bind_id=self.group_id,
color=color,
marker_size=marker_size)
graph.set_series_label('data{}'.format(self.group_id))
eo = ErrorEllipseOverlay(component=scatter,
reg=reg,
border_color=color,
fill=opt.fill_ellipses,
kind=opt.ellipse_kind)
scatter.overlays.append(eo)
ma = max(reg.xs)
self.xma = max(self.xma, ma)
self.xmi = min(self.xmi, min(reg.xs))
mi = 0
rxs = linspace(mi, ma * 1.1)
rys = reg.predict(rxs)
graph.set_x_limits(min_=mi, max_=ma, pad='0.1')
l, _ = graph.new_series(rxs, rys, color=color)
graph.set_series_label('fit{}'.format(self.group_id))
l.index.set_data(rxs)
l.value.set_data(rys)
yma, ymi = max(rys), min(rys)
try:
self.ymis[pid] = min(self.ymis[pid], ymi)
self.ymas[pid] = max(self.ymas[pid], yma)
except IndexError:
self.ymis.append(ymi)
self.ymas.append(yma)
if opt.include_error_envelope:
lci, uci = reg.calculate_error_envelope(l.index.get_data())
ee = ErrorEnvelopeOverlay(component=l,
upper=uci, lower=lci,
line_color=color)
l.underlays.append(ee)
l.error_envelope = ee
if opt.display_inset:
self._add_inset(plot, reg)
if self.group_id == 0:
if opt.show_nominal_intercept:
self._add_atm_overlay(plot)
graph.add_vertical_rule(0, color='black')
if opt.show_results_info:
self._add_results_info(plot, text_color=color)
if opt.show_info:
self._add_info(plot)
if opt.show_labels:
self._add_point_labels(scatter)
def ad(i, x, y, ai):
a = ai.isotopes['Ar39'].get_interference_corrected_value()
b = ai.isotopes['Ar40'].get_interference_corrected_value()
r = a / b
v = nominal_value(r)
e = std_dev(r)
try:
pe = '({:0.2f}%)'.format(e / v * 100)
except ZeroDivisionError:
pe = '(Inf%)'
return u'39Ar/40Ar= {} {}{} {}'.format(floatfmt(v, n=6), PLUSMINUS, floatfmt(e, n=7), pe)
self._add_scatter_inspector(scatter, additional_info=ad)
p.index_mapper.on_trait_change(self.update_index_mapper, 'updated')
# sel = self._get_omitted_by_tag(self.analyses)
# self._rebuild_iso(sel)
self.replot()
# ===============================================================================
# overlays
# ===============================================================================
def _add_info(self, plot):
ts = []
if self.options.show_info:
m = self.options.regressor_kind
s = self.options.nsigma
es = self.options.ellipse_kind
ts.append(u'{} {}{}{} Data: {}{}'.format(m, PLUSMINUS, s, SIGMA, PLUSMINUS, es))
if self.options.show_error_type_info:
ts.append('Error Type: {}'.format(self.options.error_calc_method))
if ts:
self._add_info_label(plot, ts, font=self.options.info_font)
def _add_inset(self, plot, reg):
opt = self.options
group = opt.get_group(self.group_id)
insetp = InverseIsochronPointsInset(reg.xs, reg.ys,
marker_size=opt.inset_marker_size,
line_width=0,
nominal_intercept=opt.inominal_intercept_value,
label_font=opt.inset_label_font)
if opt.inset_show_error_ellipse:
eo = ErrorEllipseOverlay(component=insetp,
reg=reg,
border_color=group.color,
fill=opt.fill_ellipses,
kind=opt.ellipse_kind)
insetp.overlays.append(eo)
if self.group_id > 0:
insetp.y_axis.visible = False
insetp.x_axis.visible = False
xintercept = reg.x_intercept * 1.1
yintercept = reg.predict(0)
m, _ = insetp.index.get_bounds()
lx, hx = opt.inset_x_bounds
if not lx and not hx:
lx = -0.1 * (xintercept - m)
hx = xintercept
elif lx and lx > hx:
hx = xintercept
xs = linspace(lx, hx, 20)
ys = reg.predict(xs)
xtitle, ytitle = '', ''
if opt.inset_show_axes_titles:
xtitle = '<sup>39</sup>Ar/<sup>40</sup>Ar'
ytitle = '<sup>36</sup>Ar/<sup>40</sup>Ar'
insetl = InverseIsochronLineInset(xs, ys,
xtitle=xtitle,
ytitle=ytitle,
label_font=opt.inset_label_font)
plot.overlays.append(insetl)
plot.overlays.append(insetp)
ly, hy = opt.inset_y_bounds
if not ly and not hy:
ly = 0
hy = max(1.1 * opt.inominal_intercept_value, yintercept * 1.1)
elif hy < ly:
hy = max(1.1 * opt.inominal_intercept_value, yintercept * 1.1)
for inset in plot.overlays:
if isinstance(inset, (InverseIsochronPointsInset, InverseIsochronLineInset)):
inset.location = opt.inset_location
inset.width = opt.inset_width
inset.height = opt.inset_height
inset.color = group.color
inset.index_range.low = lx
inset.index_range.high = hx
inset.value_range.low = ly
inset.value_range.high = hy
plot.request_redraw()
def _add_atm_overlay(self, plot):
plot.overlays.append(AtmInterceptOverlay(component=plot,
label=self.options.nominal_intercept_label,
value=self.options.inominal_intercept_value))
def _add_results_info(self, plot, label=None, text_color='black'):
ag = self.analysis_group
age = ag.isochron_age
a = ag.isochron_3640
n = ag.nanalyses
mswd = ag.isochron_regressor.mswd
intercept, err = nominal_value(a), std_dev(a)
opt = self.options
try:
inv_intercept = intercept ** -1
p = calc_percent_error(intercept, err, scale=1)
err = inv_intercept * p * opt.nsigma
mse = err * mswd ** 0.5
sf = opt.yintercept_sig_figs
v, e, p, mse = floatfmt(inv_intercept, n=sf, s=3), floatfmt(err, n=sf, s=3), \ | [
" floatfmt(p * 100, n=2), floatfmt(mse, s=3)"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# -*- coding: utf-8 -*-
# Copyright 2014-2018 Rumma & Ko Ltd
# License: BSD, see LICENSE for more details.
"""
Defines the :rst:dir:`sigal_image` directive.
.. rst:directive:: sigal_image
.. _picsel: https://github.com/lsaffre/picsel
.. _Shotwell: https://en.wikipedia.org/wiki/Shotwell_%28software%29
.. _digiKam: https://www.digikam.org/
.. _Sigal: http://sigal.saimon.org/en/latest/
This creates a bridge between a photo collection managed with
Shotwell_ or digiKam_ and a blog generated with Sphinx. All photos
remain in the single central file tree managed by Shotwell_ or
digiKam_. From within Shotwell_ or digiKam_ you use a tag "blog" to
mark all photos that are to be available for your Sphinx blog. Then
you use picsel_ to extract those images to a separate directory. This
tree serves as input for Sigal_ which will generate a static html
gallery. An example of a Sigal gallery is `here
<http://sigal.saffre-rumma.net/>`__. The :rst:dir:`sigal_image`
directive was the last missing part of this publishing bridge: it
allows you to integrate your pictures into blog entries.
Usage::
.. sigal_image:: partial/path/to/photo.jpg[|title_or_options]
For example, if `sigal_base_url` in your :xfile:`conf.py` is set to
``"http://sigal.saffre-rumma.net"``, the following directive in your
rst source file::
.. sigal_image:: 2014/04/10/img_6617.jpg
will insert the following rst code::
.. raw:: html
<a href="http://sigal.saffre-rumma.net/2014/04/10/img_6617.jpg">
<img
src="http://sigal.saffre-rumma.net/2014/04/10/thumbnails/img_6617.jpg"/>
</a>
The file name can contain **formatting instructions** inspired by
`Wikipedia pictures
<https://en.wikipedia.org/wiki/Wikipedia:Picture_tutorial>`_ which
uses a variable number of pipe characters. For example:
>>> print(line2html("foo.jpg"))
<a href="http://example.com//foo.jpg" data-lightbox="image-1" data-title="foo.jpg"/><img src="http://example.com//thumbnails/foo.jpg" style="padding:4px; width:280px;" title="foo.jpg"/></a>
>>> print(line2html("foo.jpg|This is a nice picture"))
<a href="http://example.com//foo.jpg" data-lightbox="image-1" data-title="This is a nice picture"/><img src="http://example.com//thumbnails/foo.jpg" style="padding:4px; width:280px;" title="This is a nice picture"/></a>
>>> print(line2html("foo.jpg|thumb|This is a nice picture"))
<a href="http://example.com//foo.jpg" data-lightbox="image-1" data-title="This is a nice picture"/><img src="http://example.com//thumbnails/foo.jpg" style="padding:4px; float:right; width:280px;" title="This is a nice picture"/></a>
>>> print(line2html("foo.jpg|thumb|left|This is a nice picture"))
<a href="http://example.com//foo.jpg" data-lightbox="image-1" data-title="This is a nice picture"/><img src="http://example.com//thumbnails/foo.jpg" style="padding:4px; float:left;; width:280px;" title="This is a nice picture"/></a>
The generated HTML also includes attributes for `lightbox
<http://lokeshdhakar.com/projects/lightbox2/>`_. In order to activate
this feature you must add the content of the lighbox :file:`dist`
directory somewhere to your web server and then change your
`layout.html` template to something like this::
{%- block extrahead %}
{{ super() }}
<script src="/data/lightbox/js/lightbox-plus-jquery.min.js"></script>
<link href="/data/lightbox/css/lightbox.css" rel="stylesheet" />
{% endblock %}
"""
import os
from atelier.sphinxconf.insert_input import InsertInputDirective
TEMPLATE1 = """
.. raw:: html
<a href="%(target)s"><img src="%(src)s" style="padding:4px"/></a>
"""
#TEMPLATE = """<a href="%(target)s" style="%(style)s" %(class)s data-lightbox="image-1" data-title="%(caption)s"/><img src="%(src)s" style="padding:4px" title="%(caption)s"/></a>"""
TEMPLATE = """<a href="%(target)s" %(class)s data-lightbox="image-1" data-title="%(caption)s"/><img src="%(src)s" style="%(style)s" title="%(caption)s"/></a>"""
class Format(object):
@classmethod
def update_context(self, caption, tplkw):
tplkw.update(caption=caption)
tplkw.update(style="padding:4px; width:280px;")
class Thumb(Format):
@classmethod
def update_context(self, caption, tplkw):
chunks = caption.split('|')
if len(chunks) == 1:
tplkw['style'] = "padding:4px; float:right; width:280px;"
elif len(chunks) == 2:
align, caption = chunks
if not align in ("right", "left", "center"):
raise Exception("Invalid alignment '{0}'".format(align))
tplkw['style'] = "padding:4px; float:{0};; width:280px;".format(align)
else:
raise Exception("Impossible")
tplkw.update(caption=caption)
class Wide(Format):
@classmethod
def update_context(self, caption, tplkw):
chunks = caption.split('|')
if len(chunks) == 1:
tplkw['style'] = "padding:4px; width:100%;"
else:
raise Exception("Impossible")
tplkw.update(caption=caption)
FORMATS = dict()
FORMATS[None] = Format()
FORMATS['thumb'] = Thumb()
FORMATS['wide'] = Wide()
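# Summary for illustration (consistent with the doctests in the module
# docstring): no format keyword gives a fixed-width inline image, 'thumb'
# floats it (right by default, or 'left'/'center' when given), and 'wide'
# stretches it to the full (100%) width.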
def buildurl(*parts):
return 'http://example.com/' + '/'.join(parts)
def line2html(name, buildurl=buildurl):
name = name.strip()
if not name:
return ''
kw = dict() # style="padding:4px")
kw['class'] = ''
kw['style'] = "padding:4px; width:280px;"
if True: # new format using only | as separator
caption = name
fmt = FORMATS[None]
chunks = name.split('|', 1)
if len(chunks) == 2:
name, caption = chunks
chunks = caption.split('|', 1)
if len(chunks) == 2:
fmtname, caption = chunks
fmt = FORMATS[fmtname]
fmt.update_context(caption, kw)
if ' ' in name:
raise Exception("Invalid filename. Spaces not allowed.")
else:
chunks = name.split(None, 1)
if len(chunks) == 1:
kw.update(caption='')
elif len(chunks) == 2:
name, caption = chunks
chunks = caption.split('|', 1)
if len(chunks) == 1:
fmt = FORMATS[None]
elif len(chunks) == 2:
fmtname, caption = chunks
fmt = FORMATS[fmtname]
else:
raise Exception("Impossible")
fmt.update_context(caption, kw)
else:
raise Exception("FILENAME <whitespace> DESC %s" % chunks)
head, tail = os.path.split(name)
kw.update(target=buildurl(head, tail))
kw.update(src=buildurl(head, 'thumbnails', tail))
return TEMPLATE % kw
class SigalImage(InsertInputDirective):
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
# option_spec = {
# 'style': directives.unchanged,
# 'class': directives.unchanged,
# }
def get_rst(self):
env = self.state.document.settings.env
base_url = env.config.sigal_base_url
def buildurl(*parts):
return base_url + '/' + '/'.join(parts)
s = ''
for name in self.content:
s += line2html(name, buildurl)
if s:
s = "\n\n.. raw:: html\n\n {0}\n\n".format(s)
return s
def get_headers(self):
return ['title', 'author', 'date']
def format_entry(self, e):
cells = []
# text = ''.join([unicode(c) for c in e.title.children])
# cells.append(":doc:`%s <%s>`" % (text, e.docname))
cells.append(":doc:`%s`" % e.docname)
cells.append(str(e.meta.get('author', '')))
cells.append(str(e.meta.get('date', '')))
return cells
| [
"def setup(app):"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# tcpconnlat Trace TCP active connection latency (connect).
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: tcpconnlat [-h] [-t] [-p PID] [-4 | -6]
#
# This uses dynamic tracing of kernel functions, and will need to be updated
# to match kernel changes.
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 19-Feb-2016 Brendan Gregg Created this.
# 15-Mar-2021 Suresh Kumar Added LPORT option
from __future__ import print_function
from bcc import BPF
from socket import inet_ntop, AF_INET, AF_INET6
from struct import pack
import argparse
# arg validation
def positive_float(val):
try:
ival = float(val)
except ValueError:
raise argparse.ArgumentTypeError("must be a float")
if ival < 0:
raise argparse.ArgumentTypeError("must be positive")
return ival
# arguments
examples = """examples:
./tcpconnlat # trace all TCP connect()s
./tcpconnlat 1 # trace connection latency slower than 1 ms
./tcpconnlat 0.1 # trace connection latency slower than 100 us
./tcpconnlat -t # include timestamps
./tcpconnlat -p 181 # only trace PID 181
./tcpconnlat -L # include LPORT while printing outputs
./tcpconnlat -4 # trace IPv4 family only
./tcpconnlat -6 # trace IPv6 family only
"""
parser = argparse.ArgumentParser(
description="Trace TCP connects and show connection latency",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-t", "--timestamp", action="store_true",
help="include timestamp on output")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("-L", "--lport", action="store_true",
help="include LPORT on output")
group = parser.add_mutually_exclusive_group()
group.add_argument("-4", "--ipv4", action="store_true",
help="trace IPv4 family only")
group.add_argument("-6", "--ipv6", action="store_true",
help="trace IPv6 family only")
parser.add_argument("duration_ms", nargs="?", default=0,
type=positive_float,
help="minimum duration to trace (ms)")
parser.add_argument("-v", "--verbose", action="store_true",
help="print the BPF program for debugging purposes")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
if args.duration_ms:
# support fractions but round to nearest microsecond
duration_us = int(args.duration_ms * 1000)
else:
duration_us = 0 # default is show all
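# Example (matches the usage text above): `./tcpconnlat 0.1` gives
# duration_us == 100, so only connections slower than 100 us are printed;
# duration_us == 0 disables the latency filter entirely.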
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <bcc/proto.h>
struct info_t {
u64 ts;
u32 pid;
char task[TASK_COMM_LEN];
};
BPF_HASH(start, struct sock *, struct info_t);
// separate data structs for ipv4 and ipv6
struct ipv4_data_t {
u64 ts_us;
u32 pid;
u32 saddr;
u32 daddr;
u64 ip;
u16 lport;
u16 dport;
u64 delta_us;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv4_events);
struct ipv6_data_t {
u64 ts_us;
u32 pid;
unsigned __int128 saddr;
unsigned __int128 daddr;
u64 ip;
u16 lport;
u16 dport;
u64 delta_us;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv6_events);
int trace_connect(struct pt_regs *ctx, struct sock *sk)
{
u32 pid = bpf_get_current_pid_tgid() >> 32;
FILTER
struct info_t info = {.pid = pid};
info.ts = bpf_ktime_get_ns();
bpf_get_current_comm(&info.task, sizeof(info.task));
start.update(&sk, &info);
return 0;
};
// See tcp_v4_do_rcv() and tcp_v6_do_rcv(). So TCP_ESTBALISHED and TCP_LISTEN
// are fast path and processed elsewhere, and leftovers are processed by
// tcp_rcv_state_process(). We can trace this for handshake completion.
// This should all be switched to static tracepoints when available.
int trace_tcp_rcv_state_process(struct pt_regs *ctx, struct sock *skp)
{
// will be in TCP_SYN_SENT for handshake
if (skp->__sk_common.skc_state != TCP_SYN_SENT)
return 0;
// check start and calculate delta
struct info_t *infop = start.lookup(&skp);
if (infop == 0) {
return 0; // missed entry or filtered
}
u64 ts = infop->ts;
u64 now = bpf_ktime_get_ns();
u64 delta_us = (now - ts) / 1000ul;
#ifdef MIN_LATENCY | [
" if ( delta_us < DURATION_US ) {"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Simple speech recognition to spot a limited number of keywords.
This is a self-contained example script that will train a very basic audio
recognition model in TensorFlow. It can download the necessary training data,
and runs with reasonable defaults to train within a few hours even using only a
CPU. For more information see http://tensorflow.org/tutorials/audio_recognition.
It is intended as an introduction to using neural networks for audio
recognition, and is not a full speech recognition system. For more advanced
speech systems, I recommend looking into Kaldi. This network uses a keyword
detection style to spot discrete words from a small vocabulary, consisting of
"yes", "no", "up", "down", "left", "right", "on", "off", "stop", and "go".
To run the training process, use:
bazel run tensorflow/examples/speech_commands:train
This will write out checkpoints to /tmp/speech_commands_train/, and will
download over 1GB of open source training data, so you'll need enough free space
and a good internet connection. The default data is a collection of thousands of
one-second .wav files, each containing one spoken word. This data set is
collected from https://aiyprojects.withgoogle.com/open_speech_recording, please
consider contributing to help improve this and other models!
As training progresses, it will print out its accuracy metrics, which should
rise above 90% by the end. Once it's complete, you can run the freeze script to
get a binary GraphDef that you can easily deploy on mobile applications.
If you want to train on your own data, you'll need to create .wavs with your
recordings, all at a consistent length, and then arrange them into subfolders
organized by label. For example, here's a possible file structure:
my_wavs >
up >
audio_0.wav
audio_1.wav
down >
audio_2.wav
audio_3.wav
other>
audio_4.wav
audio_5.wav
You'll also need to tell the script what labels to look for, using the
`--wanted_words` argument. In this case, 'up,down' might be what you want, and
the audio in the 'other' folder would be used to train an 'unknown' category.
To pull this all together, you'd run:
bazel run tensorflow/examples/speech_commands:train -- \
--data_dir=my_wavs --wanted_words=up,down
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import numpy as np
import tensorflow as tf
import input_data
import models
from tensorflow.python.platform import gfile
FLAGS = None
def main(_):
# We want to see all the logging messages for this tutorial.
tf.logging.set_verbosity(tf.logging.INFO)
# Start a new TensorFlow session.
sess = tf.InteractiveSession()
# Begin by making sure we have the training data we need. If you already have
# training data of your own, use `--data_url= ` on the command line to avoid
# downloading.
model_settings = models.prepare_model_settings(
len(input_data.prepare_words_list(FLAGS.wanted_words.split(','))),
FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)
audio_processor = input_data.AudioProcessor(
FLAGS.data_url, FLAGS.data_dir, FLAGS.silence_percentage,
FLAGS.unknown_percentage,
FLAGS.wanted_words.split(','), FLAGS.validation_percentage,
FLAGS.testing_percentage, model_settings)
fingerprint_size = model_settings['fingerprint_size']
label_count = model_settings['label_count']
time_shift_samples = int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000)
# Figure out the learning rates for each training phase. Since it's often
# effective to have high learning rates at the start of training, followed by
# lower levels towards the end, the number of steps and learning rates can be
# specified as comma-separated lists to define the rate at each stage. For
# example --how_many_training_steps=10000,3000 --learning_rate=0.001,0.0001
# will run 13,000 training loops in total, with a rate of 0.001 for the first
# 10,000, and 0.0001 for the final 3,000.
  training_steps_list = list(map(int, FLAGS.how_many_training_steps.split(',')))
  learning_rates_list = list(map(float, FLAGS.learning_rate.split(',')))
if len(training_steps_list) != len(learning_rates_list):
raise Exception(
'--how_many_training_steps and --learning_rate must be equal length '
'lists, but are %d and %d long instead' % (len(training_steps_list),
len(learning_rates_list)))
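  # Worked example (taken from the comment above): --how_many_training_steps=10000,3000
  # with --learning_rate=0.001,0.0001 gives training_steps_list == [10000, 3000]
  # and learning_rates_list == [0.001, 0.0001], i.e. 13,000 steps in total.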
fingerprint_input = tf.placeholder(
tf.float32, [None, fingerprint_size], name='fingerprint_input')
logits, dropout_prob = models.create_model(
fingerprint_input,
model_settings,
FLAGS.model_architecture,
is_training=True)
# Define loss and optimizer
ground_truth_input = tf.placeholder(
tf.float32, [None, label_count], name='groundtruth_input')
# Optionally we can add runtime checks to spot when NaNs or other symptoms of
# numerical errors start occurring during training.
control_dependencies = []
if FLAGS.check_nans:
checks = tf.add_check_numerics_ops()
control_dependencies = [checks]
# Create the back propagation and training evaluation machinery in the graph.
with tf.name_scope('cross_entropy'):
cross_entropy_mean = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
labels=ground_truth_input, logits=logits))
tf.summary.scalar('cross_entropy', cross_entropy_mean)
with tf.name_scope('train'), tf.control_dependencies(control_dependencies):
learning_rate_input = tf.placeholder(
tf.float32, [], name='learning_rate_input')
train_step = tf.train.GradientDescentOptimizer(
learning_rate_input).minimize(cross_entropy_mean)
predicted_indices = tf.argmax(logits, 1)
expected_indices = tf.argmax(ground_truth_input, 1)
correct_prediction = tf.equal(predicted_indices, expected_indices)
confusion_matrix = tf.confusion_matrix(expected_indices, predicted_indices)
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
global_step = tf.contrib.framework.get_or_create_global_step()
increment_global_step = tf.assign(global_step, global_step + 1)
saver = tf.train.Saver(tf.global_variables())
# Merge all the summaries and write them out to /tmp/retrain_logs (by default)
merged_summaries = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')
tf.global_variables_initializer().run()
start_step = 1
  if FLAGS.start_checkpoint:
    models.load_variables_from_checkpoint(sess, FLAGS.start_checkpoint)

#!/usr/bin/env python
'''
MOSSE tracking sample
This sample implements a correlation-based tracking approach, described in [1].
Usage:
  mosse.py [--pause] [--picam] [<video source>]
  --pause - Start with playback paused at the first video frame.
            Useful for tracking target selection.
  --picam - Use this flag if running on a Raspberry Pi camera.
Draw rectangles around objects with a mouse to track them.
Keys:
SPACE - pause video
c - clear targets
[1] David S. Bolme et al. "Visual Object Tracking using Adaptive Correlation Filters"
http://www.cs.colostate.edu/~bolme/publications/Bolme2010Tracking.pdf
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
import argparse
import imutils
import cv2
import time
import numpy as np
from imutils.video import VideoStream
from common import draw_str, RectSelector
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
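# rnd_warp() applies a small random affine perturbation (rotation plus slight shear)
# about the patch centre; MOSSE uses it below to synthesize extra training samples
# when a new filter is initialised.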
def rnd_warp(a):
h, w = a.shape[:2]
T = np.zeros((2, 3))
coef = 0.2
ang = (np.random.rand()-0.5)*coef
c, s = np.cos(ang), np.sin(ang)
T[:2, :2] = [[c,-s], [s, c]]
T[:2, :2] += (np.random.rand(2, 2) - 0.5)*coef
c = (w/2, h/2)
T[:,2] = c - np.dot(T[:2, :2], c)
return cv2.warpAffine(a, T, (w, h), borderMode = cv2.BORDER_REFLECT)
def divSpec(A, B):
Ar, Ai = A[...,0], A[...,1]
Br, Bi = B[...,0], B[...,1]
C = (Ar+1j*Ai)/(Br+1j*Bi)
C = np.dstack([np.real(C), np.imag(C)]).copy()
return C
eps = 1e-5
class MOSSE:
def __init__(self, frame, rect):
x1, y1, x2, y2 = rect
w, h = map(cv2.getOptimalDFTSize, [x2-x1, y2-y1])
x1, y1 = (x1+x2-w)//2, (y1+y2-h)//2
self.pos = x, y = x1+0.5*(w-1), y1+0.5*(h-1)
self.size = w, h
img = cv2.getRectSubPix(frame, (w, h), (x, y))
self.win = cv2.createHanningWindow((w, h), cv2.CV_32F)
g = np.zeros((h, w), np.float32)
g[h//2, w//2] = 1
g = cv2.GaussianBlur(g, (-1, -1), 2.0)
g /= g.max()
self.G = cv2.dft(g, flags=cv2.DFT_COMPLEX_OUTPUT)
self.H1 = np.zeros_like(self.G)
self.H2 = np.zeros_like(self.G)
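        # Accumulate the filter numerator (H1) and denominator (H2) over randomly
        # warped copies of the initial patch, as described in Bolme et al. [1].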
for i in xrange(128):
a = self.preprocess(rnd_warp(img))
A = cv2.dft(a, flags=cv2.DFT_COMPLEX_OUTPUT)
self.H1 += cv2.mulSpectrums(self.G, A, 0, conjB=True)
self.H2 += cv2.mulSpectrums( A, A, 0, conjB=True)
self.update_kernel()
self.update(frame)
def update(self, frame, rate = 0.125):
(x, y), (w, h) = self.pos, self.size
self.last_img = img = cv2.getRectSubPix(frame, (w, h), (x, y))
img = self.preprocess(img)
self.last_resp, (dx, dy), self.psr = self.correlate(img)
self.good = self.psr > 8.0
if not self.good:
return
self.pos = x+dx, y+dy
self.last_img = img = cv2.getRectSubPix(frame, (w, h), self.pos)
img = self.preprocess(img)
A = cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT)
H1 = cv2.mulSpectrums(self.G, A, 0, conjB=True)
H2 = cv2.mulSpectrums( A, A, 0, conjB=True)
self.H1 = self.H1 * (1.0-rate) + H1 * rate
self.H2 = self.H2 * (1.0-rate) + H2 * rate
self.update_kernel()
@property
def state_vis(self):
f = cv2.idft(self.H, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT )
h, w = f.shape
f = np.roll(f, -h//2, 0)
f = np.roll(f, -w//2, 1)
kernel = np.uint8( (f-f.min()) / f.ptp()*255 )
resp = self.last_resp
resp = np.uint8(np.clip(resp/resp.max(), 0, 1)*255)
vis = np.hstack([self.last_img, kernel, resp])
return vis
def draw_state(self, vis):
(x, y), (w, h) = self.pos, self.size
x1, y1, x2, y2 = int(x-0.5*w), int(y-0.5*h), int(x+0.5*w), int(y+0.5*h)
cv2.rectangle(vis, (x1, y1), (x2, y2), (0, 0, 255))
if self.good:
cv2.circle(vis, (int(x), int(y)), 2, (0, 0, 255), -1)
else:
cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255))
cv2.line(vis, (x2, y1), (x1, y2), (0, 0, 255))
draw_str(vis, (x1, y2+16), 'PSR: %.2f' % self.psr)
def preprocess(self, img):
img = np.log(np.float32(img)+1.0)
img = (img-img.mean()) / (img.std()+eps)
return img*self.win
def correlate(self, img):
C = cv2.mulSpectrums(cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT), self.H, 0, conjB=True)
resp = cv2.idft(C, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)
h, w = resp.shape
_, mval, _, (mx, my) = cv2.minMaxLoc(resp)
side_resp = resp.copy()
cv2.rectangle(side_resp, (mx-5, my-5), (mx+5, my+5), 0, -1)
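        # Peak-to-Sidelobe Ratio (PSR): compare the correlation peak with the mean and
        # std of the response outside an 11x11 window around it; update() treats
        # PSR > 8.0 as a confident detection.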
smean, sstd = side_resp.mean(), side_resp.std()
psr = (mval-smean) / (sstd+eps)
return resp, (mx-w//2, my-h//2), psr
def update_kernel(self):
self.H = divSpec(self.H1, self.H2)
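        # Negating the imaginary part takes the complex conjugate of the filter
        # spectrum H1/H2, matching the conjugation convention used by correlate().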
self.H[...,1] *= -1
class App:
def __init__(self, cap, paused = False):
self.cap = cap
self.frame = self.cap.read()
print(self.frame.shape)
cv2.imshow('frame', self.frame)
self.rect_sel = RectSelector('frame', self.onrect)
self.trackers = []
self.paused = paused
    def onrect(self, rect):

# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2006 Christopher Lenz <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <[email protected]>
from fnmatch import fnmatchcase
from itertools import groupby
import inspect
import os
import re
from StringIO import StringIO
from genshi.builder import tag
from genshi.core import Markup
from trac.core import *
from trac.resource import Resource, ResourceNotFound, get_resource_name, \
get_resource_summary, get_resource_url
from trac.util.datefmt import format_date, from_utimestamp, user_time
from trac.util.html import escape, find_element
from trac.util.presentation import separated
from trac.util.text import unquote, to_unicode
from trac.util.translation import _, dgettext, cleandoc_
from trac.wiki.api import IWikiMacroProvider, WikiSystem, parse_args
from trac.wiki.formatter import format_to_html, format_to_oneliner, \
extract_link, OutlineFormatter
class WikiMacroBase(Component):
"""Abstract base class for wiki macros."""
implements(IWikiMacroProvider)
abstract = True
# A gettext domain to translate the macro description
_domain = None
# A macro description
_description = None
def get_macros(self):
"""Yield the name of the macro based on the class name."""
name = self.__class__.__name__
if name.endswith('Macro'):
name = name[:-5]
yield name
def get_macro_description(self, name):
"""Return the subclass's gettext domain and macro description"""
domain, description = self._domain, self._description
if description:
return (domain, description) if domain else description
# For pre-0.12 compatibility
doc = inspect.getdoc(self.__class__)
return to_unicode(doc) if doc else ''
def parse_macro(self, parser, name, content):
raise NotImplementedError
def expand_macro(self, formatter, name, content):
# -- TODO: remove in 0.12
if hasattr(self, 'render_macro'):
self.log.warning('Executing pre-0.11 Wiki macro %s by provider %s'
% (name, self.__class__))
return self.render_macro(formatter.req, name, content)
# --
raise NotImplementedError
class TitleIndexMacro(WikiMacroBase):
_domain = 'messages'
_description = cleandoc_(
"""Insert an alphabetic list of all wiki pages into the output.
Accepts a prefix string as parameter: if provided, only pages with names
that start with the prefix are included in the resulting list. If this
parameter is omitted, all pages are listed.
If the prefix is specified, a second argument of value `hideprefix`
can be given as well, in order to remove that prefix from the output.
Alternate `format` and `depth` named parameters can be specified:
- `format=compact`: The pages are displayed as comma-separated links.
- `format=group`: The list of pages will be structured in groups
according to common prefix. This format also supports a `min=n`
argument, where `n` is the minimal number of pages for a group.
- `format=hierarchy`: The list of pages will be structured according
to the page name path hierarchy. This format also supports a `min=n`
      argument, where a higher `n` flattens the display hierarchy
- `depth=n`: limit the depth of the pages to list. If set to 0,
only toplevel pages will be shown, if set to 1, only immediate
children pages will be shown, etc. If not set, or set to -1,
all pages in the hierarchy will be shown.
- `include=page1:page*2`: include only pages that match an item in the
colon-separated list of pages. If the list is empty, or if no `include`
argument is given, include all pages.
- `exclude=page1:page*2`: exclude pages that match an item in the colon-
separated list of pages.
The `include` and `exclude` lists accept shell-style patterns.
""")
SPLIT_RE = re.compile(r"([/ 0-9.]+)")
def expand_macro(self, formatter, name, content):
args, kw = parse_args(content)
prefix = args[0].strip() if args else None
hideprefix = args and len(args) > 1 and args[1].strip() == 'hideprefix'
minsize = max(int(kw.get('min', 2)), 2)
depth = int(kw.get('depth', -1))
start = prefix.count('/') if prefix else 0
format = kw.get('format', '')
def parse_list(name):
return [inc.strip() for inc in kw.get(name, '').split(':')
if inc.strip()]
includes = parse_list('include') or ['*']
excludes = parse_list('exclude')
if hideprefix:
omitprefix = lambda page: page[len(prefix):]
else:
omitprefix = lambda page: page
        wiki = formatter.wiki

import django
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_user_model
User = get_user_model()
import denorm
from denorm import denorms
import models
# Use all but denorms in FailingTriggers models by default
failingdenorms = denorms.alldenorms
denorms.alldenorms = [d for d in failingdenorms if d.model not in (models.FailingTriggersModelA, models.FailingTriggersModelB)]
class TestTriggers(TestCase):
def setUp(self):
denorms.drop_triggers()
def test_triggers(self):
"""Test potentially failing denorms.
"""
# save and restore alldenorms
# test will fail if it's raising an exception
alldenorms = denorms.alldenorms
denorms.alldenorms = failingdenorms
try:
denorms.install_triggers()
finally:
denorms.alldenorms = alldenorms
class TestCached(TestCase):
def setUp(self):
denorms.drop_triggers()
denorms.install_triggers()
def tearDown(self):
models.CachedModelA.objects.all().delete()
models.CachedModelB.objects.all().delete()
def test_depends_related(self):
models.CachedModelB.objects.create(data='Hello')
b = models.CachedModelB.objects.all()[0]
self.assertEqual('Hello', b.data)
models.CachedModelA.objects.create(b=b)
a = models.CachedModelA.objects.all()[0]
self.assertEqual("HELLO", a.cached_data['upper'])
self.assertEqual("hello", a.cached_data['lower'])
b.data = 'World'
self.assertEqual("HELLO", a.cached_data['upper'])
self.assertEqual("hello", a.cached_data['lower'])
b.save()
a = models.CachedModelA.objects.all()[0]
self.assertEqual("WORLD", a.cached_data['upper'])
self.assertEqual("world", a.cached_data['lower'])
class TestAbstract(TestCase):
def setUp(self):
denorms.drop_triggers()
denorms.install_triggers()
def test_abstract(self):
d1 = models.RealDenormModel.objects.create(text='onion')
self.assertEqual("Ham and onion", d1.ham)
self.assertEqual("Eggs and onion", d1.eggs)
class TestSkip(TestCase):
"""
Tests for the skip feature.
"""
def setUp(self):
denorms.drop_triggers()
denorms.install_triggers()
post = models.SkipPost(text='Here be ponies.')
post.save()
self.post = post
# TODO: Enable and check!
# Unsure on how to test this behaviour. It results in an endless loop:
# update -> trigger -> update -> trigger -> ...
#
#def test_without_skip(self):
    # # This results in an infinite loop on SQLite.
# comment = SkipCommentWithoutSkip(post=self.post, text='Oh really?')
# comment.save()
#
# denorm.flush()
    # TODO: Check if an infinite loop happens and stop it.
def test_with_skip(self):
# This should not result in an endless loop.
comment = models.SkipCommentWithSkip(post=self.post, text='Oh really?')
comment.save()
denorm.flush()
def test_meta_skip(self):
"""Test a model with the attribute listed under denorm_always_skip."""
comment = models.SkipCommentWithAttributeSkip(post=self.post, text='Yup, and they have wings!')
comment.save()
denorm.flush()
class TestDenormalisation(TestCase):
"""
Tests for the denormalisation fields.
"""
def setUp(self):
denorms.drop_triggers()
denorms.install_triggers()
self.testuser = User.objects.create_user("testuser", "testuser", "testuser")
self.testuser.is_staff = True
ctype = ContentType.objects.get_for_model(models.Member)
self.testuser.save()
def tearDown(self):
# delete all model instances
self.testuser.delete()
models.Attachment.objects.all().delete()
models.Post.objects.all().delete()
models.Forum.objects.all().delete()
def test_depends_related(self):
"""
Test the DependsOnRelated stuff.
"""
# Make a forum, check it's got no posts
f1 = models.Forum.objects.create(title="forumone")
self.assertEqual(f1.post_count, 0)
# Check its database copy too
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 0)
# Add a post
p1 = models.Post.objects.create(forum=f1)
# Has the post count updated?
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 1)
denorm.flush()
# Check its title, in p1 and the DB
self.assertEqual(p1.forum_title, "forumone")
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "forumone")
# Update the forum title
f1.title = "forumtwo"
f1.save()
denorm.flush()
# Has the post's title changed?
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "forumtwo")
# Add and remove some posts and check the post count
models.Post.objects.create(forum=f1)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 2)
models.Post.objects.create(forum=f1)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 3)
p1.delete()
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 2)
# Delete everything, check once more.
models.Post.objects.all().delete()
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 0)
# Make an orphaned post, see what its title is.
# Doesn't work yet - no support for null FKs
#p4 = Post.objects.create(forum=None)
#self.assertEqual(p4.forum_title, None)
def test_dependency_chains(self):
# create a forum, a member and a post
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
models.Post.objects.create(forum=f1, author=m1)
denorm.flush()
# check the forums author list contains the member
self.assertEqual(models.Forum.objects.get(id=f1.id).author_names, "memberone")
# change the member's name
m1.name = "membertwo"
m1.save()
denorm.flush()
# check again
self.assertEqual(models.Forum.objects.get(id=f1.id).author_names, "membertwo")
def test_trees(self):
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumtwo", parent_forum=f1)
f3 = models.Forum.objects.create(title="forumthree", parent_forum=f2)
denorm.flush()
self.assertEqual(f1.path, '/forumone/')
self.assertEqual(f2.path, '/forumone/forumtwo/')
self.assertEqual(f3.path, '/forumone/forumtwo/forumthree/')
f1.title = 'someothertitle'
f1.save()
denorm.flush()
f1 = models.Forum.objects.get(id=f1.id)
f2 = models.Forum.objects.get(id=f2.id)
f3 = models.Forum.objects.get(id=f3.id)
self.assertEqual(f1.path, '/someothertitle/')
self.assertEqual(f2.path, '/someothertitle/forumtwo/')
self.assertEqual(f3.path, '/someothertitle/forumtwo/forumthree/')
def test_reverse_fk_null(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
models.Post.objects.create(forum=f1, author=m1)
models.Attachment.objects.create()
denorm.flush()
def test_bulk_update(self):
"""
Test the DependsOnRelated stuff.
"""
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumtwo")
p1 = models.Post.objects.create(forum=f1)
p2 = models.Post.objects.create(forum=f2)
denorm.flush()
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "forumone")
self.assertEqual(models.Post.objects.get(id=p2.id).forum_title, "forumtwo")
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 1)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 1)
models.Post.objects.update(forum=f1)
denorm.flush()
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "forumone")
self.assertEqual(models.Post.objects.get(id=p2.id).forum_title, "forumone")
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 2)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 0)
models.Forum.objects.update(title="oneforall")
denorm.flush()
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "oneforall")
self.assertEqual(models.Post.objects.get(id=p2.id).forum_title, "oneforall")
def test_no_dependency(self):
m1 = models.Member.objects.create(first_name="first", name="last")
denorm.flush()
self.assertEqual(models.Member.objects.get(id=m1.id).full_name, "first last")
models.Member.objects.filter(id=m1.id).update(first_name="second")
denorm.flush()
self.assertEqual(models.Member.objects.get(id=m1.id).full_name, "second last")
def test_self_backward_relation(self):
f1 = models.Forum.objects.create(title="forumone")
p1 = models.Post.objects.create(forum=f1, )
p2 = models.Post.objects.create(forum=f1, response_to=p1)
p3 = models.Post.objects.create(forum=f1, response_to=p1)
p4 = models.Post.objects.create(forum=f1, response_to=p2)
denorm.flush()
self.assertEqual(models.Post.objects.get(id=p1.id).response_count, 3)
self.assertEqual(models.Post.objects.get(id=p2.id).response_count, 1)
self.assertEqual(models.Post.objects.get(id=p3.id).response_count, 0)
self.assertEqual(models.Post.objects.get(id=p4.id).response_count, 0)
def test_m2m_relation(self):
f1 = models.Forum.objects.create(title="forumone")
p1 = models.Post.objects.create(forum=f1, title="post1")
m1 = models.Member.objects.create(first_name="first1", name="last1")
denorm.flush()
m1.bookmarks.add(p1)
denorm.flush()
self.assertTrue('post1' in models.Member.objects.get(id=m1.id).bookmark_titles)
p1.title = "othertitle"
p1.save()
denorm.flush()
self.assertTrue('post1' not in models.Member.objects.get(id=m1.id).bookmark_titles)
self.assertTrue('othertitle' in models.Member.objects.get(id=m1.id).bookmark_titles)
p2 = models.Post.objects.create(forum=f1, title="thirdtitle")
m1.bookmarks.add(p2)
denorm.flush()
self.assertTrue('post1' not in models.Member.objects.get(id=m1.id).bookmark_titles)
self.assertTrue('othertitle' in models.Member.objects.get(id=m1.id).bookmark_titles)
self.assertTrue('thirdtitle' in models.Member.objects.get(id=m1.id).bookmark_titles)
m1.bookmarks.remove(p1)
denorm.flush()
self.assertTrue('othertitle' not in models.Member.objects.get(id=m1.id).bookmark_titles)
self.assertTrue('thirdtitle' in models.Member.objects.get(id=m1.id).bookmark_titles)
def test_middleware(self):
# FIXME, this test currently does not work with a transactional
# database, so it's skipped for now.
return
# FIXME, set and de-set middleware values
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(first_name="first1", name="last1")
p1 = models.Post.objects.create(forum=f1, author=m1)
self.assertEqual(models.Post.objects.get(id=p1.id).author_name, "last1")
self.client.login(username="testuser", password="testuser")
self.client.post("/admin/denorm_testapp/member/%s/" % (m1.pk), {
'name': 'last2',
'first_name': 'first2',
})
self.assertEqual(models.Post.objects.get(id=p1.id).author_name, "last2")
def test_countfield(self):
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumone")
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 0)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 0)
models.Post.objects.create(forum=f1)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 1)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 0)
p2 = models.Post.objects.create(forum=f2)
p3 = models.Post.objects.create(forum=f2)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 1)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 2)
p2.forum = f1
p2.save()
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 2)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 1)
models.Post.objects.filter(pk=p3.pk).update(forum=f1)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 3)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 0)
def test_countfield_does_not_write_stale_value(self):
f1 = models.Forum.objects.create(title="forumone")
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 0)
models.Post.objects.create(forum=f1)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 1)
f1 = models.Forum.objects.get(title="forumone")
models.Post.objects.create(forum_id=f1.id)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 2)
f1.title = "new"
self.assertEqual(f1.post_count, 1)
f1.save()
self.assertEqual(f1.post_count, 2)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 2)
self.assertEqual(models.Forum.objects.get(id=f1.id).title, "new")
def test_foreignkey(self):
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumtwo")
m1 = models.Member.objects.create(first_name="first1", name="last1")
p1 = models.Post.objects.create(forum=f1, author=m1)
a1 = models.Attachment.objects.create(post=p1)
self.assertEqual(models.Attachment.objects.get(id=a1.id).forum, f1)
a2 = models.Attachment.objects.create()
self.assertEqual(models.Attachment.objects.get(id=a2.id).forum, None)
# Change forum
p1.forum = f2
p1.save()
denorm.flush()
self.assertEqual(models.Attachment.objects.get(id=a1.id).forum, f2)
# test denorm function returning object, not PK
models.Attachment.forum_as_object = True
a3 = models.Attachment.objects.create(post=p1)
models.Attachment.forum_as_object = False
def test_m2m(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
models.Post.objects.create(forum=f1, author=m1)
denorm.flush()
# check the forums author list contains the member
self.assertTrue(m1 in models.Forum.objects.get(id=f1.id).authors.all())
m2 = models.Member.objects.create(name="membertwo")
p2 = models.Post.objects.create(forum=f1, author=m2)
denorm.flush()
self.assertTrue(m1 in models.Forum.objects.get(id=f1.id).authors.all())
self.assertTrue(m2 in models.Forum.objects.get(id=f1.id).authors.all())
p2.delete()
denorm.flush()
self.assertTrue(m2 not in models.Forum.objects.get(id=f1.id).authors.all())
def test_denorm_rebuild(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
p1 = models.Post.objects.create(forum=f1, author=m1)
denorm.denorms.rebuildall()
f1 = models.Forum.objects.get(id=f1.id)
m1 = models.Member.objects.get(id=m1.id)
p1 = models.Post.objects.get(id=p1.id)
self.assertEqual(f1.post_count, 1)
self.assertEqual(f1.authors.all()[0], m1)
def test_denorm_update(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
p1 = models.Post.objects.create(forum=f1, author=m1)
a1 = models.Attachment.objects.create(post=p1)
denorm.denorms.rebuildall()
f2 = models.Forum.objects.create(title="forumtwo")
p1.forum = f2
p1.save()
# BUG https://github.com/initcrash/django-denorm/issues/24
# We have to update the Attachment.forum field first to trigger this bug. Simply doing rebuildall() will
        # trigger an a1.save() at some earlier point during the update. By the time we get to updating the value of
        # the forum field, the value is already correct and no update is done, bypassing the broken code.
denorm.denorms.rebuildall(model_name='Attachment', field_name='forum')
def test_denorm_subclass(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
p1 = models.Post.objects.create(forum=f1, author=m1)
self.assertEqual(f1.tags_string, '')
self.assertEqual(p1.tags_string, '')
models.Tag.objects.create(name='tagone', content_object=f1)
models.Tag.objects.create(name='tagtwo', content_object=f1)
denorm.denorms.flush()
f1 = models.Forum.objects.get(id=f1.id)
m1 = models.Member.objects.get(id=m1.id)
p1 = models.Post.objects.get(id=p1.id)
self.assertEqual(f1.tags_string, 'tagone, tagtwo')
self.assertEqual(p1.tags_string, '')
models.Tag.objects.create(name='tagthree', content_object=p1)
t4 = models.Tag.objects.create(name='tagfour', content_object=p1)
denorm.denorms.flush()
f1 = models.Forum.objects.get(id=f1.id)
m1 = models.Member.objects.get(id=m1.id)
p1 = models.Post.objects.get(id=p1.id)
self.assertEqual(f1.tags_string, 'tagone, tagtwo')
self.assertEqual(p1.tags_string, 'tagfour, tagthree')
t4.content_object = f1
t4.save()
denorm.denorms.flush()
f1 = models.Forum.objects.get(id=f1.id)
m1 = models.Member.objects.get(id=m1.id)
p1 = models.Post.objects.get(id=p1.id)
self.assertEqual(f1.tags_string, 'tagfour, tagone, tagtwo')
self.assertEqual(p1.tags_string, 'tagthree')
def test_cache_key_field_backward(self):
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumtwo")
ck1 = f1.cachekey
ck2 = f2.cachekey
p1 = models.Post.objects.create(forum=f1)
f1 = models.Forum.objects.get(id=f1.id)
f2 = models.Forum.objects.get(id=f2.id)
self.assertNotEqual(ck1, f1.cachekey)
self.assertEqual(ck2, f2.cachekey)
ck1 = f1.cachekey
ck2 = f2.cachekey
p1 = models.Post.objects.get(id=p1.id)
p1.forum = f2
p1.save()
f1 = models.Forum.objects.get(id=f1.id)
f2 = models.Forum.objects.get(id=f2.id)
self.assertNotEqual(ck1, f1.cachekey)
self.assertNotEqual(ck2, f2.cachekey)
def test_cache_key_field_forward(self):
f1 = models.Forum.objects.create(title="forumone")
p1 = models.Post.objects.create(title='initial_title', forum=f1)
a1 = models.Attachment.objects.create(post=p1)
a2 = models.Attachment.objects.create(post=p1)
a1 = models.Attachment.objects.get(id=a1.id)
a2 = models.Attachment.objects.get(id=a2.id)
self.assertNotEqual(a1.cachekey, a2.cachekey)
ck1 = a1.cachekey
ck2 = a2.cachekey
p1.title = 'new_title'
p1.save()
a1 = models.Attachment.objects.get(id=a1.id)
a2 = models.Attachment.objects.get(id=a2.id)
self.assertNotEqual(ck1, a1.cachekey)
self.assertNotEqual(ck2, a2.cachekey)
a1 = models.Attachment.objects.get(id=a1.id)
a2 = models.Attachment.objects.get(id=a2.id)
self.assertNotEqual(a1.cachekey, a2.cachekey)
def test_cache_key_field_m2m(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
p1 = models.Post.objects.create(title='initial_title', forum=f1)
m1 = models.Member.objects.get(id=m1.id)
ck1 = m1.cachekey
m1.bookmarks.add(p1)
m1 = models.Member.objects.get(id=m1.id)
self.assertNotEqual(ck1, m1.cachekey)
ck1 = m1.cachekey
p1 = models.Post.objects.get(id=p1.id)
p1.title = 'new_title'
p1.save()
m1 = models.Member.objects.get(id=m1.id)
self.assertNotEqual(ck1, m1.cachekey)
if not hasattr(django.db.backend, 'sqlite3'):
class TestFilterCount(TestCase):
"""
Tests for the filtered count feature.
"""
def setUp(self):
denorms.drop_triggers()
denorms.install_triggers()
def test_filter_count(self):
master = models.FilterCountModel.objects.create()
self.assertEqual(master.active_item_count, 0)
master.items.create(active=True, text='text')
master.items.create(active=True, text='')
master = models.FilterCountModel.objects.get(id=master.id)
self.assertEqual(master.active_item_count, 1, 'created active item')
master.items.create(active=False)
master = models.FilterCountModel.objects.get(id=master.id)
self.assertEqual(master.active_item_count, 1, 'created inactive item')
master.items.create(active=True, text='true')
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 2)
master.items.filter(active=False).delete()
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 2)
master.items.filter(active=True, text='true')[0].delete()
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 1)
item = master.items.filter(active=True, text='text')[0]
item.active = False
item.save()
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 0)
item = master.items.filter(active=False, text='text')[0]
item.active = True
item.text = ''
item.save()
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 0)
item = master.items.filter(active=True, text='')[0]
item.text = '123'
item.save()
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 1)
class TestFilterCountM2M(TestCase):
"""
Tests for the filtered count feature.
"""
def setUp(self):
denorms.drop_triggers()
denorms.install_triggers()
def test_filter_count(self):
master = models.FilterCountModel.objects.create()
self.assertEqual(master.active_item_count, 0)
master.items.create(active=True, text='true')
master = models.FilterCountModel.objects.get(id=master.id)
self.assertEqual(master.active_item_count, 1, 'created active item')
master.items.create(active=False, text='true')
master = models.FilterCountModel.objects.get(id=master.id)
self.assertEqual(master.active_item_count, 1, 'created inactive item')
master.items.create(active=True, text='true')
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 2)
master.items.filter(active=False).delete()
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 2)
master.items.filter(active=True)[0].delete()
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 1)
item = master.items.filter(active=True)[0]
item.active = False
item.save()
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 0)
item = master.items.filter(active=False)[0]
item.active = True
item.save()
master = models.FilterCountModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_count, 1)
class TestFilterSum(TestCase):
"""
Tests for the filtered count feature.
"""
def setUp(self):
denorms.drop_triggers()
denorms.install_triggers()
def test_filter_count(self):
master = models.FilterSumModel.objects.create()
self.assertEqual(master.active_item_sum, 0)
master.counts.create(age=18, active_item_count=8)
master = models.FilterSumModel.objects.get(id=master.id)
self.assertEqual(master.active_item_sum, 8)
master.counts.create(age=16, active_item_count=10)
master = models.FilterSumModel.objects.get(id=master.id)
self.assertEqual(master.active_item_sum, 8, 'created inactive item')
master.counts.create(age=19, active_item_count=9)
master = models.FilterSumModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_sum, 17)
master.counts.filter(age__lt=18).delete()
master = models.FilterSumModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_sum, 17)
master.counts.filter(age=19)[0].delete()
master = models.FilterSumModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_sum, 8)
item = master.counts.filter(age=18)[0]
item.age = 15
item.save()
master = models.FilterSumModel.objects.get(pk=master.pk)
self.assertEqual(master.active_item_sum, 0)
item = master.counts.filter(age=15)[0]
item.age = 18
            item.save()
            master = models.FilterSumModel.objects.get(pk=master.pk)

# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for deterministic functionality of SoftmaxCrossEntropyWithLogits op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.kernel_tests import xent_op_test_base
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
# The following import is required to register the gradient function.
from tensorflow.python.ops.nn_grad import _SoftmaxCrossEntropyWithLogitsGrad # pylint: disable=unused-import
from tensorflow.python.platform import test
class XentOpDeterminismExceptionsTest(test.TestCase):
"""Test d9m-unimplemented exceptions from SoftmaxXentWithLogitsOp.
Test that tf.errors.UnimplementedError is thrown, as appropriate, by the GPU
code-paths through SoftmaxXentWithLogitsOp when deterministic ops are
enabled.
This test assumes that xent_op_test.py runs equivalent test cases when
deterministic ops are not enabled and will therefore detect erroneous
exception throwing in those cases.
"""
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testExceptionThrowing(self):
with self.session(), test_util.force_gpu():
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
features = constant_op.constant([[0.3, 0.5], [0.5, 0.6]], dtype=dtype)
labels = constant_op.constant([[0.2, 0.4], [0.1, 0.2]], dtype=dtype)
with self.assertRaisesRegex(
errors_impl.UnimplementedError,
"The GPU implementation of SoftmaxCrossEntropyWithLogits that " +
"would have been executed is not deterministic. Note that the " +
"Python API uses an alternative, deterministic, GPU-accelerated " +
"path when determinism is enabled."):
result = gen_nn_ops.softmax_cross_entropy_with_logits(
features=features, labels=labels)
self.evaluate(result)
class XentOpDeterministicTest(xent_op_test_base.XentOpTestBase):
"""Test that SoftmaxCrossEntropyWithLogits operates reproducibly.
  Inheriting from xent_op_test_base.XentOpTestBase ensures that regular op
functionality is correct when the deterministic code-path is selected.
Note that because nn_ops.softmax_cross_entropy_with_logits calls
nn_ops.cross_entropy_with_logits_v2, the focus of testing is on the
former in order to test both.
"""
def _randomFloats(self, shape, dtype, normalized_rows=False):
a = (2 * np.random.random_sample(shape) - 1).astype(dtype)
if normalized_rows:
def normalize(row):
return row / row.sum()
a = np.apply_along_axis(normalize, 1, a)
return constant_op.constant(a)
def _generateInputs(self, dtype, seed=123, forward_not_backward=False):
batch_size = 1024
if forward_not_backward and dtype == np.float16:
# Generate more noise to expose the internal float32 implementation.
# This is associated with significantly slower test cases (esp. on CPU).
classes_count = 20000
else:
classes_count = 3000
shape = (batch_size, classes_count)
np.random.seed(seed)
labels = self._randomFloats(shape, dtype, normalized_rows=True)
logits = self._randomFloats(shape, dtype)
return labels, logits
@test_util.run_in_graph_and_eager_modes
def testForward(self):
with self.cached_session():
for dtype in [np.float16, np.float32, np.float64]:
for trial in range(5):
seed = 123 + trial
labels, logits = self._generateInputs(
dtype, seed=seed, forward_not_backward=True)
result_a = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
result_b = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
self.assertAllEqual(result_a, result_b)
@test_util.run_in_graph_and_eager_modes
def testBackward(self):
with self.cached_session():
for dtype in [np.float16, np.float32, np.float64]:
labels, logits = self._generateInputs(dtype, seed=456)
output_shape = labels.shape[0]
def gradients(seed):
np.random.seed(seed)
upstream_gradients = self._randomFloats(output_shape, dtype)
with backprop.GradientTape(persistent=True) as tape:
tape.watch(labels)
tape.watch(logits)
op_output = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
gradient_injector_output = op_output * upstream_gradients
return tape.gradient(gradient_injector_output, [labels, logits])
for trial in range(5):
seed = 456 + trial
labels_grad_a, logits_grad_a = gradients(seed=seed)
labels_grad_b, logits_grad_b = gradients(seed=seed)
self.assertAllEqual(labels_grad_a, labels_grad_b)
self.assertAllEqual(logits_grad_a, logits_grad_b)
# Modifications to the parent class (xent_op_test_base.XentOpTestBase) follow
def testSingleClass(self):
"""Modify testing of gradient for single-class case.
The deterministic implementation does not produce the gradients expected by
the original test (for the nondeterministic functionality) when the labels
vector is not a valid probability distribution.
labels: [[-1.], [0.], [1.], [1.]]
logits: [[1.], [-1.], [0.], [1.]]
nondeterministic deterministic
dloss/dlogits: [[2.0], [1.0], [0.0], [0.0]] [[0.0], [0.0], [0.0], [0.0]]
    Note that only the second two label vectors are valid probability
    distributions (as required by the API) and that the gradient matches for

# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.lib.common import rest_client
class RolesClient(rest_client.RestClient):
api_version = "v3"
def create_role(self, **kwargs):
"""Create a Role.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/identity/v3/index.html#create-role
"""
post_body = json.dumps({'role': kwargs})
resp, body = self.post('roles', post_body)
self.expected_success(201, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
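    # Illustrative call (the client variable and role name are example values); the
    # keyword arguments are serialized directly as the 'role' request body:
    #   roles_client.create_role(name='reader')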
def show_role(self, role_id):
"""GET a Role."""
resp, body = self.get('roles/%s' % role_id)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def list_roles(self, **params):
"""Get the list of Roles.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/identity/v3/index.html#list-roles
"""
url = 'roles'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def update_role(self, role_id, **kwargs):
"""Update a Role.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/identity/v3/index.html#update-role
"""
post_body = json.dumps({'role': kwargs})
resp, body = self.patch('roles/%s' % role_id, post_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def delete_role(self, role_id):
"""Delete a role."""
resp, body = self.delete('roles/%s' % role_id)
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
def create_user_role_on_project(self, project_id, user_id, role_id):
"""Add roles to a user on a project."""
resp, body = self.put('projects/%s/users/%s/roles/%s' %
(project_id, user_id, role_id), None)
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
def create_user_role_on_domain(self, domain_id, user_id, role_id):
"""Add roles to a user on a domain."""
resp, body = self.put('domains/%s/users/%s/roles/%s' %
(domain_id, user_id, role_id), None)
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
def list_user_roles_on_project(self, project_id, user_id):
"""list roles of a user on a project."""
resp, body = self.get('projects/%s/users/%s/roles' %
(project_id, user_id))
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def list_user_roles_on_domain(self, domain_id, user_id):
"""list roles of a user on a domain."""
resp, body = self.get('domains/%s/users/%s/roles' %
(domain_id, user_id))
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def delete_role_from_user_on_project(self, project_id, user_id, role_id):
"""Delete role of a user on a project."""
resp, body = self.delete('projects/%s/users/%s/roles/%s' %
(project_id, user_id, role_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
def delete_role_from_user_on_domain(self, domain_id, user_id, role_id):
"""Delete role of a user on a domain."""
resp, body = self.delete('domains/%s/users/%s/roles/%s' %
(domain_id, user_id, role_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
def check_user_role_existence_on_project(self, project_id,
user_id, role_id):
"""Check role of a user on a project."""
resp, body = self.head('projects/%s/users/%s/roles/%s' %
(project_id, user_id, role_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
def check_user_role_existence_on_domain(self, domain_id,
user_id, role_id):
"""Check role of a user on a domain."""
resp, body = self.head('domains/%s/users/%s/roles/%s' %
(domain_id, user_id, role_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
def create_group_role_on_project(self, project_id, group_id, role_id):
"""Add roles to a group on a project."""
resp, body = self.put('projects/%s/groups/%s/roles/%s' %
(project_id, group_id, role_id), None)
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
def create_group_role_on_domain(self, domain_id, group_id, role_id):
"""Add roles to a group on a domain."""
resp, body = self.put('domains/%s/groups/%s/roles/%s' %
(domain_id, group_id, role_id), None)
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
def list_group_roles_on_project(self, project_id, group_id):
"""list roles of a group on a project."""
resp, body = self.get('projects/%s/groups/%s/roles' %
(project_id, group_id))
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def list_group_roles_on_domain(self, domain_id, group_id):
"""list roles of a group on a domain."""
resp, body = self.get('domains/%s/groups/%s/roles' %
(domain_id, group_id))
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def delete_role_from_group_on_project(self, project_id, group_id, role_id):
"""Delete role of a group on a project."""
resp, body = self.delete('projects/%s/groups/%s/roles/%s' %
(project_id, group_id, role_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
def delete_role_from_group_on_domain(self, domain_id, group_id, role_id):
"""Delete role of a group on a domain."""
resp, body = self.delete('domains/%s/groups/%s/roles/%s' %
(domain_id, group_id, role_id))
        self.expected_success(204, resp.status)
        return rest_client.ResponseBody(resp, body)

#!/usr/bin/env python3
'''
Performs normalisation to FFV1/Matroska.
This performs a basic normalisation and does not enforce any folder structure.
This supercedes makeffv1 within our workflows. This is mostly because makeffv1 imposes a specific, outdated
folder structure, and it's best to let SIPCREATOR handle the folder structure and let normalise.py handle
the actual normalisation.
'''
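# Example invocation (paths and values are illustrative; see parse_args() for all options):
#   python3 normalise.py -i /path/to/input_folder -o /path/to/output_folder -sip -user 'Example Name' -oe oe_number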
import sys
import os
import subprocess
import argparse
import shutil
import time
import ififuncs
import sipcreator
def parse_args(args_):
'''
Parse command line arguments.
'''
parser = argparse.ArgumentParser(
description='Performs normalisation to FFV1/Matroska.'
' Written by Kieran O\'Leary.'
)
parser.add_argument(
'-i',
help='full path of input file or directory', required=True
)
parser.add_argument(
'-o', '-output',
help='full path of output directory', required=True
)
parser.add_argument(
'-sip',
help='Run sipcreator.py on the resulting file.', action='store_true'
)
parser.add_argument(
'-user',
help='Declare who you are. If this is not set, you will be prompted.')
parser.add_argument(
'-oe',
        help='Enter the Object Entry number for the representation. The SIP will be placed in a folder with this name.'
)
parser.add_argument(
'-supplement', nargs='+',
help='Enter the full path of files or folders that are to be added to the supplemental subfolder within the metadata folder. Use this for information that supplements your preservation objects but is not to be included in the objects folder.'
)
parsed_args = parser.parse_args(args_)
return parsed_args
def normalise_process(filename, output_folder):
'''
Begins the actual normalisation process using FFmpeg
'''
output_uuid = ififuncs.create_uuid()
print(' - The following UUID has been generated: %s' % output_uuid)
output = "%s/%s.mkv" % (
output_folder, output_uuid
)
print(' - The normalised file will have this filename: %s' % output)
fmd5 = "%s/%s_source.framemd5" % (
output_folder, os.path.basename(filename)
)
print(' - Framemd5s for each frame of your input file will be stored in: %s' % fmd5)
ffv1_logfile = os.path.join(output_folder, '%s_normalise.log' % output_uuid)
print(' - The FFmpeg logfile for the transcode will be stored in: %s' % ffv1_logfile)
print(' - FFmpeg will begin normalisation now.')
ffv1_env_dict = ififuncs.set_environment(ffv1_logfile)
ffv1_command = [
'ffmpeg',
'-i', filename,
'-c:v', 'ffv1', # Use FFv1 codec
'-g', '1', # Use intra-frame only aka ALL-I aka GOP=1
'-level', '3', # Use Version 3 of FFv1
'-c:a', 'copy', # Copy and paste audio bitsream with no transcoding
'-map', '0',
'-dn',
'-report',
'-slicecrc', '1',
'-slices', '16',
]
if ififuncs.check_for_fcp(filename) is True:
print(' - A 720/576 file with no Pixel Aspect Ratio and scan type metadata has been detected.')
ffv1_command += [
'-vf',
'setfield=tff, setdar=4/3'
]
print(' - -vf setfield=tff, setdar=4/3 will be added to the FFmpeg command.')
ffprobe_dict = ififuncs.get_ffprobe_dict(filename)
# let's stipulate the colour metadata if not present for SD PAL material.
if not ififuncs.get_colour_metadata(ffprobe_dict):
ffv1_command += ['-color_primaries', 'bt470bg', '-color_trc', 'bt709', '-colorspace', 'bt470bg' ]
elif ififuncs.check_for_blackmagic(filename) is True:
print(' - A 720/576 with TFF scan type, clap atom featuring 702 width and a PAR of 1.093 has been detected.')
ffv1_command += ['-vf', 'setdar=4/3', '-color_primaries', 'bt470bg', '-color_trc', 'bt709', '-colorspace', 'bt470bg']
ffv1_command += [
output,
'-f', 'framemd5', '-an', # Create decoded md5 checksums for every frame of the input. -an ignores audio
fmd5
]
print(ffv1_command)
subprocess.call(ffv1_command, env=ffv1_env_dict)
return output, output_uuid, fmd5, ffv1_logfile
def verify_losslessness(output_folder, output, output_uuid, fmd5):
'''
Verify the losslessness of the process using framemd5.
An additional metadata check should also occur.
'''
verdict = 'undeclared'
fmd5_logfile = os.path.join(output_folder, '%s_framemd5.log' % output_uuid)
fmd5ffv1 = "%s/%s.framemd5" % (output_folder, output_uuid)
print(' - Framemd5s for each frame of your output file will be stored in: %s' % fmd5ffv1)
fmd5_env_dict = ififuncs.set_environment(fmd5_logfile)
print(' - FFmpeg will attempt to verify the losslessness of the normalisation by using Framemd5s.')
fmd5_command = [
'ffmpeg', # Create decoded md5 checksums for every frame
'-i', output,
'-report',
'-f', 'framemd5', '-an',
fmd5ffv1
]
print(fmd5_command)
subprocess.call(fmd5_command, env=fmd5_env_dict)
checksum_mismatches = ififuncs.diff_framemd5s(fmd5, fmd5ffv1)
if len(checksum_mismatches) == 1:
if checksum_mismatches[0] == 'sar':
print('Image is lossless, but the Pixel Aspect Ratio is different than the source - this may have been intended.')
verdict = 'Image is lossless, but the Pixel Aspect Ratio is different than the source - this may have been intended.'
else:
print('not lossless')
verdict = 'not lossless'
elif len(checksum_mismatches) > 1:
print('not lossless')
verdict = 'not lossless'
elif len(checksum_mismatches) == 0:
print('YOUR FILES ARE LOSSLESS YOU SHOULD BE SO HAPPY!!!')
verdict = 'lossless'
return fmd5_logfile, fmd5ffv1, verdict
def main(args_):
ififuncs.check_existence(['ffmpeg', 'mediainfo'])
print('\n - Normalise.py started')
args = parse_args(args_)
print(args)
source = args.i
output_folder = args.o
file_list = sorted(ififuncs.get_video_files(source))
if args.sip:
if args.user:
user = args.user
else:
            user = ififuncs.get_user()
        if args.oe:

# Copyright 2014 - Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create initial Solum DB schema
Revision ID: 498adc6185ae
Revises: None
Create Date: 2014-02-03 15:51:20.819539
"""
from alembic import op
import sqlalchemy as sa
from solum.objects.sqlalchemy import models
from oslo_utils import timeutils
# revision identifiers, used by Alembic.
revision = '498adc6185ae'
down_revision = None
def upgrade():
op.create_table(
'sensor',
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('uuid', sa.String(length=36), nullable=False),
sa.Column('created_at', sa.DateTime, default=timeutils.utcnow),
sa.Column('updated_at', sa.DateTime, onupdate=timeutils.utcnow),
sa.Column('project_id', sa.String(length=36)),
sa.Column('user_id', sa.String(length=36)),
sa.Column('name', sa.String(255)),
sa.Column('sensor_type', sa.String(255)),
sa.Column('value', sa.String(255)),
sa.Column('timestamp', sa.DateTime),
sa.Column('description', sa.String(255)),
sa.Column('documentation', sa.String(255)),
sa.Column('target_resource', sa.String(255)),
)
op.create_table(
'operation',
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('uuid', sa.String(length=36), nullable=False),
sa.Column('created_at', sa.DateTime, default=timeutils.utcnow),
sa.Column('updated_at', sa.DateTime, onupdate=timeutils.utcnow),
sa.Column('name', sa.String(100)),
sa.Column('description', sa.String(255)),
sa.Column('project_id', sa.String(36)),
sa.Column('user_id', sa.String(36)),
sa.Column('tags', sa.Text),
sa.Column('documentation', sa.Text),
sa.Column('target_resource', sa.Text)
)
op.create_table(
'image',
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('uuid', sa.String(36), nullable=False),
sa.Column('created_at', sa.DateTime, default=timeutils.utcnow),
sa.Column('updated_at', sa.DateTime, onupdate=timeutils.utcnow),
sa.Column('name', sa.String(100)),
sa.Column('description', sa.String(255)),
sa.Column('source_uri', sa.String(1024)),
sa.Column('project_id', sa.String(36)),
sa.Column('user_id', sa.String(36)),
sa.Column('tags', sa.Text),
sa.Column('state', sa.String(12)),
sa.Column('base_image_id', sa.String(length=36)),
sa.Column('created_image_id', sa.String(length=36)),
sa.Column('image_format', sa.String(length=12)),
sa.Column('source_format', sa.String(length=36)),
)
op.create_table(
'extension',
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('uuid', sa.String(length=36), nullable=False),
sa.Column('created_at', sa.DateTime, default=timeutils.utcnow),
sa.Column('updated_at', sa.DateTime, onupdate=timeutils.utcnow),
sa.Column('project_id', sa.String(36)),
sa.Column('user_id', sa.String(36)),
sa.Column('description', sa.String(255)),
sa.Column('name', sa.String(100)),
sa.Column('version', sa.String(16)),
sa.Column('documentation', sa.String(255)),
# NOTE(stannie): tags will be added in a dedicated table
)
op.create_table(
'plan',
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('uuid', sa.String(length=36), nullable=False),
sa.Column('created_at', sa.DateTime, default=timeutils.utcnow),
sa.Column('updated_at', sa.DateTime, onupdate=timeutils.utcnow),
sa.Column('project_id', sa.String(length=36)),
sa.Column('user_id', sa.String(length=36)),
sa.Column('raw_content', models.YAMLEncodedDict(2048)),
sa.Column('description', sa.String(length=255)),
sa.Column('name', sa.String(255)),
sa.Column('deploy_keys_uri', sa.String(length=1024)),
)
op.create_table(
'assembly',
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('uuid', sa.String(length=36), nullable=False),
sa.Column('created_at', sa.DateTime, default=timeutils.utcnow),
sa.Column('updated_at', sa.DateTime, onupdate=timeutils.utcnow),
sa.Column('name', sa.String(100)),
sa.Column('description', sa.String(255)),
sa.Column('project_id', sa.String(36)),
sa.Column('user_id', sa.String(36)),
sa.Column('tags', sa.Text),
sa.Column('plan_id', sa.Integer, sa.ForeignKey('plan.id'),
                  nullable=False),
        sa.Column('status', sa.String(length=36)),

"""
This code includes a simple dense layer.
A dense layer is also known as a fully-connected layer.
"""
import numpy as np
import theano
import theano.tensor as T
from collections import OrderedDict
from lemontree.layers.layer import BaseLayer
class DenseLayer(BaseLayer):
"""
This class implements dense layer connection.
"""
def __init__(self, input_shape, output_shape, use_bias=True, target_cpu=False):
"""
This function initializes the class.
Input is 2D tensor, output is 2D tensor.
        If a batch normalization layer follows, set use_bias = False for efficiency.
Parameters
----------
input_shape: tuple
a tuple of single value, i.e., (input dim,)
        output_shape: tuple
a tuple of single value, i.e., (output dim,)
use_bias: bool, default: True
a bool value whether we use bias or not.
target_cpu: bool, default: False
a bool value whether shared variable will be on cpu or gpu.
"""
super(DenseLayer, self).__init__()
# check asserts
assert isinstance(input_shape, tuple) and len(input_shape) == 1, '"input_shape" should be a tuple with single value.'
assert isinstance(output_shape, tuple) and len(output_shape) == 1, '"output_shape" should be a tuple with single value.'
assert isinstance(use_bias, bool), '"use_bias" should be a bool value.'
assert isinstance(target_cpu, bool), '"target_cpu" should be a bool value.'
# set members
self.input_shape = input_shape
self.output_shape = output_shape
self.use_bias = use_bias
self.target_cpu = target_cpu
def set_shared(self):
"""
This function overrides the parents' one.
Set shared variables.
Shared Variables
----------------
W: 2D matrix
shape is (input dim, output dim).
b: 1D vector
shape is (output dim,).
"""
W = np.zeros((self.input_shape[0], self.output_shape[0])).astype(theano.config.floatX) # weight matrix
if self.target_cpu:
self.W = theano.shared(W, self.name + '_weight', target='cpu')
else:
self.W = theano.shared(W, self.name + '_weight')
self.W.tags = ['weight', self.name]
b = np.zeros(self.output_shape,).astype(theano.config.floatX) # bias vector, initialize with 0.
if self.target_cpu:
self.b = theano.shared(b, self.name + '_bias', target='cpu')
else:
self.b = theano.shared(b, self.name + '_bias')
self.b.tags = ['bias', self.name]
def set_shared_by(self, params):
if self.use_bias:
self.W = params[0]
self.b = params[1]
else:
self.W = params[0]
def get_output(self, input_):
"""
        This function overrides the parent's method.
Creates symbolic function to compute output from an input.
Math Expression
-------------------
        Y = dot(X, W) + b    (when use_bias is True)
        Y = dot(X, W)        (when use_bias is False)
        The bias is broadcast automatically over the batch axis (a Theano feature).
Parameters
----------
input_: TensorVariable
Returns
-------
TensorVariable
"""
if self.use_bias:
return T.dot(input_, self.W) + self.b
else:
return T.dot(input_, self.W)
def get_params(self):
"""
        This function overrides the parent's method.
        Returns internal layer parameters.
Returns
-------
list
a list of shared variables used.
"""
if self.use_bias:
return [self.W, self.b]
else:
return [self.W]
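# A quick NumPy check of the math documented above, as a minimal sketch only
# (the helper name below is illustrative and not part of this module):
# Y = dot(X, W) + b with X: (batch, input_dim), W: (input_dim, output_dim) and
# b: (output_dim,), which is broadcast over the batch axis.
def _dense_reference(x, w, b=None):
    y = np.dot(x, w)
    return y if b is None else y + b

assert _dense_reference(np.ones((2, 3)), np.ones((3, 4)), np.zeros(4)).shape == (2, 4)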
class TimeDistributedDenseLayer(BaseLayer):
"""
This class implements time distributed dense layer connection.
"""
def __init__(self, input_shape, output_shape, use_bias=True, target_cpu=False):
"""
This function initializes the class.
        Input is a 3D tensor, output is a 3D tensor.
        If a batch normalization layer follows, set use_bias = False for efficiency.
Parameters
----------
input_shape: tuple
a tuple of single value, i.e., (input dim,)
        output_shape: tuple
a tuple of single value, i.e., (output dim,)
use_bias: bool, default: True
a bool value whether we use bias or not.
target_cpu: bool, default: False
a bool value whether shared variable will be on cpu or gpu.
"""
super(TimeDistributedDenseLayer, self).__init__()
# check asserts
assert isinstance(input_shape, tuple) and len(input_shape) == 1, '"input_shape" should be a tuple with single value.'
assert isinstance(output_shape, tuple) and len(output_shape) == 1, '"output_shape" should be a tuple with single value.'
assert isinstance(use_bias, bool), '"use_bias" should be a bool value.'
assert isinstance(target_cpu, bool), '"target_cpu" should be a bool value.'
# set members
self.input_shape = input_shape
self.output_shape = output_shape
self.use_bias = use_bias
self.target_cpu = target_cpu
def set_shared(self):
"""
        This function overrides the parent's method.
Set shared variables.
Shared Variables
----------------
W: 2D matrix
shape is (input dim, output dim).
b: 1D vector
shape is (output dim,).
"""
W = np.zeros((self.input_shape[0], self.output_shape[0])).astype(theano.config.floatX) # weight matrix
if self.target_cpu:
self.W = theano.shared(W, self.name + '_weight', target='cpu')
else:
self.W = theano.shared(W, self.name + '_weight')
self.W.tags = ['weight', self.name]
b = np.zeros(self.output_shape,).astype(theano.config.floatX) # bias vector, initialize with 0.
if self.target_cpu:
self.b = theano.shared(b, self.name + '_bias', target='cpu')
else:
self.b = theano.shared(b, self.name + '_bias')
self.b.tags = ['bias', self.name]
def set_shared_by(self, params):
if self.use_bias:
self.W = params[0]
self.b = params[1]
else:
self.W = params[0]
def get_output(self, input_):
"""
        This function overrides the parent's method.
Creates symbolic function to compute output from an input.
Math Expression
-------------------
        Y = dot(X, W) + b    (when use_bias is True)
        Y = dot(X, W)        (when use_bias is False)
        The bias is broadcast automatically over the batch axis (a Theano feature).
| [
" Parameters"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
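TimeDistributedDenseLayer above applies one (input dim, output dim) weight matrix independently at every time step of a 3D (batch, time, input dim) tensor. A minimal NumPy sketch of that behaviour, assuming the reshape-based formulation (the function name is illustrative, not part of the library):

import numpy as np

def time_distributed_dense_reference(x, w, b=None):
    # x: (batch, time, input_dim), w: (input_dim, output_dim), b: (output_dim,)
    batch, time, input_dim = x.shape
    y = x.reshape(batch * time, input_dim).dot(w)
    if b is not None:
        y = y + b
    return y.reshape(batch, time, -1)

assert time_distributed_dense_reference(
    np.ones((2, 5, 3)), np.ones((3, 4)), np.zeros(4)).shape == (2, 5, 4)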
import json
from datetime import datetime
from html2text import html2text
from dojo.models import Finding
class MobSFParser(object):
def get_scan_types(self):
return ["MobSF Scan"]
def get_label_for_scan_types(self, scan_type):
return "MobSF Scan"
def get_description_for_scan_types(self, scan_type):
return "Export a JSON file using the API, api/v1/report_json."
def get_findings(self, filename, test):
tree = filename.read()
try:
data = json.loads(str(tree, 'utf-8'))
except:
data = json.loads(tree)
find_date = datetime.now()
dupes = {}
test_description = ""
if "name" in data:
test_description = "**Info:**\n"
if "packagename" in data:
test_description = "%s **Package Name:** %s\n" % (test_description, data["packagename"])
if "mainactivity" in data:
test_description = "%s **Main Activity:** %s\n" % (test_description, data["mainactivity"])
if "pltfm" in data:
test_description = "%s **Platform:** %s\n" % (test_description, data["pltfm"])
if "sdk" in data:
test_description = "%s **SDK:** %s\n" % (test_description, data["sdk"])
if "min" in data:
test_description = "%s **Min SDK:** %s\n" % (test_description, data["min"])
if "targetsdk" in data:
test_description = "%s **Target SDK:** %s\n" % (test_description, data["targetsdk"])
if "minsdk" in data:
test_description = "%s **Min SDK:** %s\n" % (test_description, data["minsdk"])
if "maxsdk" in data:
test_description = "%s **Max SDK:** %s\n" % (test_description, data["maxsdk"])
test_description = "%s\n**File Information:**\n" % (test_description)
if "name" in data:
test_description = "%s **Name:** %s\n" % (test_description, data["name"])
if "md5" in data:
test_description = "%s **MD5:** %s\n" % (test_description, data["md5"])
if "sha1" in data:
test_description = "%s **SHA-1:** %s\n" % (test_description, data["sha1"])
if "sha256" in data:
test_description = "%s **SHA-256:** %s\n" % (test_description, data["sha256"])
if "size" in data:
test_description = "%s **Size:** %s\n" % (test_description, data["size"])
if "urls" in data:
curl = ""
for url in data["urls"]:
for curl in url["urls"]:
curl = "%s\n" % (curl)
if curl:
test_description = "%s\n**URL's:**\n %s\n" % (test_description, curl)
if "bin_anal" in data:
test_description = "%s \n**Binary Analysis:** %s\n" % (test_description, data["bin_anal"])
test.description = html2text(test_description)
mobsf_findings = []
# Mobile Permissions
if "permissions" in data:
# for permission, details in data["permissions"].items():
if type(data["permissions"]) is list:
for details in data["permissions"]:
mobsf_item = {
"category": "Mobile Permissions",
"title": details.get("name", ""),
"severity": self.getSeverityForPermission(details.get("status")),
"description": "**Permission Type:** " + details.get("name", "") + " (" + details.get("status", "") + ")\n\n**Description:** " + details.get("description", "") + "\n\n**Reason:** " + details.get("reason", ""),
"file_path": None
}
mobsf_findings.append(mobsf_item)
else:
for permission, details in list(data["permissions"].items()):
mobsf_item = {
"category": "Mobile Permissions",
"title": permission,
"severity": self.getSeverityForPermission(details.get("status", "")),
"description": "**Permission Type:** " + permission + "\n\n**Description:** " + details.get("description", ""),
"file_path": None
}
mobsf_findings.append(mobsf_item)
# Insecure Connections
if "insecure_connections" in data:
for details in data["insecure_connections"]:
insecure_urls = ""
for url in details.split(','):
insecure_urls = insecure_urls + url + "\n"
mobsf_item = {
"category": None,
"title": "Insecure Connections",
"severity": "Low",
"description": insecure_urls,
"file_path": None
}
mobsf_findings.append(mobsf_item)
# Binary Analysis
if "binary_analysis" in data:
if type(data["binary_analysis"]) is list:
for details in data["binary_analysis"]:
for binary_analysis_type in details:
if "name" != binary_analysis_type:
mobsf_item = {
"category": "Binary Analysis",
"title": details[binary_analysis_type]["description"].split(".")[0],
"severity": details[binary_analysis_type]["severity"].replace("warning", "low").title(),
"description": details[binary_analysis_type]["description"],
"file_path": details["name"]
}
mobsf_findings.append(mobsf_item)
else:
for binary_analysis_type, details in list(data["binary_analysis"].items()):
# "Binary makes use of insecure API(s)":{
# "detailed_desc":"The binary may contain the following insecure API(s) _vsprintf.",
# "severity":"high",
# "cvss":6,
# "cwe":"CWE-676 - Use of Potentially Dangerous Function",
# "owasp-mobile":"M7: Client Code Quality",
# "masvs":"MSTG-CODE-8"
# }
mobsf_item = {
"category": "Binary Analysis",
"title": details["detailed_desc"],
"severity": details["severity"].replace("good", "info").title(),
"description": details["detailed_desc"],
"file_path": None
}
mobsf_findings.append(mobsf_item)
# specific node for Android reports
if "android_api" in data:
# "android_insecure_random": {
# "files": {
# "u/c/a/b/a/c.java": "9",
# "kotlinx/coroutines/repackaged/net/bytebuddy/utility/RandomString.java": "3",
# ...
# "hu/mycompany/vbnmqweq/gateway/msg/Response.java": "13"
# },
# "metadata": {
# "id": "android_insecure_random",
# "description": "The App uses an insecure Random Number Generator.",
# "type": "Regex",
# "pattern": "java\\.util\\.Random;",
# "severity": "high",
# "input_case": "exact",
# "cvss": 7.5,
# "cwe": "CWE-330 Use of Insufficiently Random Values",
# "owasp-mobile": "M5: Insufficient Cryptography",
# "masvs": "MSTG-CRYPTO-6"
# }
# },
for api, details in list(data["android_api"].items()):
mobsf_item = {
"category": "Android API",
"title": details["metadata"]["description"],
"severity": details["metadata"]["severity"].replace("warning", "low").title(),
"description": "**API:** " + api + "\n\n**Description:** " + details["metadata"]["description"],
"file_path": None
}
mobsf_findings.append(mobsf_item)
# Manifest
if "manifest" in data:
for details in data["manifest"]:
mobsf_item = {
"category": "Manifest",
"title": details["title"],
"severity": details["stat"],
"description": details["desc"],
"file_path": None
}
mobsf_findings.append(mobsf_item)
# MobSF Findings
if "findings" in data:
for title, finding in list(data["findings"].items()):
description = title
file_path = None
if "path" in finding:
description = description + "\n\n**Files:**\n"
for path in finding["path"]:
if file_path is None:
file_path = path
description = description + " * " + path + "\n"
mobsf_item = { | [
" \"category\": \"Findings\","
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
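The permission handling in get_findings above defers severity selection to a getSeverityForPermission helper that is not shown in this excerpt. A hypothetical stand-in, where both the status names and the mapping are assumptions rather than the project's actual table, could look like:

def get_severity_for_permission_status(status):
    # Assumed MobSF statuses: 'dangerous' permissions are flagged high,
    # everything else ('normal', 'signature', unknown) stays informational.
    return "High" if (status or "").lower() == "dangerous" else "Info"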
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for input pipeline modifications for distribution strategies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.distribute import input_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class AutoShardDatasetTest(test.TestCase):
def setUp(self):
super(AutoShardDatasetTest, self).setUp()
self._num_files = 10
self._num_records = 4
self._num_shards = 2
self._shard_index = 0
self._record_bytes = 10
def _record(self, r, f):
return compat.as_bytes("Record %d of file %d" % (r, f))
def _text_line(self, r, f):
return compat.as_bytes("Text line %d of file %d" % (r, f))
def _fixed_length_record(self, r, f):
return compat.as_bytes(str((r * f) % 10) * self._record_bytes)
def _createTFRecordFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
record = self._record(j, i)
writer.write(record)
writer.close()
return filenames
def _createTextFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
filenames.append(fn)
contents = []
for j in range(self._num_records):
contents.append(self._text_line(j, i))
if j + 1 != self._num_records or i == 0:
contents.append(b"\r\n")
contents = b"".join(contents)
with open(fn, "wb") as f:
f.write(contents)
return filenames
def _createFixedLengthRecordFiles(self): | [
" filenames = []"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
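The test class above is cut off at _createFixedLengthRecordFiles, but the two helpers before it show the file layout it relies on: each file holds _num_records records written back to back with no delimiters, every record exactly _record_bytes long. A self-contained sketch of writing that format (the function name and temp-dir handling are illustrative, not TensorFlow's code):

import os
import tempfile

def write_fixed_length_record_files(num_files, num_records, record_bytes, make_record):
    """Write num_records fixed-size records per file, concatenated without separators."""
    filenames = []
    tmpdir = tempfile.mkdtemp()
    for i in range(num_files):
        fn = os.path.join(tmpdir, "fixed_length_record.%d.txt" % i)
        filenames.append(fn)
        with open(fn, "wb") as f:
            for j in range(num_records):
                record = make_record(j, i)
                assert len(record) == record_bytes
                f.write(record)
    return filenames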
import sys
from django.contrib.contenttypes.generic import GenericForeignKey, GenericRelation
from django.core.management.color import color_style
from django.utils.itercompat import is_iterable
try:
any
except NameError:
from django.utils.itercompat import any
class ModelErrorCollection:
def __init__(self, outfile=sys.stdout):
self.errors = []
self.outfile = outfile
self.style = color_style()
def add(self, context, error):
self.errors.append((context, error))
self.outfile.write(self.style.ERROR("%s: %s\n" % (context, error)))
def get_validation_errors(outfile, app=None):
"""
Validates all models that are part of the specified app. If no app name is provided,
validates all models of all installed apps. Writes errors, if any, to outfile.
Returns number of errors.
"""
from django.conf import settings
from django.db import models, connection
from django.db.models.loading import get_app_errors
from django.db.models.fields.related import RelatedObject
from django.db.models.deletion import SET_NULL, SET_DEFAULT
e = ModelErrorCollection(outfile)
for (app_name, error) in get_app_errors().items():
e.add(app_name, error)
for cls in models.get_models(app):
opts = cls._meta
# Do field-specific validation. | [
" for f in opts.local_fields:"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the DynamicPartition op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class DynamicPartitionTest(test.TestCase):
def testSimpleOneDimensional(self):
with self.test_session(use_gpu=True) as sess:
data = constant_op.constant([0, 13, 2, 39, 4, 17], dtype=dtypes.float32)
indices = constant_op.constant([0, 0, 2, 3, 2, 1])
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = sess.run(partitions)
self.assertEqual(4, len(partition_vals))
self.assertAllEqual([0, 13], partition_vals[0])
self.assertAllEqual([17], partition_vals[1])
self.assertAllEqual([2, 4], partition_vals[2])
self.assertAllEqual([39], partition_vals[3])
# Vector data input to DynamicPartition results in
# `num_partitions` vectors of unknown length.
self.assertEqual([None], partitions[0].get_shape().as_list())
self.assertEqual([None], partitions[1].get_shape().as_list())
self.assertEqual([None], partitions[2].get_shape().as_list())
self.assertEqual([None], partitions[3].get_shape().as_list())
def testSimpleTwoDimensional(self):
with self.test_session(use_gpu=True) as sess:
data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
[12, 13, 14], [15, 16, 17]],
dtype=dtypes.float32)
indices = constant_op.constant([0, 0, 2, 3, 2, 1])
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = sess.run(partitions)
self.assertEqual(4, len(partition_vals))
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], partition_vals[0])
self.assertAllEqual([[15, 16, 17]], partition_vals[1])
self.assertAllEqual([[6, 7, 8], [12, 13, 14]], partition_vals[2])
self.assertAllEqual([[9, 10, 11]], partition_vals[3])
# Vector data input to DynamicPartition results in
# `num_partitions` matrices with an unknown number of rows, and 3 columns.
self.assertEqual([None, 3], partitions[0].get_shape().as_list())
self.assertEqual([None, 3], partitions[1].get_shape().as_list())
self.assertEqual([None, 3], partitions[2].get_shape().as_list())
self.assertEqual([None, 3], partitions[3].get_shape().as_list())
def testLargeOneDimensional(self):
num = 100000
data_list = [x for x in range(num)]
indices_list = [x % 2 for x in range(num)]
part1 = [x for x in range(num) if x % 2 == 0]
part2 = [x for x in range(num) if x % 2 == 1]
with self.test_session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=2)
partition_vals = sess.run(partitions)
self.assertEqual(2, len(partition_vals))
self.assertAllEqual(part1, partition_vals[0])
self.assertAllEqual(part2, partition_vals[1])
def testLargeTwoDimensional(self):
rows = 100000
cols = 100
data_list = [None] * rows
for i in range(rows):
data_list[i] = [i for _ in range(cols)]
num_partitions = 97
indices_list = [(i ** 2) % num_partitions for i in range(rows)]
parts = [[] for _ in range(num_partitions)]
for i in range(rows):
parts[(i ** 2) % num_partitions].append(data_list[i])
with self.test_session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=num_partitions)
partition_vals = sess.run(partitions)
self.assertEqual(num_partitions, len(partition_vals))
for i in range(num_partitions):
# reshape because of empty parts
parts_np = np.array(parts[i], dtype=np.float).reshape(-1, cols)
self.assertAllEqual(parts_np, partition_vals[i])
def testSimpleComplex(self):
data_list = [1 + 2j, 3 + 4j, 5 + 6j, 7 + 8j]
indices_list = [1, 0, 1, 0]
with self.test_session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.complex64)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=2)
partition_vals = sess.run(partitions)
self.assertEqual(2, len(partition_vals))
self.assertAllEqual([3 + 4j, 7 + 8j], partition_vals[0])
self.assertAllEqual([1 + 2j, 5 + 6j], partition_vals[1])
def testScalarPartitions(self):
data_list = [10, 13, 12, 11]
with self.test_session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float64)
indices = 3
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = sess.run(partitions)
self.assertEqual(4, len(partition_vals))
self.assertAllEqual(np.array([], dtype=np.float64).reshape(-1, 4),
partition_vals[0])
self.assertAllEqual(np.array([], dtype=np.float64).reshape(-1, 4),
partition_vals[1])
self.assertAllEqual(np.array([], dtype=np.float64).reshape(-1, 4),
partition_vals[2])
self.assertAllEqual(np.array([10, 13, 12, 11],
dtype=np.float64).reshape(-1, 4),
partition_vals[3])
def testHigherRank(self):
np.random.seed(7)
with self.test_session(use_gpu=True) as sess:
for n in 2, 3:
for shape in (4,), (4, 5), (4, 5, 2):
partitions = np.random.randint(n, size=np.prod(shape)).reshape(shape)
for extra_shape in (), (6,), (6, 7):
data = np.random.randn(*(shape + extra_shape))
partitions_t = constant_op.constant(partitions, dtype=dtypes.int32)
data_t = constant_op.constant(data)
outputs = data_flow_ops.dynamic_partition(
data_t, partitions_t, num_partitions=n)
self.assertEqual(n, len(outputs))
outputs_val = sess.run(outputs)
for i, output in enumerate(outputs_val):
self.assertAllEqual(output, data[partitions == i])
# Test gradients
outputs_grad = [7 * output for output in outputs_val]
grads = gradients_impl.gradients(outputs, [data_t, partitions_t],
outputs_grad)
self.assertEqual(grads[1], None) # Partitions has no gradients
self.assertAllEqual(7 * data, sess.run(grads[0]))
def testEmptyParts(self):
data_list = [1, 2, 3, 4]
indices_list = [1, 3, 1, 3]
with self.test_session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=4)
partition_vals = sess.run(partitions)
self.assertEqual(4, len(partition_vals))
self.assertAllEqual([], partition_vals[0])
self.assertAllEqual([1, 3], partition_vals[1])
self.assertAllEqual([], partition_vals[2])
self.assertAllEqual([2, 4], partition_vals[3])
def testEmptyDataTwoDimensional(self):
data_list = [[], []]
indices_list = [0, 1]
with self.test_session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=3)
partition_vals = sess.run(partitions)
self.assertEqual(3, len(partition_vals))
self.assertAllEqual([[]], partition_vals[0])
self.assertAllEqual([[]], partition_vals[1])
self.assertAllEqual(np.array([], dtype=np.float).reshape(0, 0),
partition_vals[2])
def testEmptyPartitions(self):
data_list = []
indices_list = []
with self.test_session(use_gpu=True) as sess:
data = constant_op.constant(data_list, dtype=dtypes.float32)
indices = constant_op.constant(indices_list, dtype=dtypes.int32)
partitions = data_flow_ops.dynamic_partition(
data, indices, num_partitions=2)
partition_vals = sess.run(partitions)
self.assertEqual(2, len(partition_vals))
self.assertAllEqual([], partition_vals[0]) | [
" self.assertAllEqual([], partition_vals[1])"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
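The behaviour exercised by the tests above can be restated in plain NumPy: element (or row) k of the input ends up, in its original order, in output partitions[k]. A small reference sketch (illustrative only):

import numpy as np

def dynamic_partition_reference(data, partitions, num_partitions):
    data = np.asarray(data)
    partitions = np.asarray(partitions)
    # Output i collects, in order, the entries whose partition index equals i.
    return [data[partitions == i] for i in range(num_partitions)]

# Mirrors testSimpleOneDimensional above.
parts = dynamic_partition_reference([0, 13, 2, 39, 4, 17], [0, 0, 2, 3, 2, 1], 4)
assert parts[0].tolist() == [0, 13] and parts[3].tolist() == [39]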
from __future__ import unicode_literals
from django.contrib.admin.util import flatten_fieldsets
from django.db import models
from django import forms
from django.forms.models import modelform_factory
from django.utils.cache import add_never_cache_headers
from django.utils.translation import ugettext as _, string_concat
from django.views import generic
import floppyforms
from djam.widgets import AddWrapper
class RiffViewMixin(object):
riff = None
cacheable = False
def dispatch(self, request, *args, **kwargs):
if not self.has_permission(request):
return self.get_unauthorized_response(request)
response = super(RiffViewMixin, self).dispatch(request, *args, **kwargs)
if not self.cacheable:
add_never_cache_headers(response)
return response
def has_permission(self, request):
return self.riff.has_permission(request)
def get_unauthorized_response(self, request):
return self.riff.get_unauthorized_response(request)
def get_crumbs(self):
"""
Returns a list of breadcrumbs - (url, name) tuples.
"""
return [(r.get_default_url, r.display_name) for r in self.riff.path]
def get_context_data(self, **kwargs):
context = super(RiffViewMixin, self).get_context_data(**kwargs)
context.update({
'base_riff': self.riff.base_riff,
'riff': self.riff,
'crumbs': self.get_crumbs(),
})
return context
class FloppyformsMixin(object):
fieldsets = None
readonly = ()
def get_context_data(self, **kwargs):
context = super(FloppyformsMixin, self).get_context_data(**kwargs)
fieldsets = (self.fieldsets or
((None, {'fields': list(context['form'].fields)}),))
context.update({
'fieldsets': fieldsets,
'readonly': self.readonly,
})
return context
class ModelFloppyformsMixin(FloppyformsMixin):
def _rebuild_kwargs(self, field, **kwargs):
"""
Returns a tuple of (rebuild, kwargs), where rebuild is a boolean
indicating whether the kwargs should be used to construct a new
field instance.
"""
rebuild = False
# Swap in split datetime.
if isinstance(field, forms.DateTimeField):
kwargs['form_class'] = floppyforms.SplitDateTimeField
kwargs['widget'] = floppyforms.SplitDateTimeWidget
rebuild = True
# Swap in floppyforms fields.
mod, cls_name = field.__module__, field.__class__.__name__
if (mod in ('django.forms.fields', 'django.forms.models') and
'form_class' not in kwargs):
kwargs['form_class'] = getattr(floppyforms, cls_name)
rebuild = True
# Swap in floppyforms widgets.
widget = field.widget
mod, cls_name = widget.__module__, widget.__class__.__name__
if mod == 'django.forms.widgets' and 'widget' not in kwargs:
kwargs['widget'] = getattr(floppyforms, cls_name)
rebuild = True
return rebuild, kwargs
def _post_formfield(self, field, db_field):
field.widget.attrs['data-required'] = int(field.required)
if issubclass(db_field.__class__, models.ManyToManyField):
msg = _('Hold down "Control", or "Command" on a Mac, to select '
'more than one.')
msg = unicode(string_concat(' ', msg))
if field.help_text.endswith(msg):
field.help_text = field.help_text[:-len(msg)]
if (isinstance(field, forms.ChoiceField) and
hasattr(field, 'queryset')):
model = field.queryset.model
if isinstance(field, forms.MultipleChoiceField):
msg = string_concat(_("Choose some "),
model._meta.verbose_name_plural,
"...")
else:
msg = string_concat(_("Choose a "),
model._meta.verbose_name,
"...")
field.widget.attrs['data-placeholder'] = msg
for riff in self.riff.base_riff.riffs:
if getattr(riff, 'model', None) == model:
if riff.has_add_permission(self.request):
field.widget = AddWrapper(field.widget, riff)
break
return field
def formfield_callback(self, db_field, **kwargs):
field = db_field.formfield(**kwargs)
# db_field.formfield can return None to signal that the field should
# be ignored.
if field is None:
return None
rebuild, kwargs = self._rebuild_kwargs(field, **kwargs)
if rebuild:
field = db_field.formfield(**kwargs)
return self._post_formfield(field, db_field)
def _get_form_fields(self, form_class, fieldsets=None):
fields = list(form_class._meta.fields or [])
if fieldsets:
fields += flatten_fieldsets(fieldsets)
return fields or None
def _get_form_exclude(self, form_class, readonly=None):
exclude = list(form_class._meta.exclude or [])
if readonly:
exclude += list(readonly)
return exclude or None
def get_form_class(self):
if self.form_class:
form_class = self.form_class
else:
form_class = floppyforms.ModelForm
if self.model is not None:
model = self.model
elif hasattr(self, 'object') and self.object is not None:
model = self.object.__class__
else:
model = self.get_queryset().model
fields = self._get_form_fields(form_class, self.fieldsets)
exclude = self._get_form_exclude(form_class, self.readonly)
return modelform_factory(model,
form=form_class,
exclude=exclude,
fields=fields,
formfield_callback=self.formfield_callback)
class View(RiffViewMixin, generic.View):
pass
class TemplateView(RiffViewMixin, generic.TemplateView):
pass
class RedirectView(RiffViewMixin, generic.RedirectView):
pass
class ArchiveIndexView(RiffViewMixin, generic.ArchiveIndexView):
pass
class YearArchiveView(RiffViewMixin, generic.YearArchiveView):
pass
class MonthArchiveView(RiffViewMixin, generic.MonthArchiveView):
pass
class WeekArchiveView(RiffViewMixin, generic.WeekArchiveView):
pass
class DayArchiveView(RiffViewMixin, generic.DayArchiveView):
pass
| [
"class TodayArchiveView(RiffViewMixin, generic.TodayArchiveView):"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
from __future__ import division, absolute_import, print_function,\
unicode_literals
import ctypes
import platform
import sys
if sys.platform in ('win32', 'cygwin'):
_functype = ctypes.WINFUNCTYPE
_lib = ctypes.windll.nanomsg
elif sys.platform == 'darwin':
_functype = ctypes.CFUNCTYPE
_lib = ctypes.cdll.LoadLibrary('libnanomsg.dylib')
else:
_functype = ctypes.CFUNCTYPE
_lib = ctypes.cdll.LoadLibrary('libnanomsg.so')
def _c_func_wrapper_factory(cdecl_text):
def move_pointer_and_strip(type_def, name):
if '*' in name:
type_def += ' ' + name[:name.rindex('*')+1]
name = name.rsplit('*', 1)[1]
return type_def.strip(), name.strip()
def type_lookup(type_def):
types = {
'void': None,
'char *': ctypes.c_char_p,
'const char *': ctypes.c_char_p,
'int': ctypes.c_int,
'int *': ctypes.POINTER(ctypes.c_int),
'void *': ctypes.c_void_p,
'size_t': ctypes.c_size_t,
'size_t *': ctypes.POINTER(ctypes.c_size_t),
'struct nn_msghdr *': ctypes.c_void_p,
'struct nn_pollfd *': ctypes.c_void_p,
}
type_def_without_const = type_def.replace('const ','')
if type_def_without_const in types:
return types[type_def_without_const]
elif (type_def_without_const.endswith('*') and
type_def_without_const[:-1] in types): | [
" return ctypes.POINTER(types[type_def_without_const[:-1]])"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
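Once the factory above has produced return and argument types, binding a symbol follows the standard ctypes prototype pattern. A minimal sketch for one nanomsg function, const char *nn_strerror(int errnum), assuming libnanomsg is installed (the explicit Linux-style load mirrors the module's own loading logic):

import ctypes

_nanomsg = ctypes.cdll.LoadLibrary('libnanomsg.so')
# Calling a CFUNCTYPE prototype with a (symbol_name, library) tuple returns the
# bound foreign function.
nn_strerror = ctypes.CFUNCTYPE(ctypes.c_char_p, ctypes.c_int)(('nn_strerror', _nanomsg))
error_text = nn_strerror(1)  # bytes describing errno 1, e.g. b'Operation not permitted'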
# -*- coding: utf-8 -*-
# This file is part of MediaFile.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Automatically-generated blanket testing for the MediaFile metadata
layer.
"""
from __future__ import division, absolute_import, print_function
import os
import shutil
import datetime
import time
import unittest
from six import assertCountEqual
from test import _common
from mediafile import MediaFile, Image, \
ImageType, CoverArtField, UnreadableFileError
import mutagen
class ArtTestMixin(object):
"""Test reads and writes of the ``art`` property.
"""
@property
def png_data(self):
if not self._png_data:
image_file = os.path.join(_common.RSRC, b'image-2x3.png')
with open(image_file, 'rb') as f:
self._png_data = f.read()
return self._png_data
_png_data = None
@property
def jpg_data(self):
if not self._jpg_data:
image_file = os.path.join(_common.RSRC, b'image-2x3.jpg')
with open(image_file, 'rb') as f:
self._jpg_data = f.read()
return self._jpg_data
_jpg_data = None
@property
def tiff_data(self):
        if not self._tiff_data:
            image_file = os.path.join(_common.RSRC, b'image-2x3.tiff')
            with open(image_file, 'rb') as f:
                self._tiff_data = f.read()
        return self._tiff_data
    _tiff_data = None
def test_set_png_art(self):
mediafile = self._mediafile_fixture('empty')
mediafile.art = self.png_data
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(mediafile.art, self.png_data)
def test_set_jpg_art(self):
mediafile = self._mediafile_fixture('empty')
mediafile.art = self.jpg_data
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(mediafile.art, self.jpg_data)
def test_delete_art(self):
mediafile = self._mediafile_fixture('empty')
mediafile.art = self.jpg_data
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertIsNotNone(mediafile.art)
del mediafile.art
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertIsNone(mediafile.art)
class ImageStructureTestMixin(ArtTestMixin):
"""Test reading and writing multiple image tags.
The tests use the `image` media file fixture. The tags of these files
    include two images, one in the PNG format, the other in JPEG format. If
the tag format supports it they also include additional metadata.
"""
def test_read_image_structures(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
image = next(i for i in mediafile.images
if i.mime_type == 'image/png')
self.assertEqual(image.data, self.png_data)
self.assertExtendedImageAttributes(image, desc=u'album cover',
type=ImageType.front)
image = next(i for i in mediafile.images
if i.mime_type == 'image/jpeg')
self.assertEqual(image.data, self.jpg_data)
self.assertExtendedImageAttributes(image, desc=u'the artist',
type=ImageType.artist)
def test_set_image_structure(self):
mediafile = self._mediafile_fixture('empty')
image = Image(data=self.png_data, desc=u'album cover',
type=ImageType.front)
mediafile.images = [image]
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(len(mediafile.images), 1)
image = mediafile.images[0]
self.assertEqual(image.data, self.png_data)
self.assertEqual(image.mime_type, 'image/png')
self.assertExtendedImageAttributes(image, desc=u'album cover',
type=ImageType.front)
def test_add_image_structure(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
image = Image(data=self.png_data, desc=u'the composer',
type=ImageType.composer)
mediafile.images += [image]
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(len(mediafile.images), 3)
images = (i for i in mediafile.images if i.desc == u'the composer')
image = next(images, None)
self.assertExtendedImageAttributes(
image, desc=u'the composer', type=ImageType.composer
)
def test_delete_image_structures(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
del mediafile.images
mediafile.save()
mediafile = MediaFile(mediafile.filename)
self.assertEqual(len(mediafile.images), 0)
def test_guess_cover(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
cover = CoverArtField.guess_cover_image(mediafile.images)
self.assertEqual(cover.desc, u'album cover')
self.assertEqual(mediafile.art, cover.data)
def assertExtendedImageAttributes(self, image, **kwargs): # noqa
"""Ignore extended image attributes in the base tests.
"""
pass
class ExtendedImageStructureTestMixin(ImageStructureTestMixin):
"""Checks for additional attributes in the image structure.
Like the base `ImageStructureTestMixin`, per-format test classes
should include this mixin to add image-related tests.
"""
def assertExtendedImageAttributes(self, image, desc=None, type=None): # noqa
self.assertEqual(image.desc, desc)
self.assertEqual(image.type, type)
def test_add_tiff_image(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
image = Image(data=self.tiff_data, desc=u'the composer',
type=ImageType.composer)
mediafile.images += [image]
mediafile.save()
mediafile = MediaFile(mediafile.filename) | [
" self.assertEqual(len(mediafile.images), 3)"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
import numpy as np
from numpy.core._rational_tests import rational
from numpy.testing import (
assert_equal, assert_array_equal, assert_raises, assert_,
assert_raises_regex, assert_warns,
)
from numpy.lib.stride_tricks import (
as_strided, broadcast_arrays, _broadcast_shape, broadcast_to,
broadcast_shapes, sliding_window_view,
)
import pytest
def assert_shapes_correct(input_shapes, expected_shape):
# Broadcast a list of arrays with the given input shapes and check the
# common output shape.
inarrays = [np.zeros(s) for s in input_shapes]
outarrays = broadcast_arrays(*inarrays)
outshapes = [a.shape for a in outarrays]
expected = [expected_shape] * len(inarrays)
assert_equal(outshapes, expected)
def assert_incompatible_shapes_raise(input_shapes):
# Broadcast a list of arrays with the given (incompatible) input shapes
# and check that they raise a ValueError.
inarrays = [np.zeros(s) for s in input_shapes]
assert_raises(ValueError, broadcast_arrays, *inarrays)
def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False):
# Broadcast two shapes against each other and check that the data layout
# is the same as if a ufunc did the broadcasting.
x0 = np.zeros(shape0, dtype=int)
# Note that multiply.reduce's identity element is 1.0, so when shape1==(),
# this gives the desired n==1.
n = int(np.multiply.reduce(shape1))
x1 = np.arange(n).reshape(shape1)
if transposed:
x0 = x0.T
x1 = x1.T
if flipped:
x0 = x0[::-1]
x1 = x1[::-1]
# Use the add ufunc to do the broadcasting. Since we're adding 0s to x1, the
# result should be exactly the same as the broadcasted view of x1.
y = x0 + x1
b0, b1 = broadcast_arrays(x0, x1)
assert_array_equal(y, b1)
def test_same():
x = np.arange(10)
y = np.arange(10)
bx, by = broadcast_arrays(x, y)
assert_array_equal(x, bx)
assert_array_equal(y, by)
def test_broadcast_kwargs():
# ensure that a TypeError is appropriately raised when
# np.broadcast_arrays() is called with any keyword
# argument other than 'subok'
x = np.arange(10)
y = np.arange(10)
with assert_raises_regex(TypeError, 'got an unexpected keyword'):
broadcast_arrays(x, y, dtype='float64')
def test_one_off():
x = np.array([[1, 2, 3]])
y = np.array([[1], [2], [3]])
bx, by = broadcast_arrays(x, y)
bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
by0 = bx0.T
assert_array_equal(bx0, bx)
assert_array_equal(by0, by)
def test_same_input_shapes():
# Check that the final shape is just the input shape.
data = [
(),
(1,),
(3,),
(0, 1),
(0, 3),
(1, 0),
(3, 0),
(1, 3),
(3, 1),
(3, 3),
]
for shape in data:
input_shapes = [shape]
# Single input.
assert_shapes_correct(input_shapes, shape)
# Double input.
input_shapes2 = [shape, shape]
assert_shapes_correct(input_shapes2, shape)
# Triple input.
input_shapes3 = [shape, shape, shape]
assert_shapes_correct(input_shapes3, shape)
def test_two_compatible_by_ones_input_shapes():
# Check that two different input shapes of the same length, but some have
# ones, broadcast to the correct shape.
data = [
[[(1,), (3,)], (3,)],
[[(1, 3), (3, 3)], (3, 3)],
[[(3, 1), (3, 3)], (3, 3)],
[[(1, 3), (3, 1)], (3, 3)],
[[(1, 1), (3, 3)], (3, 3)],
[[(1, 1), (1, 3)], (1, 3)],
[[(1, 1), (3, 1)], (3, 1)],
[[(1, 0), (0, 0)], (0, 0)],
[[(0, 1), (0, 0)], (0, 0)],
[[(1, 0), (0, 1)], (0, 0)],
[[(1, 1), (0, 0)], (0, 0)],
[[(1, 1), (1, 0)], (1, 0)],
[[(1, 1), (0, 1)], (0, 1)],
]
for input_shapes, expected_shape in data:
assert_shapes_correct(input_shapes, expected_shape)
# Reverse the input shapes since broadcasting should be symmetric.
assert_shapes_correct(input_shapes[::-1], expected_shape)
def test_two_compatible_by_prepending_ones_input_shapes():
# Check that two different input shapes (of different lengths) broadcast
# to the correct shape.
data = [
[[(), (3,)], (3,)],
[[(3,), (3, 3)], (3, 3)],
[[(3,), (3, 1)], (3, 3)],
[[(1,), (3, 3)], (3, 3)],
[[(), (3, 3)], (3, 3)],
[[(1, 1), (3,)], (1, 3)],
[[(1,), (3, 1)], (3, 1)],
[[(1,), (1, 3)], (1, 3)],
[[(), (1, 3)], (1, 3)],
[[(), (3, 1)], (3, 1)],
[[(), (0,)], (0,)],
[[(0,), (0, 0)], (0, 0)],
[[(0,), (0, 1)], (0, 0)],
[[(1,), (0, 0)], (0, 0)],
[[(), (0, 0)], (0, 0)],
[[(1, 1), (0,)], (1, 0)],
[[(1,), (0, 1)], (0, 1)],
[[(1,), (1, 0)], (1, 0)],
[[(), (1, 0)], (1, 0)],
[[(), (0, 1)], (0, 1)],
]
for input_shapes, expected_shape in data:
assert_shapes_correct(input_shapes, expected_shape)
# Reverse the input shapes since broadcasting should be symmetric.
assert_shapes_correct(input_shapes[::-1], expected_shape)
def test_incompatible_shapes_raise_valueerror():
# Check that a ValueError is raised for incompatible shapes.
data = [
[(3,), (4,)],
[(2, 3), (2,)],
[(3,), (3,), (4,)],
[(1, 3, 4), (2, 3, 3)],
]
for input_shapes in data:
assert_incompatible_shapes_raise(input_shapes)
# Reverse the input shapes since broadcasting should be symmetric.
assert_incompatible_shapes_raise(input_shapes[::-1])
def test_same_as_ufunc():
# Check that the data layout is the same as if a ufunc did the operation.
data = [
[[(1,), (3,)], (3,)],
[[(1, 3), (3, 3)], (3, 3)],
[[(3, 1), (3, 3)], (3, 3)],
[[(1, 3), (3, 1)], (3, 3)],
[[(1, 1), (3, 3)], (3, 3)],
[[(1, 1), (1, 3)], (1, 3)],
[[(1, 1), (3, 1)], (3, 1)],
[[(1, 0), (0, 0)], (0, 0)],
[[(0, 1), (0, 0)], (0, 0)],
[[(1, 0), (0, 1)], (0, 0)],
[[(1, 1), (0, 0)], (0, 0)],
[[(1, 1), (1, 0)], (1, 0)],
[[(1, 1), (0, 1)], (0, 1)],
[[(), (3,)], (3,)],
[[(3,), (3, 3)], (3, 3)],
[[(3,), (3, 1)], (3, 3)],
[[(1,), (3, 3)], (3, 3)],
[[(), (3, 3)], (3, 3)],
[[(1, 1), (3,)], (1, 3)],
[[(1,), (3, 1)], (3, 1)],
[[(1,), (1, 3)], (1, 3)],
[[(), (1, 3)], (1, 3)],
[[(), (3, 1)], (3, 1)],
[[(), (0,)], (0,)],
[[(0,), (0, 0)], (0, 0)],
[[(0,), (0, 1)], (0, 0)],
[[(1,), (0, 0)], (0, 0)],
[[(), (0, 0)], (0, 0)],
[[(1, 1), (0,)], (1, 0)],
[[(1,), (0, 1)], (0, 1)],
[[(1,), (1, 0)], (1, 0)],
[[(), (1, 0)], (1, 0)],
[[(), (0, 1)], (0, 1)],
]
for input_shapes, expected_shape in data:
        assert_same_as_ufunc(input_shapes[0], input_shapes[1])
# Reverse the input shapes since broadcasting should be symmetric.
assert_same_as_ufunc(input_shapes[1], input_shapes[0])
# Try them transposed, too.
assert_same_as_ufunc(input_shapes[0], input_shapes[1], True)
# ... and flipped for non-rank-0 inputs in order to test negative
# strides.
if () not in input_shapes:
assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True)
assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True)
def test_broadcast_to_succeeds():
data = [
[np.array(0), (0,), np.array(0)],
[np.array(0), (1,), np.zeros(1)],
[np.array(0), (3,), np.zeros(3)],
[np.ones(1), (1,), np.ones(1)],
[np.ones(1), (2,), np.ones(2)],
[np.ones(1), (1, 2, 3), np.ones((1, 2, 3))],
[np.arange(3), (3,), np.arange(3)],
[np.arange(3), (1, 3), np.arange(3).reshape(1, -1)],
[np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])],
# test if shape is not a tuple
[np.ones(0), 0, np.ones(0)],
[np.ones(1), 1, np.ones(1)],
[np.ones(1), 2, np.ones(2)],
# these cases with size 0 are strange, but they reproduce the behavior
# of broadcasting with ufuncs (see test_same_as_ufunc above)
[np.ones(1), (0,), np.ones(0)],
[np.ones((1, 2)), (0, 2), np.ones((0, 2))],
[np.ones((2, 1)), (2, 0), np.ones((2, 0))],
]
for input_array, shape, expected in data:
actual = broadcast_to(input_array, shape)
assert_array_equal(expected, actual)
def test_broadcast_to_raises():
data = [
[(0,), ()],
[(1,), ()],
[(3,), ()],
[(3,), (1,)],
[(3,), (2,)],
[(3,), (4,)],
[(1, 2), (2, 1)],
[(1, 1), (1,)],
[(1,), -1],
[(1,), (-1,)],
[(1, 2), (-1, 2)],
]
for orig_shape, target_shape in data:
arr = np.zeros(orig_shape)
assert_raises(ValueError, lambda: broadcast_to(arr, target_shape))
def test_broadcast_shape():
# tests internal _broadcast_shape
# _broadcast_shape is already exercised indirectly by broadcast_arrays
# _broadcast_shape is also exercised by the public broadcast_shapes function
assert_equal(_broadcast_shape(), ())
assert_equal(_broadcast_shape([1, 2]), (2,))
assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1))
assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4))
assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2))
assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2))
# regression tests for gh-5862
assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,))
bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32
assert_raises(ValueError, lambda: _broadcast_shape(*bad_args))
def test_broadcast_shapes_succeeds():
# tests public broadcast_shapes
data = [
[[], ()],
[[()], ()],
[[(7,)], (7,)],
[[(1, 2), (2,)], (1, 2)],
[[(1, 1)], (1, 1)],
[[(1, 1), (3, 4)], (3, 4)],
[[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)],
[[(5, 6, 1)], (5, 6, 1)],
[[(1, 3), (3, 1)], (3, 3)],
[[(1, 0), (0, 0)], (0, 0)],
[[(0, 1), (0, 0)], (0, 0)],
[[(1, 0), (0, 1)], (0, 0)],
[[(1, 1), (0, 0)], (0, 0)],
[[(1, 1), (1, 0)], (1, 0)],
[[(1, 1), (0, 1)], (0, 1)],
[[(), (0,)], (0,)],
[[(0,), (0, 0)], (0, 0)],
[[(0,), (0, 1)], (0, 0)],
[[(1,), (0, 0)], (0, 0)],
[[(), (0, 0)], (0, 0)],
[[(1, 1), (0,)], (1, 0)],
[[(1,), (0, 1)], (0, 1)],
[[(1,), (1, 0)], (1, 0)],
[[(), (1, 0)], (1, 0)],
[[(), (0, 1)], (0, 1)],
[[(1,), (3,)], (3,)],
[[2, (3, 2)], (3, 2)],
]
for input_shapes, target_shape in data:
assert_equal(broadcast_shapes(*input_shapes), target_shape)
assert_equal(broadcast_shapes(*([(1, 2)] * 32)), (1, 2))
assert_equal(broadcast_shapes(*([(1, 2)] * 100)), (1, 2))
# regression tests for gh-5862
assert_equal(broadcast_shapes(*([(2,)] * 32)), (2,))
def test_broadcast_shapes_raises():
# tests public broadcast_shapes
data = [
[(3,), (4,)],
[(2, 3), (2,)],
[(3,), (3,), (4,)],
[(1, 3, 4), (2, 3, 3)],
[(1, 2), (3,1), (3,2), (10, 5)],
[2, (2, 3)],
]
for input_shapes in data:
assert_raises(ValueError, lambda: broadcast_shapes(*input_shapes))
bad_args = [(2,)] * 32 + [(3,)] * 32
assert_raises(ValueError, lambda: broadcast_shapes(*bad_args))
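# A pure-Python restatement of the rule the broadcasting tests above exercise,
# as a minimal sketch only (not part of NumPy): shapes are aligned on the right,
# every dimension pair must be equal or contain a 1, and the result keeps the
# larger of the two.
def _broadcast_shape_reference(*shapes):
    import itertools
    result = []
    for dims in itertools.zip_longest(*[reversed(s) for s in shapes], fillvalue=1):
        non_ones = {d for d in dims if d != 1}
        if len(non_ones) > 1:
            raise ValueError("shapes %r are not broadcastable" % (shapes,))
        result.append(non_ones.pop() if non_ones else 1)
    return tuple(reversed(result))

assert _broadcast_shape_reference((1, 3), (3, 1)) == (3, 3)
assert _broadcast_shape_reference((6, 7), (5, 6, 1), (7,), (5, 1, 7)) == (5, 6, 7)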
def test_as_strided():
a = np.array([None])
a_view = as_strided(a)
expected = np.array([None])
assert_array_equal(a_view, np.array([None]))
a = np.array([1, 2, 3, 4])
a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
expected = np.array([1, 3])
assert_array_equal(a_view, expected)
a = np.array([1, 2, 3, 4])
a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize))
expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
assert_array_equal(a_view, expected)
# Regression test for gh-5081
dt = np.dtype([('num', 'i4'), ('obj', 'O')])
a = np.empty((4,), dtype=dt)
a['num'] = np.arange(1, 5)
a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
expected_num = [[1, 2, 3, 4]] * 3
expected_obj = [[None]*4]*3
assert_equal(a_view.dtype, dt)
assert_array_equal(expected_num, a_view['num'])
assert_array_equal(expected_obj, a_view['obj'])
# Make sure that void types without fields are kept unchanged
a = np.empty((4,), dtype='V4')
a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
assert_equal(a.dtype, a_view.dtype)
# Make sure that the only type that could fail is properly handled
dt = np.dtype({'names': [''], 'formats': ['V4']})
a = np.empty((4,), dtype=dt)
a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
assert_equal(a.dtype, a_view.dtype)
# Custom dtypes should not be lost (gh-9161)
r = [rational(i) for i in range(4)]
a = np.array(r, dtype=rational)
a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
assert_equal(a.dtype, a_view.dtype)
assert_array_equal([r] * 3, a_view)
class TestSlidingWindowView:
def test_1d(self):
arr = np.arange(5)
arr_view = sliding_window_view(arr, 2)
expected = np.array([[0, 1],
[1, 2],
[2, 3],
[3, 4]])
assert_array_equal(arr_view, expected)
def test_2d(self):
i, j = np.ogrid[:3, :4]
arr = 10*i + j
shape = (2, 2)
arr_view = sliding_window_view(arr, shape)
expected = np.array([[[[0, 1], [10, 11]],
[[1, 2], [11, 12]],
[[2, 3], [12, 13]]],
[[[10, 11], [20, 21]],
[[11, 12], [21, 22]],
[[12, 13], [22, 23]]]])
assert_array_equal(arr_view, expected)
def test_2d_with_axis(self):
i, j = np.ogrid[:3, :4]
arr = 10*i + j
arr_view = sliding_window_view(arr, 3, 0)
expected = np.array([[[0, 10, 20],
[1, 11, 21],
[2, 12, 22],
[3, 13, 23]]])
assert_array_equal(arr_view, expected)
def test_2d_repeated_axis(self):
i, j = np.ogrid[:3, :4]
arr = 10*i + j
arr_view = sliding_window_view(arr, (2, 3), (1, 1))
expected = np.array([[[[0, 1, 2],
[1, 2, 3]]],
[[[10, 11, 12],
[11, 12, 13]]],
[[[20, 21, 22],
[21, 22, 23]]]])
assert_array_equal(arr_view, expected)
def test_2d_without_axis(self):
i, j = np.ogrid[:4, :4]
arr = 10*i + j
shape = (2, 3)
arr_view = sliding_window_view(arr, shape)
expected = np.array([[[[0, 1, 2], [10, 11, 12]],
[[1, 2, 3], [11, 12, 13]]],
[[[10, 11, 12], [20, 21, 22]],
[[11, 12, 13], [21, 22, 23]]],
[[[20, 21, 22], [30, 31, 32]],
[[21, 22, 23], [31, 32, 33]]]])
assert_array_equal(arr_view, expected)
def test_errors(self):
i, j = np.ogrid[:4, :4]
arr = 10*i + j
with pytest.raises(ValueError, match='cannot contain negative values'):
sliding_window_view(arr, (-1, 3))
with pytest.raises(
ValueError,
match='must provide window_shape for all dimensions of `x`'):
sliding_window_view(arr, (1,))
with pytest.raises(
ValueError,
match='Must provide matching length window_shape and axis'):
sliding_window_view(arr, (1, 3, 4), axis=(0, 1))
with pytest.raises(
ValueError,
match='window shape cannot be larger than input array'):
sliding_window_view(arr, (5, 5))
def test_writeable(self):
arr = np.arange(5)
view = sliding_window_view(arr, 2, writeable=False)
assert_(not view.flags.writeable)
with pytest.raises(
ValueError,
match='assignment destination is read-only'):
view[0, 0] = 3
view = sliding_window_view(arr, 2, writeable=True)
assert_(view.flags.writeable)
view[0, 1] = 3
assert_array_equal(arr, np.array([0, 3, 2, 3, 4]))
def test_subok(self):
class MyArray(np.ndarray):
pass
arr = np.arange(5).view(MyArray)
assert_(not isinstance(sliding_window_view(arr, 2,
subok=False),
MyArray))
assert_(isinstance(sliding_window_view(arr, 2, subok=True), MyArray))
# Default behavior
assert_(not isinstance(sliding_window_view(arr, 2), MyArray))
def as_strided_writeable():
arr = np.ones(10)
view = as_strided(arr, writeable=False)
assert_(not view.flags.writeable)
# Check that writeable also is fine:
view = as_strided(arr, writeable=True)
assert_(view.flags.writeable)
view[...] = 3
assert_array_equal(arr, np.full_like(arr, 3))
# Test that things do not break down for readonly:
arr.flags.writeable = False
view = as_strided(arr, writeable=False)
view = as_strided(arr, writeable=True)
assert_(not view.flags.writeable)
class VerySimpleSubClass(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, subok=True, **kwargs).view(cls)
class SimpleSubClass(VerySimpleSubClass):
def __new__(cls, *args, **kwargs):
self = np.array(*args, subok=True, **kwargs).view(cls) | [
" self.info = 'simple'"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import logging
from django.conf import settings
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from cinderclient.v2.contrib import list_extensions as cinder_list_extensions
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import nova
LOG = logging.getLogger(__name__)
# API static values
VOLUME_STATE_AVAILABLE = "available"
DEFAULT_QUOTA_NAME = 'default'
# Available consumer choices associated with QOS Specs
CONSUMER_CHOICES = (
('back-end', _('back-end')),
('front-end', _('front-end')),
('both', pgettext_lazy('Both of front-end and back-end', u'both')),
)
VERSIONS = base.APIVersionManager("volume", preferred_version=2)
try:
from cinderclient.v2 import client as cinder_client_v2
VERSIONS.load_supported_version(2, {"client": cinder_client_v2,
"version": 2})
except ImportError:
pass
class BaseCinderAPIResourceWrapper(base.APIResourceWrapper):
@property
def name(self):
# If a volume doesn't have a name, use its id.
return (getattr(self._apiresource, 'name', None) or
getattr(self._apiresource, 'display_name', None) or
getattr(self._apiresource, 'id', None))
@property
def description(self):
return (getattr(self._apiresource, 'description', None) or
getattr(self._apiresource, 'display_description', None))
class Volume(BaseCinderAPIResourceWrapper):
_attrs = ['id', 'name', 'description', 'size', 'status', 'created_at',
'volume_type', 'availability_zone', 'imageRef', 'bootable',
'snapshot_id', 'source_volid', 'attachments', 'tenant_name',
'os-vol-host-attr:host', 'os-vol-tenant-attr:tenant_id',
'metadata', 'volume_image_metadata', 'encrypted', 'transfer']
@property
def is_bootable(self):
return self.bootable == 'true'
class VolumeSnapshot(BaseCinderAPIResourceWrapper):
_attrs = ['id', 'name', 'description', 'size', 'status',
'created_at', 'volume_id',
'os-extended-snapshot-attributes:project_id']
class VolumeType(BaseCinderAPIResourceWrapper):
_attrs = ['id', 'name', 'extra_specs', 'created_at',
'os-extended-snapshot-attributes:project_id']
class VolumeBackup(BaseCinderAPIResourceWrapper):
_attrs = ['id', 'name', 'description', 'container', 'size', 'status',
'created_at', 'volume_id', 'availability_zone']
_volume = None
@property
def volume(self):
return self._volume
@volume.setter
def volume(self, value):
self._volume = value
class VolTypeExtraSpec(object):
def __init__(self, type_id, key, val):
self.type_id = type_id
self.id = key
self.key = key
self.value = val
class QosSpec(object):
def __init__(self, id, key, val):
self.id = id
self.key = key
self.value = val
class VolumeTransfer(base.APIResourceWrapper):
_attrs = ['id', 'name', 'created_at', 'volume_id', 'auth_key']
@memoized
def cinderclient(request):
api_version = VERSIONS.get_active_version()
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
cinder_url = ""
try:
# The cinder client assumes that the v2 endpoint type will be
# 'volumev2'.
if api_version['version'] == 2:
try:
cinder_url = base.url_for(request, 'volumev2')
except exceptions.ServiceCatalogException:
LOG.warning("Cinder v2 requested but no 'volumev2' service "
"type available in Keystone catalog.")
except exceptions.ServiceCatalogException:
LOG.debug('no volume service configured.')
raise
c = api_version['client'].Client(request.user.username,
request.user.token.id,
project_id=request.user.tenant_id,
auth_url=cinder_url,
insecure=insecure,
cacert=cacert,
http_log_debug=settings.DEBUG)
c.client.auth_token = request.user.token.id
c.client.management_url = cinder_url
return c
def _replace_v2_parameters(data):
if VERSIONS.active < 2:
data['display_name'] = data['name']
data['display_description'] = data['description']
del data['name']
del data['description']
return data
def version_get():
api_version = VERSIONS.get_active_version()
return api_version['version']
def volume_list(request, search_opts=None):
"""To see all volumes in the cloud as an admin you can pass in a special
search option: {'all_tenants': 1}
"""
c_client = cinderclient(request)
if c_client is None:
return []
# build a dictionary of volume_id -> transfer
transfers = {t.volume_id: t
for t in transfer_list(request, search_opts=search_opts)}
volumes = []
for v in c_client.volumes.list(search_opts=search_opts):
v.transfer = transfers.get(v.id)
volumes.append(Volume(v))
return volumes
def volume_get(request, volume_id):
volume_data = cinderclient(request).volumes.get(volume_id)
for attachment in volume_data.attachments:
if "server_id" in attachment:
instance = nova.server_get(request, attachment['server_id'])
attachment['instance_name'] = instance.name
else:
            # Nova volume can occasionally send back errored attachments
            # that lack a server_id property; to work around that we'll
# give the attached instance a generic name.
attachment['instance_name'] = _("Unknown instance")
volume_data.transfer = None
if volume_data.status == 'awaiting-transfer':
for transfer in transfer_list(request):
if transfer.volume_id == volume_id:
volume_data.transfer = transfer
break
return Volume(volume_data)
def volume_create(request, size, name, description, volume_type,
snapshot_id=None, metadata=None, image_id=None,
availability_zone=None, source_volid=None):
data = {'name': name,
'description': description,
'volume_type': volume_type,
'snapshot_id': snapshot_id,
'metadata': metadata,
'imageRef': image_id,
'availability_zone': availability_zone,
'source_volid': source_volid}
data = _replace_v2_parameters(data)
volume = cinderclient(request).volumes.create(size, **data)
return Volume(volume)
def volume_extend(request, volume_id, new_size):
return cinderclient(request).volumes.extend(volume_id, new_size)
def volume_delete(request, volume_id):
return cinderclient(request).volumes.delete(volume_id)
def volume_retype(request, volume_id, new_type, migration_policy):
return cinderclient(request).volumes.retype(volume_id,
new_type,
migration_policy)
def volume_set_bootable(request, volume_id, bootable):
return cinderclient(request).volumes.set_bootable(volume_id,
bootable)
def volume_update(request, volume_id, name, description):
vol_data = {'name': name,
'description': description}
vol_data = _replace_v2_parameters(vol_data)
return cinderclient(request).volumes.update(volume_id,
**vol_data)
def volume_reset_state(request, volume_id, state):
return cinderclient(request).volumes.reset_state(volume_id, state)
def volume_upload_to_image(request, volume_id, force, image_name,
container_format, disk_format):
return cinderclient(request).volumes.upload_to_image(volume_id,
force,
image_name,
container_format,
disk_format)
def volume_get_encryption_metadata(request, volume_id):
return cinderclient(request).volumes.get_encryption_metadata(volume_id)
def volume_snapshot_get(request, snapshot_id):
snapshot = cinderclient(request).volume_snapshots.get(snapshot_id)
return VolumeSnapshot(snapshot)
def volume_snapshot_list(request, search_opts=None):
c_client = cinderclient(request)
if c_client is None:
return []
return [VolumeSnapshot(s) for s in c_client.volume_snapshots.list(
search_opts=search_opts)]
def volume_snapshot_create(request, volume_id, name,
description=None, force=False):
data = {'name': name,
'description': description,
'force': force}
data = _replace_v2_parameters(data)
return VolumeSnapshot(cinderclient(request).volume_snapshots.create(
volume_id, **data))
def volume_snapshot_delete(request, snapshot_id):
return cinderclient(request).volume_snapshots.delete(snapshot_id)
def volume_snapshot_update(request, snapshot_id, name, description):
snapshot_data = {'name': name,
'description': description}
snapshot_data = _replace_v2_parameters(snapshot_data)
return cinderclient(request).volume_snapshots.update(snapshot_id,
**snapshot_data)
def volume_snapshot_reset_state(request, snapshot_id, state):
return cinderclient(request).volume_snapshots.reset_state(
snapshot_id, state)
@memoized
def volume_backup_supported(request):
"""This method will determine if cinder supports backup.
"""
# TODO(lcheng) Cinder does not expose the information if cinder
# backup is configured yet. This is a workaround until that
# capability is available.
# https://bugs.launchpad.net/cinder/+bug/1334856
cinder_config = getattr(settings, 'OPENSTACK_CINDER_FEATURES', {})
return cinder_config.get('enable_backup', False)
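# Deployment note (illustrative, based on the setting read above): backup
# support is opted into via the OPENSTACK_CINDER_FEATURES dict in the
# dashboard settings, e.g.
#
#     OPENSTACK_CINDER_FEATURES = {
#         'enable_backup': True,
#     }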
def volume_backup_get(request, backup_id):
backup = cinderclient(request).backups.get(backup_id)
return VolumeBackup(backup)
def volume_backup_list(request):
c_client = cinderclient(request)
if c_client is None:
return []
return [VolumeBackup(b) for b in c_client.backups.list()]
def volume_backup_create(request,
volume_id,
container_name,
name,
description):
backup = cinderclient(request).backups.create(
volume_id,
container=container_name,
name=name,
description=description)
return VolumeBackup(backup)
def volume_backup_delete(request, backup_id):
return cinderclient(request).backups.delete(backup_id)
def volume_backup_restore(request, backup_id, volume_id):
return cinderclient(request).restores.restore(backup_id=backup_id,
volume_id=volume_id)
def tenant_quota_get(request, tenant_id):
c_client = cinderclient(request)
if c_client is None:
return base.QuotaSet()
return base.QuotaSet(c_client.quotas.get(tenant_id))
def tenant_quota_update(request, tenant_id, **kwargs):
return cinderclient(request).quotas.update(tenant_id, **kwargs)
def default_quota_get(request, tenant_id):
return base.QuotaSet(cinderclient(request).quotas.defaults(tenant_id))
def volume_type_list_with_qos_associations(request):
vol_types = volume_type_list(request)
vol_types_dict = {}
# initialize and build a dictionary for lookup access below
for vol_type in vol_types:
vol_type.associated_qos_spec = ""
vol_types_dict[vol_type.id] = vol_type
# get all currently defined qos specs
qos_specs = qos_spec_list(request)
for qos_spec in qos_specs:
# get all volume types this qos spec is associated with
assoc_vol_types = qos_spec_get_associations(request, qos_spec.id)
for assoc_vol_type in assoc_vol_types:
# update volume type to hold this association info
vol_type = vol_types_dict[assoc_vol_type.id]
vol_type.associated_qos_spec = qos_spec.name
return vol_types
def default_quota_update(request, **kwargs):
cinderclient(request).quota_classes.update(DEFAULT_QUOTA_NAME, **kwargs)
def volume_type_list(request):
return cinderclient(request).volume_types.list()
def volume_type_create(request, name):
return cinderclient(request).volume_types.create(name)
def volume_type_delete(request, volume_type_id):
return cinderclient(request).volume_types.delete(volume_type_id)
def volume_type_get(request, volume_type_id):
return cinderclient(request).volume_types.get(volume_type_id)
def volume_encryption_type_create(request, volume_type_id, data):
return cinderclient(request).volume_encryption_types.create(volume_type_id,
specs=data)
def volume_encryption_type_delete(request, volume_type_id):
return cinderclient(request).volume_encryption_types.delete(volume_type_id)
def volume_encryption_type_get(request, volume_type_id):
return cinderclient(request).volume_encryption_types.get(volume_type_id)
def volume_encryption_type_list(request):
return cinderclient(request).volume_encryption_types.list()
def volume_type_extra_get(request, type_id, raw=False):
vol_type = volume_type_get(request, type_id)
extras = vol_type.get_keys()
if raw:
return extras
return [VolTypeExtraSpec(type_id, key, value) for
key, value in extras.items()]
def volume_type_extra_set(request, type_id, metadata):
vol_type = volume_type_get(request, type_id)
if not metadata:
return None
return vol_type.set_keys(metadata)
def volume_type_extra_delete(request, type_id, keys):
vol_type = volume_type_get(request, type_id)
return vol_type.unset_keys([keys])
def qos_spec_list(request):
return cinderclient(request).qos_specs.list()
def qos_spec_get(request, qos_spec_id):
return cinderclient(request).qos_specs.get(qos_spec_id)
def qos_spec_delete(request, qos_spec_id):
return cinderclient(request).qos_specs.delete(qos_spec_id, force=True)
def qos_spec_create(request, name, specs):
return cinderclient(request).qos_specs.create(name, specs)
def qos_spec_get_keys(request, qos_spec_id, raw=False):
spec = qos_spec_get(request, qos_spec_id)
qos_specs = spec.specs
if raw:
return spec
return [QosSpec(qos_spec_id, key, value) for
key, value in qos_specs.items()]
def qos_spec_set_keys(request, qos_spec_id, specs):
return cinderclient(request).qos_specs.set_keys(qos_spec_id, specs)
def qos_spec_unset_keys(request, qos_spec_id, specs):
return cinderclient(request).qos_specs.unset_keys(qos_spec_id, specs)
def qos_spec_associate(request, qos_specs, vol_type_id):
return cinderclient(request).qos_specs.associate(qos_specs, vol_type_id)
def qos_spec_disassociate(request, qos_specs, vol_type_id):
return cinderclient(request).qos_specs.disassociate(qos_specs, vol_type_id)
def qos_spec_get_associations(request, qos_spec_id):
return cinderclient(request).qos_specs.get_associations(qos_spec_id)
@memoized
def tenant_absolute_limits(request):
limits = cinderclient(request).limits.get().absolute
limits_dict = {}
for limit in limits:
if limit.value < 0:
# In some cases, the absolute limits data in Cinder can get
# out of sync causing the total.*Used limits to return
# negative values instead of 0. For such cases, replace
# negative values with 0.
if limit.name.startswith('total') and limit.name.endswith('Used'):
limits_dict[limit.name] = 0
else:
# -1 is used to represent unlimited quotas
limits_dict[limit.name] = float("inf")
else:
limits_dict[limit.name] = limit.value
return limits_dict
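# Sketch of the normalisation above (illustrative limit values only):
#
#     maxTotalVolumes   = -1  -> float('inf')  # -1 means an unlimited quota
#     totalVolumesUsed  = -3  -> 0             # out-of-sync counter clamped
#     totalVolumesUsed  = 12  -> 12            # sane values pass through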
def service_list(request): | [
" return cinderclient(request).services.list()"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
#
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['Process', 'current_process', 'active_children']
#
# Imports
#
import os
import sys
import signal
import itertools
from _weakrefset import WeakSet
#
#
#
try:
ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
ORIGINAL_DIR = None
#
# Public functions
#
def current_process():
''' | [
" Return process object representing the current process"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
from collections import defaultdict
from django.contrib.auth.decorators import login_required
from django.contrib.messages import info
from django.core.urlresolvers import get_callable, reverse
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404, redirect
from django.template import RequestContext
from django.template.defaultfilters import slugify
from django.template.loader import get_template
from django.utils import simplejson
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from mezzanine.conf import settings
from mezzanine.utils.importing import import_dotted_path
from mezzanine.utils.views import render, set_cookie, paginate
from cartridge.shop import checkout
from cartridge.shop.forms import AddProductForm, DiscountForm, CartItemFormSet
from cartridge.shop.models import Product, ProductVariation, Order, OrderItem
from cartridge.shop.models import DiscountCode
from cartridge.shop.utils import recalculate_discount, sign
# Set up checkout handlers.
handler = lambda s: import_dotted_path(s) if s else lambda *args: None
billship_handler = handler(settings.SHOP_HANDLER_BILLING_SHIPPING)
payment_handler = handler(settings.SHOP_HANDLER_PAYMENT)
order_handler = handler(settings.SHOP_HANDLER_ORDER)
def product(request, slug, template="shop/product.html"):
"""
Display a product - convert the product variations to JSON as well as
handling adding the product to either the cart or the wishlist.
"""
published_products = Product.objects.published(for_user=request.user)
product = get_object_or_404(published_products, slug=slug)
fields = [f.name for f in ProductVariation.option_fields()]
variations = product.variations.all()
variations_json = simplejson.dumps([dict([(f, getattr(v, f))
for f in fields + ["sku", "image_id"]])
for v in variations])
to_cart = (request.method == "POST" and
request.POST.get("add_wishlist") is None)
initial_data = {}
if variations:
initial_data = dict([(f, getattr(variations[0], f)) for f in fields])
initial_data["quantity"] = 1
add_product_form = AddProductForm(request.POST or None, product=product,
initial=initial_data, to_cart=to_cart)
if request.method == "POST":
if add_product_form.is_valid():
if to_cart:
quantity = add_product_form.cleaned_data["quantity"]
request.cart.add_item(add_product_form.variation, quantity)
recalculate_discount(request)
info(request, _("Item added to cart"))
return redirect("shop_cart")
else:
skus = request.wishlist
sku = add_product_form.variation.sku
if sku not in skus:
skus.append(sku)
info(request, _("Item added to wishlist"))
response = redirect("shop_wishlist")
set_cookie(response, "wishlist", ",".join(skus))
return response
context = {
"product": product,
"editable_obj": product,
"images": product.images.all(),
"variations": variations,
"variations_json": variations_json,
"has_available_variations": any([v.has_price() for v in variations]),
"related_products": product.related_products.published(
for_user=request.user),
"add_product_form": add_product_form
}
return render(request, template, context)
@never_cache
def wishlist(request, template="shop/wishlist.html"):
"""
Display the wishlist and handle removing items from the wishlist and
adding them to the cart.
"""
skus = request.wishlist
error = None
if request.method == "POST":
to_cart = request.POST.get("add_cart")
add_product_form = AddProductForm(request.POST or None,
to_cart=to_cart)
if to_cart:
if add_product_form.is_valid():
request.cart.add_item(add_product_form.variation, 1)
recalculate_discount(request)
message = _("Item added to cart")
url = "shop_cart"
else:
error = add_product_form.errors.values()[0]
else:
message = _("Item removed from wishlist")
url = "shop_wishlist"
sku = request.POST.get("sku")
if sku in skus:
skus.remove(sku)
if not error:
info(request, message)
response = redirect(url)
set_cookie(response, "wishlist", ",".join(skus))
return response
# Remove skus from the cookie that no longer exist.
published_products = Product.objects.published(for_user=request.user)
f = {"product__in": published_products, "sku__in": skus}
wishlist = ProductVariation.objects.filter(**f).select_related(depth=1)
wishlist = sorted(wishlist, key=lambda v: skus.index(v.sku))
context = {"wishlist_items": wishlist, "error": error}
response = render(request, template, context)
if len(wishlist) < len(skus):
skus = [variation.sku for variation in wishlist]
set_cookie(response, "wishlist", ",".join(skus))
return response
@never_cache
def cart(request, template="shop/cart.html"):
"""
Display cart and handle removing items from the cart.
"""
cart_formset = CartItemFormSet(instance=request.cart)
discount_form = DiscountForm(request, request.POST or None)
if request.method == "POST":
valid = True
if request.POST.get("update_cart"):
valid = request.cart.has_items()
if not valid:
# Session timed out.
info(request, _("Your cart has expired"))
else:
cart_formset = CartItemFormSet(request.POST,
instance=request.cart)
valid = cart_formset.is_valid()
if valid:
cart_formset.save()
recalculate_discount(request)
info(request, _("Cart updated"))
else:
valid = discount_form.is_valid()
if valid:
discount_form.set_discount()
if valid:
return redirect("shop_cart")
context = {"cart_formset": cart_formset}
settings.use_editable()
if (settings.SHOP_DISCOUNT_FIELD_IN_CART and
DiscountCode.objects.active().count() > 0):
context["discount_form"] = discount_form
return render(request, template, context)
@never_cache
def checkout_steps(request):
"""
Display the order form and handle processing of each step.
"""
# Do the authentication check here rather than using standard
# login_required decorator. This means we can check for a custom
# LOGIN_URL and fall back to our own login view.
authenticated = request.user.is_authenticated()
if settings.SHOP_CHECKOUT_ACCOUNT_REQUIRED and not authenticated:
url = "%s?next=%s" % (settings.LOGIN_URL, reverse("shop_checkout"))
return redirect(url)
# Determine the Form class to use during the checkout process
form_class = get_callable(settings.SHOP_CHECKOUT_FORM_CLASS)
step = int(request.POST.get("step", checkout.CHECKOUT_STEP_FIRST))
initial = checkout.initial_order_data(request)
form = form_class(request, step, initial=initial)
data = request.POST
checkout_errors = []
if request.POST.get("back") is not None:
# Back button in the form was pressed - load the order form
# for the previous step and maintain the field values entered.
step -= 1
form = form_class(request, step, initial=initial)
elif request.method == "POST" and request.cart.has_items():
form = form_class(request, step, initial=initial, data=data)
if form.is_valid():
# Copy the current form fields to the session so that
# they're maintained if the customer leaves the checkout
# process, but remove sensitive fields from the session
# such as the credit card fields so that they're never
# stored anywhere.
request.session["order"] = dict(form.cleaned_data)
sensitive_card_fields = ("card_number", "card_expiry_month",
"card_expiry_year", "card_ccv")
for field in sensitive_card_fields:
if field in request.session["order"]:
del request.session["order"][field]
# FIRST CHECKOUT STEP - handle shipping and discount code.
if step == checkout.CHECKOUT_STEP_FIRST:
try:
billship_handler(request, form)
except checkout.CheckoutError, e:
checkout_errors.append(e)
form.set_discount()
# FINAL CHECKOUT STEP - handle payment and process order.
if step == checkout.CHECKOUT_STEP_LAST and not checkout_errors:
                # Create and save the initial order object so that
                # the payment handler has access to all of the order
                # fields. If there is a payment error then delete the
                # order, otherwise remove the cart items from stock
                # and send the order receipt email.
order = form.save(commit=False)
order.setup(request)
# Try payment.
try:
transaction_id = payment_handler(request, form, order)
except checkout.CheckoutError, e:
# Error in payment handler.
order.delete()
checkout_errors.append(e)
if settings.SHOP_CHECKOUT_STEPS_CONFIRMATION:
step -= 1
else:
# Finalize order - ``order.complete()`` performs
# final cleanup of session and cart.
# ``order_handler()`` can be defined by the
# developer to implement custom order processing.
# Then send the order email to the customer.
order.transaction_id = transaction_id
order.complete(request)
order_handler(request, form, order)
checkout.send_order_email(request, order)
# Set the cookie for remembering address details
# if the "remember" checkbox was checked.
response = redirect("shop_complete")
if form.cleaned_data.get("remember") is not None:
remembered = "%s:%s" % (sign(order.key), order.key)
set_cookie(response, "remember", remembered,
secure=request.is_secure())
else:
response.delete_cookie("remember")
return response
# If any checkout errors, assign them to a new form and
# re-run is_valid. If valid, then set form to the next step.
form = form_class(request, step, initial=initial, data=data,
errors=checkout_errors)
if form.is_valid():
step += 1
form = form_class(request, step, initial=initial)
step_vars = checkout.CHECKOUT_STEPS[step - 1]
template = "shop/%s.html" % step_vars["template"]
CHECKOUT_STEP_FIRST = step == checkout.CHECKOUT_STEP_FIRST
context = {"form": form, "CHECKOUT_STEP_FIRST": CHECKOUT_STEP_FIRST,
"step_title": step_vars["title"], "step_url": step_vars["url"],
"steps": checkout.CHECKOUT_STEPS, "step": step}
return render(request, template, context)
@never_cache | [
"def complete(request, template=\"shop/complete.html\"):"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
import numpy as np
import math
_use_pysb = False
try:
import pysb.core
import pysb.integrate
_use_pysb = True
except ImportError:
pass
__all__ = ['MCMC', 'MCMCOpts']
class MCMC(object):
"""An interface for Markov-Chain Monte Carlo parameter estimation.
Parameters
----------
options : bayessb.MCMCOpts
Option set -- defines the problem and sets some parameters to control
the MCMC algorithm.
Attributes
----------
options : bayessb.MCMCOpts
Validated copy of options passed to constructor.
num_estimate : int
Number of parameters to estimate.
estimate_idx : list of int
Indices of parameters to estimate in the model's full parameter list.
initial_values : list of float
Starting values for parameters to estimate, taken from the parameters'
nominal values in the model or explicitly specified in `options`.
initial_position : list of float
Starting position of MCMC walk in parameter space (log10 of
`initial_values`).
position : list of float
        Current position of MCMC walk in parameter space, i.e. the most
        recently accepted move.
test_position : list of float
Proposed MCMC move.
acceptance : int
Number of accepted moves.
T : float
Current value of the simulated annealing temperature.
T_decay : float
Constant for exponential decay of `T`, automatically calculated such
that T will decay from `options.T_init` down to 1 over the first
`options.anneal_length` steps.
sig_value : float
Current value of 'Sigma', the scaling factor for the proposal
distribution. The MCMC algorithm dynamically tunes this to maintain the
acceptance rate specified in `options.accept_rate_target`.
iter : int
Current MCMC step number.
start_iter : int
Starting MCMC step number.
ode_options : dict
Options for the ODE integrator, currently just 'rtol' for relative
tolerance and 'atol' for absolute tolerance.
random : numpy.random.RandomState
Random number generator. Seeded with `options.seed` for reproducible
runs.
solver : pysb.integrate.Solver
ODE solver.
initial_prior : float
Starting prior value, i.e. the value at `initial_position`.
initial_likelihood : float
Starting likelihood value, i.e. the value at `initial_position`.
initial_posterior : float
Starting posterior value, i.e. the value at `initial_position`.
    accept_prior : float
        Current prior value, i.e. the value at `position`.
    accept_likelihood : float
        Current likelihood value, i.e. the value at `position`.
    accept_posterior : float
        Current posterior value, i.e. the value at `position`.
test_prior : float
Prior value at `test_position`.
test_likelihood : float
Likelihood value at `test_position`.
test_posterior : float
Posterior value at `test_position`.
hessian : numpy.ndarray of float
Current hessian of the posterior landscape. Size is
`num_estimate`x`num_estimate`.
positions : numpy.ndarray of float
Trace of all proposed moves. Size is `num_estimate`x`nsteps`.
priors, likelihoods, posteriors : numpy.ndarray of float
Trace of all priors, likelihoods, and posteriors corresponding to
`positions`. Length is `nsteps`.
alphas, sigmas, delta_posteriors, ts : numpy.ndarray of float
Trace of various MCMC parameters and calculated values. Length is
`nsteps`.
accepts, rejects : numpy.ndarray of bool
        Trace of whether each proposed move was accepted or rejected. Length is
`nsteps`.
hessians : numpy.ndarray of float
Trace of all hessians. Size is `num_estimate`x`num_estimate`x`num_hessians`
where num_hessians is the actual number of hessians to be calculated.
Notes
-----
"""
def __init__(self, options):
self.options = self.validate(options)
def __getstate__(self):
# clear solver since it causes problems with pickling
state = self.__dict__.copy()
del state['solver']
return state
def __setstate__(self, state):
# re-init the solver which we didn't pickle
self.__dict__.update(state)
self.init_solver()
def run(self):
"""Initialize internal state and runs the parameter estimation."""
self.initialize()
self.estimate()
def validate(self, options):
"""Return a validated copy of options with defaults applied."""
# FIXME should this live in MCMCOpts?
options = options.copy()
if options.model is None:
raise Exception("model not defined")
if options.estimate_params is None or not len(options.estimate_params):
raise Exception("estimate_params must contain a list of parameters")
# clamp hessian_period to actual number of steps
if options.use_hessian:
options.hessian_period = min(options.hessian_period, options.nsteps)
else:
options.hessian_period = np.inf
if options.anneal_length is None:
# default for anneal_length if unspecified
if options.use_hessian:
# if using hessian, anneal until we start using it
options.anneal_length = options.hessian_period
else:
# otherwise, anneal for 10% of the run
options.anneal_length = np.floor(options.nsteps * 0.10)
else:
# clamp it to actual number of steps
options.anneal_length = min(options.anneal_length, options.nsteps)
# default for sigma_adj_interval if unspecified
if options.sigma_adj_interval is None:
# default to 10 adjustments throughout the annealing phase
options.sigma_adj_interval = max(int(options.anneal_length / 10), 1)
return options
def initialize(self):
"""Initialize internal state from the option set."""
        # Create the list of starting values from the initial parameter values
        # given by the user. The vector only contains values that are to be
        # estimated!
self.num_estimate = len(self.options.estimate_params)
if self.options.initial_values is not None:
self.initial_values = self.options.initial_values
else:
# if no explicit values given, take values from model
self.initial_values = [p.value
for p in self.options.estimate_params]
# indices of parameters to be estimated
self.estimate_idx = [i for i, p
in enumerate(self.options.model.parameters)
if p in self.options.estimate_params]
# we actually work in a log-transformed phase space
self.initial_position = np.log10(self.initial_values)
self.position = self.initial_position
# need to do this before init_solver
self.ode_options = {};
if self.options.rtol is not None:
self.ode_options['rtol'] = self.options.rtol
if self.options.atol is not None: | [
" self.ode_options['atol'] = self.options.atol"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
#
# Copyright (c) 2014 by Christian E. Hopps.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, print_function, nested_scopes
from ctypes import create_string_buffer, sizeof
from pyisis.bstr import memspan # pylint: disable=E0611
from pyisis.lib.util import bchr, memcpy, buffer3, stringify3, tlvwrb
import errno
import logbook
import pyisis.adjacency as adjacency
import pyisis.lib.bpf as bpf
import pyisis.clns as clns
import pyisis.lib.debug as debug
import pyisis.lsp as lsp
import pyisis.pdu as pdu
import pyisis.lib.rawsock as rawsock
import pyisis.lib.timers as timers
import pyisis.tlv as tlv
import pyisis.lib.util as util
import select
import sys
import threading
import traceback
logger = logbook.Logger(__name__)
SRM = 0
SSN = 1
class LinkDB (object):
"""A container for all the enabled links in an instance"""
def __init__ (self, inst):
self.inst = inst
self.links = [] | [
" self.linkfds = set()"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
from django.conf import settings
from django.conf.urls import url, patterns, include
from django.views.generic import TemplateView, RedirectView
from django.contrib import admin
from django.conf.urls.static import static
from threadedcomments.models import ThreadedComment
admin.autodiscover()
from audiotracks.models import get_track_model
Track = get_track_model()
from microblogging.feeds import TweetFeedAll, TweetFeedUser
from microblogging.feeds import TweetFeedUserWithFriends
from microblogging.models import Tweet
from photos.models import Image
from tagging.models import TaggedItem
from account.openid_consumer import PinaxConsumer
from blog.feeds import BlogFeedAll, BlogFeedUser
from blog.models import Post
from blog.forms import BlogForm
from smeuhoverride import feeds
handler500 = "pinax.views.server_error"
tweets_feed_dict = {"feed_dict": {
"all": TweetFeedAll,
"only": TweetFeedUser,
"with_friends": TweetFeedUserWithFriends,
}}
blogs_feed_dict = {"feed_dict": {
"all": BlogFeedAll,
"only": BlogFeedUser,
}}
urlpatterns = patterns(
"",
url(r"^favicon.ico/?$", RedirectView.as_view(
url=settings.STATIC_URL + 'img/favicon.ico',
permanent=True)),
url(r"^$", "timeline.views.home", name="home"),
url(r"5c/$", "timeline.views.legacy",),
url(r"^admin/", include(admin.site.urls)),
url(r"^about/", include("about.urls")),
url(r"^account/", include("account.urls")),
url(r"^openid/(.*)", PinaxConsumer()),
url(r"^profiles/", include("profiles.urls")),
# Blog URLs ############################################
# all blog posts
url(r"^blogs/?$", "blog.views.blogs",
name="blog_list_all"),
url(r"^(?P<username>[\w\._-]+)/blog/feed/?$", feeds.UserBlogPosts(),
name="user_blog_feed"),
# blog post
url(r"^(?P<username>[-\w]+)/blog/(?P<slug>[-\w]+)/source/?$",
"smeuhoverride.views.blog_post_source", name="blog_post_source"),
url(r"^(?P<username>[-\w]+)/blog/(?P<slug>[-\w]+)/?$",
"blog.views.post", name="blog_post"),
# blog post for user
url(r"^(?P<username>\w+)/blog/?$",
"smeuhoverride.views.user_blog_index", name="blog_list_user"),
# your posts
url(r"^blogs/your_posts/?$",
"blog.views.your_posts", name="blog_list_yours"),
# new blog post
url(r"^blogs/new/$", "blog.views.new", name="blog_new"),
# edit blog post
url(r"^blogs/edit/(\d+)/$",
"blog.views.edit", name="blog_edit"),
# destory blog post
url(r"^blogs/destroy/(\d+)/$",
"blog.views.destroy", name="blog_destroy"),
# ajax validation
(r"^blogs/validate/$", "ajax_validation.views.validate", {
"form_class": BlogForm,
"callback": lambda request, *args, **kwargs: {"user": request.user}
}, "blog_form_validate"),
# /END Blog URLs #######################################
url(r"^invitations/", include("friends_app.urls")),
url(r"^notices/", include("notification.urls")),
url(r"^messages/", include("messages.urls")),
url(r"^touites/", include("microblogging.urls")),
url(r"^comments/", include("threadedcomments.urls")),
url(r"^i18n/", include("django.conf.urls.i18n")),
url(r"^photos/", include("photos.urls")),
url(r"^avatar/", include("avatar.urls")),
url(r"^fu/", include("fukung.urls")),
url(r"^timeline/", include("timeline.urls")),
url(r"^artist/", include("artist.urls")),
# Feeds urls
url(r"^feeds/touites/(?P<username>[\w\._-]+)/with_friends/?$",
feeds.UserTweetWithFriends( | [
" ), name=\"user_friends_tweets\"),"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
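# Note (added for clarity, not in the original script): bufreverse() swaps the
# byte order within each 32-bit word, while wordreverse() reverses the order
# of the words themselves. Miner.work() below applies both to the final hash
# before encoding it as hex and comparing it against the target.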
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 777
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
| [
"\tsettings['port'] = int(settings['port'])"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# -*- coding: utf-8 -*-
"""
InformationMachineAPILib.Controllers.UserScansController
"""
import unirest
from InformationMachineAPILib.APIHelper import APIHelper
from InformationMachineAPILib.Configuration import Configuration
from InformationMachineAPILib.APIException import APIException
from InformationMachineAPILib.Models.UploadReceiptWrapper import UploadReceiptWrapper
from InformationMachineAPILib.Models.UploadReceiptStatusWrapper import UploadReceiptStatusWrapper
from InformationMachineAPILib.Models.UploadBarcodeWrapper import UploadBarcodeWrapper
class UserScansController(object):
"""A Controller to access Endpoints in the InformationMachineAPILib API."""
def __init__(self,
client_id, | [
" client_secret):"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# (c) 2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import time
import errno
from abc import ABCMeta, abstractmethod
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import with_metaclass
from ansible.module_utils._text import to_bytes
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.plugins.loader import cache_loader
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class BaseCacheModule(with_metaclass(ABCMeta, object)):
# Backwards compat only. Just import the global display instead
_display = display
@abstractmethod
def get(self, key):
pass
@abstractmethod
def set(self, key, value):
pass
@abstractmethod
def keys(self):
pass
@abstractmethod
def contains(self, key):
pass
@abstractmethod
def delete(self, key):
pass
@abstractmethod
def flush(self):
pass
@abstractmethod
def copy(self):
pass
class BaseFileCacheModule(BaseCacheModule):
"""
A caching module backed by file based storage.
"""
def __init__(self, *args, **kwargs):
self.plugin_name = self.__module__.split('.')[-1] | [
" self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import unittest
import os
from barf.analysis.symbolic.emulator import ReilSymbolicEmulator
from barf.analysis.symbolic.emulator import State
from barf.analysis.symbolic.emulator import SymExecResult
from barf.arch.x86 import X86ArchitectureInformation
from barf.core.binary import BinaryFile
from barf.utils.reil import ReilContainerBuilder
def get_full_path(filename):
return os.path.dirname(os.path.abspath(__file__)) + filename
class ReilSymbolicEmulatorTests(unittest.TestCase):
def setUp(self):
self.__arch_mode = None
self.__arch = None
self.__disassembler = None
self.__translator = None
def test_se_1(self):
binary = BinaryFile(get_full_path("/data/bin/check_serial_1"))
arch_info = X86ArchitectureInformation(binary.architecture_mode)
functions = [
("check_serial", 0x0804841d, 0x08048452)
]
reil_container = ReilContainerBuilder(binary).build(functions)
# Set up initial state
initial_state = State(arch_info, mode="initial")
# Set up stack
esp = 0xffffceec
initial_state.write_register("esp", esp)
# Set up parameters
user_password_addr = 0xdeadbeef
user_password_len = 0x6
initial_state.write_memory(esp + 0x4, 4, user_password_addr) # password
initial_state.write_memory(esp + 0x0, 4, 0x41414141) # fake return address
# Each byte of the password should be an ascii char.
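        # (0x21-0x7e is the printable, non-whitespace ASCII range.)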
for i in range(0, user_password_len):
value = initial_state.query_memory(user_password_addr + i, 1)
initial_state.add_constraint(value.uge(0x21))
initial_state.add_constraint(value.ule(0x7e))
sym_exec = ReilSymbolicEmulator(arch_info)
paths = sym_exec.find_address(
reil_container, start=0x0804841d, end=0x08048452,
find=0x08048451, avoid=[0x0804843b], initial_state=initial_state)
# There's only one way to reach 'find' address.
self.assertEqual(len(paths), 1)
final_state = State(arch_info, mode="final")
user_password_expected = bytearray(b"AAAAAA")
user_password = bytearray()
se_res = SymExecResult(arch_info, initial_state, paths[0], final_state)
for i in range(0, user_password_len):
value = se_res.query_memory(user_password_addr + i, 1)
user_password.append(value)
self.assertEqual(user_password, user_password_expected)
def test_se_2(self):
binary = BinaryFile(get_full_path("/data/bin/check_serial_2"))
arch_info = X86ArchitectureInformation(binary.architecture_mode)
functions = [
("check_serial", 0x0804841d, 0x08048467)
]
reil_container = ReilContainerBuilder(binary).build(functions)
# Set up initial state
initial_state = State(arch_info, mode="initial")
# Set up stack
esp = 0xffffceec
initial_state.write_register("esp", esp)
# Set up parameters
user_password_addr = 0xdeadbeef
user_password_len = 0x6
initial_state.write_memory(esp + 0x8, 4, user_password_len) # password length
initial_state.write_memory(esp + 0x4, 4, user_password_addr) # password
initial_state.write_memory(esp + 0x0, 4, 0x41414141) # fake return address
# Each byte of the password should be an ascii char.
for i in range(0, user_password_len):
value = initial_state.query_memory(user_password_addr + i, 1)
initial_state.add_constraint(value.uge(0x21))
initial_state.add_constraint(value.ule(0x7e))
# Set up memory
ref_key = bytearray(b"\x31\x27\x30\x2b\x23\x2e")
initial_state.write_memory(0x0804a020, 4, 0xcafecafe)
for i in range(0, len(ref_key)):
initial_state.write_memory(0xcafecafe + i, 1, ref_key[i])
sym_exec = ReilSymbolicEmulator(arch_info)
paths = sym_exec.find_address(
reil_container, start=0x0804841d, end=0x08048467,
find=0x08048466, avoid=[0x0804844e], initial_state=initial_state)
# There's only one way to reach 'find' address.
self.assertEqual(len(paths), 1)
final_state = State(arch_info, mode="final")
user_password_expected = bytearray(b"serial")
user_password = bytearray()
se_res = SymExecResult(arch_info, initial_state, paths[0], final_state)
for i in range(0, user_password_len):
value = se_res.query_memory(user_password_addr + i, 1)
user_password.append(value)
| [
" self.assertEqual(user_password, user_password_expected)"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for filter.py."""
import unittest
from filter import _CategoryFilter as CategoryFilter
from filter import validate_filter_rules
from filter import FilterConfiguration
# On Testing __eq__() and __ne__():
#
# In the tests below, we deliberately do not use assertEqual() or
# assertNotEquals() to test __eq__() or __ne__(). We do this to be
# very explicit about what we are testing, especially in the case
# of assertNotEquals().
#
# Part of the reason is that it is not immediately clear what
# expression the unittest module uses to assert "not equals" -- the
# negation of __eq__() or __ne__(), which are not necessarily
# equivalent expressions in Python. For example, from Python's "Data
# Model" documentation--
#
# "There are no implied relationships among the comparison
# operators. The truth of x==y does not imply that x!=y is
# false. Accordingly, when defining __eq__(), one should
# also define __ne__() so that the operators will behave as
# expected."
#
# (from http://docs.python.org/reference/datamodel.html#object.__ne__ )
class ValidateFilterRulesTest(unittest.TestCase):
"""Tests validate_filter_rules() function."""
def test_validate_filter_rules(self):
all_categories = ["tabs", "whitespace", "build/include"]
bad_rules = [
"tabs",
"*tabs",
" tabs",
" +tabs",
"+whitespace/newline",
"+xxx",
]
good_rules = [
"+tabs",
"-tabs",
"+build"
]
for rule in bad_rules:
self.assertRaises(ValueError, validate_filter_rules,
[rule], all_categories)
for rule in good_rules:
# This works: no error.
validate_filter_rules([rule], all_categories)
class CategoryFilterTest(unittest.TestCase):
"""Tests CategoryFilter class."""
def test_init(self):
"""Test __init__ method."""
# Test that the attributes are getting set correctly.
filter = CategoryFilter(["+"])
self.assertEqual(["+"], filter._filter_rules)
def test_init_default_arguments(self):
"""Test __init__ method default arguments."""
filter = CategoryFilter()
self.assertEqual([], filter._filter_rules)
def test_str(self):
"""Test __str__ "to string" operator."""
filter = CategoryFilter(["+a", "-b"])
self.assertEqual(str(filter), "+a,-b")
def test_eq(self):
"""Test __eq__ equality function."""
filter1 = CategoryFilter(["+a", "+b"])
filter2 = CategoryFilter(["+a", "+b"])
filter3 = CategoryFilter(["+b", "+a"])
# See the notes at the top of this module about testing
# __eq__() and __ne__().
self.assertTrue(filter1.__eq__(filter2))
self.assertFalse(filter1.__eq__(filter3))
def test_ne(self):
"""Test __ne__ inequality function."""
# By default, __ne__ always returns true on different objects.
# Thus, just check the distinguishing case to verify that the
# code defines __ne__.
#
# Also, see the notes at the top of this module about testing
# __eq__() and __ne__().
self.assertFalse(CategoryFilter().__ne__(CategoryFilter()))
def test_should_check(self):
"""Test should_check() method."""
filter = CategoryFilter()
self.assertTrue(filter.should_check("everything"))
# Check a second time to exercise cache.
self.assertTrue(filter.should_check("everything"))
filter = CategoryFilter(["-"])
self.assertFalse(filter.should_check("anything"))
# Check a second time to exercise cache.
self.assertFalse(filter.should_check("anything"))
filter = CategoryFilter(["-", "+ab"])
self.assertTrue(filter.should_check("abc"))
self.assertFalse(filter.should_check("a"))
filter = CategoryFilter(["+", "-ab"])
self.assertFalse(filter.should_check("abc"))
self.assertTrue(filter.should_check("a"))
class FilterConfigurationTest(unittest.TestCase):
"""Tests FilterConfiguration class."""
| [
" def _config(self, base_rules, path_specific, user_rules):"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
"""Module to handle image quality calculations."""
#
# iqcalc.py -- image quality calculations on FITS data
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import math
import logging
import threading
import numpy as np
try:
import scipy.optimize as optimize
import scipy.ndimage as ndimage
from scipy.ndimage import maximum_filter
from scipy.interpolate import interp1d
have_scipy = True
except ImportError:
have_scipy = False
from ginga.misc import Bunch
__all__ = ['get_mean', 'get_median', 'IQCalcError', 'IQCalc']
def get_mean(data_np):
"""Calculate mean for valid values.
Parameters
----------
data_np : ndarray
Input array. Can contain masked values.
Returns
-------
result : float
Mean of array values that are finite.
If array contains no finite values, returns NaN.
"""
i = np.isfinite(data_np)
if not np.any(i):
return np.nan
# NOTE: we use "ma" version of mean because this can be used with
# masked arrays created by cutting out non-rectangular shapes
return np.ma.mean(data_np[i])
def get_median(data_np):
"""Like :func:`get_mean` but for median."""
i = np.isfinite(data_np)
if not np.any(i):
return np.nan
# NOTE: we use "ma" version of median because this can be used with
# masked arrays created by cutting out non-rectangular shapes
return np.ma.median(data_np[i])
class IQCalcError(Exception):
"""Base exception for raising errors in this module."""
pass
class IQCalc(object):
"""Class to handle model fitting and FWHM calculations.
Parameters
----------
logger : obj or `None`
Python logger. If not given, one will be created.
Attributes
----------
lock : :py:class:`threading.RLock`
For mutex around `scipy.optimize`, which seems to be non-threadsafe.
skylevel_magnification, skylevel_offset : float
For adjustments to sky background level.
"""
def __init__(self, logger=None):
if not logger:
logger = logging.getLogger('IQCalc')
self.logger = logger
# for mutex around scipy.optimize, which seems to be non-threadsafe
self.lock = threading.RLock()
# for adjustments to background level
self.skylevel_magnification = 1.05
self.skylevel_offset = 40.0
# FWHM CALCULATION
def gaussian(self, x, p):
"""Evaluate Gaussian function in 1D. See :meth:`calc_fwhm`.
Parameters
----------
x : array-like
X values.
p : tuple of float
Parameters for Gaussian, i.e., ``(mean, stddev, amplitude)``.
Returns
-------
y : array-like
Y values.
"""
y = (1.0 / (p[1] * np.sqrt(2 * np.pi)) *
np.exp(-(x - p[0]) ** 2 / (2 * p[1] ** 2))) * p[2]
return y
def calc_fwhm_gaussian(self, arr1d, medv=None, gauss_fn=None):
"""FWHM calculation on a 1D array by using least square fitting of
a Gaussian function on the data.
Parameters
----------
arr1d : array-like
1D array cut in either X or Y direction on the object.
medv : float or `None`
Median of the data. If not given, it is calculated from ``arr1d``.
gauss_fn : func or `None`
Gaussian function for fitting. If not given, :meth:`gaussian`
is used.
Returns
-------
res : `~ginga.misc.Bunch.Bunch`
Fitting results.
Raises
------
IQCalcError
Fitting failed.
"""
if not have_scipy:
raise IQCalcError("Please install the 'scipy' module "
"to use this function")
if gauss_fn is None:
gauss_fn = self.gaussian
N = len(arr1d)
X = np.array(list(range(N)))
Y = arr1d
# Fitting works more reliably if we do the following
# a. subtract sky background
if medv is None:
medv = get_median(Y)
Y = Y - medv
maxv = Y.max()
# b. clamp to 0..max (of the sky subtracted field)
Y = Y.clip(0, maxv)
# Fit a gaussian
        p0 = [0, N - 1, maxv]  # Initial guess
# Distance to the target function
errfunc = lambda p, x, y: gauss_fn(x, p) - y # noqa
# Least square fit to the gaussian
with self.lock:
# NOTE: without this mutex, optimize.leastsq causes a fatal error
# sometimes--it appears not to be thread safe.
# The error is:
# "SystemError: null argument to internal routine"
# "Fatal Python error: GC object already tracked"
p1, success = optimize.leastsq(errfunc, p0[:], args=(X, Y))
if not success:
raise IQCalcError("FWHM Gaussian fitting failed")
mu, sdev, maxv = p1
self.logger.debug("mu=%f sdev=%f maxv=%f" % (mu, sdev, maxv))
# Now that we have the sdev from fitting, we can calculate FWHM
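        # For a Gaussian, FWHM = 2*sqrt(2*ln 2)*sigma (about 2.3548*sigma).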
fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0)) * sdev
# some routines choke on numpy values and need "pure" Python floats
# e.g. when marshalling through a remote procedure interface
fwhm = float(fwhm)
mu = float(mu)
sdev = float(sdev)
maxv = float(maxv)
res = Bunch.Bunch(fwhm=fwhm, mu=mu, sdev=sdev, maxv=maxv,
fit_fn=gauss_fn, fit_args=[mu, sdev, maxv])
return res
def moffat(self, x, p):
"""Evaluate Moffat function in 1D. See :meth:`calc_fwhm`.
Parameters
----------
x : array-like
X values.
p : tuple of float
            Parameters for Moffat, i.e., ``(x_0, gamma, alpha, amplitude)``,
            where ``x_0`` is the mean and ``gamma`` the core width.
Returns
-------
y : array-like
Y values.
"""
y = (1.0 + (x - p[0]) ** 2 / p[1] ** 2) ** (-1.0 * p[2]) * p[3]
return y
def calc_fwhm_moffat(self, arr1d, medv=None, moffat_fn=None):
"""FWHM calculation on a 1D array by using least square fitting of
a Moffat function on the data.
Parameters
----------
arr1d : array-like
1D array cut in either X or Y direction on the object.
medv : float or `None`
Median of the data. If not given, it is calculated from ``arr1d``.
moffat_fn : func or `None`
Moffat function for fitting. If not given, :meth:`moffat` is used.
Returns
-------
res : `~ginga.misc.Bunch.Bunch`
Fitting results.
Raises
------
IQCalcError
Fitting failed.
"""
if not have_scipy:
raise IQCalcError("Please install the 'scipy' module "
"to use this function")
if moffat_fn is None:
moffat_fn = self.moffat
N = len(arr1d)
X = np.array(list(range(N)))
Y = arr1d
# Fitting works more reliably if we do the following
# a. subtract sky background
if medv is None:
medv = get_median(Y)
Y = Y - medv
maxv = Y.max()
# b. clamp to 0..max (of the sky subtracted field)
Y = Y.clip(0, maxv)
# Fit a moffat
        p0 = [0, N - 1, 2, maxv]  # Initial guess
# Distance to the target function
errfunc = lambda p, x, y: moffat_fn(x, p) - y # noqa
# Least square fit to the gaussian
with self.lock:
# NOTE: without this mutex, optimize.leastsq causes a fatal error
# sometimes--it appears not to be thread safe.
# The error is:
# "SystemError: null argument to internal routine"
# "Fatal Python error: GC object already tracked"
p1, success = optimize.leastsq(errfunc, p0[:], args=(X, Y))
if not success:
raise IQCalcError("FWHM Moffat fitting failed")
mu, width, power, maxv = p1
width = np.abs(width)
self.logger.debug("mu=%f width=%f power=%f maxv=%f" % (
mu, width, power, maxv))
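        # For a Moffat profile, FWHM = 2*gamma*sqrt(2**(1/alpha) - 1); here
        # `width` plays the role of gamma and `power` the role of alpha.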
fwhm = 2.0 * width * np.sqrt(2.0 ** (1.0 / power) - 1.0)
# some routines choke on numpy values and need "pure" Python floats
# e.g. when marshalling through a remote procedure interface
fwhm = float(fwhm)
mu = float(mu)
width = float(width)
power = float(power)
maxv = float(maxv)
res = Bunch.Bunch(fwhm=fwhm, mu=mu, width=width, power=power,
maxv=maxv, fit_fn=moffat_fn,
fit_args=[mu, width, power, maxv])
return res
def calc_fwhm(self, arr1d, medv=None, method_name='gaussian'):
"""Calculate FWHM for the given input array.
Parameters
----------
arr1d : array-like
1D array cut in either X or Y direction on the object.
medv : float or `None`
Median of the data. If not given, it is calculated from ``arr1d``.
method_name : {'gaussian', 'moffat'}
Function to use for fitting.
Returns
-------
res : `~ginga.misc.Bunch.Bunch`
Fitting results.
"""
# Calculate FWHM in each direction
fwhm_fn = self.calc_fwhm_gaussian
if method_name == 'moffat':
fwhm_fn = self.calc_fwhm_moffat
return fwhm_fn(arr1d, medv=medv)
def get_fwhm(self, x, y, radius, data, medv=None, method_name='gaussian'):
"""Get the FWHM values of the object at the given coordinates and
radius.
Parameters
----------
x, y : int
Indices of the object location in data array.
radius : float
Radius of the region encompassing the object.
data : array-like
Data array.
medv, method_name
See :meth:`calc_fwhm`.
Returns
-------
fwhm_x, fwhm_y : float
FWHM in X and Y, respectively.
ctr_x, ctr_y : float
Center in X and Y, respectively.
x_res, y_res : dict
Fit results from :meth:`calc_fwhm` in X and Y, respectively.
"""
if medv is None:
medv = get_median(data)
# Get two cuts of the data, one in X and one in Y
x0, y0, xarr, yarr = self.cut_cross(x, y, radius, data)
# Calculate FWHM in each direction
x_res = self.calc_fwhm(xarr, medv=medv, method_name=method_name)
fwhm_x, cx = x_res.fwhm, x_res.mu
y_res = self.calc_fwhm(yarr, medv=medv, method_name=method_name)
fwhm_y, cy = y_res.fwhm, y_res.mu
ctr_x = x0 + cx
ctr_y = y0 + cy
self.logger.debug("fwhm_x,fwhm_y=%f,%f center=%f,%f" % (
fwhm_x, fwhm_y, ctr_x, ctr_y))
return (fwhm_x, fwhm_y, ctr_x, ctr_y, x_res, y_res)
def starsize(self, fwhm_x, deg_pix_x, fwhm_y, deg_pix_y):
"""Calculate average FWHM in arcseconds.
Parameters
----------
fwhm_x : float
FWHM in X (pixels).
deg_pix_x : float
Plate scale from CDELT1 in degrees per pixel.
fwhm_y : float
FWHM in Y (pixels).
deg_pix_y : float
Plate scale from CDELT2 in degrees per pixel.
Returns
-------
fwhm : float
Average FWHM in arcseconds.
"""
cdelta1 = math.fabs(deg_pix_x)
cdelta2 = math.fabs(deg_pix_y)
fwhm = (fwhm_x * cdelta1 + fwhm_y * cdelta2) / 2.0
fwhm = fwhm * 3600.0
return fwhm
def centroid(self, data, xc, yc, radius):
"""Calculate centroid from center of mass.
Parameters
----------
data : array-like
Data array.
xc, yc : int
X and Y indices of the approximate center.
radius : float
Half-width of the region to consider around the given center.
Returns
-------
x, y : float
Centroid indices.
Raises
------
IQCalcError
Missing dependency.
"""
if not have_scipy:
raise IQCalcError("Please install the 'scipy' module "
"to use this function")
xc, yc = int(xc), int(yc)
x0, y0, arr = self.cut_region(xc, yc, int(radius), data)
# See https://stackoverflow.com/questions/25369982/center-of-mass-for-roi-in-python
cp_arr = np.asarray(arr)
cy, cx = ndimage.center_of_mass(cp_arr)
return (x0 + cx, y0 + cy)
# FINDING BRIGHT PEAKS
def get_threshold(self, data, sigma=5.0):
"""Calculate threshold for :meth:`find_bright_peaks`.
Parameters
----------
data : array-like
Data array.
sigma : float
Sigma for the threshold.
Returns
-------
threshold : float
Threshold based on good data, its median, and the given sigma.
"""
# remove masked elements
fdata = data[np.logical_not(np.ma.getmaskarray(data))]
# remove Inf or NaN
fdata = fdata[np.isfinite(fdata)]
# find the median
median = get_median(fdata)
# NOTE: for this method a good default sigma is 5.0
dist = np.fabs(fdata - median).mean()
threshold = median + sigma * dist
# NOTE: for this method a good default sigma is 2.0
## std = np.std(fdata - median)
## threshold = median + sigma * std
self.logger.debug("calc threshold=%f" % (threshold))
return threshold
def find_bright_peaks(self, data, threshold=None, sigma=5, radius=5):
"""Find bright peak candidates in in the given data.
Parameters
----------
data : array-like
Input data to find peaks from.
threshold : float or `None`
Detection threshold. Below this value, an object is not
considered a candidate. If not given, a default is calculated
using :meth:`get_threshold` with the given ``sigma``.
sigma : float
Sigma for the threshold.
radius : float
Pixel radius for determining local maxima. If the
desired objects are larger in size, specify a larger radius.
Returns
-------
peaks : list of tuple
A list of candidate object coordinate tuples ``(x, y)`` in data.
"""
if not have_scipy:
raise IQCalcError("Please install the 'scipy' module "
"to use this function")
if threshold is None:
# set threshold to default if none provided
threshold = self.get_threshold(data, sigma=sigma)
self.logger.debug("threshold defaults to %f (sigma=%f)" % (
threshold, sigma))
#self.logger.debug("filtering")
data_max = maximum_filter(data, radius)
maxima = (data == data_max)
diff = data_max > threshold
maxima[diff == 0] = 0
#self.logger.debug("finding")
labeled, num_objects = ndimage.label(maxima)
slices = ndimage.find_objects(labeled)
peaks = []
for dy, dx in slices:
xc = (dx.start + dx.stop - 1) / 2.0
yc = (dy.start + dy.stop - 1) / 2.0
# This is only an approximate center; use FWHM or centroid
# calculation to refine further
peaks.append((xc, yc))
self.logger.debug("peaks=%s" % (str(peaks)))
return peaks
def cut_region(self, x, y, radius, data):
"""Return a cut region.
Parameters
----------
x, y : int
Indices of central pixel.
radius : int
Half-width in both X and Y directions.
data : array-like
Data array to cut from.
Returns
-------
x0, y0 : int
Origin of the region.
arr : array-like
Cut region (a view, not copy).
"""
n = radius
ht, wd = data.shape
x0, x1 = max(0, x - n), min(wd - 1, x + n)
y0, y1 = max(0, y - n), min(ht - 1, y + n)
arr = data[y0:y1 + 1, x0:x1 + 1]
return (x0, y0, arr)
def cut_cross(self, x, y, radius, data):
"""Cut data vertically and horizontally at the given position
with the given radius.
Parameters
----------
x, y : int
Indices where vertical and horizontal cuts meet.
radius : float
Radius of both cuts.
data : array-like
Data array to cut from.
Returns
-------
x0 : int
Starting pixel of horizontal cut (in X).
y0 : int
Starting pixel of vertical cut (in Y).
xarr : array-like
Horizontal cut (in X).
yarr : array-like
Vertical cut (in Y).
"""
n = int(round(radius))
ht, wd = data.shape
x, y = int(round(x)), int(round(y))
x0, x1 = int(max(0, x - n)), int(min(wd - 1, x + n))
y0, y1 = int(max(0, y - n)), int(min(ht - 1, y + n))
xarr = data[y, x0:x1 + 1]
yarr = data[y0:y1 + 1, x]
return (x0, y0, xarr, yarr)
def brightness(self, x, y, radius, medv, data):
"""Return the brightness value found in a region defined by input
location and radius. Region is cut using :meth:`cut_region`.
Parameters
----------
x, y : int
Indices of central pixel.
radius : int
Half-width in both X and Y directions.
medv : float
Background to subtract off.
data : array-like
Data array.
Returns
-------
res : float
Brightness.
"""
x0, y0, arr = self.cut_region(x, y, radius, data)
arr2 = np.sort(arr.flat)
idx = int(len(arr2) * 0.8)
res = arr2[idx] - medv
return float(res)
def fwhm_data(self, x, y, data, radius=15, method_name='gaussian'):
"""Equivalent to :meth:`get_fwhm`."""
return self.get_fwhm(x, y, radius, data, method_name=method_name)
# Encircled and ensquared energies (EE)
def ensquared_energy(self, data):
"""Return a function of ensquared energy across pixel indices.
Ideally, data is already a masked array and is assumed to be centered.
"""
if not have_scipy:
raise IQCalcError("Please install the 'scipy' module "
"to use this function")
tot = data.sum()
ny, nx = data.shape
cen_x = int(nx // 2)
cen_y = int(ny // 2)
ee = []
if ny > nx:
n_max = ny
cen = cen_y
else:
n_max = nx
cen = cen_x
if n_max % 2 == 0: # Even
delta_i1 = -1
else: # Odd
delta_i1 = 0
xr = range(n_max - cen)
for i in xr:
ix1 = cen_x - i + delta_i1
if ix1 < 0:
ix1 = 0
ix2 = cen_x + i + 1
if ix2 > nx:
ix2 = nx
iy1 = cen_y - i + delta_i1
if iy1 < 0:
iy1 = 0
iy2 = cen_y + i + 1
if iy2 > ny:
iy2 = ny
ee.append(data[iy1:iy2, ix1:ix2].sum() / tot)
return interp1d(xr, ee, kind='cubic', bounds_error=False,
assume_sorted=True)
# This is adapted from poppy package. See licenses/POPPY_LICENSE.md .
def encircled_energy(self, data):
"""Return a function of encircled energy across pixel indices.
Ideally, data is already a masked array and is assumed to be centered.
"""
if not have_scipy:
raise IQCalcError("Please install the 'scipy' module "
"to use this function")
y, x = np.indices(data.shape, dtype=float)
cen = tuple((i - 1) * 0.5 for i in data.shape[::-1])
x -= cen[0]
y -= cen[1]
r = np.sqrt(x * x + y * y)
ind = np.argsort(r.flat)
sorted_r = r.flat[ind]
sorted_data = data.flat[ind]
# data is already masked
csim = sorted_data.cumsum(dtype=float)
sorted_r_int = sorted_r.astype(int)
deltar = sorted_r_int[1:] - sorted_r_int[:-1] # assume all radii represented
rind = np.where(deltar)[0]
ee = csim[rind] / sorted_data.sum() # Normalize
if isinstance(ee, np.ma.MaskedArray):
ee.set_fill_value(0)
ee = ee.filled()
return interp1d(range(ee.size), ee, kind='cubic', bounds_error=False,
assume_sorted=True)
# EVALUATION ON A FIELD
def evaluate_peaks(self, peaks, data, bright_radius=2, fwhm_radius=15,
fwhm_method='gaussian', ee_total_radius=10,
cb_fn=None, ev_intr=None):
"""Evaluate photometry for given peaks in data array.
Parameters
----------
peaks : list of tuple
List of ``(x, y)`` tuples containing indices of peaks.
data : array-like
Data array that goes with the given peaks.
bright_radius : int
**This is not used.**
fwhm_radius, fwhm_method
See :meth:`get_fwhm`.
ee_total_radius : float
Radius, in pixels, where encircled and ensquared energy fractions
are defined as 1.
cb_fn : func or `None`
If applicable, provide a callback function that takes a
`ginga.misc.Bunch.Bunch` containing the result for each peak.
It should not return anything.
ev_intr : :py:class:`threading.Event` or `None`
For threading, if applicable.
Returns
-------
objlist : list of `ginga.misc.Bunch.Bunch`
A list of successful results for the given peaks.
Each result contains the following keys:
* ``objx``, ``objy``: Fitted centroid from :meth:`get_fwhm`.
* ``pos``: A measure of distance from the center of the image.
* ``oid_x``, ``oid_y``: Center-of-mass centroid from :meth:`centroid`.
* ``fwhm_x``, ``fwhm_y``: Fitted FWHM from :meth:`get_fwhm`.
* ``fwhm``: Overall measure of fwhm as a single value.
* ``fwhm_radius``: Input FWHM radius.
* ``brightness``: Average peak value based on :meth:`get_fwhm` fits.
* ``elipse``: A measure of ellipticity.
* ``x``, ``y``: Input indices of the peak.
* ``skylevel``: Sky level estimated from median of data array and
``skylevel_magnification`` and ``skylevel_offset`` attributes.
* ``background``: Median of the input array.
* ``ensquared_energy_fn``: Function of ensquared energy for different pixel radii.
* ``encircled_energy_fn``: Function of encircled energy for different pixel radii.
"""
height, width = data.shape
hh = float(height) / 2.0
ht = float(height)
h4 = float(height) * 4.0
wh = float(width) / 2.0
wd = float(width)
w4 = float(width) * 4.0
# Find the median (sky/background) level
median = float(get_median(data))
#skylevel = median
# Old SOSS qualsize() applied this calculation to skylevel
skylevel = median * self.skylevel_magnification + self.skylevel_offset
# Form a list of objects and their characteristics
objlist = []
for x, y in peaks:
if ev_intr and ev_intr.is_set():
raise IQCalcError("Evaluation interrupted!")
# centroid calculation on local peak
oid_x, oid_y = None, None
try:
oid_x, oid_y = self.centroid(data, x, y, fwhm_radius)
except Exception as e:
# Error doing centroid
self.logger.debug("Error doing centroid on object at %.2f,%.2f: %s" % (
x, y, str(e)))
# Find the fwhm in x and y, using local peak
try:
res = self.fwhm_data(x, y, data, radius=fwhm_radius,
method_name=fwhm_method)
fwhm_x, fwhm_y, ctr_x, ctr_y, x_res, y_res = res
bx = x_res.fit_fn(round(ctr_x),
(ctr_x,) + tuple(x_res.fit_args[1:]))
by = y_res.fit_fn(round(ctr_y),
(ctr_y,) + tuple(y_res.fit_args[1:]))
bright = float((bx + by) / 2.0)
except Exception as e:
# Error doing FWHM, skip this object
self.logger.debug("Error doing FWHM on object at %.2f,%.2f: %s" % (
x, y, str(e)))
continue
self.logger.debug("orig=%f,%f ctr=%f,%f fwhm=%f,%f bright=%f" % (
x, y, ctr_x, ctr_y, fwhm_x, fwhm_y, bright))
# overall measure of fwhm as a single value
fwhm = (math.sqrt(fwhm_x * fwhm_x + fwhm_y * fwhm_y) *
(1.0 / math.sqrt(2.0)))
# calculate a measure of ellipticity
elipse = math.fabs(min(fwhm_x, fwhm_y) / max(fwhm_x, fwhm_y))
# calculate a measure of distance from center of image
dx = wh - ctr_x
dy = hh - ctr_y
dx2 = dx * dx / wd / w4
dy2 = dy * dy / ht / h4
if dx2 > dy2:
pos = 1.0 - dx2
else:
pos = 1.0 - dy2
# EE on background subtracted image
ee_sq_fn = None
ee_circ_fn = None
iy1 = int(ctr_y - ee_total_radius)
iy2 = int(ctr_y + ee_total_radius) + 1
ix1 = int(ctr_x - ee_total_radius)
ix2 = int(ctr_x + ee_total_radius) + 1
if iy1 < 0 or iy2 > height or ix1 < 0 or ix2 > width:
self.logger.debug("Error calculating EE on object at %.2f,%.2f: Box out of range with radius=%.2f" % (x, y, ee_total_radius))
else:
ee_data = data[iy1:iy2, ix1:ix2] - median
try:
ee_sq_fn = self.ensquared_energy(ee_data)
except Exception as e:
self.logger.debug("Error calculating ensquared energy on object at %.2f,%.2f: %s" % (x, y, str(e)))
try:
ee_circ_fn = self.encircled_energy(ee_data)
except Exception as e:
self.logger.debug("Error calculating encircled energy on object at %.2f,%.2f: %s" % (x, y, str(e)))
obj = Bunch.Bunch(objx=ctr_x, objy=ctr_y, pos=pos, | [
" oid_x=oid_x, oid_y=oid_y,"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
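For reference, the Moffat FWHM relation used in the IQCalc snippet above (fwhm = 2 * width * sqrt(2**(1/power) - 1)) can be exercised on its own; the helper name and sample numbers below are illustrative, not from the source:
import math
def moffat_fwhm(width, power):
    """FWHM of a Moffat profile with half-width `width` and exponent `power`."""
    return 2.0 * width * math.sqrt(2.0 ** (1.0 / power) - 1.0)
# e.g. width=2.5 px and power=2.5 give roughly 2.8 px FWHM
print(moffat_fwhm(2.5, 2.5))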
from __future__ import absolute_import, print_function, division
import sys, time, unittest
import numpy
import numpy as N
from six.moves import xrange
from theano.tests import unittest_tools as utt
from theano import function, Mode
import theano.tensor as T
from theano.tensor.nnet.conv import ConvOp
def flip(kern, kshp):
"flip the kernel as scipy.convolv2d do it flipped."
flip = N.zeros(kern.shape)
if len(kern.shape) == 2:
kern = kern.reshape(-1)
it = reversed(kern)
for i in xrange(kshp[0]):
for j in xrange(kshp[1]):
flip[i, j] = next(it)
elif len(kern.shape) == 3:
kern = kern.reshape(kern.shape[0], -1)
for k in xrange(kern.shape[0]):
it = reversed(kern[k, :])
for i in xrange(kshp[0]):
for j in xrange(kshp[1]): | [
" flip[k, i, j] = next(it)"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
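As an aside on the `flip` helper in the snippet above: for a 2-D kernel, filling row-major from the reversed flat array is the same as a 180-degree rotation, which NumPy writes as `k[::-1, ::-1]`. A small self-contained check with made-up values:
import numpy as np
k = np.arange(6, dtype=float).reshape(2, 3)
# same result as the loop in flip(): fill row-major from the reversed flat kernel
flipped = k.reshape(-1)[::-1].reshape(k.shape)
assert np.allclose(flipped, k[::-1, ::-1])  # i.e. a 180-degree rotation
print(flipped)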
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2015 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""
cocos.director.director is the singleton that creates and handles the main ``Window``
and manages the logic behind the ``Scenes``.
Initializing
------------
The first thing to do, is to initialize the ``director``::
from cocos.director import director
director.init( parameters )
This will initialize the director, and will create a display area
(a 640x480 window by default).
The parameters that are supported by director.init() are the same
parameters that are supported by pyglet.window.Window(), plus a few
cocos exclusive ones. They are all named parameters (kwargs).
See ``Director.init()`` for details.
Example::
director.init( width=800, height=600, caption="Hello World", fullscreen=True )
Running a Scene
----------------
Once you have initialized the director, you can run your first ``Scene``::
director.run( Scene( MyLayer() ) )
This will run a scene that has only 1 layer: ``MyLayer()``. You can run a scene
that has multiple layers. For more information about ``Layers`` and ``Scenes``
refer to the ``Layers`` and ``Scene`` documentation.
Once a scene is running you can do the following actions:
* ``director.replace( new_scene ):``
Replaces the running scene with the new_scene
You could also use a transition. For example:
director.replace( SplitRowsTransition( new_scene, duration=2 ) )
* ``director.push( new_scene ):``
The running scene will be pushed to a queue of scenes to run,
and new_scene will be executed.
* ``director.pop():``
Will pop out a scene from the queue, and it will replace the running scene.
* ``director.scene.end( end_value ):``
Finishes the current scene with an end value of ``end_value``. The next scene
to be run will be popped from the queue.
Other functions you can use are:
* ``director.get_window_size():``
Returns an (x,y) pair with the _logical_ dimensions of the display.
The display might have been resized, but coordinates are always relative
to this size. If you need the _physical_ dimensions, check the dimensions
of ``director.window``
* ``get_virtual_coordinates(self, x, y):``
Transforms coordinates that belongs the real (physical) window size, to
the coordinates that belongs to the virtual (logical) window. Returns
an x,y pair in logical coordinates.
The director also has some useful attributes:
* ``director.return_value``: The value returned by the last scene that
called ``director.scene.end``. This is useful to use scenes somewhat like
function calls: you push a scene to call it, and check the return value
when the director returns control to you.
* ``director.window``: This is the pyglet window handled by this director,
if you happen to need low level access to it.
* ``self.show_FPS``: You can set this to a boolean value to enable, disable
the framerate indicator.
* ``self.scene``: The scene currently active
"""
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
import sys
from os import getenv
import warnings
import pyglet
from pyglet import window, event
from pyglet import clock
# from pyglet import media
from pyglet.gl import *
import cocos
import cocos.audio
import cocos.custom_clocks
if hasattr(sys, 'is_cocos_sphinx') and sys.is_cocos_sphinx:
__all__ = ['director', 'Director', 'DefaultHandler']
else:
__all__ = ['director', 'DefaultHandler']
class DefaultHandler(object):
def __init__(self):
super(DefaultHandler, self).__init__()
self.wired = False
def on_key_press(self, symbol, modifiers):
if symbol == pyglet.window.key.F and (modifiers & pyglet.window.key.MOD_ACCEL):
director.window.set_fullscreen(not director.window.fullscreen)
return True
elif symbol == pyglet.window.key.P and (modifiers & pyglet.window.key.MOD_ACCEL):
import cocos.scenes.pause as pause
pause_sc = pause.get_pause_scene()
if pause:
director.push(pause_sc)
return True
elif symbol == pyglet.window.key.W and (modifiers & pyglet.window.key.MOD_ACCEL):
# import wired
if not self.wired:
glDisable(GL_TEXTURE_2D)
glPolygonMode(GL_FRONT, GL_LINE)
glPolygonMode(GL_BACK, GL_LINE)
# wired.wired.install()
# wired.wired.uset4F('color', 1.0, 1.0, 1.0, 1.0 )
self.wired = True
else:
glEnable(GL_TEXTURE_2D)
glPolygonMode(GL_FRONT, GL_FILL)
glPolygonMode(GL_BACK, GL_FILL)
self.wired = False
# wired.wired.uninstall()
return True
elif symbol == pyglet.window.key.X and (modifiers & pyglet.window.key.MOD_ACCEL):
director.show_FPS = not director.show_FPS
return True
elif symbol == pyglet.window.key.I and (modifiers & pyglet.window.key.MOD_ACCEL):
from .layer import PythonInterpreterLayer
if not director.show_interpreter:
if director.python_interpreter is None:
director.python_interpreter = cocos.scene.Scene(PythonInterpreterLayer())
director.python_interpreter.enable_handlers(True)
director.python_interpreter.on_enter()
director.show_interpreter = True
else:
director.python_interpreter.on_exit()
director.show_interpreter = False
return True
elif symbol == pyglet.window.key.S and (modifiers & pyglet.window.key.MOD_ACCEL):
import time
pyglet.image.get_buffer_manager().get_color_buffer().save('screenshot-%d.png' % (int(time.time())))
return True
if symbol == pyglet.window.key.ESCAPE:
director.pop()
return True
class Director(event.EventDispatcher):
"""Class that creates and handle the main Window and manages how
and when to execute the Scenes
You should not directly instantiate the class, instead you do::
from cocos.director import director
to access the only one Director instance.
"""
#: a dict with locals for the interactive python interpreter (fill with what you need)
interpreter_locals = {}
def init(self, *args, **kwargs):
"""
Initializes the Director creating the main window.
There are a few cocos exclusive parameters, the rest are the
standard pyglet parameters for pyglet.window.Window.__init__
This docstring only partially list the pyglet parameters; a full
list is available at pyglet Window API Reference at
http://pyglet.org/doc/api/pyglet.window.Window-class.html
:Parameters:
`autoscale` : bool
True: on window resizes, cocos will scale the view so that your
app doesn't need to handle resizes.
False: your app must include logic to deal with different window
sizes over the course of the session.
Defaults to False
`do_not_scale` : bool
Deprecated. The logical negation of autoscale
`audio_backend` : string
one in ['pyglet','sdl']. Defaults to 'pyglet' for legacy support.
`audio` : dict or None
None or a dict providing parameters for the sdl audio backend.
None: in this case a "null" audio system will be used, where all the
sdl sound operations will be no-ops. This may be useful if you do not
want to depend on SDL_mixer
A dictionary with string keys; these are the arguments for setting up
the audio output (sample rate and bit-width, channels, buffer size).
The key names/values should match the positional arguments of
http://www.pygame.org/docs/ref/mixer.html#pygame.mixer.init | [
" The default value is {}, which means sound enabled with default"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
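A minimal sketch of the scene flow the director docstring above describes, assuming cocos2d (and pyglet) are installed; `MyLayer` is just a placeholder subclass, not a class from the source:
import cocos
from cocos.director import director
class MyLayer(cocos.layer.Layer):
    pass  # a real game would add sprites and event handlers here
if __name__ == "__main__":
    director.init(width=800, height=600, caption="Hello World")
    director.run(cocos.scene.Scene(MyLayer()))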
# -*- coding:utf-8 -*-
# Copyright 2017 Xiaomi, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Ulric Qin'
from .bean import Bean
from rrd.config import MAINTAINERS
from rrd.model.portal.action import Action
from rrd.model.user import User
class Expression(Bean):
_tbl = 'expression'
_cols = 'id, expression, func, op, right_value, max_step, priority, note, action_id, create_user, pause'
def __init__(self, _id, expression, func, op, right_value, max_step, priority, note, action_id,
create_user, pause):
self.id = _id
self.expression = expression
self.func = func
self.op = op
self.right_value = right_value
self.max_step = max_step
self.priority = priority
self.note = note
self.action_id = action_id
self.create_user = create_user
self.pause = pause
self.action = None
@classmethod
def save_or_update(cls, expression_id, expression, func, op, right_value, uic_groups, max_step, priority, note, url,
callback, before_callback_sms, before_callback_mail,
after_callback_sms, after_callback_mail, login_user):
if not expression.startswith('each('):
return 'only support each expression. e.g. each(metric=? xx=yy)'
if not 'metric=' in expression:
return 'expression is invalid. e.g. each(metric=? xx=yy)'
left = expression.find('(')
right = expression.find(')')
if left <= 0:
return 'left parentheses ( not found'
if right <= 0:
return 'right parentheses ) not found'
in_parentheses = expression[left + 1:right]
in_parentheses = ' '.join(in_parentheses.replace(',', ' ').replace(';', ' ').split())
arr = in_parentheses.split()
arr = [item for item in arr if '=' in item]
if len(arr) < 2:
return 'expression is invalid. e.g. each(metric=? xx=yy)'
expression = 'each(%s)' % in_parentheses
if expression_id:
return cls.update_expression(expression_id, expression, func, op, right_value, uic_groups, max_step,
priority, note, url,
callback, before_callback_sms, before_callback_mail,
after_callback_sms, after_callback_mail)
else:
return cls.insert_expression(expression, func, op, right_value, uic_groups, max_step,
priority, note, url, callback,
before_callback_sms, before_callback_mail,
after_callback_sms, after_callback_mail, login_user)
@classmethod
def insert_expression(cls, content, func, op, right_value, uic_groups, max_step, priority, note, url,
callback, before_callback_sms, before_callback_mail,
after_callback_sms, after_callback_mail, user_name):
action_id = Action.insert({
'uic': uic_groups,
'url': url,
'callback': callback,
'before_callback_sms': before_callback_sms,
'before_callback_mail': before_callback_mail,
'after_callback_sms': after_callback_sms,
'after_callback_mail': after_callback_mail,
})
if not action_id:
return 'save action fail'
expression_id = Expression.insert({
'expression': content,
'func': func,
'op': op,
'right_value': right_value,
'max_step': max_step,
'priority': priority,
'note': note,
'action_id': action_id,
'create_user': user_name
})
if expression_id:
return ''
return 'save expression fail'
@classmethod
def update_expression(cls, expression_id, content, func, op, right_value, uic_groups, max_step, priority, note, url,
callback, before_callback_sms, before_callback_mail,
after_callback_sms, after_callback_mail):
e = Expression.get(expression_id)
if not e:
return 'no such expression %s' % expression_id
a = Action.get(e.action_id)
if not a: | [
" return 'no relation action'"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
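For illustration, the each(...) validation and normalization done in `save_or_update` above can be exercised standalone; the function name and sample expression below are made up for the sketch:
def normalize_each_expression(expression):
    # mirrors the validation/normalization steps in save_or_update()
    if not expression.startswith('each('):
        raise ValueError('only each(...) expressions are supported, e.g. each(metric=? xx=yy)')
    if 'metric=' not in expression:
        raise ValueError('expression must contain metric=, e.g. each(metric=? xx=yy)')
    left, right = expression.find('('), expression.find(')')
    if left <= 0 or right <= 0:
        raise ValueError('parentheses not found')
    # collapse commas/semicolons/extra whitespace into single spaces
    inner = ' '.join(expression[left + 1:right].replace(',', ' ').replace(';', ' ').split())
    if len([item for item in inner.split() if '=' in item]) < 2:
        raise ValueError('need at least two key=value terms')
    return 'each(%s)' % inner
print(normalize_each_expression('each(metric=cpu.idle, endpoint=host01)'))
# -> each(metric=cpu.idle endpoint=host01)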
'''
mali_remove_gaps.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python mali_remove_gaps.py --help
Type::
python mali_remove_gaps.py --help
for command line help.
Command line options
--------------------
'''
import sys
import string
import re
import getopt
import CGAT.Experiment as E
import CGAT.Genomics as Genomics
import CGAT.MaliIO as MaliIO
USAGE = """python %s [OPTIONS] < exonerate_output > filtered
Prune a nucleotide multiple alignment according to a master sequence.
1. Go in codon steps through the multiple alignment according
to the master sequence.
2. Remove all columns in other sequences that
1. fall out of frame
2. are incomplete codons
Version = $Id: mali_remove_gaps.py 2782 2009-09-10 11:40:29Z andreas $
Options:
-h, --help print this message.
-v, --verbose= loglevel.
-o, --file-output output
""" % sys.argv[0]
param_long_options = ["verbose=", "help", "file-output=", "version"]
param_short_options = "v:hm:e:p:c"
param_loglevel = 1
param_gap_char = "-"
param_mask_char = "x"
param_filename_output = None
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
try:
optlist, args = getopt.getopt(
sys.argv[1:], param_short_options, param_long_options)
except getopt.error, msg:
print USAGE, msg
sys.exit(2)
for o, a in optlist:
if o in ("-v", "--verbose"):
param_loglevel = int(a)
elif o in ("--version", ):
print "version="
sys.exit(0)
elif o in ("-h", "--help"):
print USAGE
sys.exit(0)
elif o in ("-o", "--file-output"):
param_filename_output = a
# 1. read multiple alignment in fasta format
mali, identifiers = MaliIO.readFasta(sys.stdin)
if param_loglevel >= 1:
print "# read mali with %i entries." % len(identifiers)
print E.GetHeader()
print E.GetParams()
# 1. remove gaps in multiple alignment
mali = MaliIO.removeGaps(mali)
if param_master:
frame_columns = GetFrameColumns(mali, param_master)
elif param_master_pattern:
columns = []
for id in identifiers:
if re.search(param_master_pattern, id):
columns += GetFrameColumns(mali, id)
if len(columns) == 0:
columns += GetFrameColumns(mali, identifiers[0])
# sort all columns by tuple. The "shortest" codon will be first (1,2,3)
# before (1,2,100)
columns.sort()
# select codons
frame_columns = []
last_codon = columns[0]
for codon in columns[1:]:
# skip identical codons
if codon == last_codon:
continue
# take first (shortest) codon in case of identical first residue
if codon[0] == last_codon[0]:
continue
# if not overlapping, keep
if codon[0] > last_codon[2]:
frame_columns.append(last_codon)
# if overlapping, but out of register: skip
last_codon = codon
frame_columns.append(last_codon)
# translate characters to upper/lower case according to exon info.
if exons:
for id in mali:
if id in exons:
mali[id] = AddExonInformation(
mali[id], exons[id], mask_char=param_mask_char)
if param_loglevel >= 1:
print "# found %i columns" % (len(frame_columns))
mask_chars = (string.upper(param_mask_char), string.lower(param_mask_char))
for id in mali.keys():
sequence = mali[id]
fragments = []
nstops, ncodons, naligned = 0, 0, 0
for a, b, c in frame_columns:
codon = sequence[a] + sequence[b] + sequence[c]
codon_is_aligned = False
codon_is_ok = True
for x in codon:
# a codon will be masked, if it either
# 1. contains a gap character
# 2. is an unaligned character, i.e.,
# exons and masked, or no exons and lowercase
residue_is_unaligned = (x == param_gap_char) or \
(not exons and x in string.lowercase) or \
(exons and x in mask_chars)
codon_is_aligned = codon_is_aligned or not residue_is_unaligned
codon_is_ok = codon_is_ok and not residue_is_unaligned
if codon_is_aligned:
naligned += 1
if codon_is_ok:
ncodons += 1
if string.upper(codon) in ("TAG", "TAA", "TGA"):
if param_remove_stops:
fragments.append(param_gap_char * 3)
else:
fragments.append(codon)
nstops += 1
else:
fragments.append(codon)
else:
fragments.append(param_gap_char * 3)
mali[id] = string.join(fragments, "")
if param_loglevel >= 1:
print "# sequence: %s\tpositions: %i\taligned:%i\tcodons: %i\t stops: %i" % (id, len(fragments), naligned, ncodons, nstops)
sys.stdout.flush()
for id in mali.keys():
if param_mark_codons:
a = mali[id]
f = lambda x: a[x:x + 3]
s = string.join([f(x) for x in range(0, len(a), 3)], " ")
else:
s = mali[id]
print ">%s\n%s" % (id, s)
if param_filename_translation:
outfile = open(param_filename_translation, "w")
for id in mali.keys():
outfile.write(">%s\n%s\n" %
(id, Genomics.TranslateDNA2Protein(mali[id])))
outfile.close()
| [
" print E.GetFooter()"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
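A self-contained sketch of the codon-masking loop in the script above (stop codons TAG/TAA/TGA replaced by gaps when removal is requested); the toy sequence and column list are invented:
GAP_CHAR = "-"
STOP_CODONS = ("TAG", "TAA", "TGA")
def mask_stops(sequence, frame_columns, remove_stops=True):
    fragments = []
    for a, b, c in frame_columns:
        codon = sequence[a] + sequence[b] + sequence[c]
        if remove_stops and codon.upper() in STOP_CODONS:
            fragments.append(GAP_CHAR * 3)  # gap out the stop codon
        else:
            fragments.append(codon)
    return "".join(fragments)
# columns (0,1,2), (3,4,5), ... for an ungapped toy sequence
seq = "ATGTAAGGC"
cols = [(i, i + 1, i + 2) for i in range(0, len(seq), 3)]
print(mask_stops(seq, cols))  # -> ATG---GGC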
# coding=utf-8
"""
This is the implementation of Copy-NET.
We start from the basic Seq2seq framework for an auto-encoder.
"""
import logging
import time
import numpy as np
import sys
import copy
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from experiments.config import setup_lcsts, setup_weibo, setup_syn, setup_bst
from emolga.utils.generic_utils import *
from emolga.models.covc_encdec import NRM
from emolga.models.encdec import NRM as NRM0
from emolga.dataset.build_dataset import deserialize_from_file
from collections import OrderedDict
from fuel import datasets
from fuel import transformers
from fuel import schemes
# setup = setup_lcsts
# setup = setup_syn
setup = setup_bst
def init_logging(logfile):
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(module)s: %(message)s',
datefmt='%m/%d/%Y %H:%M:%S' )
fh = logging.FileHandler(logfile)
# ch = logging.StreamHandler()
fh.setFormatter(formatter)
# ch.setFormatter(formatter)
# fh.setLevel(logging.INFO)
# ch.setLevel(logging.INFO)
# logging.getLogger().addHandler(ch)
logging.getLogger().addHandler(fh)
logging.getLogger().setLevel(logging.INFO)
return logging
# prepare logging.
tmark = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))
config = setup() # load settings.
for w in config:
print '{0}={1}'.format(w, config[w])
logger = init_logging(config['path_log'] + '/experiments.CopyLCSTS.id={}.log'.format(tmark))
n_rng = np.random.RandomState(config['seed'])
np.random.seed(config['seed'])
rng = RandomStreams(n_rng.randint(2 ** 30))
logger.info('Start!')
train_set, test_set, idx2word, word2idx = deserialize_from_file(config['dataset'])
if config['voc_size'] == -1:  # do not use unk
config['enc_voc_size'] = len(word2idx)
config['dec_voc_size'] = config['enc_voc_size']
else:
config['enc_voc_size'] = config['voc_size']
config['dec_voc_size'] = config['enc_voc_size']
samples = len(train_set['source'])
logger.info('build dataset done. ' +
'dataset size: {} ||'.format(samples) +
'vocabulary size = {0}/ batch size = {1}'.format(
config['dec_voc_size'], config['batch_size']))
def build_data(data):
# create fuel dataset.
dataset = datasets.IndexableDataset(indexables=OrderedDict([('source', data['source']),
('target', data['target']),
('target_c', data['target_c']),
]))
dataset.example_iteration_scheme \
= schemes.ShuffledExampleScheme(dataset.num_examples)
return dataset
train_data = build_data(train_set)
train_data_plain = zip(*(train_set['source'], train_set['target']))
test_data_plain = zip(*(test_set['source'], test_set['target']))
# train_data_plain = zip(*(train_set['source'], train_set['target']))
# test_data_plain = zip(*(test_set['source'], test_set['target']))
train_size = len(train_data_plain)
test_size = len(test_data_plain)
tr_idx = n_rng.permutation(train_size)[:2000].tolist()
ts_idx = n_rng.permutation(test_size )[:2000].tolist()
logger.info('load the data ok.')
notrain = False
# build the agent
if config['copynet']:
agent = NRM(config, n_rng, rng, mode=config['mode'],
use_attention=True, copynet=config['copynet'], identity=config['identity'])
else:
agent = NRM0(config, n_rng, rng, mode=config['mode'],
use_attention=True, copynet=config['copynet'], identity=config['identity'])
agent.build_()
if notrain:
agent.compile_('display')
else:
agent.compile_('all')
print 'compile ok.'
# load the model
# agent.load(config['path_h5'] +
# '/experiments.Copy{2}.id={0}.epoch={1}.pkl'.format('20160229-105153', 1, config['modelname']))
echo = 0
epochs = 10
skip = -1 # 25000
if echo > 0:
tmark = '20160229-105153' # '20160227-013418' # copynet multi-source model | [
" agent.load(config['path_h5'] + '/experiments.Copy{2}.id={0}.epoch={1}.pkl'.format(tmark, echo, config['modelname']))"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
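A toy sketch of the vocabulary-size handling in the script above, where `voc_size == -1` means "use the whole vocabulary"; the `word2idx` dict here is made up:
word2idx = {'<eol>': 0, '<unk>': 1, 'the': 2, 'cat': 3}
def resolve_voc_sizes(config, word2idx):
    if config['voc_size'] == -1:  # do not cap the vocabulary
        config['enc_voc_size'] = len(word2idx)
    else:
        config['enc_voc_size'] = config['voc_size']
    config['dec_voc_size'] = config['enc_voc_size']
    return config
print(resolve_voc_sizes({'voc_size': -1}, word2idx))
# -> {'voc_size': -1, 'enc_voc_size': 4, 'dec_voc_size': 4}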
#Copyright 2015 Daniel Gusenleitner, Stefano Monti
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""HTSeq module
This module contains functions for initializing all HTSeq-specific variables and
a wrapper that runs HTSeq with those parameters on a single sample.
In addition, it also contains functions to extract and write statistics and
a wrapper that calls an R script.
"""
from hydra_pkg import module_helper as MODULE_HELPER
from hydra_pkg import helper as HELPER
import os
import re
import subprocess
def init(param):
"""Initialization function that checks the all relevant tophat parameters
:Parameter param: dictionary that contains all general RNASeq pipeline parameters
"""
MODULE_HELPER.check_parameter(param, key='HTSeq_exec', dtype=str)
MODULE_HELPER.check_parameter(param, key='sam_exec', dtype=str)
MODULE_HELPER.check_parameter(param, key='HTSeq_t', dtype=str)
MODULE_HELPER.check_parameter(param,
key='HTSeq_r',
dtype=str,
allowed=['name',
'pos'])
MODULE_HELPER.check_parameter(param,
key='HTSeq_m',
dtype=str,
allowed=['union',
'intersection-strict',
'intersection-nonempty'])
MODULE_HELPER.check_parameter(param, key='HTSeq_id', dtype=str)
MODULE_HELPER.check_parameter(param, key='Rscript_exec', dtype=str)
def process_stat_files(param):
"""Copies all relevant files into the report directory and also extracts
the total number of reads from the bamqc output
:Parameter param: dictionary that contains all general RNASeq pipeline parameters
"""
if not os.path.exists(param['working_dir']+'report/htseq/'):
os.makedirs(param['working_dir']+'report/htseq/')
#get the files that are actually in the output directory
call = ['cp', '-R']
call.append(param['working_dir']+'results/htseq/htseq_stats.txt')
call.append(param['working_dir']+'report/htseq')
_, _ = subprocess.Popen(call,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
htseq_file = param['working_dir']+'results/htseq/htseq_stats.txt'
#extract table
table = []
filehandle = open(htseq_file)
#header
table.append(filehandle.readlines()[0].rstrip().split('\t'))
table[0] = table[0][1:]
filehandle.close()
#total number of aligned reads
tot_reads = param['bam_qc']['unique_aligned_reads']
counter = [0] * len(param['bam_qc']['unique_aligned_reads'])
filehandle = open(htseq_file)
for line in filehandle.readlines()[1:]:
cur_line = line.rstrip().split('\t')
cur_line[0] = re.sub(r'_',' ',cur_line[0])
if cur_line[0] != ' alignment not unique':
counter = [ct + int(cr) for ct, cr in zip(counter, cur_line[1:])]
perc = ([cur_line[0]]+
MODULE_HELPER.get_percentage(cur_line[1:],
tot_reads,
len(cur_line)-1))
table.append(perc)
filehandle.close()
assigned = [tot_reads[idx] - counter[idx] for idx in range(len(tot_reads))]
perc = ['feature'] + MODULE_HELPER.get_percentage(assigned,
tot_reads,
len(counter))
table.append(perc)
return table
def report(param):
"""Function that writes all HTSeq related statistics into the html report
:Parameter param: dictionary that contains all general RNASeq pipeline parameters
"""
#if there is no htseq directory in the report make one
htseq_dir = param['working_dir']+'report/htseq/'
if not os.path.exists(htseq_dir):
os.makedirs(htseq_dir)
out_file = param['working_dir']+'deliverables/htseq_raw_counts.txt'
#report only if there were actually results
if os.path.exists(out_file):
param['report'].write('<center><br><br><h2>HTSeq statistics</h2>')
table = process_stat_files(param)
MODULE_HELPER.create_sub_report(param, out_file, table, 'htseq', 'HTSeq')
MODULE_HELPER.plot_count_overview(param, 'htseq', table)
def finalize(param, input_files='count_files'):
"""This function is run after HTSeq is run on each sample. It collects all results
and puts them into a file
:Parameter param: dictionary that contains all general RNASeq pipeline parameters
:Parameter input_files: flag that indicates the input files
"""
HELPER.writeLog('Collecting HTSeq raw counts ... \n', param)
#extracts the counts from the htseq output | [
" import csv"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
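The percentage table built in `process_stat_files` above divides each per-sample count by that sample's total aligned reads. `MODULE_HELPER.get_percentage` itself is not shown in the source, so the version below is only an illustrative stand-in with invented numbers:
def get_percentage(counts, totals, n):
    # illustrative stand-in, NOT the real MODULE_HELPER.get_percentage
    return ['%.1f%%' % (100.0 * int(c) / t) for c, t in zip(counts[:n], totals)]
category = '__no_feature'
counts = ['1200', '800']        # per-sample HTSeq counts for this category
total_reads = [100000, 80000]   # unique aligned reads per sample (from bamqc)
row = [category.replace('_', ' ')] + get_percentage(counts, total_reads, len(counts))
print(row)  # -> ['  no feature', '1.2%', '1.0%']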
#!/usr/bin/env python
# PyRadmon - Python Radiance Monitoring Tool
# Copyright 2014 Albert Huang.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Data File Enumeration Library -
# library for enumerating all of the applicable data files in a
# directory, given certain conditions
#
import os
import sys
import datetime
import re
from dictr import *
from core import *
# Variables to determine what to look for!
# These are the default variables - the actual variables can be
# changed with the call to enumerate().
DATA_PATH_FORMAT = "MERRA2/%EXPERIMENT_ID%/obs/Y%YEAR4%/M%MONTH2%/D%DAY2%/H%HOUR2%/%EXPERIMENT_ID%.diag_%INSTRUMENT_SAT%_%DATA_TYPE%.%YEAR4%%MONTH2%%DAY2%_%HOUR2%z.txt"
EXPERIMENT_ID = "d5124_m2_jan91"
START_YEAR = "1991"
START_MONTH = "01"
START_DAY = "01"
START_HOUR = "00"
END_YEAR = "1991"
END_MONTH = "02"
END_DAY = "28"
END_HOUR = "18"
INSTRUMENT_SAT = "ssmi_f08"
DATA_TYPE = "anl|ges"
def make_subst_variable(year, month, day, hour, experiment_id, instrument_sat, data_type):
"""Create a dict with var substitution values for data_path_format.
Given information about the requested data, create a dictionary
with variables to be substituted as keys, and the respective
information from the given data information as values.
Variables are %VAR% variables for data_path_format, without the
percent signs ("%").
Variables include:
* %YEAR%, %YEAR4%, %YEAR2%
Current year of the data. Expressed as a 4 digit integer except
with %YEAR2%, which is expressed as a 2 digit integer from the
last 2 digits of the full year (abbreviated year). Sourced from
the argument `year`.
* %MONTH%, %MONTH2%
Current month. Expressed as a 2 digit integer. Sourced from the
argument `month`.
* %DAY%, %DAY2%
Current day. Expressed as a 2 digit integer. Sourced from the
argument `day`.
* %HOUR%, %HOUR2%
Current hour. Expressed as a 2 digit integer. Sourced from the
argument `hour`.
* %EXPERIMENT_ID%
Experiment ID of the data. Expressed as a string. Sourced from
the argument `experiment_id`.
* %INSTRUMENT_SAT%
Instrument/satellite ID of the data. Expressed as a string.
Sourced from the argument `instrument_sat`.
* %DATA_TYPE%
Data type of the data. Expressed as a string. Sourced from the
argument `data_type`.
Args:
year (int): An integer with the full 4 digit year.
month (int): An integer with the full 2 digit month.
day (int): An integer with the full 2 digit day.
hour (int): An integer with the full 2 digit hour.
experiment_id (str): A string with the experiment ID for the
data.
instrument_sat (str): A string with the instrument/satellite ID
for the data.
data_type (str): A string with the data type for the data.
(Typically one of the values in data.VALID_PREFIX, or
multiple values seperated by a pipe ("|").)
Returns:
dict: Dictonary with the keys being the data_path_format %VAR%
variables without the percent signs ("%"), and the values being
their respective values, provided from the data information
passed into this function.
"""
# Convert the date parts into strings and pad them with zeros!
syear = str(year).zfill(2)
smonth = str(month).zfill(2)
sday = str(day).zfill(2)
shour = str(hour).zfill(2)
# Quick sanity check
if (len(syear) != 4) or (len(smonth) != 2) or (len(sday) != 2) or (len(shour) != 2):
print "ERROR: Date is invalid!"
sys.exit(1)
# Build the dictionary!
subst_var = {}
subst_var["YEAR"] = syear
subst_var["YEAR4"] = syear
subst_var["YEAR2"] = syear[2:]
subst_var["MONTH"] = smonth
subst_var["MONTH2"] = smonth
subst_var["DAY"] = sday
subst_var["DAY2"] = sday
subst_var["HOUR"] = shour
subst_var["HOUR2"] = shour
subst_var["EXPERIMENT_ID"] = experiment_id
subst_var["INSTRUMENT_SAT"] = instrument_sat
subst_var["DATA_TYPE"] = data_type
# Return the final dictionary
return subst_var
def path_substitute(path_format, variables):
"""Substitute variables in the path format template, and return.
Given a variables dictionary (from :py:func:`make_subst_variable`)
and the data path format template, substitute variables from the
dictionary into the template, and return the resulting path string.
Args:
path_format (str): The data path format template describing
where the data files are located, indicated as a string.
variables (dict): A dictionary whose keys are the %VAR%
variables without the percent signs ("%"), and whose values
are the respective values to the %VAR% variables.
Returns:
str: Path string with all of the supported %VAR% variables
substituted in. For more information about which %VAR%
variables are substituted, see :py:func:`make_subst_variable`
help.
"""
# Copy the path_format into final_path
final_path = path_format
# Iterate the variables dict's keys
for variable in variables:
# Create a case-insensitive regex...
var_re = re.compile(re.escape('%' + variable + '%'), re.IGNORECASE)
# ...and perform the substitution!
final_path = var_re.sub(variables[variable], final_path)
# Return the substituted path!
return final_path
def enumerate(**opts):
"""Returns a list of files that matches the given search range.
Searches a given folder and returns a list of files that matches
the given search range.
Arguments are keyword arguments; some, none, or all of them may
be specified. For arguments not specified, the default capital
letter version of the variable will be used instead. See the
source file (enumerate.py) for capital variable defaults.
Args:
None
Keyword Args:
data_path_format (str): The data path format template
describing where the data files are located, indicated as a
string.
experiment_id (str): The experiment ID, indicated as a string.
start_year (str): The start year, indicated as a string.
start_month (str): The start month, indicated as a string.
start_day (str): The start day, indicated as a string.
start_hour (str): The start hour, indicated as a string.
end_year (str): The end year, indicated as a string.
end_month (str): The end month, indicated as a string.
end_day (str): The end day, indicated as a string.
end_hour (str): The end hour, indicated as a string
instrument_sat (str or list of str): Instrument satellite,
indicated as a string or array of strings.
data_type (str): "anl", "ges", or "anl|ges" string to indicate
analyzed or guessed data.
time_delta (:py:class:`datetime.timedelta`):
:py:class:`datetime.timedelta` object to increment the date
with. Default is one hour.
For all variables except time_delta, the default value is the
capitalized global variable version specified in this file.
Returns:
list of dict: A list with the list of file dicts that matches
the given search range. Each file is returned as a dictionary,
with 'instrument_sat' and 'type' of the file specified in the
dict, along with the file name 'filename'.
Raises:
Exception(...) - error exception when either:
- the directory specified does not exist
- file validation failed and ALLOW_WARN_PASS is False
"""
# Depending on the inputs, assign variables to input or defaults!
data_path_format = opts["data_path_format"] if "data_path_format" in opts \
else DATA_PATH_FORMAT
experiment_id = opts["experiment_id"] if "experiment_id" in opts \
else EXPERIMENT_ID
start_year = opts["start_year"] if "start_year" in opts \
else START_YEAR
start_month = opts["start_month"] if "start_month" in opts \
else START_MONTH
start_day = opts["start_day"] if "start_day" in opts \
else START_DAY
start_hour = opts["start_hour"] if "start_hour" in opts \
else START_HOUR
end_year = opts["end_year"] if "end_year" in opts \
else END_YEAR
end_month = opts["end_month"] if "end_month" in opts \
else END_MONTH
end_day = opts["end_day"] if "end_day" in opts \
else END_DAY
end_hour = opts["end_hour"] if "end_hour" in opts \
else END_HOUR
instrument_sat = opts["instrument_sat"] if "instrument_sat" in opts \
else INSTRUMENT_SAT
data_type = opts["data_type"] if "data_type" in opts \
else DATA_TYPE
time_delta = opts["time_delta"] if "time_delta" in opts \
else None
# Split up the data types, as necessary. (Basically, are there
# pipes ("|") in the data type definition?)
if data_type:
if len(data_type.split('|')) > 1:
data_type = data_type.split('|')
else:
data_type = [ data_type ]
else:
data_type = [ "" ]
# Make reference datetimes
cur_date = datetime.datetime(int(start_year), int(start_month), int(start_day), int(start_hour))
end_date = datetime.datetime(int(end_year), int(end_month), int(end_day), int(end_hour))
# Now loop and generate a list of files to read!
files_to_read = []
# Data statistics variables:
# Available instrument/satellite IDs:
available_instrument_sat = []
# Available data types:
available_data_type = []
# Interval counter:
# (amount of time before a measurement was found)
interval_count = 0
# Measurement counter:
# (times that a measurement was made when a file was found)
interval_measurements = 0
# Total files found
total_files = 0
# Files found that met the criteria
criteria_total_files = 0
# Average interval before finding a measurement
average_interval = 0
# Check for custom time delta. If it's custom, let the user know!
if time_delta:
info("Using custom delta for file enumeration.")
while 1:
# Check if we meet criteria!
if (cur_date <= end_date):
# Rebuild formatted parts - first, convert the date parts
# into strings and pad them with zeros!
syear = str(cur_date.year).zfill(2)
smonth = str(cur_date.month).zfill(2)
sday = str(cur_date.day).zfill(2)
shour = str(cur_date.hour).zfill(2)
# Loop through each data type!
for indv_data_type in data_type:
# Create a substitution dictionary given criteria
# information
subs_var = make_subst_variable(syear, smonth, sday, shour, experiment_id, instrument_sat, indv_data_type)
# Then go ahead and substitute those variables in from
# the dictionary!
file_path = path_substitute(data_path_format, subs_var)
if check_file(file_path):
# Success! Calculate the interval average!
average_interval = ((average_interval * interval_measurements) + interval_count) / (interval_measurements + 1)
# Reset interval count and increment measurement
# count.
interval_count = 0
interval_measurements += 1
# Add new entry
newdatdict = { "instrument_sat" : instrument_sat, "type" : indv_data_type, "filename" : file_path, "date" : cur_date }
# BUGFIX: If using minutes or less, this will cause
# duplicate entries. Check to make sure we're not
# adding dups!
if not newdatdict in files_to_read:
files_to_read.append(newdatdict)
| [
" if not instrument_sat in available_instrument_sat:"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
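A self-contained sketch of the case-insensitive %VAR% substitution that `path_substitute` performs in the module above; the format string and values are illustrative only:
import re
def substitute(path_format, variables):
    # replace each %KEY% token, case-insensitively, with its value
    for name, value in variables.items():
        path_format = re.compile(re.escape('%' + name + '%'),
                                 re.IGNORECASE).sub(value, path_format)
    return path_format
fmt = "obs/Y%YEAR4%/M%MONTH2%/D%DAY2%/diag_%INSTRUMENT_SAT%.%YEAR4%%MONTH2%%DAY2%.txt"
print(substitute(fmt, {"YEAR4": "1991", "MONTH2": "01", "DAY2": "01",
                       "INSTRUMENT_SAT": "ssmi_f08"}))
# -> obs/Y1991/M01/D01/diag_ssmi_f08.19910101.txt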
"""
sentry.models.groupassignee
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
import six
from collections import defaultdict
from django.conf import settings
from django.db import models
from django.utils import timezone
from sentry.db.models import FlexibleForeignKey, Model, sane_repr, \
BaseManager
from sentry.models.activity import Activity
from sentry.signals import issue_assigned
def get_user_project_ids(users):
"""
Given a list of users, return a dict where keys are user_ids
and values are a set of the project_ids the user is a member of
"""
from sentry.models import OrganizationMemberTeam, ProjectTeam
user_teams = list(OrganizationMemberTeam.objects.filter(
organizationmember__user__in=users,
is_active=True,
).values('organizationmember__user', 'team'))
# team_id to list of projects
projects_by_team = defaultdict(set)
for tp in ProjectTeam.objects.filter(team__in=[ut['team'] for ut in user_teams]):
projects_by_team[tp.team_id].add(tp.project_id)
# user_id to projects
projects_by_user = defaultdict(set)
for ut in user_teams:
projects_by_user[ut['organizationmember__user']].update(projects_by_team[ut['team']])
return projects_by_user
def sync_group_assignee_inbound(integration, email, external_issue_key, assign=True):
"""
Given an integration, user email address and an external issue key,
assign linked groups to matching users. Checks project membership.
Returns a list of groups that were successfully assigned.
"""
from sentry.models import Group, UserEmail, User
logger = logging.getLogger('sentry.integrations.%s' % integration.provider)
orgs_with_sync_enabled = []
for org_id in integration.organizations.values_list('id', flat=True):
installation = integration.get_installation(org_id)
if installation.should_sync('inbound_assignee'):
orgs_with_sync_enabled.append(org_id)
affected_groups = list(
Group.objects.get_groups_by_external_issue(
integration, external_issue_key,
).filter(project__organization_id__in=orgs_with_sync_enabled),
)
if not affected_groups:
return []
if not assign:
for group in affected_groups:
GroupAssignee.objects.deassign(group)
return affected_groups
users = {u.id: u for u in User.objects.filter(
id__in=UserEmail.objects.filter(
is_verified=True,
email=email,
).values_list('user_id', flat=True),
)}
projects_by_user = get_user_project_ids(users.values())
groups_assigned = []
for group in affected_groups:
try:
user_id = [
user_id for user_id, projects in projects_by_user.items()
if group.project_id in projects
][0]
except IndexError:
logger.info(
'assignee-not-found-inbound',
extra={
'integration_id': integration.id,
'email': email,
'issue_key': external_issue_key,
}
)
else:
user = users[user_id]
GroupAssignee.objects.assign(group, user)
groups_assigned.append(group)
return groups_assigned
def sync_group_assignee_outbound(group, user_id, assign=True):
from sentry.tasks.integrations import sync_assignee_outbound
from sentry.models import GroupLink
external_issue_ids = GroupLink.objects.filter(
project_id=group.project_id,
group_id=group.id,
linked_type=GroupLink.LinkedType.issue,
).values_list('linked_id', flat=True)
for external_issue_id in external_issue_ids:
sync_assignee_outbound.apply_async(
kwargs={
'external_issue_id': external_issue_id,
'user_id': user_id,
'assign': assign,
}
)
class GroupAssigneeManager(BaseManager):
def assign(self, group, assigned_to, acting_user=None):
from sentry import features
from sentry.models import User, Team, GroupSubscription, GroupSubscriptionReason
GroupSubscription.objects.subscribe_actor(
group=group,
actor=assigned_to,
reason=GroupSubscriptionReason.assigned,
)
if isinstance(assigned_to, User):
assignee_type = 'user'
other_type = 'team'
elif isinstance(assigned_to, Team):
assignee_type = 'team'
other_type = 'user'
else:
raise AssertionError('Invalid type to assign to: %r' % type(assigned_to))
now = timezone.now()
assignee, created = GroupAssignee.objects.get_or_create(
group=group,
defaults={
'project': group.project,
assignee_type: assigned_to,
'date_added': now,
}
)
if not created:
affected = GroupAssignee.objects.filter(
group=group,
).exclude(**{
assignee_type: assigned_to,
}).update(**{
assignee_type: assigned_to,
other_type: None,
'date_added': now,
})
else:
affected = True
issue_assigned.send(project=group.project, group=group, sender=acting_user)
if affected:
activity = Activity.objects.create(
project=group.project,
group=group,
type=Activity.ASSIGNED,
user=acting_user,
data={
'assignee': six.text_type(assigned_to.id),
'assigneeEmail': getattr(assigned_to, 'email', None),
'assigneeType': assignee_type,
},
)
activity.send_notification()
# sync Sentry assignee to external issues
if assignee_type == 'user' and features.has(
'organizations:internal-catchall', group.organization, actor=acting_user):
sync_group_assignee_outbound(group, assigned_to.id, assign=True)
def deassign(self, group, acting_user=None):
from sentry import features
affected = GroupAssignee.objects.filter(
group=group,
)[:1].count()
GroupAssignee.objects.filter(
group=group,
).delete()
if affected > 0:
activity = Activity.objects.create(
project=group.project,
group=group,
type=Activity.UNASSIGNED,
user=acting_user,
)
activity.send_notification()
# sync Sentry assignee to external issues
if features.has('organizations:internal-catchall',
group.organization, actor=acting_user):
sync_group_assignee_outbound(group, None, assign=False)
class GroupAssignee(Model):
"""
Identifies an assignment relationship between a user/team and an
aggregated event (Group).
"""
__core__ = False
objects = GroupAssigneeManager()
| [
" project = FlexibleForeignKey('sentry.Project', related_name=\"assignee_set\")"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
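The two-step grouping in `get_user_project_ids` above can be sketched with plain dicts standing in for the ORM query results; the ids below are made up:
from collections import defaultdict
user_teams = [{'user': 1, 'team': 10}, {'user': 1, 'team': 11},
              {'user': 2, 'team': 11}]
project_teams = [{'team': 10, 'project': 100},
                 {'team': 11, 'project': 101},
                 {'team': 11, 'project': 102}]
# team -> set of project ids
projects_by_team = defaultdict(set)
for pt in project_teams:
    projects_by_team[pt['team']].add(pt['project'])
# user -> set of project ids, via that user's teams
projects_by_user = defaultdict(set)
for ut in user_teams:
    projects_by_user[ut['user']].update(projects_by_team[ut['team']])
print(dict(projects_by_user))  # -> {1: {100, 101, 102}, 2: {101, 102}}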
from __future__ import absolute_import, division, unicode_literals
from future.builtins import int, open, str
from hashlib import md5
import os
try:
from urllib.parse import quote, unquote
except ImportError:
from urllib import quote, unquote
from django.apps import apps
from django.contrib import admin
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.sites.models import Site
from django.core.files import File
from django.core.files.storage import default_storage
from django.core.urlresolvers import reverse, resolve, NoReverseMatch
from django.db.models import Model
from django.template import Context, Node, Template, TemplateSyntaxError
from django.template.base import (TOKEN_BLOCK, TOKEN_COMMENT,
TOKEN_TEXT, TOKEN_VAR, TextNode)
from django.template.defaultfilters import escape
from django.template.loader import get_template
from django.utils import translation
from django.utils.html import strip_tags
from django.utils.text import capfirst
from mezzanine.conf import settings
from mezzanine.core.fields import RichTextField
from mezzanine.core.forms import get_edit_form
from mezzanine.utils.cache import nevercache_token, cache_installed
from mezzanine.utils.html import decode_entities
from mezzanine.utils.importing import import_dotted_path
from mezzanine.utils.sites import current_site_id, has_site_permission
from mezzanine.utils.urls import admin_url
from mezzanine.utils.views import is_editable
from mezzanine import template
register = template.Library()
if "compressor" in settings.INSTALLED_APPS:
@register.tag
def compress(parser, token):
"""
Shadows django-compressor's compress tag so it can be
loaded from ``mezzanine_tags``, allowing us to provide
a dummy version when django-compressor isn't installed.
"""
from compressor.templatetags.compress import compress
return compress(parser, token)
else:
@register.to_end_tag
def compress(parsed, context, token):
"""
Dummy tag for fallback when django-compressor isn't installed.
"""
return parsed
if cache_installed():
@register.tag
def nevercache(parser, token):
"""
Tag for two phased rendering. Converts enclosed template
code and content into text, which gets rendered separately
in ``mezzanine.core.middleware.UpdateCacheMiddleware``.
This is to bypass caching for the enclosed code and content.
"""
text = []
end_tag = "endnevercache"
tag_mapping = {
TOKEN_TEXT: ("", ""),
TOKEN_VAR: ("{{", "}}"),
TOKEN_BLOCK: ("{%", "%}"),
TOKEN_COMMENT: ("{#", "#}"),
}
delimiter = nevercache_token()
while parser.tokens:
token = parser.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == end_tag:
return TextNode(delimiter + "".join(text) + delimiter)
start, end = tag_mapping[token.token_type]
text.append("%s%s%s" % (start, token.contents, end))
parser.unclosed_block_tag(end_tag)
else:
@register.to_end_tag
def nevercache(parsed, context, token):
"""
Dummy fallback ``nevercache`` for when caching is not
configured.
"""
return parsed
@register.simple_tag(takes_context=True)
def fields_for(context, form, template="includes/form_fields.html"):
"""
Renders fields for a form with an optional template choice.
"""
context["form_for_fields"] = form
return get_template(template).render(Context(context))
@register.inclusion_tag("includes/form_errors.html", takes_context=True)
def errors_for(context, form):
"""
Renders an alert if the form has any errors.
"""
context["form"] = form
return context
@register.filter
def sort_by(items, attr):
"""
General sort filter - sorts by either attribute or key.
"""
def key_func(item):
try:
return getattr(item, attr)
except AttributeError:
try:
return item[attr]
except TypeError:
getattr(item, attr) # Reraise AttributeError
return sorted(items, key=key_func)
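# Hedged usage sketch for the filter above ("object_list" and "title" are
# illustrative names, not taken from this file); it works for both model
# instances (attribute lookup) and dicts (key lookup):
#   {% load mezzanine_tags %}
#   {% for obj in object_list|sort_by:"title" %} ... {% endfor %}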
@register.filter
def is_installed(app_name):
"""
Returns ``True`` if the given app name is in the
``INSTALLED_APPS`` setting.
"""
from warnings import warn
warn("The is_installed filter is deprecated. Please use the tag "
"{% ifinstalled appname %}{% endifinstalled %}")
return app_name in settings.INSTALLED_APPS
@register.tag
def ifinstalled(parser, token):
"""
Old-style ``if`` tag that renders contents if the given app is
installed. The main use case is:
{% ifinstalled app_name %}
{% include "app_name/template.html" %}
{% endifinstalled %}
so we need to manually pull out all tokens if the app isn't
installed, since if we used a normal ``if`` tag with a False arg,
the include tag will still try and find the template to include.
"""
try:
tag, app = token.split_contents()
except ValueError:
raise TemplateSyntaxError("ifinstalled should be in the form: "
"{% ifinstalled app_name %}"
"{% endifinstalled %}")
end_tag = "end" + tag
unmatched_end_tag = 1
if app.strip("\"'") not in settings.INSTALLED_APPS:
while unmatched_end_tag:
token = parser.tokens.pop(0)
if token.token_type == TOKEN_BLOCK:
block_name = token.contents.split()[0]
if block_name == tag:
unmatched_end_tag += 1
if block_name == end_tag:
unmatched_end_tag -= 1
parser.tokens.insert(0, token)
nodelist = parser.parse((end_tag,))
parser.delete_first_token()
class IfInstalledNode(Node):
def render(self, context):
return nodelist.render(context)
return IfInstalledNode()
@register.render_tag
def set_short_url_for(context, token):
"""
Sets the ``short_url`` attribute of the given model for share
links in the template.
"""
obj = context[token.split_contents()[1]]
obj.set_short_url()
return ""
@register.simple_tag
def gravatar_url(email, size=32):
"""
Return the full URL for a Gravatar given an email hash.
"""
bits = (md5(email.lower().encode("utf-8")).hexdigest(), size)
return "//www.gravatar.com/avatar/%s?s=%s&d=identicon&r=PG" % bits
@register.to_end_tag
def metablock(parsed):
"""
Remove HTML tags, entities and superfluous characters from
meta blocks.
"""
parsed = " ".join(parsed.replace("\n", "").split()).replace(" ,", ",")
return escape(strip_tags(decode_entities(parsed)))
@register.inclusion_tag("includes/pagination.html", takes_context=True)
def pagination_for(context, current_page, page_var="page", exclude_vars=""):
"""
Include the pagination template and data for persisting querystring
in pagination links. Can also contain a comma separated string of
var names in the current querystring to exclude from the pagination
links, via the ``exclude_vars`` arg.
"""
querystring = context["request"].GET.copy()
exclude_vars = [v for v in exclude_vars.split(",") if v] + [page_var]
for exclude_var in exclude_vars:
if exclude_var in querystring:
del querystring[exclude_var]
querystring = querystring.urlencode()
return {
"current_page": current_page,
"querystring": querystring,
"page_var": page_var,
}
@register.inclusion_tag("includes/search_form.html", takes_context=True)
def search_form(context, search_model_names=None):
"""
Includes the search form with a list of models to use as choices
for filtering the search by. Models should be a string with models
in the format ``app_label.model_name`` separated by spaces. The
string ``all`` can also be used, in which case the models defined
by the ``SEARCH_MODEL_CHOICES`` setting will be used.
"""
if not search_model_names or not settings.SEARCH_MODEL_CHOICES:
search_model_names = []
elif search_model_names == "all":
search_model_names = list(settings.SEARCH_MODEL_CHOICES)
else:
search_model_names = search_model_names.split(" ")
search_model_choices = []
for model_name in search_model_names:
try:
model = apps.get_model(*model_name.split(".", 1))
except LookupError:
pass
else:
verbose_name = model._meta.verbose_name_plural.capitalize()
search_model_choices.append((verbose_name, model_name))
context["search_model_choices"] = sorted(search_model_choices)
return context
@register.simple_tag
def thumbnail(image_url, width, height, upscale=True, quality=95, left=.5,
top=.5, padding=False, padding_color="#fff"):
"""
Given the URL to an image, resizes the image using the given width
and height on the first time it is requested, and returns the URL
to the new resized image. If width or height are zero then original
ratio is maintained. When ``upscale`` is False, images smaller than
the given size will not be grown to fill that size. The given width
and height thus act as maximum dimensions.
"""
if not image_url:
return ""
try:
from PIL import Image, ImageFile, ImageOps
except ImportError:
return ""
image_url = unquote(str(image_url)).split("?")[0]
if image_url.startswith(settings.MEDIA_URL):
image_url = image_url.replace(settings.MEDIA_URL, "", 1)
image_dir, image_name = os.path.split(image_url)
image_prefix, image_ext = os.path.splitext(image_name)
filetype = {".png": "PNG", ".gif": "GIF"}.get(image_ext, "JPEG")
thumb_name = "%s-%sx%s" % (image_prefix, width, height)
if not upscale:
thumb_name += "-no-upscale"
if left != .5 or top != .5:
left = min(1, max(0, left))
top = min(1, max(0, top))
thumb_name = "%s-%sx%s" % (thumb_name, left, top)
thumb_name += "-padded-%s" % padding_color if padding else ""
thumb_name = "%s%s" % (thumb_name, image_ext)
# `image_name` is used here for the directory path, as each image
# requires its own sub-directory using its own name - this is so
# we can consistently delete all thumbnails for an individual
# image, which is something we do in filebrowser when a new image
# is written, allowing us to purge any previously generated
# thumbnails that may match a new image name.
thumb_dir = os.path.join(settings.MEDIA_ROOT, image_dir,
settings.THUMBNAILS_DIR_NAME, image_name)
if not os.path.exists(thumb_dir):
try:
os.makedirs(thumb_dir)
except OSError:
pass
thumb_path = os.path.join(thumb_dir, thumb_name)
thumb_url = "%s/%s/%s" % (settings.THUMBNAILS_DIR_NAME,
quote(image_name.encode("utf-8")),
quote(thumb_name.encode("utf-8")))
image_url_path = os.path.dirname(image_url)
if image_url_path:
thumb_url = "%s/%s" % (image_url_path, thumb_url)
try:
thumb_exists = os.path.exists(thumb_path)
except UnicodeEncodeError:
        # The image was saved to a filesystem with utf-8 support, but
        # somehow the locale has changed and the filesystem no longer
        # supports utf-8.
from mezzanine.core.exceptions import FileSystemEncodingChanged
raise FileSystemEncodingChanged()
if thumb_exists:
# Thumbnail exists, don't generate it.
return thumb_url
elif not default_storage.exists(image_url):
# Requested image does not exist, just return its URL.
return image_url
f = default_storage.open(image_url)
try:
image = Image.open(f)
except:
# Invalid image format.
return image_url
image_info = image.info
to_width = int(width)
to_height = int(height)
from_width = image.size[0]
from_height = image.size[1]
if not upscale:
to_width = min(to_width, from_width)
to_height = min(to_height, from_height)
# Set dimensions.
if to_width == 0:
to_width = from_width * to_height // from_height
elif to_height == 0:
to_height = from_height * to_width // from_width
if image.mode not in ("P", "L", "RGBA"):
try:
image = image.convert("RGBA")
except:
return image_url
# Required for progressive jpgs.
ImageFile.MAXBLOCK = 2 * (max(image.size) ** 2)
# Padding.
if padding and to_width and to_height:
from_ratio = float(from_width) / from_height
to_ratio = float(to_width) / to_height
pad_size = None
if to_ratio < from_ratio:
pad_height = int(to_height * (float(from_width) / to_width))
pad_size = (from_width, pad_height)
pad_top = (pad_height - from_height) // 2
pad_left = 0
elif to_ratio > from_ratio:
pad_width = int(to_width * (float(from_height) / to_height))
pad_size = (pad_width, from_height)
pad_top = 0
pad_left = (pad_width - from_width) // 2
if pad_size is not None:
pad_container = Image.new("RGBA", pad_size, padding_color)
pad_container.paste(image, (pad_left, pad_top))
image = pad_container
# Create the thumbnail.
to_size = (to_width, to_height)
to_pos = (left, top)
try:
image = ImageOps.fit(image, to_size, Image.ANTIALIAS, 0, to_pos)
image = image.save(thumb_path, filetype, quality=quality, **image_info)
# Push a remote copy of the thumbnail if MEDIA_URL is
# absolute.
if "://" in settings.MEDIA_URL:
with open(thumb_path, "rb") as f:
default_storage.save(thumb_url, File(f))
except Exception:
# If an error occurred, a corrupted image may have been saved,
# so remove it, otherwise the check for it existing will just
# return the corrupted image next time it's requested.
try:
os.remove(thumb_path)
except Exception:
pass
return image_url
return thumb_url
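# Hedged usage sketch ("page.featured_image" is an illustrative field name, not
# from this file):
#   <img src="{{ MEDIA_URL }}{% thumbnail page.featured_image 200 100 %}">
# generates the resized copy under THUMBNAILS_DIR_NAME on the first request and
# returns its URL relative to MEDIA_URL.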
@register.inclusion_tag("includes/editable_loader.html", takes_context=True)
def editable_loader(context):
"""
Set up the required JS/CSS for the in-line editing toolbar and controls.
"""
user = context["request"].user
context["has_site_permission"] = has_site_permission(user)
if settings.INLINE_EDITING_ENABLED and context["has_site_permission"]:
t = get_template("includes/editable_toolbar.html")
context["REDIRECT_FIELD_NAME"] = REDIRECT_FIELD_NAME
try:
context["editable_obj"]
except KeyError:
context["editable_obj"] = context.get("page", None)
context["toolbar"] = t.render(Context(context))
context["richtext_media"] = RichTextField().formfield().widget.media
return context
@register.filter
def richtext_filters(content):
"""
Takes a value edited via the WYSIWYG editor, and passes it through
each of the functions specified by the RICHTEXT_FILTERS setting.
"""
filter_names = settings.RICHTEXT_FILTERS
if not filter_names:
try:
filter_names = [settings.RICHTEXT_FILTER]
except AttributeError:
pass
else:
from warnings import warn
warn("The `RICHTEXT_FILTER` setting is deprecated in favor of "
"the new plural setting `RICHTEXT_FILTERS`.")
for filter_name in filter_names:
filter_func = import_dotted_path(filter_name)
content = filter_func(content)
return content
@register.filter
def richtext_filter(content):
"""
Deprecated version of richtext_filters above.
"""
from warnings import warn
warn("The `richtext_filter` template tag is deprecated in favor of "
"the new plural tag `richtext_filters`.")
return richtext_filters(content)
@register.to_end_tag
def editable(parsed, context, token):
"""
Add the required HTML to the parsed content for in-line editing,
such as the icon and edit form if the object is deemed to be
editable - either it has an ``editable`` method which returns
``True``, or the logged in user has change permissions for the
model.
"""
def parse_field(field):
field = field.split(".")
obj = context.get(field.pop(0), None)
attr = field.pop()
while field:
obj = getattr(obj, field.pop(0))
if callable(obj):
# Allows {% editable page.get_content_model.content %}
obj = obj()
return obj, attr
fields = [parse_field(f) for f in token.split_contents()[1:]]
if fields:
fields = [f for f in fields if len(f) == 2 and f[0] is fields[0][0]]
if not parsed.strip():
try:
parsed = "".join([str(getattr(*field)) for field in fields])
except AttributeError:
pass
if settings.INLINE_EDITING_ENABLED and fields and "request" in context:
obj = fields[0][0]
if isinstance(obj, Model) and is_editable(obj, context["request"]):
field_names = ",".join([f[1] for f in fields])
context["editable_form"] = get_edit_form(obj, field_names)
context["original"] = parsed
t = get_template("includes/editable_form.html")
return t.render(Context(context))
return parsed
@register.simple_tag
def try_url(url_name):
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystone import catalog
from keystone.tests import unit
from keystone.tests.unit.ksfixtures import database
from keystone.tests.unit import rest
BASE_URL = 'http://127.0.0.1:35357/v2'
SERVICE_FIXTURE = object()
class V2CatalogTestCase(rest.RestfulTestCase):
def setUp(self):
super(V2CatalogTestCase, self).setUp()
self.useFixture(database.Database())
self.service_id = uuid.uuid4().hex
self.service = unit.new_service_ref()
self.service['id'] = self.service_id
self.catalog_api.create_service(
self.service_id,
self.service.copy())
# TODO(termie): add an admin user to the fixtures and use that user
# override the fixtures, for now
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_bar['id'],
self.role_admin['id'])
def config_overrides(self):
super(V2CatalogTestCase, self).config_overrides()
self.config_fixture.config(group='catalog', driver='sql')
def _get_token_id(self, r):
"""Applicable only to JSON."""
return r.result['access']['token']['id']
def _endpoint_create(self, expected_status=200, service_id=SERVICE_FIXTURE,
publicurl='http://localhost:8080',
internalurl='http://localhost:8080',
adminurl='http://localhost:8080'):
if service_id is SERVICE_FIXTURE:
service_id = self.service_id
# FIXME(dolph): expected status should actually be 201 Created
path = '/v2.0/endpoints'
body = {
'endpoint': {
'adminurl': adminurl,
'service_id': service_id,
'region': 'RegionOne',
'internalurl': internalurl,
'publicurl': publicurl
}
}
r = self.admin_request(method='POST', token=self.get_scoped_token(),
path=path, expected_status=expected_status,
body=body)
return body, r
def test_endpoint_create(self):
req_body, response = self._endpoint_create()
self.assertIn('endpoint', response.result)
self.assertIn('id', response.result['endpoint'])
for field, value in req_body['endpoint'].items():
self.assertEqual(response.result['endpoint'][field], value)
def test_endpoint_create_with_null_adminurl(self):
req_body, response = self._endpoint_create(adminurl=None)
self.assertIsNone(req_body['endpoint']['adminurl'])
self.assertNotIn('adminurl', response.result['endpoint'])
def test_endpoint_create_with_empty_adminurl(self):
req_body, response = self._endpoint_create(adminurl='')
self.assertEqual('', req_body['endpoint']['adminurl'])
self.assertNotIn("adminurl", response.result['endpoint'])
def test_endpoint_create_with_null_internalurl(self):
req_body, response = self._endpoint_create(internalurl=None)
self.assertIsNone(req_body['endpoint']['internalurl'])
self.assertNotIn('internalurl', response.result['endpoint'])
def test_endpoint_create_with_empty_internalurl(self):
req_body, response = self._endpoint_create(internalurl='')
self.assertEqual('', req_body['endpoint']['internalurl'])
self.assertNotIn("internalurl", response.result['endpoint'])
def test_endpoint_create_with_null_publicurl(self):
self._endpoint_create(expected_status=400, publicurl=None)
def test_endpoint_create_with_empty_publicurl(self):
self._endpoint_create(expected_status=400, publicurl='')
def test_endpoint_create_with_null_service_id(self):
self._endpoint_create(expected_status=400, service_id=None)
def test_endpoint_create_with_empty_service_id(self):
self._endpoint_create(expected_status=400, service_id='')
def test_endpoint_create_with_valid_url(self):
"""Create endpoint with valid URL should be tested, too."""
# list one valid url is enough, no need to list too much
valid_url = 'http://127.0.0.1:8774/v1.1/$(tenant_id)s'
# baseline tests that all valid URLs works
self._endpoint_create(expected_status=200,
publicurl=valid_url,
internalurl=valid_url,
adminurl=valid_url)
def test_endpoint_create_with_invalid_url(self):
"""Test the invalid cases: substitutions is not exactly right."""
invalid_urls = [
# using a substitution that is not whitelisted - KeyError
'http://127.0.0.1:8774/v1.1/$(nonexistent)s',
# invalid formatting - ValueError
'http://127.0.0.1:8774/v1.1/$(tenant_id)',
'http://127.0.0.1:8774/v1.1/$(tenant_id)t',
'http://127.0.0.1:8774/v1.1/$(tenant_id',
# invalid type specifier - TypeError
# admin_url is a string not an int
'http://127.0.0.1:8774/v1.1/$(admin_url)d',
]
        # Listing one valid URL is enough; no need to list too many.
valid_url = 'http://127.0.0.1:8774/v1.1/$(tenant_id)s'
# Case one: publicurl, internalurl and adminurl are
# all invalid
for invalid_url in invalid_urls:
self._endpoint_create(expected_status=400,
publicurl=invalid_url,
internalurl=invalid_url,
adminurl=invalid_url)
# Case two: publicurl, internalurl are invalid
# and adminurl is valid
for invalid_url in invalid_urls:
self._endpoint_create(expected_status=400,
publicurl=invalid_url,
internalurl=invalid_url,
adminurl=valid_url)
# Case three: publicurl, adminurl are invalid
# and internalurl is valid
for invalid_url in invalid_urls:
self._endpoint_create(expected_status=400,
publicurl=invalid_url,
internalurl=valid_url,
adminurl=invalid_url)
# Case four: internalurl, adminurl are invalid
# and publicurl is valid
for invalid_url in invalid_urls:
self._endpoint_create(expected_status=400,
publicurl=valid_url,
internalurl=invalid_url,
adminurl=invalid_url)
# Case five: publicurl is invalid, internalurl
# and adminurl are valid
for invalid_url in invalid_urls:
self._endpoint_create(expected_status=400,
publicurl=invalid_url,
internalurl=valid_url,
adminurl=valid_url)
# Case six: internalurl is invalid, publicurl
# and adminurl are valid
for invalid_url in invalid_urls:
self._endpoint_create(expected_status=400,
publicurl=valid_url,
internalurl=invalid_url,
adminurl=valid_url)
# Case seven: adminurl is invalid, publicurl
# and internalurl are valid
for invalid_url in invalid_urls:
self._endpoint_create(expected_status=400,
publicurl=valid_url,
internalurl=valid_url,
adminurl=invalid_url)
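    # Hedged note on the "$(...)s" placeholders exercised above: Keystone's
    # catalog presumably rewrites "$(" to "%(" and applies Python %-formatting,
    # e.g. 'http://127.0.0.1:8774/v1.1/%(tenant_id)s' % {'tenant_id': 'abc'},
    # which is why a malformed or non-whitelisted placeholder fails with
    # KeyError/ValueError/TypeError and the endpoint is rejected with HTTP 400.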
class TestV2CatalogAPISQL(unit.TestCase):
def setUp(self):
super(TestV2CatalogAPISQL, self).setUp()
self.useFixture(database.Database())
self.catalog_api = catalog.Manager()
self.service_id = uuid.uuid4().hex
service = {'id': self.service_id, 'name': uuid.uuid4().hex}
self.catalog_api.create_service(self.service_id, service)
endpoint = self.new_endpoint_ref(service_id=self.service_id)
self.catalog_api.create_endpoint(endpoint['id'], endpoint)
def config_overrides(self):
super(TestV2CatalogAPISQL, self).config_overrides()
self.config_fixture.config(group='catalog', driver='sql')
def new_endpoint_ref(self, service_id):
return {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'interface': uuid.uuid4().hex[:8],
'service_id': service_id,
'url': uuid.uuid4().hex,
'region': uuid.uuid4().hex,
}
def test_get_catalog_ignores_endpoints_with_invalid_urls(self):
user_id = uuid.uuid4().hex
tenant_id = uuid.uuid4().hex
# the only endpoint in the catalog is the one created in setUp
catalog = self.catalog_api.get_catalog(user_id, tenant_id)
self.assertEqual(1, len(catalog))
# it's also the only endpoint in the backend
self.assertEqual(1, len(self.catalog_api.list_endpoints()))
# create a new, invalid endpoint - malformed type declaration
endpoint = self.new_endpoint_ref(self.service_id)
endpoint['url'] = 'http://keystone/%(tenant_id)'
self.catalog_api.create_endpoint(endpoint['id'], endpoint)
# create a new, invalid endpoint - nonexistent key
endpoint = self.new_endpoint_ref(self.service_id)
endpoint['url'] = 'http://keystone/%(you_wont_find_me)s'
self.catalog_api.create_endpoint(endpoint['id'], endpoint)
# verify that the invalid endpoints don't appear in the catalog
catalog = self.catalog_api.get_catalog(user_id, tenant_id)
self.assertEqual(1, len(catalog))
# all three endpoints appear in the backend
self.assertEqual(3, len(self.catalog_api.list_endpoints()))
    def test_get_catalog_always_returns_service_name(self):
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ncclient import manager
from ncclient.operations import RPCError, TimeoutExpiredError
from ncclient.xml_ import new_ele, sub_ele, to_ele, to_xml
from netaddr import IPNetwork
from netman import regex
from netman.core.objects.access_groups import IN, OUT
from netman.core.objects.bond import Bond
from netman.core.objects.exceptions import LockedSwitch, VlanAlreadyExist, UnknownVlan, \
InterfaceInWrongPortMode, UnknownInterface, AccessVlanNotSet, NativeVlanNotSet, TrunkVlanNotSet, VlanAlreadyInTrunk, \
BadBondNumber, BondAlreadyExist, UnknownBond, InterfaceNotInBond, OperationNotCompleted, InvalidMtuSize
from netman.core.objects.interface import Interface
from netman.core.objects.interface_states import ON, OFF
from netman.core.objects.port_modes import ACCESS, TRUNK, BOND_MEMBER
from netman.core.objects.switch_base import SwitchBase
from netman.core.objects.vlan import Vlan
class Juniper(SwitchBase):
def __init__(self, switch_descriptor, custom_strategies,
timeout=300):
super(Juniper, self).__init__(switch_descriptor)
self.timeout = timeout
self.custom_strategies = custom_strategies
self.netconf = None
self.in_transaction = False
def _connect(self):
params = dict(
host=self.switch_descriptor.hostname,
username=self.switch_descriptor.username,
password=self.switch_descriptor.password,
hostkey_verify=False,
device_params={'name': 'junos'},
timeout=self.timeout
)
if self.switch_descriptor.port:
params["port"] = self.switch_descriptor.port
self.netconf = manager.connect(**params)
def _disconnect(self):
try:
self.netconf.close_session()
except TimeoutExpiredError:
pass
def start_transaction(self):
try:
self.netconf.lock(target="candidate")
except RPCError as e:
if "configuration database modified" in e.message:
self.rollback_transaction()
self.netconf.lock(target="candidate")
elif "Configuration database is already open" in e.message:
raise LockedSwitch()
else:
raise
self.in_transaction = True
def end_transaction(self):
self.in_transaction = False
self.netconf.unlock(target="candidate")
def rollback_transaction(self):
self.netconf.discard_changes()
def commit_transaction(self):
try:
self.netconf.commit()
except RPCError as e:
self.logger.info("An RPCError was raised : {}".format(e))
raise OperationNotCompleted(str(e).strip())
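    # Hedged usage sketch of the transaction flow above ("switch" is an
    # illustrative Juniper instance and the VLAN values are made up):
    #   switch.start_transaction()
    #   try:
    #       switch.add_vlan(1000, name="example-vlan")
    #       switch.commit_transaction()
    #   except Exception:
    #       switch.rollback_transaction()
    #       raise
    #   finally:
    #       switch.end_transaction()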
def get_vlans(self):
config = self.query(self.custom_strategies.all_vlans, all_interfaces)
vlan_list = []
for vlan_node in self.custom_strategies.vlan_nodes(config):
vlan = self.get_vlan_from_node(vlan_node, config)
if vlan is not None:
vlan_list.append(vlan)
return vlan_list
def get_vlan(self, number):
config = self.query(self.custom_strategies.all_vlans, all_interfaces)
vlan_node = self.custom_strategies.vlan_node(config, number)
return self.get_vlan_from_node(vlan_node, config)
def get_vlan_from_node(self, vlan_node, config):
vlan_id_node = first(vlan_node.xpath("vlan-id"))
vlan = None
if vlan_id_node is not None:
vlan = Vlan(number=int(vlan_id_node.text), icmp_redirects=True)
description_node = first(vlan_node.xpath("description"))
if description_node is not None:
vlan.name = description_node.text
l3_if_type, l3_if_name = self.custom_strategies.get_l3_interface(vlan_node)
if l3_if_name is not None:
interface_vlan_node = first(config.xpath("data/configuration/interfaces/interface/name[text()=\"{}\"]/.."
"/unit/name[text()=\"{}\"]/..".format(l3_if_type, l3_if_name)))
if interface_vlan_node is not None:
vlan.ips = parse_ips(interface_vlan_node)
vlan.access_groups[IN] = parse_inet_filter(interface_vlan_node, "input")
vlan.access_groups[OUT] = parse_inet_filter(interface_vlan_node, "output")
vlan.vrrp_groups = self.custom_strategies.parse_vrrp_groups(interface_vlan_node)
vlan.icmp_redirects = self.custom_strategies.parse_icmp_redirects(interface_vlan_node)
return vlan
def get_interfaces(self):
physical_interfaces = self._list_physical_interfaces()
config = self.query(all_interfaces, self.custom_strategies.all_vlans)
interface_list = []
for phys_int in physical_interfaces:
if not phys_int.name.startswith("ae"):
interface_node = first(config.xpath("data/configuration/interfaces/interface/name[text()=\"{}\"]/.."
.format(phys_int.name)))
if interface_node is not None:
interface_list.append(self.node_to_interface(interface_node, config))
else:
interface_list.append(phys_int.to_interface())
return interface_list
def add_vlan(self, number, name=None):
config = self.query(self.custom_strategies.all_vlans)
try:
self.custom_strategies.vlan_node(config, number)
raise VlanAlreadyExist(number)
except UnknownVlan:
pass
update = Update()
self.custom_strategies.add_update_vlans(update, number, name)
try:
self._push(update)
except RPCError as e:
self.custom_strategies.manage_update_vlan_exception(e.message, number)
raise
def remove_vlan(self, number):
config = self.query(self.custom_strategies.all_vlans, all_interfaces)
vlan_node = self.custom_strategies.vlan_node(config, number)
vlan_name = first(vlan_node.xpath("name")).text
update = Update()
self.custom_strategies.remove_update_vlans(update, vlan_name)
l3_if_type, l3_if_name = self.custom_strategies.get_l3_interface(vlan_node)
if l3_if_name is not None:
update.add_interface(interface_unit_interface_removal(l3_if_type, l3_if_name))
for interface_node in config.xpath("data/configuration/interfaces/interface"):
members_modifications = self.custom_strategies.craft_members_modification_to_remove_vlan(interface_node, vlan_name, number)
if len(members_modifications) > 0:
update.add_interface(self.custom_strategies.interface_vlan_members_update(
first(interface_node.xpath("name")).text,
first(interface_node.xpath("unit/name")).text,
members_modifications)
)
self._push(update)
def set_access_mode(self, interface_id):
update_attributes = []
config = self.query(all_interfaces, self.custom_strategies.all_vlans)
interface_node = self.get_interface_config(interface_id, config)
interface = self.node_to_interface(interface_node, config)
if self.get_port_mode(interface_node) in (TRUNK, None):
update_attributes.append(self.custom_strategies.get_interface_port_mode_update_element("access"))
if len(interface.trunk_vlans) > 0:
update_attributes.append(self.custom_strategies.get_delete_trunk_vlan_element())
if interface.trunk_native_vlan is not None:
update_attributes.append(to_ele('<native-vlan-id operation="delete" />'))
if len(update_attributes) > 0:
update = Update()
update.add_interface(self.custom_strategies.interface_update(interface_id, "0", update_attributes))
self._push_interface_update(interface_id, update)
def set_trunk_mode(self, interface_id):
update_attributes = []
config = self.query(one_interface(interface_id), self.custom_strategies.all_vlans)
interface_node = self.get_interface_config(interface_id, config)
interface = self.node_to_interface(interface_node, config)
if interface.port_mode is ACCESS or interface.port_mode is None:
update_attributes.append(self.custom_strategies.get_interface_port_mode_update_element("trunk"))
if interface.access_vlan is not None:
update_attributes.append(self.custom_strategies.get_delete_vlan_element())
if len(update_attributes) > 0:
update = Update()
update.add_interface(self.custom_strategies.interface_update(interface_id, "0", update_attributes))
self._push_interface_update(interface_id, update)
def set_access_vlan(self, interface_id, vlan):
update_attributes = []
update_vlan_members = []
config = self.query(all_interfaces, self.custom_strategies.all_vlans)
self.custom_strategies.vlan_node(config, vlan)
interface_node = self.get_interface_config(interface_id, config)
interface = self.node_to_interface(interface_node, config)
if interface.port_mode == TRUNK:
raise InterfaceInWrongPortMode("trunk")
elif self.get_port_mode(interface_node) is None:
update_attributes.append(self.custom_strategies.get_interface_port_mode_update_element("access"))
if interface.access_vlan != vlan:
self.custom_strategies.update_vlan_members(interface_node, update_vlan_members, vlan)
if update_attributes or update_vlan_members:
update = Update()
update.add_interface(self.custom_strategies.interface_update(interface_id, "0", update_attributes, update_vlan_members))
try:
self._push_interface_update(interface_id, update)
except RPCError as e:
if "No vlan matches vlan tag" in e.message:
raise UnknownVlan(vlan)
raise
def unset_interface_access_vlan(self, interface_id):
config = self.query(one_interface(interface_id), self.custom_strategies.all_vlans)
interface_node = self.get_interface_config(interface_id, config)
interface = self.node_to_interface(interface_node, config)
if interface.port_mode == TRUNK:
raise InterfaceInWrongPortMode("trunk")
if interface.access_vlan is not None:
update = Update()
update.add_interface(self.custom_strategies.interface_update(interface_id, "0", [to_ele('<vlan operation="delete" />')]))
self._push(update)
else:
raise AccessVlanNotSet(interface_id)
def set_interface_native_vlan(self, interface_id, vlan):
port_mode_node = None
native_vlan_id_node = None
config = self.query(all_interfaces, self.custom_strategies.all_vlans)
self.custom_strategies.vlan_node(config, vlan)
interface_node = self.get_interface_config(interface_id, config)
interface = self.node_to_interface(interface_node, config)
actual_port_mode = self.get_port_mode(interface_node)
if actual_port_mode is ACCESS:
raise InterfaceInWrongPortMode("access")
elif actual_port_mode is None:
port_mode_node = self.custom_strategies.get_interface_port_mode_update_element("trunk")
if vlan in interface.trunk_vlans:
raise VlanAlreadyInTrunk(vlan)
elif interface.trunk_native_vlan != vlan:
native_vlan_id_node = to_ele("<native-vlan-id>{}</native-vlan-id>".format(vlan))
if native_vlan_id_node is not None:
interface = self.custom_strategies.interface_update(interface_id, "0", [port_mode_node] if port_mode_node is not None else [])
self.custom_strategies.set_native_vlan_id_node(interface, native_vlan_id_node)
update = Update()
update.add_interface(interface)
try:
self._push_interface_update(interface_id, update)
except RPCError as e:
if "No vlan matches vlan tag" in e.message:
raise UnknownVlan(vlan)
raise
def set_interface_auto_negotiation_state(self, interface_id, negotiation_state):
content = to_ele("""
<interface>
<name>{0}</name>
</interface>
""".format(interface_id))
if negotiation_state == ON:
content.append(to_ele("<ether-options><auto-negotiation/></ether-options>"))
else:
content.append(to_ele("<ether-options><no-auto-negotiation/></ether-options>"))
update = Update()
update.add_interface(content)
self._push_interface_update(interface_id, update)
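    # Hedged illustration of the edit-config fragment pushed above when
    # negotiation_state is ON ("ge-0/0/1" is an illustrative interface name):
    #   <interface>
    #     <name>ge-0/0/1</name>
    #     <ether-options><auto-negotiation/></ether-options>
    #   </interface>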
def unset_interface_auto_negotiation_state(self, interface_id):
config = self.query(one_interface(interface_id))
interface_node = self.get_interface_config(interface_id, config)
if interface_node is None:
self._get_physical_interface(interface_id)
return
auto_negotiation_present = first(interface_node.xpath('ether-options/auto-negotiation')) is not None
no_auto_negotiation_present = first(interface_node.xpath('ether-options/no-auto-negotiation')) is not None
if auto_negotiation_present or no_auto_negotiation_present:
content = to_ele("""
<interface>
<name>{0}</name>
</interface>
""".format(interface_id))
ether_options = to_ele("<ether-options/>")
if auto_negotiation_present:
ether_options.append(to_ele("<auto-negotiation operation=\"delete\"/>"))
elif no_auto_negotiation_present:
ether_options.append(to_ele("<no-auto-negotiation operation=\"delete\"/>"))
update = Update()
content.append(ether_options)
update.add_interface(content)
self._push_interface_update(interface_id, update)
def reset_interface(self, interface_id):
content = to_ele("""
<interface operation=\"delete\">
<name>{0}</name>
</interface>
""".format(interface_id))
update = Update()
update.add_interface(content)
self._push_interface_update(interface_id, update)
def unset_interface_native_vlan(self, interface_id):
config = self.query(one_interface(interface_id), self.custom_strategies.all_vlans)
interface_node = self.get_interface_config(interface_id, config)
interface = self.node_to_interface(interface_node, config)
if interface.trunk_native_vlan is None:
raise NativeVlanNotSet(interface_id)
update = Update()
update.add_interface(self.custom_strategies.interface_update(interface_id, "0", [to_ele("<native-vlan-id operation=\"delete\" />")]))
self._push(update)
def add_trunk_vlan(self, interface_id, vlan):
config = self.query(all_interfaces, self.custom_strategies.all_vlans)
self.custom_strategies.vlan_node(config, vlan)
interface_node = self.get_interface_config(interface_id, config)
interface = self.node_to_interface(interface_node, config)
actual_port_mode = self.get_port_mode(interface_node)
if actual_port_mode is ACCESS or interface.access_vlan is not None:
raise InterfaceInWrongPortMode("access")
if vlan not in interface.trunk_vlans:
update = Update()
update.add_interface(self.custom_strategies.interface_update(
interface_id, "0",
[self.custom_strategies.get_interface_port_mode_update_element("trunk")] if actual_port_mode is None else None,
[self.custom_strategies.get_vlan_member_update_element(vlan)]
))
self._push_interface_update(interface_id, update)
def remove_trunk_vlan(self, interface_id, vlan):
config = self.query(all_interfaces, self.custom_strategies.all_vlans)
interface_node = self.get_interface_config(interface_id, config)
if interface_node is None:
raise UnknownInterface(interface_id)
interface = self.node_to_interface(interface_node, config)
if interface.port_mode is ACCESS:
raise InterfaceInWrongPortMode("access")
vlan_node = self.custom_strategies.vlan_node(config, vlan)
vlan_name = first(vlan_node.xpath("name")).text
modifications = self.custom_strategies.craft_members_modification_to_remove_vlan(interface_node, vlan_name, vlan)
if len(modifications) == 0:
raise TrunkVlanNotSet(interface_id)
update = Update()
update.add_interface(self.custom_strategies.interface_update(interface_id, "0", vlan_members=modifications))
self._push(update)
def set_interface_description(self, interface_id, description):
update = Update()
update.add_interface(interface_main_update(interface_id, [
to_ele("<description>{}</description>".format(description))
]))
try:
self._push(update)
except RPCError as e:
self.logger.info("actual setting error was {}".format(e))
raise UnknownInterface(interface_id)
def unset_interface_description(self, interface_id):
update = Update()
update.add_interface(interface_main_update(interface_id, [
to_ele("<description operation=\"delete\" />")
]))
try:
self._push(update)
except RPCError as e:
if e.severity != "warning":
raise UnknownInterface(interface_id)
def set_interface_mtu(self, interface_id, size):
update = Update()
update.add_interface(interface_main_update(interface_id, [
to_ele("<mtu>{}</mtu>".format(size))
]))
try:
self._push(update)
except RPCError as e:
self.logger.info("actual setting error was {}".format(e))
if "Value {} is not within range".format(size) in str(e):
raise InvalidMtuSize(str(e))
raise UnknownInterface(interface_id)
def unset_interface_mtu(self, interface_id):
update = Update()
update.add_interface(interface_main_update(interface_id, [
to_ele("<mtu operation=\"delete\" />")
]))
try:
self._push(update)
except RPCError as e:
if e.severity != "warning":
raise UnknownInterface(interface_id)
def edit_interface_spanning_tree(self, interface_id, edge=None):
        config = self.query(one_interface(interface_id),
                            one_protocol_interface("rstp", self._for_protocol(interface_id)))
"""The WiderFace dataset.
"""
# standard imports
import os
import logging
# third party imports
import numpy as np
# toolbox imports
from dltb.base.data import Data
from dltb.base.image import BoundingBox, Region, Landmarks
from dltb.tool.classifier import ClassScheme
from dltb.datasource import Imagesource, Sectioned, DataDirectory
# logging
LOG = logging.getLogger(__name__)
class WiderfaceScheme(ClassScheme):
"""The WiderFace dataset divides its data into
62 classes (actually just 61, as class 60 is missing).
Class labels can be obtained from directory names in the
data directories.
"""
def __init__(self) -> None:
"""Iniitalization of the :py:class:`WiderfaceScheme`.
"""
# The WIDER face dataset has 62 classes (but it seems
# that only 61 are used - class '60' is missing).
super().__init__(length=62, key='widerface')
@property
def prepared(self) -> bool:
"""Check if the :py:class:`WiderfaceScheme` has been initialized.
"""
return 'text' in self._labels
def prepare(self) -> None:
"""Prepare the labels for the Widerface dataset.
The labels will be read in from the directory names
in the WIDERFACE_DATA directory.
"""
if self.prepared:
return # nothing to do ...
widerface_data = os.getenv('WIDERFACE_DATA')
train_dir = os.path.join(widerface_data, 'WIDER_train', 'images')
text = [''] * len(self)
for dirname in os.listdir(train_dir):
number, label = dirname.split('--', maxsplit=1)
text[int(number)] = label
self.add_labels(text, 'text')
WiderfaceScheme()
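# Hedged illustration of the label preparation above: a training directory named
# "0--Parade" (also used in the annotation example further below) would map
# class index 0 to the text label "Parade".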
class WiderFace(DataDirectory, Imagesource, Sectioned,
sections={'train', 'val', 'test'}):
# pylint: disable=too-many-ancestors
"""
http://shuoyang1213.me/WIDERFACE/
"Wider Face" is A face detection benchmark consisting of 32,203
images with 393,703 labeled faces.
The faces have wide variability in scale, pose, occlusion.
Images are categorized in 61 event class.
From each class train/validation/test datasets where split
in relation 40%/10%/50%.
Attributes
----------
blur: Tuple[str]
expression: Tuple[str]
illumination: Tuple[str]
occlusion: Tuple[str]
invalid: Tuple[str]
"""
blur = ('clear', 'normal blur', 'heavy blur')
expression = ('typical expression', 'exaggerate expression')
illumination = ('normal illumination', 'extreme illumination')
occlusion = ('no occlusion', 'partial occlusion', 'heavy occlusion')
pose = ('typical pose', 'atypical pose')
invalid = ('valid image', 'invalid image')
def __init__(self, section: str = 'train',
key: str = None, **kwargs) -> None:
"""Initialize the WIDER Face Datasource.
"""
self._widerface_data = os.getenv('WIDERFACE_DATA', '.')
self._section = section
scheme = ClassScheme['widerface']
directory = os.path.join(self._widerface_data,
'WIDER_' + self._section, 'images')
super().__init__(key=key or f"wider-faces-{section}",
section=section, directory=directory, scheme=scheme,
description=f"WIDER Faces", **kwargs)
self._annotations = None
def __str__(self):
return f'WIDER Faces ({self._section})'
#
# Preparation
#
def _prepare(self, **kwargs) -> None:
# pylint: disable=arguments-differ
"""Prepare the WIDER Face dataset. This will provide in a list of
all images provided by the dataset, either by reading in a
prepared file, or by traversing the directory.
"""
LOG.info("Preparing WiderFace[%r]: %s",
self.preparable, self.directory)
cache = f"widerface_{self._section}_filelist.p"
super()._prepare(filenames_cache=cache, **kwargs)
self._scheme.prepare()
self._prepare_annotations()
def _unprepare(self):
"""Prepare the WIDER Face dataset. This will provide in a list of
all images provided by the dataset, either by reading in a
prepared file, or by traversing the directory.
"""
self._annotations = None
super()._unprepare()
def _prepare_annotations(self):
"""Load the annotations for the training images.
The annotations are stored in a single large text file
('wider_face_train_bbx_gt.txtX'), with a multi-line entry per file.
An entry has the following structure: The first line contains
the filename of the training image. The second line contains
the number of faces in that image. Then follows one line for
each face, consisting of a bounding box (x,y,w,h) and attributes
(blur, expression, illumination, invalid, occlusion, pose)
encoded numerically. In these lines, all numbers are separated
by spaces. Example:
0--Parade/0_Parade_marchingband_1_95.jpg
5
828 209 56 76 0 0 0 0 0 0
661 258 49 65 0 0 0 0 0 0
503 253 48 66 0 0 1 0 0 0
366 181 51 74 0 0 1 0 0 0
148 176 54 68 0 0 1 0 0 0
"""
self._annotations = {}
# check if annotations file exists
filename = None
if self._widerface_data is not None:
filename = os.path.join(self._widerface_data, 'wider_face_split',
'wider_face_train_bbx_gt.txt')
if not os.path.isfile(filename):
return # file not found
# load the annotations
try:
with open(filename, "r") as file:
for filename in file:
filename = filename.rstrip()
lines = int(file.readline())
faces = []
for line_number in range(lines):
# x1, y1, w, h, blur, expression, illumination,
# invalid, occlusion, pose
attributes = tuple(int(a)
for a in file.readline().split())
if len(attributes) == 10:
faces.append(attributes)
else:
LOG.warning("bad annotation for '%s', line %d/%d':"
"got %d instead of 10 values",
filename, line_number,
lines, len(attributes))
if lines == 0:
# images with 0 faces nevertheless have one
# line with dummy attributes -> just ignore that line
file.readline()
# Store all faces for the current file
self._annotations[filename] = faces
except FileNotFoundError:
self._annotations = {}
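    # Hedged illustration of the resulting structure, using the values from the
    # docstring example above:
    #   self._annotations['0--Parade/0_Parade_marchingband_1_95.jpg'] ==
    #       [(828, 209, 56, 76, 0, 0, 0, 0, 0, 0),
    #        (661, 258, 49, 65, 0, 0, 0, 0, 0, 0), ...]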
#
# Data
#
def _get_meta(self, data: Data, **kwargs) -> None:
data.add_attribute('label', batch=True)
super()._get_meta(data, **kwargs)
def _get_data_from_file(self, data, filename: str) -> str:
"""
Arguments
---------
filename: str
The relative filename.
"""
super()._get_data_from_file(data, filename)
regions = []
for (pos_x, pos_y, width, height, blur, expression, illumination,
invalid, occlusion, pose) in self._annotations[filename]:
region = Region(BoundingBox(x=pos_x, y=pos_y,
width=width, height=height),
blur=blur, expression=expression,
illumination=illumination,
invalid=invalid, occlusion=occlusion,
pose=pose)
regions.append(region)
data.label = regions
# FIXME[todo]
class W300(DataDirectory, Imagesource):
"""The 300 Faces In-the-Wild Challenge (300-W), form the ICCV 2013.
The challenge targets facial landmark detection, using a 68 point
annotation scheme.
Besides 300-W, there are several other datasets annotated in the
same scheme: AFW, FRGC, HELEN, IBUG, LPFW, and XM2VTS.
For more information visit:
https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
    @staticmethod
    def _load_annotation(filename: str) -> Landmarks:
# Natural Language Toolkit: Discourse Processing
#
# Author: Ewan Klein <[email protected]>
#
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
# $Id: discourse.py 5783 2008-02-27 14:51:45Z ehk $
from nltk.sem import root_semrep, Expression
from nltk import parse
from nltk.inference import Mace, spacer, get_prover
from nltk.data import show_cfg
import os
"""
Module for incrementally developing simple discourses, and checking for semantic ambiguity,
consistency and informativeness.
Many of the ideas are based on the CURT family of programs of Blackburn and Bos
(see U{http://homepages.inf.ed.ac.uk/jbos/comsem/book1.html}).
Consistency checking is carried out by using the L{mace} module to call the Mace4 model builder.
Informativeness checking is carried out with a call to C{get_prover()} from
the L{inference} module.
C{DiscourseTester} is a constructor for discourses.
The basic data structure is a list of sentences, stored as C{self._sentences}. Each sentence in the list
is assigned a I{sentence ID} (C{sid}) of the form C{s}I{i}. For example::
s0: A boxer walks
s1: Every boxer chases a girl
Each sentence can be ambiguous between a number of readings, each of which receives a
I{reading ID} (C{rid}) of the form C{s}I{i} -C{r}I{j}. For example::
s0 readings:
------------------------------
s0-r1: some x.((boxer x) and (walk x))
s0-r0: some x.((boxerdog x) and (walk x))
A I{thread} is a list of readings, represented
as a list of C{rid}s. Each thread receives a I{thread ID} (C{tid}) of the form C{d}I{i}.
For example::
d0: ['s0-r0', 's1-r0']
The set of all threads for a discourse is the Cartesian product of all the readings of the sequences of sentences.
(This is not intended to scale beyond very short discourses!) The method L{readings(filter=True)} will only show
those threads which are consistent (taking into account any background assumptions).
"""
class DiscourseTester(object):
"""
Check properties of an ongoing discourse.
"""
def __init__(self, input, gramfile=None, background=None):
"""
Initialize a C{DiscourseTester}.
@parameter input: the discourse sentences
@type input: C{list} of C{str}
@parameter gramfile: name of file where grammar can be loaded
@type gramfile: C{str}
@parameter background: Formulas which express background assumptions
@type background: C{list} of L{logic.Expression}.
"""
self._input = input
self._sentences = dict([('s%s' % i, sent) for i, sent in enumerate(input)])
self._models = None
self._readings = {}
if gramfile is None:
self._gramfile = 'grammars/sem4.fcfg'
else:
self._gramfile = gramfile
self._threads = {}
self._filtered_threads = {}
self._parser = parse.load_earley(self._gramfile)
if background is not None:
for e in background:
assert isinstance(e, Expression)
self._background = background
else:
self._background = []
###############################
# Sentences
###############################
def sentences(self):
"""
Display the list of sentences in the current discourse.
"""
for id in sorted(self._sentences.keys()):
print "%s: %s" % (id, self._sentences[id])
def add_sentence(self, sentence, informchk=False, consistchk=False,):
"""
Add a sentence to the current discourse.
Updates C{self._input} and C{self._sentences}.
@parameter sentence: An input sentence
@type sentence: C{str}
@parameter informchk: if C{True}, check that the result of adding the sentence is thread-informative. Updates C{self._readings}.
@parameter consistchk: if C{True}, check that the result of adding the sentence is thread-consistent. Updates C{self._readings}.
"""
# check whether the new sentence is informative (i.e. not entailed by the previous discourse)
if informchk:
self.readings(quiet=True)
for tid in sorted(self._threads.keys()):
assumptions = [reading for (rid, reading) in self.expand_threads(tid)]
assumptions += self._background
for sent_reading in self._get_readings(sentence):
tp = get_prover(goal=sent_reading, assumptions=assumptions)
if tp.prove():
print "Sentence '%s' under reading '%s':" % (sentence, str(sent_reading.infixify()))
print "Not informative relative to thread '%s'" % tid
self._input.append(sentence)
self._sentences = dict([('s%s' % i, sent) for i, sent in enumerate(self._input)])
        # check whether adding the new sentence to the discourse preserves
        # consistency (i.e. a model can be found for the combined set of assumptions)
if consistchk:
self.readings(quiet=True)
self.models(show=False)
def retract_sentence(self, sentence, quiet=False):
"""
Remove a sentence from the current discourse.
Updates C{self._input}, C{self._sentences} and C{self._readings}.
@parameter sentence: An input sentence
@type sentence: C{str}
@parameter quiet: If C{False}, report on the updated list of sentences.
"""
self._input.remove(sentence)
self._sentences = dict([('s%s' % i, sent) for i, sent in enumerate(self._input)])
self.readings(quiet=True)
if not quiet:
print "Current sentences are "
for sent in self._sentences:
print sent
def grammar(self):
"""
Print out the grammar in use for parsing input sentences
"""
show_cfg(self._gramfile)
###############################
# Readings and Threads
###############################
def _get_readings(self, sentence):
"""
Build a list of semantic readings for a sentence.
@rtype: C{list} of L{logic.Expression}.
"""
tokens = sentence.split()
trees = self._parser.nbest_parse(tokens)
return [root_semrep(tree) for tree in trees]
def _construct_readings(self):
"""
Use C{self._sentences} to construct a value for C{self._readings}.
"""
for sid in self._sentences:
readings = self._get_readings(self._sentences[sid])
self._readings[sid] = dict([("%s-r%s" % (sid, rid), reading)
for rid, reading in enumerate(readings)])
def _construct_threads(self):
"""
Use C{self._readings} to construct a value for C{self._threads}
and use the model builder to construct a value for C{self._filtered_threads}
"""
thread_list = [[]]
for sid in sorted(self._readings.keys()):
thread_list = self.multiply(thread_list, sorted(self._readings[sid].keys()))
self._threads = dict([("d%s" % tid, thread) for tid, thread in enumerate(thread_list)])
# re-initialize the filtered threads
self._filtered_threads = {}
# keep the same ids, but only include threads which get models
for (tid, thread) in self._threads.items():
if (tid, True) in self._check_consistency(self._threads):
self._filtered_threads[tid] = thread
def _show_readings(self, sentence=None):
"""
Print out the readings for the discourse (or a single sentence).
"""
if sentence is not None:
print "The sentence '%s' has these readings:" % sentence
for r in [str(reading.infixify()) for reading in (self._get_readings(sentence))]:
print " %s" % r
else:
for sid in sorted(self._readings.keys()):
print
print '%s readings:' % sid
print '-' * 30
for rid in self._readings[sid]:
lf = str(self._readings[sid][rid].infixify())
print "%s: %s" % (rid, lf)
def _show_threads(self, filter=False):
"""
        Print out the value of C{self._threads} or C{self._filtered_threads}
"""
if filter:
threads = self._filtered_threads
else:
threads = self._threads
for tid in sorted(threads.keys()):
print "%s:" % tid, self._threads[tid]
def readings(self, sentence=None, threaded=False, quiet=False, filter=False):
"""
Construct and show the readings of the discourse (or of a single sentence).
@parameter sentence: test just this sentence
@type sentence: C{str}
@parameter threaded: if C{True}, print out each thread ID and the corresponding thread.
@parameter filter: if C{True}, only print out consistent thread IDs and threads.
"""
self._construct_readings()
self._construct_threads()
# if we are filtering, just show threads
if filter: threaded=True
if not quiet:
if not threaded:
self._show_readings(sentence=sentence)
else:
self._show_threads(filter=filter)
def expand_threads(self, thread_id, threads=None):
"""
Given a thread ID, find the list of L{logic.Expression}s corresponding to the reading IDs in that thread.
@parameter thread_id: thread ID
@type thread_id: C{str}
@parameter threads: a mapping from thread IDs to lists of reading IDs
@type threads: C{dict}
@return: A list of pairs (C{rid}, I{reading}) where I{reading} is the L{logic.Expression} associated with a reading ID
@rtype: C{list} of C{tuple}
"""
if threads is None:
threads = self._threads
return [(rid, self._readings[sid][rid]) for rid in threads[thread_id] for sid in rid.split('-')[:1]]
###############################
# Models and Background
###############################
def _check_consistency(self, threads, show=False, quiet=True):
results = []
for tid in sorted(threads.keys()):
assumptions = [reading for (rid, reading) in self.expand_threads(tid, threads=threads)]
assumptions += self._background
# if Mace4 finds a model, it always seems to find it quickly
mb = Mace('', assumptions, timeout=2)
modelfound = mb.build_model()
results.append((tid, modelfound))
if show:
spacer(80)
print "Model for Discourse Thread %s" % tid
spacer(80)
if not quiet:
for a in assumptions:
print a.infixify()
spacer(80)
if modelfound:
mb.show_model(format='cooked')
else:
print "No model found!\n"
return results
def models(self, thread_id=None, show=True, quiet=True):
"""
Call Mace4 to build a model for each current discourse thread.
@parameter thread_id: thread ID
@type thread_id: C{str}
@parameter show: If C{True}, display the model that has been found.
"""
self._construct_readings()
self._construct_threads()
if thread_id is None:
threads = self._threads
else:
threads = {thread_id: self._threads[thread_id]}
for (tid, modelfound) in self._check_consistency(threads, show=show, quiet=quiet):
idlist = [rid for rid in threads[tid]]
if not modelfound:
print "Inconsistent discourse %s %s:" % (tid, idlist)
for rid, reading in [(rid, str(reading.infixify())) for (rid, reading) in self.expand_threads(tid)]:
print " %s: %s" % (rid, reading)
print
else:
print "Consistent discourse: %s %s:" % (tid, idlist)
for rid, reading in [(rid, str(reading.infixify())) for (rid, reading) in self.expand_threads(tid)]:
print " %s: %s" % (rid, reading)
print
def add_background(self, background, quiet=False):
"""
Add a list of background assumptions for reasoning about the discourse.
When called, this method also updates the discourse model's set of readings and threads.
        @parameter background: Formulas which contain background information
        @type background: C{list} of L{logic.Expression}.
import discord
from discord.ext import commands
import functools
import inspect
import re
import youtube_dl
from utilities import checks
def setup(bot):
bot.add_cog(Search(bot))
class Search(commands.Cog):
def __init__(self, bot):
self.bot = bot
# Add commands as search subcommands
for name, command in inspect.getmembers(self):
if isinstance(command, commands.Command) and command.parent is None and name != "search":
self.bot.add_command(command)
self.search.add_command(command)
# Add search youtube / youtube (audio) search subcommands
command = commands.Command(self.youtube, aliases = ["yt"], checks = [checks.not_forbidden().predicate])
command.error(self.youtube_error)
        self.search.add_command(command)
        if (cog := self.bot.get_cog("Audio")) and (parent := getattr(cog, "audio")):
# -*- coding: utf-8 -*-
"""
OSM Deviation Finder - Web Interface
~~~~~~~~~~~~~~~~~~~~
Implementation of a web interface for the OSM Deviation Finder library.
It uses the flask microframework by Armin Ronacher
For more information see https://github.com/mitsuhiko/flask/
To interact with the GeoServer REST API, the GeoServer configuration client library by boundlessgeo is used, see:
https://github.com/boundlessgeo/gsconfig
On the client side it uses jquery.js, leaflet.js, nprogress.js, DataTables and the UIKit framework,
for further information see the README.md file.
:copyright: (c) 2015 by Martin Hochenwarter
:license: MIT
"""
__author__ = 'Martin Hochenwarter'
__version__ = '0.1'
import os
import shutil
import zipfile
import uuid
import socket
from osmdeviationfinder import OSMDeviationfinder, HarmonizeOptions, LinematchOptions, ResultOptions
from osgeo import ogr
from web import app, db
from models import User, DevMap
from flask import json, request, Blueprint, jsonify, redirect, url_for, render_template, Response, abort, make_response
from flask.ext.login import (current_user, login_required)
from werkzeug.utils import secure_filename
from geoserver.catalog import Catalog
#: Blueprint for deviation finder specific functions
devmap = Blueprint('devmap', __name__, template_folder='templates')
DEBUG = True
UPLOAD_FOLDER = 'web/uploads/'
ALLOWED_EXTENSIONS = set(['zip', 'rar', 'json', 'osm'])
app.config['MAX_CONTENT_LENGTH'] = 3 * 1024 * 1024  # 3 MB upload limit (value in bytes)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
#: Database connection info
serverName = 'localhost'
database = 'odf'
port = '5432'
usr = 'martin'
pw = 'odf'
connectioninfo = "dbname='%s' host='%s' port='%s' user='%s' password='%s'" % (database, serverName, port, usr, pw)
#: GeoServer REST info
gs_url = 'http://localhost:8080/geoserver/'
gs_user = 'admin'
gs_password = 'geoserver'
gs_workspace = 'OSMDeviationMaps'
gs_store = 'osmdeviationmaps'
class Shapefile(object):
def __init__(self, name, ref, directory):
self.name = name
self.ref = ref
self.directory = directory
class ShapefileColumns(object):
def __init__(self, name):
self.name = name
@devmap.route('/upload', methods=['GET', 'POST'])
def upload_file():
"""This function handles the uid generation and zipfile (containing the shapefile) upload.
GET request: a redirect to the index site is made, where the user can upload a file.
POST request: first the file extions is validated, then a unique identifier is genereated for the current
upload. This uid is stored in the database and a directory is created using the uid,
in which the zip file gets extracted. After that the import to database site gets send to the user.
"""
if request.method == 'POST':
reffile = request.files['files[]']
if reffile and allowed_file(reffile.filename):
uid = str(uuid.uuid4())[:8] # str(uuid.uuid4()) #.hex
user = None
if current_user.is_authenticated():
user = current_user
#user = User.query.filter_by(username='Guest').first()
else:
user = User.query.filter_by(username='Guest').first()
dm = DevMap(uid, user)
db.session.add(dm)
db.session.commit()
filename = secure_filename(reffile.filename)
mapdir = os.path.join(app.config['UPLOAD_FOLDER'], uid)
os.makedirs(mapdir)
reffile.save(os.path.join(mapdir, filename))
archive = os.path.join(mapdir, filename)
zfile = zipfile.ZipFile(archive)
for name in zfile.namelist():
zfile.extract(name, mapdir)
os.remove(archive)
return url_for('devmap.import_to_db', uid=uid)
else:
return render_template('upload.html')
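# Illustrative client-side call for the /upload route above (the port is an
# assumption and depends on how the Flask app is run). The form field name
# 'files[]' matches request.files['files[]'] in upload_file().
#
#   curl -F "files[]=@reference_roads.zip" http://localhost:5000/upload
#   # response body: the URL of the import step, e.g. /<uid>/import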
@devmap.route('/<uid>/import', methods=['GET', 'POST'])
def import_to_db(uid):
"""Function to import features from a layer of a shapefile into the database and calculation of the concavehull of
the features in the table to use as a bounding polygon for the OverpassAPI request.
GET request: The import site is returned, containing a set of the uploaded shapefiles.
The user can then choose the shapefile to import.
POST request: The chosen layer will be imported into a new table using the the function layer_to_db
from the OSMDeviationfinder class. This function will import the features and convert multigeometry features to
single geometry features. After a successful import, the concavehull of the imported data is generated using the
function get_concavehull of the OSMDeviationfinder class. The concavhull is saved for the current devmap in the
xy (for the OverpassAPI) and yx (for leaflet.js) representation. After that, the osm data download site is returned.
"""
error = None
fdir = os.path.join(app.config['UPLOAD_FOLDER'], uid)
fdata = dict()
if request.method == 'POST':
uid = uid.encode('ISO-8859-1')
fdata['datasource'] = request.form['source']
fdata['title'] = request.form['title']
fdata['datalicense'] = request.form['license']
fdata['shapefile'] = request.form['shapefile']
fdata['wmsformat'] = request.form['wmsformat']
fdata['wmsurl'] = request.form['wmsurl']
fdata['wmslayer'] = request.form['wmslayer']
if len(fdata['datasource']) < 4:
error = 'Please define a data source with at least 4 characters.'
if len(fdata['datalicense']) < 3:
error = 'Please define a license with at least 3 characters.'
if len(fdata['title']) < 4:
error = 'Please define a title with at least 4 characters.'
if len(fdata['wmsurl']) > 1 or len(fdata['wmslayer']) > 1 or len(fdata['wmsformat']) > 1:
if not (len(fdata['wmsurl']) > 12 and len(fdata['wmslayer']) > 3 and len(fdata['wmsformat']) > 12):
error = 'All fields for a custom WMS Basemap have to be filled.'
if not 'image' in fdata['wmsformat']:
error = 'Please define a correct image format eg. image/jpeg'
else:
dm = DevMap.query.filter_by(title=fdata['title']).first()
if dm and dm.uid != uid:
error = 'The title "' + fdata['title'] + '" is already chosen. Please try another title.'
if fdata['shapefile'] == 'No Shapefile found!':
error = 'No shapefile was found.'
if error is None:
f = os.path.join(fdir, fdata['shapefile'])
tablename = 'odf_'+uid+'_ref'
shapefile = ogr.Open(f)
devfinder = OSMDeviationfinder(connectioninfo)
s = shapefile.GetLayerByIndex(0)
devfinder.layer_to_db(s, tablename, True)
concavehull = devfinder.get_concavehull(tablename)
dm = DevMap.query.filter_by(uid=uid).first()
if current_user.is_authenticated() and dm.owner == current_user or dm.owner == User.query.filter_by(
username='Guest').first():
boundsyx = {'type': "Feature", 'properties':
{'uid': uid, 'title': fdata['title'], 'author': dm.owner.username, 'source': fdata['datasource']},
'geometry': {'type': "Polygon", 'coordinates': [concavehull[1]['coordinates'][0]]}}
boundsxy = {'type': "Feature", 'properties':
{'uid': uid, 'title': fdata['title'], 'author': dm.owner.username, 'source': fdata['datasource']},
'geometry': {'type': "Polygon", 'coordinates': [concavehull[0]['coordinates'][0]]}}
dm.boundsxy = boundsxy
dm.boundsyx = boundsyx
dm.datasource = fdata['datasource']
dm.title = fdata['title']
dm.datalicense = fdata['datalicense']
dm.basemapwmsurl = fdata['wmsurl']
dm.basemapwmslayer = fdata['wmslayer']
dm.basemapwmsformat = fdata['wmsformat']
db.session.add(dm)
db.session.commit()
return redirect(url_for('devmap.osm_download', uid=uid))
shapefiles = []
for f in os.listdir(fdir):
if f.endswith(".shp") and not f.startswith('.'):
s = Shapefile(f, None, fdir)
shapefiles.append(s)
return render_template('import.html', shapefiles=shapefiles, uid=uid, error=error, fdata=fdata)
@devmap.route('/<uid>/osmdownload/', methods=['GET', 'POST'])
def osm_download(uid):
"""Function to download osm data.
GET request: The osmdownload site is returned, which shows the bounding polygon for the selected layer and a form to
choose the osm highway-types which should not be downloaded.
POST request: The selected options in the request form and the bounding polygon coordinates are transformed to
overpass query language. This data is used to call the osm_from_overpass function from the OSMDeviationfinder class,
which makes an OverpassAPI query, downloads the returned osm data and yields the progress of the download back,
which is streamed to the client.
"""
uid = uid.encode('ISO-8859-1')
if request.method == 'POST':
fdir = os.path.join(app.config['UPLOAD_FOLDER'], uid)
f = os.path.join(fdir, str(uid)+'.osm')
typesquery = ''
for i in request.form:
typesquery = typesquery + '["highway"!="' + i + '"]'
dm = DevMap.query.filter_by(uid=uid).first()
bbox = json.dumps(dm.boundsxy)
bbox = bbox[bbox.find("[["):bbox.find("]]")+2].replace('[', '').replace(']', '').replace(',', '')
devfinder = OSMDeviationfinder(connectioninfo)
return Response(devfinder.osm_from_overpass(bbox, typesquery, f, uid),
mimetype='text/html')
return render_template('osmdownload.html', uid=uid)
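# Sketch of the Overpass filter string built by the loop above: every checked
# highway type in the form is turned into a negated tag filter, so posting
# 'footway' and 'path' (illustrative values) yields
#
#   typesquery = '["highway"!="footway"]["highway"!="path"]'
#
# which osm_from_overpass() presumably combines with the bounding polygon when
# building the query (an assumption about its internals, not shown in this excerpt).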
@devmap.route('/<uid>/harmonize/', methods=['GET', 'POST'])
def harmonize(uid):
"""This function is used to show and handle the harmonization options and process.
GET request: Renders and returns a site showing harmonization options.
POST request: Gets the harmonization options from the user and creates an object of the HarmonizeOptions class
which holds the user's chosen and default options. The harmonize_datasets function from the OSMDeviationfinder class
is called with the HarmonizeOptions object as parameter. The harmonize_datasets function uses 'yield' to return the
progress, this is used to stream the progress to the client.
"""
uid = uid.encode('ISO-8859-1')
devfinder = OSMDeviationfinder(connectioninfo)
dm = DevMap.query.filter_by(uid=uid).first()
if request.method == 'POST':
devfinder.db_source = ogr.Open(devfinder.dbconnectioninfo_ogr, 1)
harmonization_options = HarmonizeOptions(uid)
#: Keep column osm_id while processing
harmonization_options.keepcolumns_t2 = {'osm_id': 'varchar'}
if 'azimuthdifftolerance' in request.form:
harmonization_options.azimuthdifftolerance = request.form['azimuthdifftolerance']
if 'maxcheckpointanglediff' in request.form:
harmonization_options.maxcheckpointanglediff = request.form['maxcheckpointanglediff']
if 'searchradius' in request.form:
harmonization_options.searchradius = request.form['searchradius']
if 'presplitref' in request.form:
harmonization_options.presplitref = True
if 'presplitosm' in request.form:
harmonization_options.presplitosm = True
if 'harmonize' in request.form:
harmonization_options.harmonize = True
if 'cleanref' in request.form:
harmonization_options.cleanref = True
if 'cleanosm' in request.form:
harmonization_options.cleanosm = True
if 'cleandistance' in request.form:
harmonization_options.cleanosmradius = request.form['cleandistance']
harmonization_options.cleanrefradius = request.form['cleandistance']
if 'streetnamecol' in request.form:
harmonization_options.streetnamecol = request.form['streetnamecol']
if harmonization_options.streetnamecol == 'NoNameCol':
devfinder.create_nonamecolumn('odf_'+uid+'_ref')
dm.basetable = harmonization_options.basetable
dm.harmonize = harmonization_options.harmonize
dm.reftable = harmonization_options.reftable
dm.osmtable = harmonization_options.osmtable
dm.streetnamecol = harmonization_options.streetnamecol
dm.outsuffix = harmonization_options.outsuffix
dm.keepcolumns_t1 = harmonization_options.keepcolumns_t1
dm.keepcolumns_t2 = harmonization_options.keepcolumns_t2
dm.cleanref = harmonization_options.cleanref
dm.cleanosm = harmonization_options.cleanosm
dm.cleanrefradius = harmonization_options.cleanrefradius
dm.cleanosmradius = harmonization_options.cleanosmradius
dm.presplitref = harmonization_options.presplitref
dm.presplitosm = harmonization_options.presplitosm
dm.searchradius = harmonization_options.searchradius
dm.azimuthdifftolerance = harmonization_options.azimuthdifftolerance
dm.maxcheckpointanglediff = harmonization_options.maxcheckpointanglediff
dm.max_roads_countdiff = harmonization_options.max_roads_countdiff
dm.max_azdiff = harmonization_options.max_azdiff
dm.max_distancediff = harmonization_options.max_distancediff
db.session.add(dm)
db.session.commit()
return Response(devfinder.harmonize_datasets(harmonization_options), mimetype='text/html')
namecolumns = devfinder.get_textcolumns('odf_'+uid+'_ref')
return render_template('harmonize.html', uid=uid, namecolumns=namecolumns, dm=dm)
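# Minimal sketch of the streaming pattern used above, with an illustrative
# generator standing in for harmonize_datasets(): because the work function
# yields progress strings, wrapping it in flask.Response streams each chunk to
# the browser as it is produced.
#
#   def fake_progress():
#       for step in ('splitting', 'matching', 'cleaning'):
#           yield step + ' done<br>'
#   # Response(fake_progress(), mimetype='text/html')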
@devmap.route('/<uid>/linematch/', methods=['GET', 'POST'])
def linematch(uid):
"""This function is used to show and handle the linematching options and process.
GET request: Renders and returns a site showing linematching options.
POST request: Gets the linematching options from the user and creates an object of the LinematchOptions class
which holds the user's chosen and default options. The linematch_datasets function from the OSMDeviationfinder class
is called with the LinematchOptions object as parameter. The linematch_datasets function uses 'yield' to return the
progress, this is used to stream the progress to the client.
"""
uid = uid.encode('ISO-8859-1')
dm = DevMap.query.filter_by(uid=uid).first()
if request.method == 'POST':
devfinder = OSMDeviationfinder(connectioninfo)
devfinder.db_source = ogr.Open(devfinder.dbconnectioninfo_ogr, 1)
linematch_options = LinematchOptions(uid)
linematch_options.keepcolumns_t2 = {'osm_id': 'varchar'}
if 'searchradius' in request.form:
linematch_options.searchradius = request.form['searchradius']
if 'maxpotentialmatches' in request.form:
linematch_options.maxpotentialmatches = request.form['maxpotentialmatches']
if 'minmatchingfeatlen' in request.form:
linematch_options.minmatchingfeatlen = request.form['minmatchingfeatlen']
if 'maxlengthdiffratio' in request.form:
linematch_options.maxlengthdiffratio = request.form['maxlengthdiffratio']
if 'maxanglediff' in request.form:
linematch_options.maxanglediff = request.form['maxanglediff']
if 'posdiffsegmentlength' in request.form:
linematch_options.posdiffsegmentlength = request.form['posdiffsegmentlength']
if 'hausdorffsegmentlength' in request.form:
linematch_options.hausdorffsegmentlength = request.form['hausdorffsegmentlength']
if 'maxazimuthdiff' in request.form:
linematch_options.maxazimuthdiff = request.form['maxazimuthdiff']
if 'maxmeanposdifftolengthratio' in request.form:
linematch_options.maxmeanposdifftolength = request.form['maxmeanposdifftolengthratio']
if 'minmeanposdifftolengthratio' in request.form:
linematch_options.minmeanposdifftolength = request.form['minmeanposdifftolengthratio']
if 'exportdevvec' in request.form:
linematch_options.deviationvectorlayer = True
else:
linematch_options.deviationvectorlayer = False
dm.searchradius2 = linematch_options.searchradius
dm.minmatchingfeatlen = linematch_options.minmatchingfeatlen
dm.maxlengthdiffratio = linematch_options.maxlengthdiffratio
dm.maxanglediff = linematch_options.maxanglediff
dm.maxpotentialmatches = linematch_options.maxpotentialmatches
dm.posdiffsegmentlength = linematch_options.posdiffsegmentlength
dm.hausdorffsegmentlength = linematch_options.hausdorffsegmentlength
dm.maxazimuthdiff = linematch_options.maxazimuthdiff
dm.maxmeanposdevtolength = linematch_options.maxmeanposdevtolength
dm.minmeanposdevtolength = linematch_options.minmeanposdevtolength
dm.maxabsolutmeanposdev = linematch_options.maxabsolutmeanposdev
dm.maxdeviation = linematch_options.maxdeviation
db.session.add(dm)
db.session.commit()
return Response(devfinder.linematch_datasets(linematch_options), mimetype='text/html')
return render_template('linematch.html', uid=uid, dm=dm)
@devmap.route('/<uid>/finished/', methods=['GET', 'POST'])
def finished(uid):
uid = uid.encode('ISO-8859-1')
if request.method == 'POST':
dm = DevMap.query.filter_by(uid=uid).first()
if dm.owner == current_user or dm.owner == User.query.filter_by(username='Guest').first():
title = request.form['title']
listedmap = False
if 'listed' in request.form:
listedmap = True
dm.title = title
dm.listed = listedmap
db.session.add(dm)
db.session.commit()
return render_template('finished.html', uid=uid)
else:
return render_template('finished.html', uid=uid, error="No User", dm=dm)
else:
dm = DevMap.query.filter_by(uid=uid).first()
if dm.owner == current_user or dm.owner == User.query.filter_by(username='Guest').first():
return render_template('finished.html', uid=uid, dm=dm)
else:
return redirect(url_for('devmap.index'))
@devmap.route('/<uid>/results/', methods=['GET', 'POST'])
def results(uid):
"""This function is used to show and handle the result generation options and process.
GET request: Renders and returns a site showing result generation options.
POST request: Gets the result generation options from the user and creates an object of the ResultOptions class
which holds the user's chosen and default options. The create_results function from the OSMDeviationfinder class
is called with the ResultOptions object as parameter. The create_results function uses 'yield' to return the
progress, this is used to stream the progress to the client.
"""
uid = uid.encode('ISO-8859-1')
if request.method == 'POST':
devfinder = OSMDeviationfinder(connectioninfo)
devfinder.db_source = ogr.Open(devfinder.dbconnectioninfo_ogr, 1)
result_options = ResultOptions(uid)
dm = DevMap.query.filter_by(uid=uid).first()
if current_user.is_authenticated() and dm.owner == current_user \
or dm.owner == User.query.filter_by(username='Guest').first():
if 'maxdevgrid' in request.form: | [
" result_options.maxdevgrid = True"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Transformer-based models."""
import typing
import flax
from flax import linen as nn
import jax.numpy as jnp
from language.gscan.xattn_model.model import layers
from language.gscan.xattn_model.model import model_utils
@flax.struct.dataclass
class TransformerConfig:
"""Global model hyperparameters."""
vocab_size: int
target_vocab_size: int
type_vocab_size: int = 2
dtype: typing.Any = jnp.float32
bi_hidden_dim: int = 128
l_hidden_dim: int = 128
v_hidden_dim: int = 128
l_intermediate_dim: int = 256
v_intermediate_dim: int = 256
bi_num_heads: int = 8
l_num_heads: int = 8
v_num_heads: int = 8
decode_num_heads: int = 8
l_num_layers: int = 6
v_num_layers: int = 6
bi_num_layers: int = 6
decode_num_layers: int = 6
max_position_embeddings: int = 512
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
hidden_act: layers.ActFn = nn.gelu
deterministic: bool = True
kernel_init: layers.InitFn = layers.default_kernel_init
bias_init: layers.InitFn = layers.default_bias_init
embedding_init: layers.InitFn = layers.default_embedding_init
layer_norm_eps: float = 1e-12
cross_attn: bool = True
num_conv_channels: int = 50
conv_kernel_sizes: typing.Sequence[int] = (1, 5, 7)
max_decode_step: int = 50
decode: bool = False
beam_size: int = 1
class CNNImageEncoder(nn.Module):
"""CNN-based image encoder."""
config: TransformerConfig
@nn.compact
def __call__(self, x):
cfg = self.config
feats = []
for i, kernel_size in enumerate(cfg.conv_kernel_sizes):
feat = nn.Conv(
cfg.num_conv_channels,
kernel_size=(kernel_size, kernel_size),
name=f'conv_{i}')(
x)
feats.append(feat)
img = jnp.concatenate(feats, axis=-1)
img = img.reshape(img.shape[0], -1, img.shape[-1])
img = nn.Dense(
cfg.v_hidden_dim,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
name='dense')(
img)
img = nn.relu(img)
img = nn.Dropout(rate=cfg.dropout_rate)(
img, deterministic=cfg.deterministic)
return img
class TransformerEncoder(nn.Module):
"""The generatic transformer-based input encoder.
It should be inherited with other transformer-based encoders, e.g. the
encoder with or without cross-modal attention.
"""
config: TransformerConfig
def encode_txt(self, batch):
cfg = self.config
x = batch['token']
mask = batch.get('txt_mask', jnp.ones(x.shape[:2], dtype=jnp.int32))
assert x.ndim == 2, 'Inputs shape must be (batch_size, seq_len).'
x = layers.TransformerEmbeddings(
hidden_size=cfg.l_hidden_dim,
vocab_size=cfg.vocab_size,
type_vocab_size=cfg.type_vocab_size,
max_position_embeddings=cfg.max_position_embeddings,
hidden_dropout_rate=cfg.dropout_rate,
layer_norm_eps=cfg.layer_norm_eps,
deterministic=cfg.deterministic,
embedding_init=cfg.embedding_init,
name='embeddings')(x, batch.get('pos_ids'), batch.get('seg_ids'))
mask = mask[:, None, None, :]
return x, mask
def encode_image(self, batch):
img = CNNImageEncoder(self.config, name='img_enc')(batch['image'])
img_mask = jnp.ones(img.shape[:2], dtype=jnp.int32)
img_mask = img_mask[:, None, None, :]
return img, img_mask
class CrossModalEncoder(TransformerEncoder):
"""Transformer-based encoder with cross-modal attention."""
config: TransformerConfig
@nn.compact
def __call__(self, batch):
cfg = self.config
txt, txt_mask = self.encode_txt(batch)
img, img_mask = self.encode_image(batch)
for i in range(cfg.bi_num_layers):
txt, img = layers.TransformerCrossLayer(
bi_num_heads=cfg.bi_num_heads,
bi_hidden_size=cfg.bi_hidden_dim,
hidden_size1=cfg.l_hidden_dim,
hidden_size2=cfg.v_hidden_dim,
intermediate_size1=cfg.l_intermediate_dim,
intermediate_size2=cfg.v_intermediate_dim,
attention_dropout_rate=cfg.attention_dropout_rate,
hidden_dropout_rate=cfg.dropout_rate,
layer_norm_eps=cfg.layer_norm_eps,
deterministic=cfg.deterministic,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
hidden_act=cfg.hidden_act,
name=f'transformer_cross_layer_{i}')(txt, img, txt_mask, img_mask)
encoded = jnp.concatenate((txt, img), axis=1)
encoded_mask = jnp.concatenate(
(txt_mask.squeeze(1).squeeze(1), img_mask.squeeze(1).squeeze(1)),
axis=1)
encoded = img
encoded_mask = img_mask.squeeze(1).squeeze(1)
return encoded, encoded_mask
class NonCrossModalEncoder(TransformerEncoder):
"""Transformer-based encoder without cross-modal attention."""
config: TransformerConfig
@nn.compact
def __call__(self, batch):
cfg = self.config
txt, txt_mask = self.encode_txt(batch)
img, img_mask = self.encode_image(batch)
for i in range(cfg.l_num_layers):
txt = layers.TransformerLayer(
num_heads=cfg.l_num_heads,
hidden_size=cfg.l_hidden_dim,
intermediate_size=cfg.l_intermediate_dim,
attention_dropout_rate=cfg.attention_dropout_rate,
hidden_dropout_rate=cfg.dropout_rate,
layer_norm_eps=cfg.layer_norm_eps,
deterministic=cfg.deterministic,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
hidden_act=cfg.hidden_act,
name=f'txt_transformer_layer_{i}')(
txt, txt, mask=txt_mask)
for i in range(cfg.v_num_layers):
img = layers.TransformerLayer(
num_heads=cfg.v_num_heads,
hidden_size=cfg.v_hidden_dim,
intermediate_size=cfg.v_intermediate_dim,
attention_dropout_rate=cfg.attention_dropout_rate,
hidden_dropout_rate=cfg.dropout_rate,
layer_norm_eps=cfg.layer_norm_eps,
deterministic=cfg.deterministic,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
hidden_act=cfg.hidden_act,
name=f'img_transformer_layer_{i}')(
img, img, mask=img_mask)
encoded = jnp.concatenate((txt, img), axis=1)
encoded_mask = jnp.concatenate(
(txt_mask.squeeze(1).squeeze(1), img_mask.squeeze(1).squeeze(1)),
axis=1)
return encoded, encoded_mask
class TransformerDecoder(nn.Module):
"""Transformer decoder."""
config: TransformerConfig
@nn.compact
def __call__(self,
x,
encoded,
pos_ids=None,
token_type_ids=None,
decoder_mask=None,
encoder_decoder_mask=None):
cfg = self.config
x = layers.TransformerEmbeddings(
hidden_size=cfg.l_hidden_dim,
vocab_size=cfg.target_vocab_size,
type_vocab_size=cfg.type_vocab_size,
max_position_embeddings=cfg.max_position_embeddings,
hidden_dropout_rate=cfg.dropout_rate,
layer_norm_eps=cfg.layer_norm_eps,
deterministic=cfg.deterministic,
embedding_init=cfg.embedding_init,
decode=cfg.decode,
name='embeddings')(x, pos_ids, token_type_ids)
for i in range(cfg.decode_num_layers):
x = layers.TransformerEncoderDecoderLayer(
num_heads=cfg.decode_num_heads,
hidden_size=cfg.l_hidden_dim,
intermediate_size=cfg.l_intermediate_dim,
attention_dropout_rate=cfg.attention_dropout_rate,
hidden_dropout_rate=cfg.dropout_rate,
layer_norm_eps=cfg.layer_norm_eps,
deterministic=cfg.deterministic,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
hidden_act=cfg.hidden_act,
decode=cfg.decode,
name=f'transformer_encoder_decoder_layer_{i}')(x, encoded,
decoder_mask,
encoder_decoder_mask)
x = nn.Dense(
cfg.target_vocab_size,
kernel_init=cfg.kernel_init,
bias_init=cfg.bias_init,
name='dense')(
x)
return x
def get_attention_masks(self, inputs, targets):
cfg = self.config
if cfg.decode:
decoder_mask = None
encoder_decoder_mask = nn.make_attention_mask(
jnp.ones_like(targets) > 0, inputs > 0)
else:
decoder_mask = nn.combine_masks(
nn.make_attention_mask(targets > 0, targets > 0, dtype=cfg.dtype),
nn.make_causal_mask(targets, dtype=cfg.dtype))
encoder_decoder_mask = nn.make_attention_mask(
targets > 0, inputs > 0, dtype=cfg.dtype)
return decoder_mask, encoder_decoder_mask
class Model(nn.Module):
"""The main model class."""
| [
" config: TransformerConfig"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
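# Hedged instantiation sketch for the flax modules above (all shape and size
# values are illustrative assumptions, not taken from the excerpt):
#
#   import jax, jax.numpy as jnp
#   cfg = TransformerConfig(vocab_size=100, target_vocab_size=20)
#   enc = CNNImageEncoder(cfg)
#   params = enc.init(jax.random.PRNGKey(0), jnp.ones((2, 16, 16, 3)))  # (batch, H, W, C)
#   feats = enc.apply(params, jnp.ones((2, 16, 16, 3)))                 # (batch, H*W, v_hidden_dim)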
#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
V8 correctness fuzzer launcher script.
"""
import argparse
import hashlib
import itertools
import json
import os
import random
import re
import sys
import traceback
import v8_commands
import v8_suppressions
CONFIGS = dict(
default=[
'--suppress-asm-messages',
],
ignition=[
'--turbo-filter=~',
'--noopt',
'--suppress-asm-messages',
],
ignition_asm=[
'--turbo-filter=~',
'--noopt',
'--validate-asm',
'--stress-validate-asm',
'--suppress-asm-messages',
],
ignition_eager=[
'--turbo-filter=~',
'--noopt',
'--no-lazy',
'--no-lazy-inner-functions',
'--suppress-asm-messages',
],
ignition_turbo=[
'--suppress-asm-messages',
],
ignition_turbo_opt=[
'--always-opt',
'--suppress-asm-messages',
],
ignition_turbo_opt_eager=[
'--always-opt',
'--no-lazy',
'--no-lazy-inner-functions',
'--suppress-asm-messages',
],
slow_path=[
'--force-slow-path',
'--suppress-asm-messages',
],
slow_path_opt=[
'--always-opt',
'--force-slow-path',
'--suppress-asm-messages',
],
trusted=[
'--no-untrusted-code-mitigations',
'--suppress-asm-messages',
],
trusted_opt=[
'--always-opt',
'--no-untrusted-code-mitigations',
'--suppress-asm-messages',
],
)
# Additional flag experiments. List of tuples like
# (<likelihood to use flags in [0,1)>, <flag>).
ADDITIONAL_FLAGS = [
(0.1, '--stress-marking=100'), | [
" (0.1, '--stress-scavenge=100'),"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
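# Hedged sketch of how the (probability, flag) pairs above are typically consumed;
# the helper name below is an assumption, not part of the truncated excerpt.
#
#   def sample_additional_flags(rng=random.random):
#       return [flag for prob, flag in ADDITIONAL_FLAGS if rng() < prob]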
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
A client for AWS Batch services
.. seealso::
- http://boto3.readthedocs.io/en/latest/guide/configuration.html
- http://boto3.readthedocs.io/en/latest/reference/services/batch.html
- https://docs.aws.amazon.com/batch/latest/APIReference/Welcome.html
"""
import warnings
from random import uniform
from time import sleep
from typing import Dict, List, Optional, Union
import botocore.client
import botocore.exceptions
import botocore.waiter
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.typing_compat import Protocol, runtime_checkable
@runtime_checkable
class BatchProtocol(Protocol):
"""
A structured Protocol for ``boto3.client('batch') -> botocore.client.Batch``.
This is used for type hints on :py:meth:`.BatchClient.client`; it covers
only the subset of client methods required.
.. seealso::
- https://mypy.readthedocs.io/en/latest/protocols.html
- http://boto3.readthedocs.io/en/latest/reference/services/batch.html
"""
def describe_jobs(self, jobs: List[str]) -> Dict:
"""
Get job descriptions from AWS Batch
:param jobs: a list of JobId to describe
:type jobs: List[str]
:return: an API response to describe jobs
:rtype: Dict
"""
...
def get_waiter(self, waiterName: str) -> botocore.waiter.Waiter:
"""
Get an AWS Batch service waiter
:param waiterName: The name of the waiter. The name should match
the name (including the casing) of the key name in the waiter
model file (typically this is CamelCasing).
:type waiterName: str
:return: a waiter object for the named AWS Batch service
:rtype: botocore.waiter.Waiter
.. note::
AWS Batch might not have any waiters (until botocore PR-1307 is released).
.. code-block:: python
import boto3
boto3.client("batch").waiter_names == []
.. seealso::
- https://boto3.amazonaws.com/v1/documentation/api/latest/guide/clients.html#waiters
- https://github.com/boto/botocore/pull/1307
"""
...
def submit_job(
self,
jobName: str,
jobQueue: str,
jobDefinition: str,
arrayProperties: Dict,
parameters: Dict,
containerOverrides: Dict,
tags: Dict,
) -> Dict:
"""
Submit a Batch job
:param jobName: the name for the AWS Batch job
:type jobName: str
:param jobQueue: the queue name on AWS Batch
:type jobQueue: str
:param jobDefinition: the job definition name on AWS Batch
:type jobDefinition: str
:param arrayProperties: the same parameter that boto3 will receive
:type arrayProperties: Dict
:param parameters: the same parameter that boto3 will receive
:type parameters: Dict
:param containerOverrides: the same parameter that boto3 will receive
:type containerOverrides: Dict
:param tags: the same parameter that boto3 will receive
:type tags: Dict
:return: an API response
:rtype: Dict
"""
...
def terminate_job(self, jobId: str, reason: str) -> Dict:
"""
Terminate a Batch job
:param jobId: a job ID to terminate
:type jobId: str
:param reason: a reason to terminate job ID
:type reason: str
:return: an API response | [
" :rtype: Dict"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
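# Hedged boto3 sketch matching the Protocol methods above (the raw client is
# constructed directly here only for illustration; in Airflow the client comes
# from AwsBaseHook, and the job id shown is a placeholder):
#
#   import boto3
#   batch = boto3.client("batch")
#   resp = batch.describe_jobs(jobs=["<job-id>"])
#   batch.terminate_job(jobId="<job-id>", reason="cleanup")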
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import errno
import logging
import os
import subprocess
import warnings
from future import standard_library
standard_library.install_aliases()
from builtins import str
from collections import OrderedDict
from configparser import ConfigParser
# show Airflow's deprecation warnings
warnings.filterwarnings(
action='default', category=DeprecationWarning, module='airflow')
warnings.filterwarnings(
action='default', category=PendingDeprecationWarning, module='airflow')
class AirflowConfigException(Exception):
pass
try:
from cryptography.fernet import Fernet
except ImportError:
pass
def generate_fernet_key():
try:
FERNET_KEY = Fernet.generate_key().decode()
except NameError:
FERNET_KEY = "cryptography_not_found_storing_passwords_in_plain_text"
return FERNET_KEY
def expand_env_var(env_var):
"""
Expands (potentially nested) env vars by repeatedly applying
`expandvars` and `expanduser` until interpolation stops having
any effect.
"""
if not env_var:
return env_var
while True:
interpolated = os.path.expanduser(os.path.expandvars(str(env_var)))
if interpolated == env_var:
return interpolated
else:
env_var = interpolated
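# Illustrative doctest-style sketch of the nested expansion described above
# (the variable names are assumptions):
#
#   >>> os.environ['BASE'] = '~/airflow'
#   >>> os.environ['LOGS'] = '$BASE/logs'
#   >>> expand_env_var('$LOGS')   # first pass gives '~/airflow/logs', second pass expands '~'
#   '/home/<user>/airflow/logs'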
def run_command(command):
"""
Runs command and returns stdout
"""
process = subprocess.Popen(
command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, stderr = process.communicate()
if process.returncode != 0:
raise AirflowConfigException(
"Cannot execute {}. Error code is: {}. Output: {}, Stderr: {}"
.format(command, process.returncode, output, stderr)
)
return output
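# Illustrative doctest-style sketch (assumes a POSIX 'echo' is available); this
# helper is what lets "*_cmd" options below resolve to a command's stdout instead
# of a literal value. Note that on Python 3 the raw value is bytes, not str.
#
#   >>> run_command('echo my-secret-uri')
#   'my-secret-uri\n'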
defaults = {
'core': {
'unit_test_mode': False,
'parallelism': 32,
'load_examples': True,
'plugins_folder': None,
'security': None,
'donot_pickle': False,
'remote_base_log_folder': '',
'remote_log_conn_id': '',
'encrypt_s3_logs': False,
's3_log_folder': '', # deprecated!
'dag_concurrency': 16,
'max_active_runs_per_dag': 16,
'executor': 'SequentialExecutor',
'dags_are_paused_at_creation': True,
'sql_alchemy_pool_size': 5,
'sql_alchemy_pool_recycle': 3600,
'dagbag_import_timeout': 30,
'non_pooled_task_slot_count': 128,
},
'operators': {
'default_owner': 'airflow',
'default_cpus': 1,
'default_ram': 512,
'default_disk': 512,
'default_gpus': 0,
},
'webserver': {
'base_url': 'http://localhost:8080',
'web_server_host': '0.0.0.0',
'web_server_port': '8080',
'web_server_worker_timeout': 120,
'worker_refresh_batch_size': 1,
'worker_refresh_interval': 30,
'authenticate': False,
'filter_by_owner': False,
'owner_mode': 'user',
'demo_mode': False,
'secret_key': 'airflowified',
'expose_config': False,
'workers': 4,
'worker_class': 'sync',
'access_logfile': '',
'error_logfile': '',
},
'scheduler': {
'statsd_on': False,
'statsd_host': 'localhost',
'statsd_port': 8125,
'statsd_prefix': 'airflow',
'job_heartbeat_sec': 5,
'scheduler_heartbeat_sec': 60,
'authenticate': False,
'max_threads': 2,
'run_duration': 30 * 60,
'dag_dir_list_interval': 5 * 60,
'print_stats_interval': 30,
'min_file_process_interval': 180,
'child_process_log_directory': '/tmp/airflow/scheduler/logs'
},
'celery': {
'broker_url': 'sqla+mysql://airflow:airflow@localhost:3306/airflow',
'celery_app_name': 'airflow.executors.celery_executor',
'celery_result_backend': 'db+mysql://airflow:airflow@localhost:3306/airflow',
'celeryd_concurrency': 16,
'default_queue': 'default',
'flower_host': '0.0.0.0',
'flower_port': '5555',
'worker_log_server_port': '8793',
},
'email': {
'email_backend': 'airflow.utils.email.send_email_smtp',
},
'smtp': {
'smtp_starttls': True,
'smtp_ssl': False,
'smtp_user': '',
'smtp_password': '',
},
'kerberos': {
'ccache': '/tmp/airflow_krb5_ccache',
'principal': 'airflow', # gets augmented with fqdn
'reinit_frequency': '3600',
'kinit_path': 'kinit',
'keytab': 'airflow.keytab',
},
'github_enterprise': {
'api_rev': 'v3'
},
'admin': {
'hide_sensitive_variable_fields': True,
},
}
DEFAULT_CONFIG = """\
[core]
# The home folder for airflow, default is ~/airflow
airflow_home = {AIRFLOW_HOME}
# The folder where your airflow pipelines live, most likely a
# subfolder in a code repository
# This path must be absolute
dags_folder = {AIRFLOW_HOME}/dags
# The folder where airflow should store its log files
# This path must be absolute
base_log_folder = {AIRFLOW_HOME}/logs
# Airflow can store logs remotely in AWS S3 or Google Cloud Storage. Users
# must supply a remote location URL (starting with either 's3://...' or
# 'gs://...') and an Airflow connection id that provides access to the storage
# location.
remote_base_log_folder =
remote_log_conn_id =
# Use server-side encryption for logs stored in S3
encrypt_s3_logs = False
# deprecated option for remote log storage, use remote_base_log_folder instead!
# s3_log_folder =
# The executor class that airflow should use. Choices include
# SequentialExecutor, LocalExecutor, CeleryExecutor
executor = SequentialExecutor
# The SqlAlchemy connection string to the metadata database.
# SqlAlchemy supports many different database engines; more information
# on their website
sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/airflow.db
# The SqlAlchemy pool size is the maximum number of database connections
# in the pool.
sql_alchemy_pool_size = 5
# The SqlAlchemy pool recycle is the number of seconds a connection
# can be idle in the pool before it is invalidated. This config does
# not apply to sqlite.
sql_alchemy_pool_recycle = 3600
# The amount of parallelism as a setting to the executor. This defines
# the max number of task instances that should run simultaneously
# on this airflow installation
parallelism = 32
# The number of task instances allowed to run concurrently by the scheduler
dag_concurrency = 16
# Are DAGs paused by default at creation
dags_are_paused_at_creation = True
# When not using pools, tasks are run in the "default pool",
# whose size is guided by this config element
non_pooled_task_slot_count = 128
# The maximum number of active DAG runs per DAG
max_active_runs_per_dag = 16
# Whether to load the examples that ship with Airflow. It's good to
# get started, but you probably want to set this to False in a production
# environment
load_examples = True
# Where your Airflow plugins are stored
plugins_folder = {AIRFLOW_HOME}/plugins
# Secret key to save connection passwords in the db
fernet_key = {FERNET_KEY}
# Whether to disable pickling dags
donot_pickle = False
# How long before timing out a python file import while filling the DagBag
dagbag_import_timeout = 30
[operators]
# The default owner assigned to each new operator, unless
# provided explicitly or passed via `default_args`
default_owner = Airflow
default_cpus = 1
default_ram = 512
default_disk = 512
default_gpus = 0
[webserver]
# The base url of your website as airflow cannot guess what domain or
# cname you are using. This is used in automated emails that
# airflow sends to point links to the right web server
base_url = http://localhost:8080
# The ip specified when starting the web server
web_server_host = 0.0.0.0
# The port on which to run the web server
web_server_port = 8080
# Number of seconds the gunicorn webserver waits before timing out on a worker
web_server_worker_timeout = 120
# Number of workers to refresh at a time. When set to 0, worker refresh is
# disabled. When nonzero, airflow periodically refreshes webserver workers by
# bringing up new ones and killing old ones.
worker_refresh_batch_size = 1
# Number of seconds to wait before refreshing a batch of workers.
worker_refresh_interval = 30
# Secret key used to run your flask app
secret_key = temporary_key
# Number of workers to run the Gunicorn web server
workers = 4
# The worker class gunicorn should use. Choices include
# sync (default), eventlet, gevent
worker_class = sync
# Log files for the gunicorn webserver. '-' means log to stderr.
access_logfile = -
error_logfile = -
# Expose the configuration file in the web server
expose_config = true
# Set to true to turn on authentication:
# http://pythonhosted.org/airflow/installation.html#web-authentication
authenticate = False
# Filter the list of dags by owner name (requires authentication to be enabled)
filter_by_owner = False
# Filtering mode. Choices include user (default) and ldapgroup.
# Ldap group filtering requires using the ldap backend
#
# Note that the ldap server needs the "memberOf" overlay to be set up
# in order to use the ldapgroup mode.
owner_mode = user
[email]
email_backend = airflow.utils.email.send_email_smtp
[smtp]
# If you want airflow to send emails on retries, failure, and you want to use
# the airflow.utils.email.send_email_smtp function, you have to configure an smtp
# server here
smtp_host = localhost
smtp_starttls = True
smtp_ssl = False
smtp_user = airflow
smtp_port = 25
smtp_password = airflow
smtp_mail_from = [email protected]
[celery]
# This section only applies if you are using the CeleryExecutor in
# [core] section above
# The app name that will be used by celery
celery_app_name = airflow.executors.celery_executor
# The concurrency that will be used when starting workers with the
# "airflow worker" command. This defines the number of task instances that
# a worker will take, so size up your workers based on the resources on
# your worker box and the nature of your tasks
celeryd_concurrency = 16
# When you start an airflow worker, airflow starts a tiny web server
# subprocess to serve the worker's local log files to the airflow main
# web server, which then builds pages and sends them to users. This defines
# the port on which the logs are served. It needs to be unused, and
# reachable from the main web server, which connects to the workers.
worker_log_server_port = 8793
# The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
# a sqlalchemy database. Refer to the Celery documentation for more
# information.
broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
# Another key Celery setting
celery_result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
# it `airflow flower`. This defines the IP that Celery Flower runs on
flower_host = 0.0.0.0
# This defines the port that Celery Flower runs on
flower_port = 5555
# Default queue that tasks get assigned to and that workers listen on.
default_queue = default
[scheduler]
# Task instances listen for external kill signal (when you clear tasks
# from the CLI or the UI), this defines the frequency at which they should
# listen (in seconds).
job_heartbeat_sec = 5
# The scheduler constantly tries to trigger new tasks (look at the
# scheduler section in the docs for more information). This defines
# how often the scheduler should run (in seconds).
scheduler_heartbeat_sec = 5
# Statsd (https://github.com/etsy/statsd) integration settings
# statsd_on = False
# statsd_host = localhost
# statsd_port = 8125
# statsd_prefix = airflow
# The scheduler can run multiple threads in parallel to schedule dags.
# This defines how many threads will run. However, airflow will never
# use more threads than the number of CPU cores available.
max_threads = 2
[mesos]
# Mesos master address which MesosExecutor will connect to.
master = localhost:5050
# The framework name which Airflow scheduler will register itself as on mesos
framework_name = Airflow
# Number of cpu cores required for running one task instance using
# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
# command on a mesos slave
task_cpu = 1
# Memory in MB required for running one task instance using
# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
# command on a mesos slave
task_memory = 256
# Enable framework checkpointing for mesos
# See http://mesos.apache.org/documentation/latest/slave-recovery/
checkpoint = False
# Failover timeout in milliseconds.
# When checkpointing is enabled and this option is set, Mesos waits
# until the configured timeout for
# the MesosExecutor framework to re-register after a failover. Mesos
# shuts down running tasks if the
# MesosExecutor framework fails to re-register within this timeframe.
# failover_timeout = 604800
# Enable framework authentication for mesos
# See http://mesos.apache.org/documentation/latest/configuration/
authenticate = False
# Mesos credentials, if authentication is enabled
# default_principal = admin
# default_secret = admin
[admin]
# UI to hide sensitive variable fields when set to True
hide_sensitive_variable_fields = True
"""
TEST_CONFIG = """\
[core]
airflow_home = {AIRFLOW_HOME}
dags_folder = {TEST_DAGS_FOLDER}
base_log_folder = {AIRFLOW_HOME}/logs
executor = SequentialExecutor
sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/unittests.db
unit_test_mode = True
load_examples = True
donot_pickle = False
dag_concurrency = 16
dags_are_paused_at_creation = False
fernet_key = {FERNET_KEY}
non_pooled_task_slot_count = 128
[operators]
default_owner = airflow
[webserver]
base_url = http://localhost:8080
web_server_host = 0.0.0.0
web_server_port = 8080
[email]
email_backend = airflow.utils.email.send_email_smtp
[smtp]
smtp_host = localhost
smtp_user = airflow
smtp_port = 25
smtp_password = airflow
smtp_mail_from = [email protected]
[celery]
celery_app_name = airflow.executors.celery_executor
celeryd_concurrency = 16
worker_log_server_port = 8793
broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
celery_result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
flower_host = 0.0.0.0
flower_port = 5555
default_queue = default
[scheduler]
job_heartbeat_sec = 1
scheduler_heartbeat_sec = 5
authenticate = true
max_threads = 2
"""
class ConfigParserWithDefaults(ConfigParser):
# These configuration options can be fetched from the stdout of a command by
# defining a companion "<option>_cmd" option in the same section; the idea is
# to avoid storing passwords in plain-text config files.
as_command_stdout = {
('core', 'sql_alchemy_conn'),
('core', 'fernet_key'),
('celery', 'broker_url'),
('celery', 'celery_result_backend')
}
def __init__(self, defaults, *args, **kwargs):
self.defaults = defaults
ConfigParser.__init__(self, *args, **kwargs)
self.is_validated = False
def _validate(self):
if (
self.get("core", "executor") != 'SequentialExecutor' and
"sqlite" in self.get('core', 'sql_alchemy_conn')):
raise AirflowConfigException("error: cannot use sqlite with the {}".
format(self.get('core', 'executor')))
elif (
self.getboolean("webserver", "authenticate") and
self.get("webserver", "owner_mode") not in ['user', 'ldapgroup']
):
raise AirflowConfigException("error: owner_mode option should be either "
"'user' or 'ldapgroup' "
"when filtering by owner is set")
elif (
self.getboolean("webserver", "authenticate") and
self.get("webserver", "owner_mode").lower() == 'ldapgroup' and
self.get("core", "auth_backend") != 'airflow.contrib.auth.backends.ldap_auth'
):
raise AirflowConfigException("error: attempt at using ldapgroup "
"filtering without using the Ldap backend")
self.is_validated = True
def _get_env_var_option(self, section, key):
# must have format AIRFLOW__{SECTION}__{KEY} (note double underscore)
env_var = 'AIRFLOW__{S}__{K}'.format(S=section.upper(), K=key.upper())
if env_var in os.environ:
return expand_env_var(os.environ[env_var])
def _get_cmd_option(self, section, key):
fallback_key = key + '_cmd'
if (
(section, key) in ConfigParserWithDefaults.as_command_stdout and
self.has_option(section, fallback_key)):
command = self.get(section, fallback_key)
return run_command(command)
def get(self, section, key, **kwargs):
section = str(section).lower()
key = str(key).lower()
d = self.defaults
# first check environment variables
option = self._get_env_var_option(section, key)
if option:
return option
# ...then the config file
if self.has_option(section, key):
return expand_env_var(
ConfigParser.get(self, section, key, **kwargs))
# ...then commands
option = self._get_cmd_option(section, key)
if option:
return option
# ...then the defaults
if section in d and key in d[section]:
return expand_env_var(d[section][key])
else:
logging.warn("section/key [{section}/{key}] not found "
"in config".format(**locals()))
raise AirflowConfigException(
"section/key [{section}/{key}] not found "
"in config".format(**locals()))
def getboolean(self, section, key):
val = str(self.get(section, key)).lower().strip()
if '#' in val:
val = val.split('#')[0].strip()
if val == "true":
return True
elif val == "false":
return False
else:
raise AirflowConfigException("Not a boolean.")
def getint(self, section, key):
return int(self.get(section, key))
def getfloat(self, section, key):
return float(self.get(section, key))
def read(self, filenames):
ConfigParser.read(self, filenames)
self._validate()
def as_dict(self, display_source=False, display_sensitive=False):
"""
Returns the current configuration as an OrderedDict of OrderedDicts.
:param display_source: If False, the option value is returned. If True,
a tuple of (option_value, source) is returned. Source is either
'airflow.cfg' or 'default'.
:type display_source: bool
:param display_sensitive: If True, the values of options set by env
vars and bash commands will be displayed. If False, those options
are shown as '< hidden >'
:type display_sensitive: bool
"""
cfg = copy.deepcopy(self._sections)
# remove __name__ (affects Python 2 only)
for options in cfg.values():
options.pop('__name__', None)
# add source
if display_source:
for section in cfg:
for k, v in cfg[section].items():
cfg[section][k] = (v, 'airflow.cfg')
# add env vars and overwrite because they have priority
for ev in [ev for ev in os.environ if ev.startswith('AIRFLOW__')]:
try:
_, section, key = ev.split('__')
opt = self._get_env_var_option(section, key)
except ValueError:
opt = None
if opt:
if not display_sensitive:
opt = '< hidden >'
if display_source:
opt = (opt, 'env var')
cfg.setdefault(section.lower(), OrderedDict()).update(
{key.lower(): opt})
# add bash commands
for (section, key) in ConfigParserWithDefaults.as_command_stdout:
opt = self._get_cmd_option(section, key)
if opt:
if not display_sensitive:
opt = '< hidden >'
if display_source:
opt = (opt, 'bash cmd')
cfg.setdefault(section, OrderedDict()).update({key: opt})
# add defaults
for section in sorted(self.defaults):
for key in sorted(self.defaults[section].keys()):
if key not in cfg.setdefault(section, OrderedDict()):
opt = str(self.defaults[section][key])
if display_source:
cfg[section][key] = (opt, 'default')
else:
cfg[section][key] = opt
return cfg
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise AirflowConfigException('Had trouble creating a directory')
# Setting AIRFLOW_HOME and AIRFLOW_CONFIG from environment variables, using
# "~/airflow" and "~/airflow/airflow.cfg" respectively as defaults.
if 'AIRFLOW_HOME' not in os.environ:
AIRFLOW_HOME = expand_env_var('~/airflow')
else:
AIRFLOW_HOME = expand_env_var(os.environ['AIRFLOW_HOME'])
mkdir_p(AIRFLOW_HOME)
if 'AIRFLOW_CONFIG' not in os.environ:
if os.path.isfile(expand_env_var('~/airflow.cfg')):
AIRFLOW_CONFIG = expand_env_var('~/airflow.cfg')
else:
AIRFLOW_CONFIG = AIRFLOW_HOME + '/airflow.cfg'
else:
AIRFLOW_CONFIG = expand_env_var(os.environ['AIRFLOW_CONFIG'])
# Set up dags folder for unit tests
# this directory won't exist if users install via pip
_TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))), | [
" 'tests',"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring, long, unicode
import functools
from collections import Mapping
from datetime import datetime
from sqlalchemy import extract, func
from sqlalchemy.orm import synonym
from sqlalchemy.ext.hybrid import Comparator, hybrid_property
from flexget.manager import Session
from flexget.utils import qualities, json
from flexget.entry import Entry
def with_session(*args, **kwargs):
""""
A decorator which creates a new session if one was not passed via keyword argument to the function.
Automatically commits and closes the session if one was created, caller is responsible for commit if passed in.
| [
" If arguments are given when used as a decorator, they will automatically be passed to the created Session when"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
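# Hedged usage sketch of the decorator described above (the query body is illustrative):
#
#   @with_session
#   def recent_entries(session=None):
#       return session.query(Entry).all()
#
#   entries = recent_entries()   # no session passed in: one is created, committed and closed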
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All KeyTypes and which languages support them."""
from typing import List
from tink import aead
from tink import daead
from tink import hybrid
from tink import jwt
from tink import mac
from tink import prf
from tink import signature
from tink import streaming_aead
from tink.proto import tink_pb2
# All languages supported by cross-language tests.
ALL_LANGUAGES = ['cc', 'java', 'go', 'python']
# All KeyTypes (without the prefix 'type.googleapis.com/google.crypto.tink.')
AEAD_KEY_TYPES = [
'AesEaxKey',
'AesGcmKey',
'AesGcmSivKey',
'AesCtrHmacAeadKey',
'ChaCha20Poly1305Key',
'XChaCha20Poly1305Key',
'KmsAeadKey',
'KmsEnvelopeAeadKey',
]
DAEAD_KEY_TYPES = ['AesSivKey']
STREAMING_AEAD_KEY_TYPES = [
'AesCtrHmacStreamingKey',
'AesGcmHkdfStreamingKey',
]
HYBRID_PRIVATE_KEY_TYPES = ['EciesAeadHkdfPrivateKey', 'HpkePrivateKey']
MAC_KEY_TYPES = [
'AesCmacKey',
'HmacKey',
]
SIGNATURE_KEY_TYPES = [
'EcdsaPrivateKey',
'Ed25519PrivateKey',
'RsaSsaPkcs1PrivateKey',
'RsaSsaPssPrivateKey',
]
PRF_KEY_TYPES = [
'AesCmacPrfKey',
'HmacPrfKey',
'HkdfPrfKey',
]
JWT_MAC_KEY_TYPES = [
'JwtHmacKey',
]
JWT_SIGNATURE_KEY_TYPES = [
'JwtEcdsaPrivateKey',
'JwtRsaSsaPkcs1PrivateKey',
'JwtRsaSsaPssPrivateKey',
]
ALL_KEY_TYPES = (
AEAD_KEY_TYPES + DAEAD_KEY_TYPES + STREAMING_AEAD_KEY_TYPES +
HYBRID_PRIVATE_KEY_TYPES + MAC_KEY_TYPES + SIGNATURE_KEY_TYPES +
PRF_KEY_TYPES + JWT_MAC_KEY_TYPES + JWT_SIGNATURE_KEY_TYPES)
# All languages that are supported by a KeyType
SUPPORTED_LANGUAGES = {
'AesEaxKey': ['cc', 'java', 'python'],
'AesGcmKey': ['cc', 'java', 'go', 'python'],
'AesGcmSivKey': ['cc', 'go', 'python'],
'AesCtrHmacAeadKey': ['cc', 'java', 'go', 'python'],
'ChaCha20Poly1305Key': ['java', 'go'],
'XChaCha20Poly1305Key': ['cc', 'java', 'go', 'python'],
'KmsAeadKey': ['cc', 'java', 'python'],
'KmsEnvelopeAeadKey': ['cc', 'java', 'go', 'python'],
'AesSivKey': ['cc', 'java', 'go', 'python'],
'AesCtrHmacStreamingKey': ['cc', 'java', 'go', 'python'],
'AesGcmHkdfStreamingKey': ['cc', 'java', 'go', 'python'],
'EciesAeadHkdfPrivateKey': ['cc', 'java', 'go', 'python'],
'HpkePrivateKey': ['cc', 'java', 'python'],
'AesCmacKey': ['cc', 'java', 'go', 'python'],
'HmacKey': ['cc', 'java', 'go', 'python'],
'EcdsaPrivateKey': ['cc', 'java', 'go', 'python'],
'Ed25519PrivateKey': ['cc', 'java', 'go', 'python'],
'RsaSsaPkcs1PrivateKey': ['cc', 'java', 'python'],
'RsaSsaPssPrivateKey': ['cc', 'java', 'python'],
'AesCmacPrfKey': ['cc', 'java', 'go', 'python'],
'HmacPrfKey': ['cc', 'java', 'go', 'python'],
'HkdfPrfKey': ['cc', 'java', 'go', 'python'],
'JwtHmacKey': ['cc', 'java', 'python'],
'JwtEcdsaPrivateKey': ['cc', 'java', 'python'],
'JwtRsaSsaPkcs1PrivateKey': ['cc', 'java', 'python'],
'JwtRsaSsaPssPrivateKey': ['cc', 'java', 'python'],
}
KEY_TYPE_FROM_URL = {
'type.googleapis.com/google.crypto.tink.' + key_type: key_type
for key_type in ALL_KEY_TYPES}
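# Illustrative doctest-style lookups over the mappings defined above:
#
#   >>> KEY_TYPE_FROM_URL['type.googleapis.com/google.crypto.tink.AesGcmKey']
#   'AesGcmKey'
#   >>> 'go' in SUPPORTED_LANGUAGES['AesEaxKey']
#   False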
# For each KeyType, a list of Tinkey KeyTemplate names.
# TODO(juerg): Add missing key template names, and remove deprecated names.
KEY_TEMPLATE_NAMES = {
'AesEaxKey': [
'AES128_EAX', 'AES128_EAX_RAW', 'AES256_EAX', 'AES256_EAX_RAW'
],
'AesGcmKey': [
'AES128_GCM', 'AES128_GCM_RAW', 'AES256_GCM', 'AES256_GCM_RAW'
],
'AesGcmSivKey': [
'AES128_GCM_SIV', 'AES128_GCM_SIV_RAW', 'AES256_GCM_SIV',
'AES256_GCM_SIV_RAW'
],
'AesCtrHmacAeadKey': [
'AES128_CTR_HMAC_SHA256', 'AES128_CTR_HMAC_SHA256_RAW',
'AES256_CTR_HMAC_SHA256', 'AES256_CTR_HMAC_SHA256_RAW'
],
'ChaCha20Poly1305Key': ['CHACHA20_POLY1305', 'CHACHA20_POLY1305_RAW'],
'XChaCha20Poly1305Key': ['XCHACHA20_POLY1305', 'XCHACHA20_POLY1305_RAW'],
'KmsAeadKey': [],
'KmsEnvelopeAeadKey': [],
'AesSivKey': ['AES256_SIV'],
'AesCtrHmacStreamingKey': [
'AES128_CTR_HMAC_SHA256_4KB',
'AES128_CTR_HMAC_SHA256_1MB',
'AES256_CTR_HMAC_SHA256_4KB',
'AES256_CTR_HMAC_SHA256_1MB',
],
'AesGcmHkdfStreamingKey': [
'AES128_GCM_HKDF_4KB',
'AES128_GCM_HKDF_1MB',
'AES256_GCM_HKDF_4KB',
'AES256_GCM_HKDF_1MB',
],
'EciesAeadHkdfPrivateKey': [
'ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM',
'ECIES_P256_COMPRESSED_HKDF_HMAC_SHA256_AES128_GCM',
'ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256',
'ECIES_P256_COMPRESSED_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256',
],
'HpkePrivateKey': [
'DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_128_GCM',
'DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_128_GCM_RAW',
'DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_256_GCM',
'DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_256_GCM_RAW',
'DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_CHACHA20_POLY1305',
'DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_CHACHA20_POLY1305_RAW',
],
'AesCmacKey': ['AES_CMAC'],
'HmacKey': [
'HMAC_SHA256_128BITTAG', 'HMAC_SHA256_256BITTAG',
'HMAC_SHA512_256BITTAG', 'HMAC_SHA512_512BITTAG'
],
'EcdsaPrivateKey': [
'ECDSA_P256', 'ECDSA_P256_RAW', 'ECDSA_P384', 'ECDSA_P384_SHA384',
'ECDSA_P384_SHA512', 'ECDSA_P521', 'ECDSA_P256_IEEE_P1363',
'ECDSA_P384_IEEE_P1363', 'ECDSA_P384_SHA384_IEEE_P1363',
'ECDSA_P521_IEEE_P1363'
], | [
" 'Ed25519PrivateKey': ['ED25519'],"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
import pytest
import numba
import os
import numpy as np
from numpy.testing import assert_array_equal
from umap import distances as dist
benchmark_only = pytest.mark.skipif(
"BENCHARM_TEST" not in os.environ, reason="Benchmark tests skipped"
)
# Constants for benchmark
WARMUP_ROUNDS = 5
ITERATIONS = 10
ROUNDS = 10
# --------
# Fixtures
# --------
@pytest.fixture(scope="function")
def stashed_previous_impl_for_regression_test():
@numba.njit(parallel=True, nogil=True)
def stashed_chunked_parallel_special_metric(
X, Y=None, metric=dist.named_distances["hellinger"], chunk_size=16
):
if Y is None:
row_size = col_size = X.shape[0]
else:
row_size = X.shape[0]
col_size = Y.shape[0]
result = np.zeros((row_size, col_size), dtype=np.float32)
if Y is None:
size = X.shape[0]
n_row_chunks = (size // chunk_size) + 1
for chunk_idx in numba.prange(n_row_chunks):
n = chunk_idx * chunk_size
chunk_end_n = min(n + chunk_size, size)
for m in range(n, size, chunk_size):
chunk_end_m = min(m + chunk_size, size)
if n == m:
for i in range(n, chunk_end_n):
for j in range(m, chunk_end_m):
if j > i:
d = metric(X[i], X[j])
result[i, j] = d
result[j, i] = d
else:
for i in range(n, chunk_end_n):
for j in range(m, chunk_end_m):
d = metric(X[i], X[j])
result[i, j] = d
result[j, i] = d
else:
n_row_chunks = (row_size // chunk_size) + 1
for chunk_idx in numba.prange(n_row_chunks):
n = chunk_idx * chunk_size
chunk_end_n = min(n + chunk_size, row_size)
for m in range(0, col_size, chunk_size):
chunk_end_m = min(m + chunk_size, col_size)
for i in range(n, chunk_end_n):
for j in range(m, chunk_end_m):
d = metric(X[i], Y[j])
result[i, j] = d
return result
return stashed_chunked_parallel_special_metric
@pytest.fixture(scope="function")
def workaround_590_impl():
@numba.njit(parallel=True, nogil=True)
def chunked_parallel_special_metric(
X, Y=None, metric=dist.named_distances["hellinger"], chunk_size=16
):
if Y is None:
size = X.shape[0]
result = np.zeros((size, size), dtype=np.float32)
n_row_chunks = (size // chunk_size) + 1
for chunk_idx in numba.prange(n_row_chunks):
n = chunk_idx * chunk_size
chunk_end_n = min(n + chunk_size, size)
for m in range(n, size, chunk_size):
chunk_end_m = min(m + chunk_size, size)
if n == m:
for i in range(n, chunk_end_n):
for j in range(m, chunk_end_m):
if j > i:
d = metric(X[i], X[j])
result[i, j] = d
result[j, i] = d
else:
for i in range(n, chunk_end_n):
for j in range(m, chunk_end_m):
d = metric(X[i], X[j])
result[i, j] = d
result[j, i] = d
return result
row_size = X.shape[0]
col_size = Y.shape[0]
result = np.zeros((row_size, col_size), dtype=np.float32)
n_row_chunks = (row_size // chunk_size) + 1
for chunk_idx in numba.prange(n_row_chunks):
n = chunk_idx * chunk_size
chunk_end_n = min(n + chunk_size, row_size)
for m in range(0, col_size, chunk_size):
chunk_end_m = min(m + chunk_size, col_size)
for i in range(n, chunk_end_n):
for j in range(m, chunk_end_m):
d = metric(X[i], Y[j])
result[i, j] = d
return result
return chunked_parallel_special_metric
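# Rough usage sketch (not part of the original test module; the hellinger metric
# and the fixture-provided callables are assumed): both chunked implementations
# return the full row_size x col_size matrix of pairwise distances, filled
# block-by-block so numba.prange can parallelise over row chunks.
#
#   X = np.abs(np.random.randn(32, 8)).astype(np.float32)
#   D = chunked_parallel_special_metric(X)        # symmetric, zero diagonal
#   D_xy = chunked_parallel_special_metric(X, X)  # same values via the X/Y path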
@pytest.fixture(scope="function")
def benchmark_data(request):
shape = request.param
spatial_data = np.random.randn(*shape).astype(np.float32)
return np.abs(spatial_data)
# ---------------------------------------------------------------
@benchmark_only
def test_chunked_parallel_alternative_implementations(
spatial_data, workaround_590_impl
):
# Base tests that must pass!
dist_matrix_x = workaround_590_impl(np.abs(spatial_data[:-2]))
dist_matrix_xy = workaround_590_impl(
np.abs(spatial_data[:-2]), np.abs(spatial_data[:-2])
)
dist_matrix_x_full = dist.chunked_parallel_special_metric(np.abs(spatial_data[:-2]))
dist_matrix_xy_full = dist.chunked_parallel_special_metric(
np.abs(spatial_data[:-2]), np.abs(spatial_data[:-2])
)
assert_array_equal(
dist_matrix_x_full,
dist_matrix_x,
err_msg="Distances don't match for metric hellinger",
)
assert_array_equal(
dist_matrix_xy_full,
dist_matrix_xy,
err_msg="Distances don't match for metric hellinger",
)
@benchmark_only
def test_chunked_parallel_special_metric_implementation_hellinger(
spatial_data,
stashed_previous_impl_for_regression_test,
):
# Base tests that must pass!
dist_matrix_x = dist.chunked_parallel_special_metric(np.abs(spatial_data[:-2]))
dist_matrix_xy = dist.chunked_parallel_special_metric(
np.abs(spatial_data[:-2]), np.abs(spatial_data[:-2])
)
test_matrix = np.array(
[
[
dist.hellinger_grad(np.abs(spatial_data[i]), np.abs(spatial_data[j]))[0]
for j in range(spatial_data.shape[0] - 2)
]
for i in range(spatial_data.shape[0] - 2)
]
).astype(np.float32)
assert_array_equal(
test_matrix,
dist_matrix_x,
err_msg="Distances don't match for metric hellinger",
)
assert_array_equal(
test_matrix,
dist_matrix_xy,
err_msg="Distances don't match for metric hellinger",
)
# Test to compare chunked_parallel different implementations
dist_x_stashed = stashed_previous_impl_for_regression_test(
np.abs(spatial_data[:-2])
)
dist_xy_stashed = stashed_previous_impl_for_regression_test(
np.abs(spatial_data[:-2]), np.abs(spatial_data[:-2])
)
assert_array_equal(
dist_xy_stashed,
dist_matrix_xy,
err_msg="Distances don't match between stashed and current chunked_parallel implementations with X and Y!",
)
assert_array_equal(
dist_x_stashed,
dist_matrix_x,
err_msg="Distances don't match between stashed and current chunked_parallel implementations with X only!",
)
# test hellinger on different X and Y Pair
spatial_data_two = np.random.randn(10, 20)
dist_stashed_diff_pair = stashed_previous_impl_for_regression_test(
np.abs(spatial_data[:-2]), spatial_data_two
)
dist_chunked_diff_pair = dist.chunked_parallel_special_metric(
np.abs(spatial_data[:-2]), spatial_data_two
)
assert_array_equal(
dist_stashed_diff_pair,
dist_chunked_diff_pair,
err_msg="Distances don't match between stashed and current chunked_parallel implementations",
)
# ----------------------------
# 1st Group Benchmark: X only
# (Worst Case)
# ----------------------------
@benchmark_only
@pytest.mark.benchmark(
group="benchmark_single_param",
)
@pytest.mark.parametrize(
"benchmark_data",
[(10 * s, 10 * s) for s in list(range(0, 101, 10))[1:]],
indirect=["benchmark_data"],
)
def test_benchmark_chunked_parallel_special_metric_x_only(
benchmark,
benchmark_data,
):
# single argument
benchmark.pedantic(
dist.chunked_parallel_special_metric,
kwargs={"X": benchmark_data, "Y": None},
warmup_rounds=WARMUP_ROUNDS,
iterations=ITERATIONS,
rounds=ROUNDS,
)
@benchmark_only
@pytest.mark.benchmark(
group="benchmark_single_param",
)
@pytest.mark.parametrize(
"benchmark_data",
[(10 * s, 10 * s) for s in list(range(0, 101, 10))[1:]],
indirect=["benchmark_data"],
)
def test_benchmark_workaround_590_x_only(
benchmark,
benchmark_data,
workaround_590_impl,
):
    # single argument
| [" benchmark.pedantic("] | lcc | Please complete the code given below. {context} Next line of code: |
|
# -*- coding: utf-8 -*-
""" This module contains classes:
HostsEntry:
A representation of a hosts file entry, i.e. a line containing an IP address
and name(s), a comment, or a blank line/line separator.
Hosts:
A representation of a hosts file, e.g. /etc/hosts and
c:\\\\windows\\\\system32\\\\drivers\\\\etc\\\\hosts for a linux or MS windows
based machine respectively. Each entry being represented as an instance
of the HostsEntry class.
"""
import sys
try:
from urllib.request import urlopen
except ImportError: # pragma: no cover
from urllib2 import urlopen
from python_hosts.utils import (is_ipv4, is_ipv6, is_readable, valid_hostnames,
dedupe_list)
from python_hosts.exception import (InvalidIPv6Address, InvalidIPv4Address,
UnableToWriteHosts)
class HostsEntry(object):
""" An entry in a hosts file. """
__slots__ = ['entry_type', 'address', 'comment', 'names']
def __init__(self,
entry_type=None,
address=None,
comment=None,
names=None):
"""
Initialise an instance of a Hosts file entry
:param entry_type: ipv4 | ipv6 | comment | blank
:param address: The ipv4 or ipv6 address belonging to the instance
:param comment: The comment belonging to the instance
:param names: The names that resolve to the specified address
:return: None
"""
if not entry_type or entry_type not in ('ipv4',
'ipv6',
'comment',
'blank'):
raise Exception('entry_type invalid or not specified')
if entry_type == 'comment' and not comment:
raise Exception('entry_type comment supplied without value.')
if entry_type == 'ipv4':
if not all((address, names)):
raise Exception('Address and Name(s) must be specified.')
if not is_ipv4(address):
raise InvalidIPv4Address()
if entry_type == 'ipv6':
if not all((address, names)):
raise Exception('Address and Name(s) must be specified.')
if not is_ipv6(address):
raise InvalidIPv6Address()
self.entry_type = entry_type
self.address = address
self.comment = comment
self.names = names
def is_real_entry(self):
return self.entry_type in ('ipv4', 'ipv6')
def __repr__(self):
return "HostsEntry(entry_type=\'{0}\', address=\'{1}\', " \
"comment={2}, names={3})".format(self.entry_type,
self.address,
self.comment,
self.names)
def __str__(self):
if self.entry_type in ('ipv4', 'ipv6'):
return "TYPE={0}, ADDR={1}, NAMES={2}".format(self.entry_type,
self.address,
" ".join(self.names))
elif self.entry_type == 'comment':
return "TYPE = {0}, COMMENT = {1}".format(self.entry_type, self.comment)
elif self.entry_type == 'blank':
return "TYPE = {0}".format(self.entry_type)
@staticmethod
def get_entry_type(hosts_entry=None):
"""
Return the type of entry for the line of hosts file passed
:param hosts_entry: A line from the hosts file
:return: 'comment' | 'blank' | 'ipv4' | 'ipv6'
"""
if hosts_entry and isinstance(hosts_entry, str):
entry = hosts_entry.strip()
if not entry or not entry[0] or entry[0] == "\n":
return 'blank'
if entry[0] == "#":
return 'comment'
entry_chunks = entry.split()
if is_ipv6(entry_chunks[0]):
return 'ipv6'
if is_ipv4(entry_chunks[0]):
return 'ipv4'
@staticmethod
def str_to_hostentry(entry):
"""
Transform a line from a hosts file into an instance of HostsEntry
:param entry: A line from the hosts file
:return: An instance of HostsEntry
"""
line_parts = entry.strip().split()
if is_ipv4(line_parts[0]) and valid_hostnames(line_parts[1:]):
return HostsEntry(entry_type='ipv4',
address=line_parts[0],
names=line_parts[1:])
elif is_ipv6(line_parts[0]) and valid_hostnames(line_parts[1:]):
return HostsEntry(entry_type='ipv6',
address=line_parts[0],
names=line_parts[1:])
else:
return False
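    # Illustrative parse (a sketch; assumes the standard
    # "<address> <name> [<name> ...]" hosts-file syntax):
    #
    #   entry = HostsEntry.str_to_hostentry('10.0.0.1 example.com www.example.com')
    #   # entry.entry_type == 'ipv4'
    #   # entry.names == ['example.com', 'www.example.com']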
class Hosts(object):
""" A hosts file. """
__slots__ = ['entries', 'hosts_path']
def __init__(self, path=None):
"""
Initialise an instance of a hosts file
:param path: The filesystem path of the hosts file to manage
:return: None
"""
self.entries = []
if path:
self.hosts_path = path
else:
self.hosts_path = self.determine_hosts_path()
self.populate_entries()
def __repr__(self):
return 'Hosts(hosts_path=\'{0}\', entries={1})'.format(self.hosts_path, self.entries)
def __str__(self):
output = ('hosts_path={0}, '.format(self.hosts_path))
for entry in self.entries:
output += str(entry)
return output
def count(self):
""" Get a count of the number of host entries
:return: The number of host entries
"""
return len(self.entries)
@staticmethod
def determine_hosts_path(platform=None):
"""
Return the hosts file path based on the supplied
or detected platform.
:param platform: a string used to identify the platform
:return: detected filesystem path of the hosts file
"""
if not platform:
platform = sys.platform
if platform.startswith('win'):
result = r"c:\windows\system32\drivers\etc\hosts"
return result
else:
return '/etc/hosts'
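    # For example, based on the branches above:
    #   Hosts.determine_hosts_path('win32')  -> r"c:\windows\system32\drivers\etc\hosts"
    #   Hosts.determine_hosts_path('linux')  -> '/etc/hosts'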
def write(self, path=None, mode='w'):
"""
Write all of the HostsEntry instances back to the hosts file
:param path: override the write path
:return: Dictionary containing counts
"""
written_count = 0
comments_written = 0
blanks_written = 0
ipv4_entries_written = 0
ipv6_entries_written = 0
if path:
output_file_path = path
else:
output_file_path = self.hosts_path
try:
with open(output_file_path, mode) as hosts_file:
for written_count, line in enumerate(self.entries):
if line.entry_type == 'comment':
hosts_file.write(line.comment + "\n")
comments_written += 1
if line.entry_type == 'blank':
hosts_file.write("\n")
blanks_written += 1
if line.entry_type == 'ipv4':
hosts_file.write(
"{0}\t{1}\n".format(
line.address,
' '.join(line.names),
)
)
ipv4_entries_written += 1
if line.entry_type == 'ipv6':
hosts_file.write(
"{0}\t{1}\n".format(
line.address,
' '.join(line.names), ))
ipv6_entries_written += 1
except:
raise UnableToWriteHosts()
return {'total_written': written_count + 1,
'comments_written': comments_written,
'blanks_written': blanks_written,
'ipv4_entries_written': ipv4_entries_written,
'ipv6_entries_written': ipv6_entries_written}
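    # Illustrative return value for a small hosts file (counts are made up but
    # internally consistent with the bookkeeping above):
    #   {'total_written': 7, 'comments_written': 2, 'blanks_written': 1,
    #    'ipv4_entries_written': 3, 'ipv6_entries_written': 1}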
@staticmethod
def get_hosts_by_url(url=None):
"""
Request the content of a URL and return the response
:param url: The URL of the hosts file to download
:return: The content of the passed URL
"""
response = urlopen(url)
return response.read()
def exists(self, address=None, names=None, comment=None):
"""
Determine if the supplied address and/or names, or comment, exists in a HostsEntry within Hosts
:param address: An ipv4 or ipv6 address to search for
:param names: A list of names to search for
:param comment: A comment to search for
:return: True if a supplied address, name, or comment is found. Otherwise, False.
"""
for entry in self.entries:
if entry.entry_type in ('ipv4', 'ipv6'):
if address and address == entry.address:
return True
if names:
for name in names:
if name in entry.names:
return True
elif entry.entry_type == 'comment' and entry.comment == comment:
return True
return False
def remove_all_matching(self, address=None, name=None):
"""
Remove all HostsEntry instances from the Hosts object
where the supplied ip address or name matches
:param address: An ipv4 or ipv6 address
:param name: A host name
:return: None
"""
if self.entries:
if address and name:
func = lambda entry: not entry.is_real_entry() or (entry.address != address and name not in entry.names)
elif address:
func = lambda entry: not entry.is_real_entry() or entry.address != address
elif name:
func = lambda entry: not entry.is_real_entry() or name not in entry.names
else:
raise ValueError('No address or name was specified for removal.')
self.entries = list(filter(func, self.entries))
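    # Example (sketch): removing by name drops every ipv4/ipv6 entry whose name
    # list contains the supplied name; comments and blank lines are preserved.
    #
    #   hosts.remove_all_matching(name='example.com')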
def find_all_matching(self, address=None, name=None):
"""
Return all HostsEntry instances from the Hosts object
where the supplied ip address or name matches
:param address: An ipv4 or ipv6 address
:param name: A host name
:return: HostEntry instances
"""
results = []
if self.entries:
for entry in self.entries:
if not entry.is_real_entry():
continue
if address and name:
if address == entry.address and name in entry.names:
results.append(entry)
elif address and address == entry.address:
results.append(entry)
elif name in entry.names:
results.append(entry)
return results
def import_url(self, url=None, force=None):
"""
Read a list of host entries from a URL, convert them into instances of HostsEntry and
then append to the list of entries in Hosts
:param url: The URL of where to download a hosts file
:return: Counts reflecting the attempted additions
"""
file_contents = self.get_hosts_by_url(url=url).decode('utf-8')
file_contents = file_contents.rstrip().replace('^M', '\n')
file_contents = file_contents.rstrip().replace('\r\n', '\n')
lines = file_contents.split('\n')
skipped = 0
import_entries = []
for line in lines:
stripped_entry = line.strip()
if (not stripped_entry) or (stripped_entry.startswith('#')):
skipped += 1
else:
line = line.partition('#')[0]
line = line.rstrip()
import_entry = HostsEntry.str_to_hostentry(line)
if import_entry:
import_entries.append(import_entry)
add_result = self.add(entries=import_entries, force=force)
write_result = self.write()
return {'result': 'success',
'skipped': skipped,
'add_result': add_result,
'write_result': write_result}
def import_file(self, import_file_path=None):
"""
Read a list of host entries from a file, convert them into instances
of HostsEntry and then append to the list of entries in Hosts
:param import_file_path: The path to the file containing the host entries
:return: Counts reflecting the attempted additions
"""
skipped = 0
invalid_count = 0
if is_readable(import_file_path):
import_entries = []
with open(import_file_path, 'r') as infile:
for line in infile:
stripped_entry = line.strip()
if (not stripped_entry) or (stripped_entry.startswith('#')):
skipped += 1
else:
line = line.partition('#')[0]
line = line.rstrip()
import_entry = HostsEntry.str_to_hostentry(line)
if import_entry:
import_entries.append(import_entry)
else:
invalid_count += 1
add_result = self.add(entries=import_entries)
write_result = self.write()
return {'result': 'success',
'skipped': skipped,
'invalid_count': invalid_count,
'add_result': add_result,
'write_result': write_result}
else:
return {'result': 'failed',
'message': 'Cannot read: file {0}.'.format(import_file_path)}
def add(self, entries=None, force=False, allow_address_duplication=False, merge_names=False):
"""
Add instances of HostsEntry to the instance of Hosts.
:param entries: A list of instances of HostsEntry
:param force: Remove matching before adding
:param allow_address_duplication: Allow using multiple entries for same address
:param merge_names: Merge names where address already exists
:return: The counts of successes and failures
"""
ipv4_count = 0
ipv6_count = 0
comment_count = 0
invalid_count = 0
duplicate_count = 0
replaced_count = 0
import_entries = []
existing_addresses = [x.address for x in self.entries if x.address]
existing_names = []
for item in self.entries:
if item.names:
existing_names.extend(item.names)
existing_names = dedupe_list(existing_names)
for entry in entries:
if entry.entry_type == 'comment':
entry.comment = entry.comment.strip()
if entry.comment[0] != "#":
entry.comment = "# " + entry.comment
import_entries.append(entry)
elif entry.address in ('0.0.0.0', '127.0.0.1') or allow_address_duplication:
                # Allow duplicate entries for addresses used for ad blocking
if set(entry.names).intersection(existing_names):
if force:
for name in entry.names:
self.remove_all_matching(name=name)
import_entries.append(entry)
else:
duplicate_count += 1
else:
import_entries.append(entry)
elif entry.address in existing_addresses:
if not any((force, merge_names)):
duplicate_count += 1
elif merge_names:
# get the last entry with matching address
entry_names = list()
for existing_entry in self.entries:
if entry.address == existing_entry.address:
entry_names = existing_entry.names
break
# merge names with that entry
merged_names = list(set(entry.names + entry_names))
# remove all matching
self.remove_all_matching(address=entry.address)
# append merged entry
entry.names = merged_names
import_entries.append(entry)
elif force:
self.remove_all_matching(address=entry.address)
replaced_count += 1
import_entries.append(entry)
elif set(entry.names).intersection(existing_names):
if not force:
duplicate_count += 1
else:
for name in entry.names:
self.remove_all_matching(name=name)
replaced_count += 1
import_entries.append(entry)
else:
import_entries.append(entry)
for item in import_entries:
if item.entry_type == 'comment':
comment_count += 1
self.entries.append(item)
elif item.entry_type == 'ipv4':
ipv4_count += 1
self.entries.append(item)
elif item.entry_type == 'ipv6':
ipv6_count += 1
self.entries.append(item)
return {'comment_count': comment_count,
'ipv4_count': ipv4_count,
'ipv6_count': ipv6_count,
'invalid_count': invalid_count,
'duplicate_count': duplicate_count,
'replaced_count': replaced_count}
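    # Minimal usage sketch (assumes a writable hosts file path):
    #
    #   hosts = Hosts(path='/tmp/hosts')
    #   new = HostsEntry(entry_type='ipv4', address='10.0.0.1', names=['example.com'])
    #   counts = hosts.add([new], force=True)   # replaces any clashing entry
    #   hosts.write()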
def populate_entries(self):
"""
Called by the initialiser of Hosts. This reads the entries from the local hosts file,
converts them into instances of HostsEntry and adds them to the Hosts list of entries.
:return: None
"""
try:
with open(self.hosts_path, 'r') as hosts_file:
hosts_entries = [line for line in hosts_file]
for hosts_entry in hosts_entries:
entry_type = HostsEntry.get_entry_type(hosts_entry)
if entry_type == "comment":
hosts_entry = hosts_entry.replace("\r", "")
hosts_entry = hosts_entry.replace("\n", "")
self.entries.append(HostsEntry(entry_type="comment",
comment=hosts_entry))
elif entry_type == "blank":
self.entries.append(HostsEntry(entry_type="blank"))
elif entry_type in ("ipv4", "ipv6"):
chunked_entry = hosts_entry.split()
stripped_name_list = [name.strip() for name in chunked_entry[1:]]
self.entries.append(
HostsEntry(
entry_type=entry_type,
address=chunked_entry[0].strip(),
                                names=stripped_name_list))
| [" except IOError:"] | lcc | Please complete the code given below. {context} Next line of code: |
|
# coding: utf-8
from __future__ import division, unicode_literals
"""
Created on Jul 24, 2012
| ["\"\"\""] | lcc | Please complete the code given below. {context} Next line of code: |
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2018, Eric Jacob <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_ltm_profile_tcp
short_description: BIG-IP ltm tcp profile module
description:
- Configures a Transmission Control Protocol (TCP) profile.
version_added: "2.4"
author:
- "Gabriel Fortin (@GabrielFortin)"
options:
abc:
description:
- When enabled, increases the congestion window by basing the increase amount on the number of previously
unacknowledged bytes that each acknowledgement code (ACK) includes.
default: enabled
choices: ['enabled', 'disabled']
ack_on_push:
description:
            - When enabled, significantly improves performance for Microsoft Windows and MacOS peers that write out
              on a very small send buffer.
default: enabled
choices: ['enabled', 'disabled']
app_service:
description:
- Specifies the name of the application service to which the profile belongs.
close_wait_timeout:
description:
- Specifies the number of seconds that a connection remains in a LAST-ACK (last acknowledgement code) state
before quitting.
default: 5
cmetrics_cache:
description:
- Specifies, when enabled, the default value, that the system uses a cache for storing congestion metrics.
choices: ['enabled', 'disabled']
congestion_control:
description:
- Specifies the algorithm to use to share network resources among competing users to reduce congestion.
default: high-speed
choices: [
            'cdg', 'chd', 'cubic', 'high-speed', 'illinois', 'new-reno', 'none', 'reno', 'scalable', 'vegas',
'westwood', 'woodside'
]
defaults_from:
description:
- Specifies the profile that you want to use as the parent profile.
default: tcp
deferred_accept:
description:
- Specifies, when enabled, that the system defers allocation of the connection chain context until the
system has received the payload from the client.
default: disabled
choices: ['enabled', 'disabled']
delay_window_control:
description:
- When enabled, the system uses an estimate of queueing delay as a measure of congestion, in addition to the
normal loss-based control, to control the amount of data sent.
default: disabled
choices: ['enabled', 'disabled']
delayed_acks:
description:
- Specifies, when enabled, the default value, that the traffic management system allows coalescing of
multiple acknowledgement (ACK) responses.
default: enabled
choices: ['enabled', 'disabled']
description:
description:
- User defined description.
dsack:
description:
- When enabled, specifies the use of the SACK option to acknowledge duplicate segments.
default: disabled
choices: ['enabled', 'disabled']
early_retransmit:
description:
- Specifies, when enabled, that the system uses early retransmit recovery (as specified in RFC 5827) to
reduce the recovery time for connections that are receive-buffer or user-data limited.
default: disabled
choices: ['enabled', 'disabled']
ecn:
description:
- Specifies, when enabled, that the system uses the TCP flags CWR and ECE to notify its peer of congestion
and congestion counter-measures.
default: disabled
choices: ['enabled', 'disabled']
fin_wait_timeout:
description:
- Specifies the number of seconds that a connection is in the FIN-WAIT or closing state before quitting.
default: 5
hardware_syn_cookie:
description:
- Specifies whether or not to use hardware SYN Cookie when cross system limit.
default: disabled
choices: ['enabled', 'disabled']
idle_timeout:
description:
- Specifies the number of seconds that a connection is idle before the connection is eligible for deletion.
default: 300
init_cwnd:
description:
- Specifies the initial congestion window size for connections to this destination.
default: 0
choices: range(0, 17)
init_rwnd:
description:
- Specifies the initial receive window size for connections to this destination.
default: 0
choices: range(0, 17)
ip_tos_to_client:
description:
- Specifies the Type of Service (ToS) level that the traffic management system assigns to TCP packets when
sending them to clients.
default: 0
keep_alive_interval:
description:
- Specifies the keep-alive probe interval, in seconds.
default: 1800
limited_transmit:
description:
- Specifies, when enabled, that the system uses limited transmit recovery revisions for fast retransmits to
reduce the recovery time for connections on a lossy network.
default: enabled
choices: ['enabled', 'disabled']
link_qos_to_client:
description:
- Specifies the Link Quality of Service (QoS) level that the system assigns to TCP packets when sending them
to clients.
default: 0
max_retrans:
description:
- Specifies the maximum number of retransmissions of data segments that the system allows.
default: 8
md5_signature:
description:
- Specifies, when enabled, that the system uses RFC2385 TCP-MD5 signatures to protect TCP traffic against
intermediate tampering.
default: disabled
choices: ['enabled', 'disabled']
md5_signature_passphrase:
description:
            - Specifies a plain text passphrase that is used in a shared-secret scheme to implement the spoof-prevention
parts of RFC2385.
choices: Plain text passphrase between 1 and 80 characters in length
minimum_rto:
description:
- Specifies the minimum TCP retransmission timeout in milliseconds.
default: 0
mptcp:
description:
- Specifies, when enabled, that the system will accept MPTCP connections.
default: disabled
choices: ['enabled', 'disabled']
mptcp_csum:
description:
- Specifies, when enabled, that the system will calculate the checksum for MPTCP connections.
default: disabled
choices: ['enabled', 'disabled']
mptcp_csum_verify:
description:
            - Specifies, when enabled, that the system verifies the checksum for MPTCP connections.
default: disabled
choices: ['enabled', 'disabled']
mptcp_debug:
description:
- Specifies, when enabled, that the system provides debug logs and statistics for MPTCP connections.
default: disabled
choices: ['enabled', 'disabled']
mptcp_fallback:
description:
            - Specifies the MPTCP fallback mode.
default: reset
choices: ['accept', 'active-accept', 'reset', 'retransmit']
mptcp_joinmax:
description:
- Specifies the max number of MPTCP connections that can join to given one.
default: 5
mptcp_nojoindssack:
description:
- Specifies, when enabled, no DSS option is sent on the JOIN ACK.
default: disabled
choices: ['enabled', 'disabled']
mptcp_rtomax:
description:
            - Specifies the number of RTOs before declaring the subflow dead.
default: 5
mptcp_rxmitmin:
description:
- Specifies the minimum value (in msec) of the retransmission timer for these MPTCP flows.
default: 1000
mptcp_subflowmax:
description:
- Specifies the maximum number of MPTCP subflows for a single flow.
default: 6
mptcp_makeafterbreak:
description:
- Specifies, when enabled, that make-after-break functionality is supported, allowing for long-lived MPTCP
sessions.
default: disabled
choices: ['enabled', 'disabled']
mptcp_timeout:
description:
            - Specifies the timeout value, in seconds, after which long-lived sessions without an active flow are discarded.
default: 3600
mptcp_fastjoin:
description:
- Specifies, when enabled, FAST join, allowing data to be sent on the MP_JOIN SYN, which can allow a server
response to occur in parallel with the JOIN.
default: disabled
choices: ['enabled', 'disabled']
nagle:
description:
- Specifies, when enabled, that the system applies Nagle's algorithm to reduce the number of short segments
on the network.
default: disabled
choices: ['enabled', 'disabled']
name:
description:
- Specifies a unique name for the component.
required: true
partition:
description:
- Specifies the administrative partition in which the component object resides.
default: Common
pkt_loss_ignore_burst:
description:
- Specifies the probability of performing congestion control when multiple packets in a row are lost, even
if the pkt-loss-ignore-rate was not exceeded.
default: 0
choices: range(0, 33)
pkt_loss_ignore_rate:
description:
- Specifies the threshold of packets lost per million at which the system should perform congestion control.
default: 0
choices: range(0, 1000001)
proxy_buffer_high:
description:
- Specifies the highest level at which the receive window is closed.
default: 49152
proxy_buffer_low:
description:
- Specifies the lowest level at which the receive window is closed.
default: 32768
proxy_mss:
description:
- Specifies, when enabled, that the system advertises the same mss to the server as was negotiated with the
client.
default: disabled
choices: ['enabled', 'disabled']
proxy_options:
description:
- Specifies, when enabled, that the system advertises an option, such as a time-stamp to the server only if
it was negotiated with the client.
default: disabled
choices: ['enabled', 'disabled']
rate_pace:
description:
- Specifies, when enabled, that the system will rate pace TCP data transmissions.
default: disabled
choices: ['enabled', 'disabled']
receive_window_size:
description:
- Specifies the size of the receive window, in bytes.
default: 65535
reset_on_timeout:
description:
- Specifies whether to reset connections on timeout.
default: enabled
choices: ['enabled', 'disabled']
selective_acks:
description:
- Specifies, when enabled, that the system negotiates RFC2018-compliant Selective Acknowledgements with
peers.
default: enabled
choices: ['enabled', 'disabled']
selective_nack:
description:
- Specifies whether Selective Negative Acknowledgment is enabled or disabled.
default: enabled
choices: ['enabled', 'disabled']
send_buffer_size:
description:
- Specifies the size of the buffer, in bytes.
default: 65535
slow_start:
description:
- Specifies, when enabled, that the system uses larger initial window sizes (as specified in RFC 3390) to
help reduce round trip times.
default: enabled
choices: ['enabled', 'disabled']
state:
description:
- Specifies the state of the component on the BIG-IP system.
default: present
choices: ['absent', 'present']
syn_cookie_whitelist:
description:
- Specifies whether or not to use a SYN Cookie WhiteList when doing software SYN Cookies.
default: disabled
choices: ['enabled', 'disabled']
syn_max_retrans:
description:
- Specifies the maximum number of retransmissions of SYN segments that the system allows.
default: 3
syn_rto_base:
description:
- Specifies the initial RTO (Retransmission TimeOut) base multiplier for SYN retransmission, in
milliseconds.
default: 0
tail_loss_probe:
description:
- Specifies whether the system uses tail loss probe to reduce the number of retransmission timeouts.
default: disabled
choices: ['enabled', 'disabled']
time_wait_recycle:
description:
- Specifies whether the system recycles the connection when a SYN packet is received in a TIME-WAIT state.
default: enabled
choices: ['enabled', 'disabled']
time_wait_timeout:
description:
- Specifies the number of milliseconds that a connection is in the TIME-WAIT state before closing.
default: 2000
choices: range(0, 600001)
timestamps:
description:
- Specifies, when enabled, that the system uses the timestamps extension for TCP (as specified in RFC 1323)
to enhance high-speed network performance.
default: enabled
choices: ['enabled', 'disabled']
verified_accept:
description:
- Specifies, when enabled, that the system can actually communicate with the server before establishing a
client connection.
default: disabled
choices: ['enabled', 'disabled']
zero_window_timeout:
description:
- Specifies the timeout in milliseconds for terminating a connection with an effective zero length TCP
transmit window.
default: 2000
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
EXAMPLES = '''
- name: Create LTM TCP Profile
f5bigip_ltm_profile_tcp:
f5_hostname: 172.16.227.35
f5_username: admin
f5_password: admin
f5_port: 443
name: my_tcp_profile
partition: Common
init_cwnd: 10
pkt_loss_ignore_burst: 15
state: present
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import range
from ansible_common_f5.base import F5_ACTIVATION_CHOICES
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
@property
def argument_spec(self):
argument_spec = dict(
abc=dict(type='str', choices=F5_ACTIVATION_CHOICES),
ack_on_push=dict(type='str', choices=F5_ACTIVATION_CHOICES),
app_service=dict(type='str'),
close_wait_timeout=dict(type='int'),
cmetrics_cache=dict(type='str', choices=F5_ACTIVATION_CHOICES),
congestion_control=dict(type='str',
choices=['cdg', 'chd', 'cubic', 'high-speed', 'illinois', 'new-reno', 'none',
'reno', 'scalable', 'vegas', 'westwood', 'woodside']),
defaults_from=dict(type='str'),
deferred_accept=dict(type='str', choices=F5_ACTIVATION_CHOICES),
delay_window_control=dict(type='str', choices=F5_ACTIVATION_CHOICES),
delayed_acks=dict(type='str', choices=F5_ACTIVATION_CHOICES),
description=dict(type='str'),
dsack=dict(type='str', choices=F5_ACTIVATION_CHOICES),
early_retransmit=dict(type='str', choices=F5_ACTIVATION_CHOICES),
ecn=dict(type='str', choices=F5_ACTIVATION_CHOICES),
fin_wait_timeout=dict(type='int'),
hardware_syn_cookie=dict(type='str', choices=F5_ACTIVATION_CHOICES),
idle_timeout=dict(type='int'),
init_cwnd=dict(type='int', choices=range(0, 17)),
init_rwnd=dict(type='int', choices=range(0, 17)),
ip_tos_to_client=dict(type='int'),
keep_alive_interval=dict(type='int'),
limited_transmit=dict(type='str', choices=F5_ACTIVATION_CHOICES),
link_qos_to_client=dict(type='int'),
max_retrans=dict(type='int'),
md5_signature=dict(type='str', choices=F5_ACTIVATION_CHOICES),
md5_signature_passphrase=dict(type='str', no_log=True),
minimum_rto=dict(type='int'),
mptcp=dict(type='str', choices=F5_ACTIVATION_CHOICES),
mptcp_csum=dict(type='str', choices=F5_ACTIVATION_CHOICES),
mptcp_csum_verify=dict(type='str', choices=F5_ACTIVATION_CHOICES),
mptcp_debug=dict(type='str', choices=F5_ACTIVATION_CHOICES),
mptcp_fallback=dict(type='str', choices=['accept', 'active-accept', 'reset', 'retransmit']),
mptcp_joinmax=dict(type='int'),
mptcp_nojoindssack=dict(type='str', choices=F5_ACTIVATION_CHOICES),
mptcp_rtomax=dict(type='int'),
mptcp_rxmitmin=dict(type='int'),
mptcp_subflowmax=dict(type='int'),
mptcp_makeafterbreak=dict(type='str', choices=F5_ACTIVATION_CHOICES),
mptcp_timeout=dict(type='int'),
mptcp_fastjoin=dict(type='str', choices=F5_ACTIVATION_CHOICES),
nagle=dict(type='str', choices=F5_ACTIVATION_CHOICES),
pkt_loss_ignore_burst=dict(type='int', choices=range(0, 33)),
pkt_loss_ignore_rate=dict(type='int', choices=range(0, 1000001)),
proxy_buffer_high=dict(type='int'),
proxy_buffer_low=dict(type='int'),
proxy_mss=dict(type='str', choices=F5_ACTIVATION_CHOICES),
proxy_options=dict(type='str', choices=F5_ACTIVATION_CHOICES),
rate_pace=dict(type='str', choices=F5_ACTIVATION_CHOICES),
receive_window_size=dict(type='int'),
reset_on_timeout=dict(type='str', choices=F5_ACTIVATION_CHOICES),
selective_acks=dict(type='str', choices=F5_ACTIVATION_CHOICES),
            selective_nack=dict(type='str', choices=F5_ACTIVATION_CHOICES),
| [" send_buffer_size=dict(type='int'),"] | lcc | Please complete the code given below. {context} Next line of code: |
|
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse, reverse_lazy
from django.forms import ValidationError
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from django.views.generic import ListView
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.edit import CreateView, DeleteView
from django.http import Http404
from planet.models import Blog, Feed, Author, Post
from planet.forms import SearchForm
from tagging.models import Tag, TaggedItem
def index(request):
posts = Post.site_objects.all().order_by("-date_modified")
return render_to_response("planet/posts/list.html", {"posts": posts},
context_instance=RequestContext(request))
def blogs_list(request):
blogs_list = Blog.site_objects.all()
return render_to_response("planet/blogs/list.html",
{"blogs_list": blogs_list}, context_instance=RequestContext(request))
def blog_detail(request, blog_id, slug=None):
blog = get_object_or_404(Blog, pk=blog_id)
if slug is None:
return redirect(blog, permanent=True)
posts = Post.site_objects.filter(feed__blog=blog).order_by("-date_modified")
return render_to_response("planet/blogs/detail.html",
{"blog": blog, "posts": posts},
context_instance=RequestContext(request))
def feeds_list(request):
feeds_list = Feed.site_objects.all()
return render_to_response("planet/feeds/list.html",
{"feeds_list": feeds_list}, context_instance=RequestContext(request))
def feed_detail(request, feed_id, tag=None, slug=None):
feed = get_object_or_404(Feed, pk=feed_id)
if not slug:
return redirect(feed, permanent=True)
if tag:
tag = get_object_or_404(Tag, name=tag)
posts = TaggedItem.objects.get_by_model(
Post.site_objects, tag).filter(feed=feed).order_by("-date_modified")
else:
posts = Post.site_objects.filter(feed=feed).order_by("-date_modified")
return render_to_response("planet/feeds/detail.html",
{"feed": feed, "posts": posts, "tag": tag},
context_instance=RequestContext(request))
def authors_list(request):
authors = Author.site_objects.all()
return render_to_response("planet/authors/list.html",
{"authors_list": authors},
context_instance=RequestContext(request))
def author_detail(request, author_id, tag=None, slug=None):
author = get_object_or_404(Author, pk=author_id)
if not slug:
return redirect(author, permanent=True)
if tag:
tag = get_object_or_404(Tag, name=tag)
posts = TaggedItem.objects.get_by_model(Post.site_objects, tag).filter(
authors=author).order_by("-date_modified")
else:
posts = Post.site_objects.filter(
authors=author).order_by("-date_modified")
return render_to_response("planet/authors/detail.html",
{"author": author, "posts": posts, "tag": tag},
context_instance=RequestContext(request))
def posts_list(request):
posts = Post.site_objects.all().select_related("feed", "feed__blog")\
.prefetch_related("authors").order_by("-date_modified")
return render_to_response("planet/posts/list.html", {"posts": posts},
context_instance=RequestContext(request))
def post_detail(request, post_id, slug=None):
post = get_object_or_404(
Post.objects.select_related("feed", "feed__blog").prefetch_related("authors"),
pk=post_id
)
if not slug:
return redirect(post, permanent=True)
return render_to_response("planet/posts/detail.html", {"post": post},
context_instance=RequestContext(request))
def tag_detail(request, tag):
tag = get_object_or_404(Tag, name=tag)
posts = TaggedItem.objects.get_by_model(
Post.site_objects, tag).order_by("-date_modified")
return render_to_response("planet/tags/detail.html", {"posts": posts,
"tag": tag}, context_instance=RequestContext(request))
def tag_authors_list(request, tag):
tag = get_object_or_404(Tag, name=tag)
posts_list = TaggedItem.objects.get_by_model(Post.site_objects, tag)
authors = set()
for post in posts_list:
for author in post.authors.all():
authors.add(author)
return render_to_response("planet/authors/list_for_tag.html",
{"authors": list(authors), "tag": tag},
context_instance=RequestContext(request))
def tag_feeds_list(request, tag):
tag = get_object_or_404(Tag, name=tag)
post_ids = TaggedItem.objects.get_by_model(Post.site_objects, tag
).values_list("id", flat=True)
feeds_list = Feed.site_objects.filter(post__in=post_ids).distinct()
return render_to_response("planet/feeds/list_for_tag.html",
{"feeds_list": feeds_list, "tag": tag},
context_instance=RequestContext(request))
def tags_cloud(request, min_posts_count=1):
tags_cloud = Tag.objects.cloud_for_model(Post)
return render_to_response("planet/tags/cloud.html",
{"tags_cloud": tags_cloud}, context_instance=RequestContext(request))
def foaf(request):
# TODO: use http://code.google.com/p/django-foaf/ instead of this
feeds = Feed.site_objects.all().select_related("blog")
return render_to_response("planet/microformats/foaf.xml", {"feeds": feeds},
context_instance=RequestContext(request), content_type="text/xml")
def opml(request):
feeds = Feed.site_objects.all().select_related("blog")
return render_to_response("planet/microformats/opml.xml", {"feeds": feeds},
context_instance=RequestContext(request), content_type="text/xml")
def search(request):
if request.method == "GET" and request.GET.get("search") == "go":
search_form = SearchForm(request.GET)
if search_form.is_valid():
query = search_form.cleaned_data["q"]
if search_form.cleaned_data["w"] == "posts":
                params_dict = {"title__icontains": query}
| [" posts = Post.site_objects.filter(**params_dict"] | lcc | Please complete the code given below. {context} Next line of code: |
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import uuid
from datetime import datetime
from typing import Dict, List, Optional, TYPE_CHECKING
from msrest import Serializer
from azure.core.async_paging import AsyncItemPaged
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.tracing.decorator import distributed_trace
from azure.core import MatchConditions
from .._version import SDK_MONIKER
from .._utils import (
prep_if_match,
prep_if_none_match
)
from .._generated.aio import AzureDigitalTwinsAPI
from .._generated.models import (
QuerySpecification,
DigitalTwinsAddOptions,
DigitalTwinsDeleteOptions,
DigitalTwinsUpdateOptions,
DigitalTwinsUpdateComponentOptions,
DigitalTwinsDeleteRelationshipOptions,
DigitalTwinsUpdateRelationshipOptions,
DigitalTwinsAddRelationshipOptions,
DigitalTwinsModelData
)
if TYPE_CHECKING:
from azure.core.credentials_async import AsyncTokenCredential
from .._generated.models import (
IncomingRelationship,
DigitalTwinsEventRoute
)
class DigitalTwinsClient(object): # pylint: disable=too-many-public-methods
"""Creates an instance of the Digital Twins client.
    :param str endpoint: The URL endpoint of an Azure Digital Twins service
:param ~azure.core.credentials_async.AsyncTokenCredential credential:
A credential to authenticate requests to the service.
"""
def __init__(self, endpoint: str, credential: "AsyncTokenCredential", **kwargs) -> None:
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
self._client = AzureDigitalTwinsAPI(
credential=credential,
base_url=endpoint,
sdk_moniker=SDK_MONIKER,
**kwargs
)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "DigitalTwinsClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
@distributed_trace_async
async def get_digital_twin(self, digital_twin_id: str, **kwargs) -> Dict[str, object]:
"""Get a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:return: Dictionary containing the twin.
:rtype: Dict[str, object]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError:
If the digital twin doesn't exist.
"""
return await self._client.digital_twins.get_by_id(
digital_twin_id,
**kwargs
)
@distributed_trace_async
async def upsert_digital_twin(
self,
digital_twin_id: str,
digital_twin: Dict[str, object],
**kwargs
) -> Dict[str, object]:
"""Create or update a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:param Dict[str,object] digital_twin:
Dictionary containing the twin to create or update.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: Dictionary containing the created or updated twin.
:rtype: Dict[str, object]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceExistsError:
If the digital twin already exists.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_none_match, error_map = prep_if_none_match(etag, match_condition)
if if_none_match:
options = DigitalTwinsAddOptions(if_none_match=if_none_match)
return await self._client.digital_twins.add(
digital_twin_id,
digital_twin,
digital_twins_add_options=options,
error_map=error_map,
**kwargs
)
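    # Hedged usage sketch (the twin ID, model ID and property name are
    # illustrative, not taken from this file):
    #
    #   twin = {"$metadata": {"$model": "dtmi:example:Room;1"}, "Temperature": 21.0}
    #   created = await client.upsert_digital_twin("room-1", twin)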
@distributed_trace_async
async def update_digital_twin(
self,
digital_twin_id: str,
json_patch: List[Dict[str, object]],
**kwargs
) -> None:
"""Update a digital twin using a JSON patch.
:param str digital_twin_id: The ID of the digital twin.
:param List[Dict[str,object]] json_patch: An update specification described by JSON Patch.
Updates to property values and $model elements may happen in the same request.
Operations are limited to add, replace and remove.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError:
If there is no digital twin with the provided ID.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_match, error_map = prep_if_match(etag, match_condition)
if if_match:
options = DigitalTwinsUpdateOptions(if_match=if_match)
return await self._client.digital_twins.update(
digital_twin_id,
json_patch,
digital_twins_update_options=options,
error_map=error_map,
**kwargs
)
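    # The JSON patch is a list of RFC 6902 operations; for example (the twin ID
    # and property name are illustrative):
    #
    #   await client.update_digital_twin(
    #       "room-1",
    #       [{"op": "replace", "path": "/Temperature", "value": 22.5}],
    #   )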
@distributed_trace_async
async def delete_digital_twin(
self,
digital_twin_id: str,
**kwargs
) -> None:
"""Delete a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError:
If there is no digital twin with the provided ID.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_match, error_map = prep_if_match(etag, match_condition)
if if_match:
options = DigitalTwinsDeleteOptions(if_match=if_match)
return await self._client.digital_twins.delete(
digital_twin_id,
digital_twins_delete_options=options,
error_map=error_map,
**kwargs
)
@distributed_trace_async
async def get_component(self, digital_twin_id: str, component_name: str, **kwargs) -> Dict[str, object]:
"""Get a component on a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:param str component_name: The component being retrieved.
:return: Dictionary containing the component.
:rtype: Dict[str, object]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is either no
digital twin with the provided ID or the component name is invalid.
"""
return await self._client.digital_twins.get_component(
digital_twin_id,
component_name,
**kwargs
)
@distributed_trace_async
async def update_component(
self,
digital_twin_id: str,
component_name: str,
json_patch: List[Dict[str, object]],
**kwargs
) -> None:
"""Update properties of a component on a digital twin using a JSON patch.
:param str digital_twin_id: The ID of the digital twin.
:param str component_name: The component being updated.
:param List[Dict[str,object]] json_patch: An update specification described by JSON Patch.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is either no
digital twin with the provided ID or the component name is invalid.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_match, error_map = prep_if_match(etag, match_condition)
if if_match:
options = DigitalTwinsUpdateComponentOptions(if_match=if_match)
return await self._client.digital_twins.update_component(
digital_twin_id,
component_name,
patch_document=json_patch,
digital_twins_update_component_options=options,
error_map=error_map,
**kwargs
)
@distributed_trace_async
async def get_relationship(
self,
digital_twin_id: str,
relationship_id: str,
**kwargs
) -> Dict[str, object]:
"""Get a relationship on a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:param str relationship_id: The ID of the relationship to retrieve.
:return: Dictionary containing the relationship.
:rtype: Dict[str, object]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is either no
digital twin or relationship with the provided ID.
"""
return await self._client.digital_twins.get_relationship_by_id(
digital_twin_id,
relationship_id,
**kwargs
)
@distributed_trace_async
async def upsert_relationship(
self,
digital_twin_id: str,
relationship_id: str,
relationship: Dict[str, object],
**kwargs
) -> Dict[str, object]:
"""Create or update a relationship on a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:param str relationship_id: The ID of the relationship to retrieve.
:param Dict[str,object] relationship: Dictionary containing the relationship.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: The created or updated relationship.
:rtype: Dict[str, object]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is either no
digital twin, target digital twin or relationship with the provided ID.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_none_match, error_map = prep_if_none_match(etag, match_condition)
if if_none_match:
options = DigitalTwinsAddRelationshipOptions(if_none_match=if_none_match)
return await self._client.digital_twins.add_relationship(
id=digital_twin_id,
relationship_id=relationship_id,
relationship=relationship,
digital_twins_add_relationship_options=options,
error_map=error_map,
**kwargs
)
@distributed_trace_async
async def update_relationship(
self,
digital_twin_id: str,
relationship_id: str,
json_patch: List[Dict[str, object]],
**kwargs
) -> None:
"""Updates the properties of a relationship on a digital twin using a JSON patch.
:param str digital_twin_id: The ID of the digital twin.
:param str relationship_id: The ID of the relationship to retrieve.
:param List[Dict[str,object]] json_patch: JSON Patch description of the update
to the relationship properties.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is either no
digital twin or relationship with the provided ID.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_match, error_map = prep_if_match(etag, match_condition)
if if_match:
options = DigitalTwinsUpdateRelationshipOptions(if_match=if_match)
return await self._client.digital_twins.update_relationship(
id=digital_twin_id,
relationship_id=relationship_id,
patch_document=json_patch,
digital_twins_update_relationship_options=options,
error_map=error_map,
**kwargs
)
@distributed_trace_async
async def delete_relationship(
self,
digital_twin_id: str,
relationship_id: str,
**kwargs
) -> None:
"""Delete a relationship on a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:param str relationship_id: The ID of the relationship to delete.
:keyword ~azure.core.MatchConditions match_condition:
The condition under which to perform the operation.
:keyword str etag:
Only perform the operation if the entity's etag matches the value provided
according to the `match_condition`.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is either no
digital twin or relationship with the provided ID.
"""
options = None
etag = kwargs.pop("etag", None)
match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
if_match, error_map = prep_if_match(etag, match_condition)
if if_match:
options = DigitalTwinsDeleteRelationshipOptions(if_match=if_match)
return await self._client.digital_twins.delete_relationship(
digital_twin_id,
relationship_id,
digital_twins_delete_relationship_options=options,
error_map=error_map,
**kwargs
)
@distributed_trace
def list_relationships(
self,
digital_twin_id: str,
relationship_id: Optional[str] = None,
**kwargs
) -> AsyncItemPaged[Dict[str, object]]:
"""Retrieve relationships for a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:param str relationship_id: The ID of the relationship to
get (if None all the relationship will be retrieved).
:return: An iterator instance of list of relationships.
:rtype: ~azure.core.async_paging.AsyncItemPaged[Dict[str,object]]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is no
digital twin with the provided ID.
"""
return self._client.digital_twins.list_relationships(
digital_twin_id,
relationship_name=relationship_id,
**kwargs
)
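    # The result is an AsyncItemPaged, so it is consumed with `async for`
    # (the twin ID and relationship fields shown are illustrative):
    #
    #   async for rel in client.list_relationships("room-1"):
    #       print(rel["$relationshipId"], rel["$targetId"])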
@distributed_trace
def list_incoming_relationships(
self,
digital_twin_id: str,
**kwargs
) -> AsyncItemPaged['IncomingRelationship']:
"""Retrieve all incoming relationships for a digital twin.
:param str digital_twin_id: The ID of the digital twin.
:return: An iterator instance of list of incoming relationships.
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.digitaltwins.core.IncomingRelationship]
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is no
digital twin with the provided ID.
"""
return self._client.digital_twins.list_incoming_relationships(
digital_twin_id,
**kwargs
)
@distributed_trace_async
async def publish_telemetry(
self,
digital_twin_id: str,
telemetry: object,
**kwargs
) -> None:
"""Publish telemetry from a digital twin, which is then consumed by
        one or more destination endpoints (subscribers) defined under event routes.
:param str digital_twin_id: The ID of the digital twin
:param object telemetry: The telemetry data to be sent
:keyword str message_id: The message ID. If not specified, a UUID will be generated.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is no
digital twin with the provided ID.
"""
message_id = kwargs.pop('message_id', None) or str(uuid.uuid4())
timestamp = Serializer.serialize_iso(datetime.utcnow())
return await self._client.digital_twins.send_telemetry(
digital_twin_id,
message_id=message_id,
telemetry=telemetry,
telemetry_source_time=timestamp,
**kwargs
)
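    # Usage sketch (twin ID and payload are illustrative): the telemetry payload
    # is any JSON-serialisable object, and message_id defaults to a fresh UUID.
    #
    #   await client.publish_telemetry("room-1", {"Temperature": 21.5})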
@distributed_trace_async
async def publish_component_telemetry(
self,
digital_twin_id: str,
component_name: str,
telemetry: object,
**kwargs
) -> None:
"""Publish telemetry from a digital twin's component, which is then consumed
        by one or more destination endpoints (subscribers) defined under event routes.
:param str digital_twin_id: The ID of the digital twin.
:param str component_name: The name of the DTDL component.
:param object telemetry: The telemetry data to be sent.
:keyword str message_id: The message ID. If not specified, a UUID will be generated.
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
:raises ~azure.core.exceptions.ResourceNotFoundError: If there is no
digital twin with the provided ID or the component name is invalid.
"""
message_id = kwargs.pop('message_id', None) or str(uuid.uuid4())
timestamp = Serializer.serialize_iso(datetime.utcnow())
return await self._client.digital_twins.send_component_telemetry(
digital_twin_id,
component_name,
message_id=message_id,
telemetry=telemetry,
telemetry_source_time=timestamp,
**kwargs
)
@distributed_trace_async
async def get_model(self, model_id: str, **kwargs) -> DigitalTwinsModelData: | [
" \"\"\"Get a model, including the model metadata and the model definition."
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
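The delete_relationship path above turns an optional etag plus a MatchConditions value into an If-Match header via prep_if_match before calling the generated client. The sketch below illustrates that conditional-request pattern only; the enum, error class, and mapping are stand-ins, not the actual azure-digitaltwins-core internals.

from enum import Enum

class MatchConditions(Enum):
    # Stand-in for azure.core.MatchConditions; only the two values used above.
    Unconditionally = 1
    IfNotModified = 2

class ResourceModifiedError(Exception):
    # Placeholder for the error a 412 (Precondition Failed) response should raise.
    pass

def prep_if_match(etag, match_condition):
    # Hypothetical mapping: IfNotModified sends the etag as If-Match and treats
    # 412 as "resource modified"; Unconditionally sends no header at all.
    if match_condition == MatchConditions.IfNotModified:
        if not etag:
            raise ValueError("etag is required with MatchConditions.IfNotModified")
        return etag, {412: ResourceModifiedError}
    return None, {}

if_match, error_map = prep_if_match('"0x1234"', MatchConditions.IfNotModified)
assert if_match == '"0x1234"' and 412 in error_map
assert prep_if_match(None, MatchConditions.Unconditionally) == (None, {})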
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import transaction
from BTrees.OOBTree import OOSet
from zeo_connector import transaction_manager
from zeo_connector.examples import DatabaseHandler
from settings import ZEO_CLIENT_PATH
from settings import TREE_PROJECT_KEY as PROJECT_KEY
# Variables ===================================================================
_TREE_HANDLER = None
# Functions & classes =========================================================
class TreeHandler(DatabaseHandler):
"""
This class is used as a database handler for :class:`.Tree` instances.
Attributes:
name_db_key (str): Key for the :attr:`.name_db`.
name_db (dict): Database handler dict for `name`.
aleph_id_db_key (str): Key for the :attr:`.aleph_id_db`.
aleph_id_db (dict): Database handler dict for `aleph_id`.
issn_db_key (str): Key for the :attr:`.issn_db`.
issn_db (dict): Database handler dict for `issn`.
path_db_key (str): Key for the :attr:`.path_db`.
path_db (dict): Database handler dict for `path`.
parent_db_key (str): Key for the :attr:`.parent_db`.
parent_db (dict): Database handler dict for `parent`.
"""
def __init__(self, conf_path=ZEO_CLIENT_PATH, project_key=PROJECT_KEY):
"""
Constructor.
Args:
conf_path (str): Path to the ZEO configuration file. Default
:attr:`~storage.settings.ZEO_CLIENT_PATH`.
project_key (str): Project key, which is used for lookups into ZEO.
Default :attr:`~storage.settings.TREE_PROJECT_KEY`.
"""
super(self.__class__, self).__init__(
conf_path=conf_path,
project_key=project_key
)
# tree.name -> tree
self.name_db_key = "name_db"
self.name_db = self._get_key_or_create(self.name_db_key)
# tree.aleph_id -> tree
self.aleph_id_db_key = "aleph_id_db"
self.aleph_id_db = self._get_key_or_create(self.aleph_id_db_key)
# tree.issn -> tree
self.issn_db_key = "issn_db"
self.issn_db = self._get_key_or_create(self.issn_db_key)
# tree.path -> tree
self.path_db_key = "path_db"
self.path_db = self._get_key_or_create(self.path_db_key)
# sub_tree.path -> parent
self.parent_db_key = "parent_db"
self.parent_db = self._get_key_or_create(self.parent_db_key)
@transaction_manager
def _add_to(self, db, index, item, default=OOSet):
"""
Add `item` to `db` under `index`. If `index` is not yet in `db`, create
it using `default`.
Args:
db (dict-obj): Dict-like object used to connect to database.
index (str): Index used to look in `db`.
item (obj): Persistent object, which may be stored in DB.
default (func/obj): Reference to function/object, which will be
used to create the object under `index`.
Default :class:`OOSet`.
"""
row = db.get(index, None)
if row is None:
row = default()
db[index] = row
row.add(item)
@transaction_manager
def add_tree(self, tree, parent=None):
"""
Add `tree` into database.
Args:
tree (obj): :class:`.Tree` instance.
parent (ref, default None): Reference to parent tree. This is used
for all sub-trees in recursive call.
"""
if tree.path in self.path_db:
self.remove_tree_by_path(tree.path)
# index all indexable attributes
for index in tree.indexes:
if not getattr(tree, index):
continue
self._add_to(
getattr(self, index + "_db"),
getattr(tree, index),
tree,
)
if parent:
self._add_to(self.parent_db, tree.path, parent)
# make sure that all sub-trees start with the path of the parent tree
for sub_tree in tree.sub_trees:
assert sub_tree.path.startswith(tree.path)
for sub_tree in tree.sub_trees:
self.add_tree(sub_tree, parent=tree)
def remove_tree_by_path(self, path):
"""
Remove the tree from database by given `path`.
Args:
path (str): Path of the tree.
"""
with transaction.manager:
trees = self.path_db.get(path, None)
if not trees:
return
for tree in trees:
return self._remove_tree(tree)
def remove_tree(self, tree):
"""
Remove the tree from the database, using the `tree` object to identify the path.
Args:
tree (obj): :class:`.Tree` instance.
"""
return self.remove_tree_by_path(tree.path)
def _remove_from(self, db, index, item):
"""
Remove `item` from `db` at `index`.
Note:
This function is inverse to :meth:`._add_to`.
Args:
db (dict-obj): Dict-like object used to connect to database.
index (str): Index used to look in `db`.
item (obj): Persistent object, which may be stored in DB.
"""
with transaction.manager:
row = db.get(index, None)
if row is None:
return
with transaction.manager:
if item in row:
row.remove(item)
with transaction.manager:
if not row:
del db[index]
@transaction_manager
def _remove_tree(self, tree, parent=None):
""" | [
" Really remove the tree identified by `tree` instance from all indexes"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
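The TreeHandler above maintains its secondary indexes (name, issn, path, parent) through the _add_to/_remove_from pair. A plain-dict sketch of the same bucket pattern, with the ZODB OOSet replaced by a built-in set purely for illustration:

def add_to(db, index, item, default=set):
    """Add item to db[index], creating the bucket with default() if missing."""
    row = db.get(index)
    if row is None:
        row = default()
        db[index] = row
    row.add(item)

def remove_from(db, index, item):
    """Inverse of add_to: drop item and delete the bucket once it is empty."""
    row = db.get(index)
    if row is None:
        return
    if item in row:
        row.remove(item)
    if not row:
        del db[index]

name_db = {}
add_to(name_db, "Journal A", "tree-1")
add_to(name_db, "Journal A", "tree-2")
remove_from(name_db, "Journal A", "tree-1")
assert name_db == {"Journal A": {"tree-2"}}
remove_from(name_db, "Journal A", "tree-2")
assert name_db == {}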
#!/usr/bin/env python2.7
import argparse
import os
import audioop
import numpy
import glob
import scipy
import subprocess
import wave
import pickle
import threading
import shutil
import ntpath
import matplotlib.pyplot as plt
from . import audioFeatureExtraction as aF
from . import audioTrainTest as aT
from . import audioSegmentation as aS
from . import audioVisualization as aV
from . import audioBasicIO
from . import utilities as uT
import scipy.io.wavfile as wavfile
import matplotlib.patches
def dirMp3toWavWrapper(directory, samplerate, channels):
if not os.path.isdir(directory):
raise Exception("Input path not found!")
useMp3TagsAsNames = True
audioBasicIO.convertDirMP3ToWav(directory, samplerate, channels, useMp3TagsAsNames)
def dirWAVChangeFs(directory, samplerate, channels):
if not os.path.isdir(directory):
raise Exception("Input path not found!")
audioBasicIO.convertFsDirWavToWav(directory, samplerate, channels)
def featureExtractionFileWrapper(wavFileName, outFile, mtWin, mtStep, stWin, stStep):
if not os.path.isfile(wavFileName):
raise Exception("Input audio file not found!")
aF.mtFeatureExtractionToFile(wavFileName, mtWin, mtStep, stWin, stStep, outFile, True, True, True)
def beatExtractionWrapper(wavFileName, plot):
if not os.path.isfile(wavFileName):
raise Exception("Input audio file not found!")
[Fs, x] = audioBasicIO.readAudioFile(wavFileName)
F = aF.stFeatureExtraction(x, Fs, 0.050 * Fs, 0.050 * Fs)
BPM, ratio = aF.beatExtraction(F, 0.050, plot)
print("Beat: {0:d} bpm ".format(int(BPM)))
print("Ratio: {0:.2f} ".format(ratio))
def featureExtractionDirWrapper(directory, mtWin, mtStep, stWin, stStep):
if not os.path.isdir(directory):
raise Exception("Input path not found!")
aF.mtFeatureExtractionToFileDir(directory, mtWin, mtStep, stWin, stStep, True, True, True)
def featureVisualizationDirWrapper(directory):
if not os.path.isdir(directory):
raise Exception("Input folder not found!")
aV.visualizeFeaturesFolder(directory, "pca", "")
#aV.visualizeFeaturesFolder(directory, "lda", "artist")
def fileSpectrogramWrapper(wavFileName):
if not os.path.isfile(wavFileName):
raise Exception("Input audio file not found!")
[Fs, x] = audioBasicIO.readAudioFile(wavFileName)
x = audioBasicIO.stereo2mono(x)
specgram, TimeAxis, FreqAxis = aF.stSpectogram(x, Fs, round(Fs * 0.040), round(Fs * 0.040), True)
def fileChromagramWrapper(wavFileName):
if not os.path.isfile(wavFileName):
raise Exception("Input audio file not found!")
[Fs, x] = audioBasicIO.readAudioFile(wavFileName)
x = audioBasicIO.stereo2mono(x)
specgram, TimeAxis, FreqAxis = aF.stChromagram(x, Fs, round(Fs * 0.040), round(Fs * 0.040), True)
def trainClassifierWrapper(method, beatFeatures, directories, modelName):
if len(directories) < 2:
raise Exception("At least 2 directories are needed")
aT.featureAndTrain(directories, 1, 1, aT.shortTermWindow, aT.shortTermStep,
method.lower(), modelName, computeBEAT=beatFeatures)
def trainRegressionWrapper(method, beatFeatures, dirName, modelName):
aT.featureAndTrainRegression(dirName, 1, 1, aT.shortTermWindow, aT.shortTermStep,
method.lower(), modelName, computeBEAT=beatFeatures)
def classifyFileWrapper(inputFile, modelType, modelName):
if not os.path.isfile(modelName):
raise Exception("Input modelName not found!")
if not os.path.isfile(inputFile):
raise Exception("Input audio file not found!")
[Result, P, classNames] = aT.fileClassification(inputFile, modelName, modelType)
print("{0:s}\t{1:s}".format("Class", "Probability"))
for i, c in enumerate(classNames):
print("{0:s}\t{1:.2f}".format(c, P[i]))
print("Winner class: " + classNames[int(Result)])
def regressionFileWrapper(inputFile, modelType, modelName):
if not os.path.isfile(inputFile):
raise Exception("Input audio file not found!")
R, regressionNames = aT.fileRegression(inputFile, modelName, modelType)
for i in range(len(R)):
print("{0:s}\t{1:.3f}".format(regressionNames[i], R[i]))
def classifyFolderWrapper(inputFolder, modelType, modelName, outputMode=False):
if not os.path.isfile(modelName):
raise Exception("Input modelName not found!")
files = "*.wav"
if os.path.isdir(inputFolder):
strFilePattern = os.path.join(inputFolder, files)
else:
strFilePattern = inputFolder + files
wavFilesList = []
wavFilesList.extend(glob.glob(strFilePattern))
wavFilesList = sorted(wavFilesList)
if len(wavFilesList) == 0:
print("No WAV files found!")
return
Results = []
for wavFile in wavFilesList:
[Result, P, classNames] = aT.fileClassification(wavFile, modelName, modelType)
Result = int(Result)
Results.append(Result)
if outputMode:
print("{0:s}\t{1:s}".format(wavFile, classNames[Result]))
Results = numpy.array(Results)
# print distribution of classes:
[Histogram, _] = numpy.histogram(Results, bins=numpy.arange(len(classNames) + 1))
for i, h in enumerate(Histogram):
print("{0:20s}\t\t{1:d}".format(classNames[i], h))
def regressionFolderWrapper(inputFolder, modelType, modelName):
files = "*.wav"
if os.path.isdir(inputFolder):
strFilePattern = os.path.join(inputFolder, files)
else:
strFilePattern = inputFolder + files
wavFilesList = []
wavFilesList.extend(glob.glob(strFilePattern))
wavFilesList = sorted(wavFilesList)
if len(wavFilesList) == 0:
print("No WAV files found!")
return
Results = []
for wavFile in wavFilesList:
R, regressionNames = aT.fileRegression(wavFile, modelName, modelType)
Results.append(R)
Results = numpy.array(Results)
for i, r in enumerate(regressionNames):
[Histogram, bins] = numpy.histogram(Results[:, i])
centers = (bins[0:-1] + bins[1::]) / 2.0
plt.subplot(len(regressionNames), 1, i + 1)
plt.plot(centers, Histogram)
plt.title(r) | [
" plt.show()"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
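classifyFolderWrapper above reports the class distribution by bucketing integer predictions with numpy.histogram over bins 0..len(classNames). A small standalone check of that bucketing (class names and predictions are made up):

import numpy as np

classNames = ["music", "speech", "silence"]   # made-up labels
Results = np.array([0, 2, 2, 1, 2, 0])        # predicted class index per file

# One bin per class: edges 0,1,2,3, so bin i counts predictions equal to i.
Histogram, _ = np.histogram(Results, bins=np.arange(len(classNames) + 1))
assert list(Histogram) == [2, 1, 3]
for name, count in zip(classNames, Histogram):
    print("{0:20s}\t\t{1:d}".format(name, count))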
'''
Parallel computation of pseudospecta of a square matrix by its definition.
Author: Dmitry E. Kislov
E-mail: [email protected]
Date: 25 Nov. 2015
'''
from __future__ import print_function
import multiprocessing
import warnings
import numpy as np
import itertools
__all__ = ['gersgorin_bounds', 'pseudo', 'eigen_bounds']
def gersgorin_bounds(A):
'''Localize eigenvalues of a matrix in the complex plane.
The function uses the well-known S.A. Gersgorin (1931) theorem on
matrix eigenvalue localization: the eigenvalues lie in the closed region
of the complex plane consisting of the union of the discs
.. math::
|z-a_{kk}|\leq R_k - |a_{kk}|, R_k=\sum\limits_{i=1}^n|a_{ki}|
:param A: the input matrix as a ``numpy.array`` or 2D list with ``A.shape==(n, m)``.
For rectangular matrices the bounding box is computed for the largest square submatrix with shape min(n,m) x min(n,m).
'''
n, m = np.shape(A)
if n <= m:
B = A[:n, :n]
else:
B = A[:m, :m]
n = m
_A = np.abs(B)
Rk = np.sum(_A, axis=1)
radii = [Rk[k] - _A[k, k] for k in range(n)]
rbounds = [B[k, k].real - radii[k] for k in range(n)]
rbounds.extend([B[k, k].real + radii[k] for k in range(n)])
cbounds = [B[k, k].imag - radii[k] for k in range(n)]
cbounds.extend([B[k, k].imag + radii[k] for k in range(n)])
return [np.min(rbounds), np.max(rbounds), np.min(cbounds), np.max(cbounds)]
def eigen_bounds(A, percent=0.1):
'''Build pseudospectra bounds on matrix eigenvalues
:param A: the input matrix as a ``numpy.array`` or 2D list with ``A.shape==(n, m)``.
For rectangular matrices bounding box is computed for the largest square
submatrix with shape min(n,m) x min(n,m).
:param percent: a margin for the bounding box construction (default is 0.1).
Bound values are computed as extreme eigenvalues +/- percent*residual,
where the residual is the maximal distance between all possible
pairs of eigenvalues.
'''
n, m = np.shape(A)
if n <= m:
B = A[:n, :n]
else:
B = A[:m, :m]
eigvals = np.linalg.eigvals(B)
reals = np.real(eigvals)
imags = np.imag(eigvals)
lbr = np.min(reals)
ubr = np.max(reals)
lbc = np.min(imags)
ubc = np.max(imags)
residual = np.max([abs(x-y) for x, y in itertools.combinations(eigvals, 2)])
return [lbr-percent*residual,
ubr+percent*residual,
lbc-percent*residual,
ubc+percent*residual]
def _safe_bbox(bbox, A):
'''converts bbox array to the array of type [float, float, float, float].
'''
assert len(bbox) >= 4, "Length of bbox should be equal to or greater than 4."
try:
res = [float(bbox[i]) for i in range(4)]
except (TypeError, ValueError):
warnings.warn('Invalid bbox-array. Gershgorin circles will be used.',
RuntimeWarning)
res = gersgorin_bounds(A)
return res
| [
"def _calc_pseudo(A, x, y, n, m):"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
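gersgorin_bounds above builds a bounding box from the Gershgorin discs |z - a_kk| <= R_k - |a_kk|, with R_k the k-th absolute row sum. A tiny numeric check on a 2x2 matrix, re-deriving the box by hand rather than importing the module:

import numpy as np

A = np.array([[2.0, 1.0],
              [0.5, -1.0]])
absA = np.abs(A)
Rk = absA.sum(axis=1)            # absolute row sums: [3.0, 1.5]
radii = Rk - np.diag(absA)       # disc radii:        [1.0, 0.5]
centers = np.diag(A)             # disc centers:      [2.0, -1.0]
lo = (centers - radii).min()     # leftmost real bound
hi = (centers + radii).max()     # rightmost real bound
assert (lo, hi) == (-1.5, 3.0)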
from promise import Promise
from graphql import graphql
from graphql.type import (
GraphQLSchema,
GraphQLObjectType,
GraphQLInt,
GraphQLField,
GraphQLInputObjectField
)
from ..mutation import mutation_with_client_mutation_id
class Result(object):
def __init__(self, result, clientMutationId=None):
self.clientMutationId = clientMutationId
self.result = result
simpleMutation = mutation_with_client_mutation_id(
'SimpleMutation',
input_fields={},
output_fields={
'result': GraphQLField(GraphQLInt)
},
mutate_and_get_payload=lambda _info, **_input: Result(result=1)
)
simpleMutationWithThunkFields = mutation_with_client_mutation_id(
'SimpleMutationWithThunkFields',
input_fields=lambda: {
'inputData': GraphQLInputObjectField(GraphQLInt)
},
output_fields=lambda: {
'result': GraphQLField(GraphQLInt)
},
mutate_and_get_payload=lambda _info, **input_: Result(result=input_['inputData'])
)
simplePromiseMutation = mutation_with_client_mutation_id(
'SimplePromiseMutation',
input_fields={},
output_fields={
'result': GraphQLField(GraphQLInt)
},
mutate_and_get_payload=lambda _info, **_input: Promise.resolve(Result(result=1))
)
simpleRootValueMutation = mutation_with_client_mutation_id(
'SimpleRootValueMutation',
input_fields={},
output_fields={
'result': GraphQLField(GraphQLInt)
},
mutate_and_get_payload=lambda info, **_input: info.root_value
)
mutation = GraphQLObjectType(
'Mutation',
fields={
'simpleMutation': simpleMutation,
'simpleMutationWithThunkFields': simpleMutationWithThunkFields,
'simplePromiseMutation': simplePromiseMutation,
'simpleRootValueMutation': simpleRootValueMutation
}
)
schema = GraphQLSchema(
query=mutation,
mutation=mutation
)
def test_requires_an_argument():
query = '''
mutation M {
simpleMutation {
result
}
}
'''
result = graphql(schema, query)
assert len(result.errors) == 1
def test_returns_the_same_client_mutation_id():
query = '''
mutation M {
simpleMutation(input: {clientMutationId: "abc"}) {
result
clientMutationId
}
}
'''
expected = {
'simpleMutation': {
'result': 1,
'clientMutationId': 'abc'
}
}
result = graphql(schema, query)
assert not result.errors
assert result.data == expected
def test_supports_thunks_as_input_and_output_fields():
query = '''
mutation M {
simpleMutationWithThunkFields(
input: {inputData: 1234, clientMutationId: "abc"}) {
result
clientMutationId
}
}
'''
expected = {
'simpleMutationWithThunkFields': {
'result': 1234,
'clientMutationId': 'abc'
}
}
result = graphql(schema, query)
assert not result.errors
assert result.data == expected
def test_supports_promise_mutations():
query = '''
mutation M {
simplePromiseMutation(input: {clientMutationId: "abc"}) {
result
clientMutationId
}
}
'''
expected = {
'simplePromiseMutation': {
'result': 1,
'clientMutationId': 'abc'
}
}
result = graphql(schema, query)
assert not result.errors
assert result.data == expected
def test_can_access_root_value():
query = '''
mutation M {
simpleRootValueMutation(input: {clientMutationId: "abc"}) {
result
clientMutationId
}
}
'''
expected = {
'simpleRootValueMutation': {
'result': 1,
'clientMutationId': 'abc'
}
}
result = graphql(schema, query, root=Result(result=1))
assert not result.errors
assert result.data == expected
| [
"def test_contains_correct_input():"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
"""
.. Copyright (c) 2014- Marshall Farrier
license http://opensource.org/licenses/MIT
Data - preprocessing functions (:mod:`pynance.data.prep`)
=========================================================
.. currentmodule:: pynance.data.prep
"""
import numpy as np
import pandas as pd
def center(dataset, out=None):
"""
Returns a centered data set.
Each column of the returned data will have mean 0.
The row vector subtracted from each row to achieve this
transformation is also returned.
Parameters
---------- | [
" dataset : DataFrame or ndarray"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
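The center() docstring above is cut off at the row boundary, but the described behaviour (each column of the returned data has mean 0, and the subtracted row vector is returned alongside it) is easy to sketch with numpy; this is an illustration, not the pynance implementation:

import numpy as np

def center_sketch(dataset, out=None):
    """Subtract each column's mean; return (centered data, row vector of means)."""
    means = np.mean(dataset, axis=0, keepdims=True)   # 1 x n row vector
    centered = np.subtract(dataset, means, out=out)
    return centered, means

data = np.array([[1.0, 10.0],
                 [3.0, 14.0]])
centered, means = center_sketch(data)
assert np.allclose(centered.mean(axis=0), 0.0)
assert np.allclose(means, [[2.0, 12.0]])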
import os | [
"import sys\t"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
__author__ = 'aarongary'
from itertools import groupby
from elasticsearch import Elasticsearch
from app import elastic_search_uri
from bson.json_util import dumps
from models.ConditionSearchModel import ConditionSearchResults
import pymongo
from app import util
from operator import itemgetter
es = Elasticsearch([elastic_search_uri],send_get_body_as='POST',timeout=300) # Prod Clustered Server
def get_condition_search(queryTerms, pageNumber=1):
myConditionSearchResults = ConditionSearchResults()
myConditionSearchResults.name = 'my name'
cosmic_grouped_items = get_cosmic_search(queryTerms, pageNumber)
basic_results = get_cosmic_grouped_by_tissues_then_diseases(queryTerms, pageNumber) #get_cosmic_grouped_by_disease_tissue(queryTerms, pageNumber)
myConditionSearchResults.add_simplified_cosmic_item(cosmic_grouped_items)
myConditionSearchResults.add_basic_cosmic_list(basic_results)
result = myConditionSearchResults.to_JSON()
return result
# for c_g_i in cosmic_grouped_items[0]['grouped_items']:
# for c_g_i_p in c_g_i['phenotypes']:
# #myConditionSearchResults.addGroupedCosmicConditions(c_g_i['gene_name']['name'], c_g_i_p)#['phenotype_name'])
# myConditionSearchResults.addGroupedCosmicConditionsGene(c_g_i_p['phenotype_name'], c_g_i['gene_name']['name'], c_g_i_p['group_info'])#, c_g_i_p['variants'])
# clinvar_grouped_items = get_clinvar_search(queryTerms, pageNumber)
# for c_g_i in clinvar_grouped_items[0]['grouped_items']: #phenotype_name': hit["_source"]["node_name"], 'gene_name': genehit, 'resources
#for c_g_i_p in c_g_i['searchResultTitle']:
#myConditionSearchResults.addGroupedClinvarConditions(c_g_i['gene_name']['name'], c_g_i_p)#['phenotype_name'])
# myConditionSearchResults.addGroupedClinvarConditionsGene(c_g_i_p['phenotype_name'], c_g_i['gene_name']['name'], c_g_i_p['resources'])#['phenotype_name'])
# for c_g_i_p in c_g_i['phenotype_name']:
#myConditionSearchResults.addGroupedClinvarConditions(c_g_i['gene_name']['name'], c_g_i_p)#['phenotype_name'])
# myConditionSearchResults.addGroupedClinvarConditionsGene(c_g_i_p, c_g_i['gene_name'], c_g_i['resources'])#['phenotype_name'])
# myConditionSearchResults.group_items_by_conditions()
# myConditionSearchResults.updateCounts()
# result = myConditionSearchResults.to_JSON()
# return result
def get_cosmic_grouped_items(queryTerms, phenotypes=None):
hitCount = 0
phenotype_network_data = {
'searchGroupTitle': 'Phenotypes',
'clusterNodeName': "",
'searchTab': 'PHENOTYPES',
'items': [],
'geneSuperList': [],
'geneScoreRangeMax': '100',
'geneScoreRangeMin': '5',
'geneScoreRangeStep': '0.1'
}
should_match = []
must_match = []
queryTermArray = queryTerms.split(',')
for queryTerm in queryTermArray:
should_match.append({"match": {"node_list.node.name": queryTerm}})
if(phenotypes is not None):
phenotypeTermArray = phenotypes.split('~')
for phenotypeTerm in phenotypeTermArray:
must_match.append({"match": {"node_name": phenotypeTerm}})
search_body = {
'sort' : [
'_score'
],
'query': {
'bool': {
'must': must_match,
'should': should_match
}
},
'size': 15
}
else:
search_body = {
'sort' : [
'_score'
],
'query': {
'bool': {
'should': should_match
}
},
'size': 15
}
result = es.search(
index = 'conditions',
doc_type = 'conditions_clinvar',
body = search_body
)
print("Got %d Hits:" % result['hits']['total'])
if(result['hits']['total'] < 1):
print 'no results'
gene_network_data_items = {
'searchResultTitle': 'No Results',
'hit_id': 'N/A',
'diseaseType': "",
'clusterName': 'No Results',
'searchResultSummary': 'No Results',
'searchResultScoreRank': '0',
'luceneScore': '0',
'searchResultScoreRankTitle': '',
'filterValue': '0.0000000029',
'emphasizeInfoArray': [],
'emphasizeInfoArrayWithWeights': [],
'top5': 'true',
'hitOrder': '0',
'pubmedCount': 0
}
phenotype_network_data['items'].append(gene_network_data_items)
return [phenotype_network_data]
else:
for hit in result['hits']['hits']:
hitCount += 1
type_counts = {}#{'genes': len(hit["_source"]["node_list"]['node'])}#'indel': 0, 'insertion': 0, 'deletion': 0, 'duplication': 0, 'single nucleotide variant': 0}
emphasizeInfoArrayWithWeights = []
searchResultSummaryString = hit["_source"]["source"] + '-' + str(hit["_source"]["degree"])
for genehit in queryTermArray:
for item in hit["_source"]["node_list"]['node']:
if(item["name"] == genehit):
emphasizeInfoArrayWithWeights.append(item)
break
for variant_hit in hit['_source']['variant_list']['node']:
# indel, insertion, deletion, duplication, single nucleotide variant
if(upcase_first_letter(variant_hit['variant_type']) in type_counts):
type_counts[upcase_first_letter(variant_hit['variant_type'])] += 1
else:
type_counts[upcase_first_letter(variant_hit['variant_type'])] = 1
phenotype_ids = []
for phenotype_id in hit['_source']['phenotype_id_list']['node']:
ids_split = phenotype_id['name'].split(':')
if(len(ids_split) > 1):
phenotype_ids.append({ids_split[0]:ids_split[1]})
gene_network_data_items = {
'searchResultTitle': hit["_source"]["node_name"],
'hit_id': hit['_id'],
'diseaseType': '', #"[Phenotype = " + hit["_source"]["node_name"] + "]",
'clusterName': hit["_source"]["node_name"],
'searchResultSummary': searchResultSummaryString,
'searchResultScoreRank': hit["_score"],
'luceneScore': hit["_score"],
'searchResultScoreRankTitle': 'pubmed references ',
'filterValue': '0.0000000029',
'emphasizeInfoArray': emphasizeInfoArrayWithWeights,
'emphasizeInfoArrayWithWeights': emphasizeInfoArrayWithWeights,
'phenotype_ids': phenotype_ids,
'node_type_counts': type_counts,
'top5': hitCount < 5,
'hitOrder': hitCount,
'pubmedCount': 0,
'queryGenesCount': len(emphasizeInfoArrayWithWeights)
}
phenotype_network_data['items'].append(gene_network_data_items)
#==================================
# GROUP PHENOTYPE BY TARGETED GENE
#==================================
phenotype_gene_grouping = []
for phenotype_hit in phenotype_network_data['items']:
match_found = False
# After first item is already added (need to append to existing array)
for gene_loop_item in phenotype_gene_grouping:
if(len(phenotype_hit['emphasizeInfoArray']) > 0):
if(gene_loop_item['gene_name'] == phenotype_hit['emphasizeInfoArray'][0]):
gene_loop_item['searchResultTitle'].append({'phenotype_name': phenotype_hit['searchResultTitle'],
'hit_id': phenotype_hit['hit_id']})
match_found = True
# First item added
if(not match_found):
if(len(phenotype_hit['emphasizeInfoArray']) > 0):
phenotype_gene_grouping.append(
{
'gene_name': phenotype_hit['emphasizeInfoArray'][0],
'searchResultTitle': [{'phenotype_name': phenotype_hit['searchResultTitle'],
'hit_id': phenotype_hit['hit_id']}]
}
)
else:
phenotype_gene_grouping.append(
{
'gene_name': 'unknown',
'searchResultTitle': [{'phenotype_name': phenotype_hit['searchResultTitle'],
'hit_id': phenotype_hit['hit_id']}]
}
)
for phenotype_gene_no_count_item in phenotype_gene_grouping:
phenotype_gene_no_count_item['gene_count'] = len(phenotype_gene_no_count_item['searchResultTitle'])
#drug_gene_dumped = dumps(drug_gene_grouping)
phenotype_network_data['grouped_items'] = phenotype_gene_grouping
return [phenotype_network_data]
def get_cosmic_searchx(queryTerms, pageNumber):
hitCount = 0
from_page = (pageNumber - 1) * 50
if(from_page < 0):
from_page = 0
phenotype_network_data = {
'searchGroupTitle': 'Phenotypes',
'clusterNodeName': "",
'searchTab': 'PHENOTYPES',
'items': [],
'geneSuperList': [],
'geneScoreRangeMax': '100',
'geneScoreRangeMin': '5',
'geneScoreRangeStep': '0.1'
}
should_match = []
must_match = []
queryTermArray = queryTerms.split(',')
for queryTerm in queryTermArray:
should_match.append({"match": {"node_list.node.name": queryTerm}})
search_body = {
'sort' : [
'_score'
],
'query': {
'bool': {
'should': should_match
}
},
'from': from_page,
'size': 50
}
result = es.search(
index = 'conditions',
doc_type = 'conditions_clinvar',
body = search_body
)
print("Got %d Hits:" % result['hits']['total'])
if(result['hits']['total'] < 1):
print 'no results'
gene_network_data_items = {
'searchResultTitle': 'No Results',
'hit_id': 'N/A',
'diseaseType': "",
'clusterName': 'No Results',
'searchResultSummary': 'No Results',
'searchResultScoreRank': '0',
'luceneScore': '0',
'searchResultScoreRankTitle': '',
'filterValue': '0.0000000029',
'emphasizeInfoArray': [],
'emphasizeInfoArrayWithWeights': [],
'top5': 'true',
'hitOrder': '0',
'pubmedCount': 0
}
| [
" phenotype_network_data['items'].append(gene_network_data_items)"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
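The grouping loop at the end of the snippet above collapses phenotype hits under the first emphasized gene, falling back to "unknown" when no gene matched. A dictionary-based sketch of that grouping, simplified to key on the gene name and using made-up hits:

from collections import defaultdict

hits = [  # shape mirrors the phenotype items built above; values are fabricated
    {"searchResultTitle": "Phenotype A", "hit_id": "1", "emphasizeInfoArray": [{"name": "TP53"}]},
    {"searchResultTitle": "Phenotype B", "hit_id": "2", "emphasizeInfoArray": [{"name": "TP53"}]},
    {"searchResultTitle": "Phenotype C", "hit_id": "3", "emphasizeInfoArray": []},
]

grouped = defaultdict(list)
for hit in hits:
    genes = hit["emphasizeInfoArray"]
    gene_name = genes[0]["name"] if genes else "unknown"
    grouped[gene_name].append({"phenotype_name": hit["searchResultTitle"],
                               "hit_id": hit["hit_id"]})

assert len(grouped["TP53"]) == 2 and len(grouped["unknown"]) == 1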
import re
import logging
import xmltodict
from share.transform.chain import ChainTransformer, ctx, links as tools
from share.transform.chain.exceptions import InvalidIRI
from share.transform.chain.links import GuessAgentTypeLink
from share.transform.chain.parsers import Parser
from share.transform.chain.utils import force_text
from share.transform.chain.utils import oai_allowed_by_sets
logger = logging.getLogger(__name__)
def get_list(dct, key):
val = dct.get(key, [])
return val if isinstance(val, list) else [val]
#### Identifiers ####
class MODSWorkIdentifier(Parser):
schema = 'WorkIdentifier'
uri = tools.RunPython(force_text, ctx)
class Extra:
identifier_type = tools.Try(ctx['@type'])
class MODSAgentIdentifier(Parser):
schema = 'AgentIdentifier'
uri = ctx
#### Agents ####
class AffiliatedAgent(Parser):
schema = tools.GuessAgentType(ctx, default='organization')
name = ctx
class IsAffiliatedWith(Parser):
related = tools.Delegate(AffiliatedAgent, ctx)
class MODSAgent(Parser):
schema = tools.RunPython('get_agent_schema', ctx)
name = tools.OneOf(
tools.RunPython(force_text, ctx['mods:displayForm']),
tools.RunPython('squash_name_parts', ctx)
)
related_agents = tools.Map(tools.Delegate(IsAffiliatedWith), tools.Concat(tools.Try(
tools.Filter(lambda x: bool(x), tools.RunPython(force_text, ctx['mods:affiliation']))
)))
identifiers = tools.Map(
tools.Delegate(MODSAgentIdentifier),
tools.Unique(tools.Map(
tools.Try(tools.IRI(), exceptions=(InvalidIRI, )),
tools.Map(
tools.RunPython(force_text),
tools.Filter(
lambda obj: 'invalid' not in obj,
tools.Try(ctx['mods:nameIdentifier']),
)
)
))
)
class Extra:
name_type = tools.Try(ctx['@type'])
name_part = tools.Try(ctx['mods:namePart'])
affiliation = tools.Try(ctx['mods:affiliation'])
description = tools.Try(ctx['mods:description'])
display_form = tools.Try(ctx['mods:displayForm'])
etal = tools.Try(ctx['mods:etal'])
name_identifier = tools.Try(ctx['mods:nameIdentifier'])
def squash_name_parts(self, name):
name_parts = get_list(name, 'mods:namePart')
return ' '.join([force_text(n) for n in name_parts])
def get_agent_schema(self, obj):
name_type = obj.get('@type')
if name_type == 'personal':
return 'person'
if name_type == 'conference':
return 'organization'
# TODO SHARE-718
# if name_type == 'family':
# return 'family'
if name_type == 'corporate':
return GuessAgentTypeLink(default='organization').execute(self.squash_name_parts(obj))
return GuessAgentTypeLink().execute(self.squash_name_parts(obj))
class MODSPersonSplitName(MODSAgent):
schema = 'person'
name = None
family_name = tools.RunPython('get_name_part', ctx, 'family')
given_name = tools.RunPython('get_name_part', ctx, 'given')
suffix = tools.RunPython('get_name_part', ctx, 'termsOfAddress')
def get_name_part(self, obj, type):
name_parts = get_list(obj, 'mods:namePart')
return ' '.join([force_text(n) for n in name_parts if n.get('@type') == type])
class MODSSimpleAgent(Parser):
schema = tools.GuessAgentType(ctx, default='organization')
name = ctx
class MODSSimplePublisher(Parser):
schema = 'Publisher'
agent = tools.Delegate(MODSSimpleAgent, ctx)
#### Tags/Subjects ####
class MODSSubject(Parser):
schema = 'Subject'
name = ctx
class MODSThroughSubjects(Parser):
schema = 'ThroughSubjects'
subject = tools.Delegate(MODSSubject, ctx)
class MODSTag(Parser):
schema = 'Tag'
name = ctx
class MODSThroughTags(Parser):
schema = 'ThroughTags'
tag = tools.Delegate(MODSTag, ctx)
#### Work Relations ####
RELATION_MAP = {
# 'preceding':
# 'succeeding':
'original': 'IsDerivedFrom',
'host': 'IsPartOf',
'constituent': 'IsPartOf',
'series': 'IsPartOf',
# 'otherVersion':
# 'otherFormat':
'isReferencedBy': 'References',
'references': 'References',
'reviewOf': 'Reviews',
}
REVERSE_RELATIONS = {
'isReferencedBy',
'constituent',
}
# Finds the generated subclass of MODSCreativeWork
def related_work_parser(_):
return type(next(p for p in ctx.parsers if isinstance(p, MODSCreativeWork)))
def map_relation_type(obj):
return RELATION_MAP.get(obj['@type'], 'WorkRelation')
class MODSReverseWorkRelation(Parser):
schema = tools.RunPython(map_relation_type)
subject = tools.Delegate(related_work_parser, ctx)
class MODSWorkRelation(Parser):
schema = tools.RunPython(map_relation_type)
related = tools.Delegate(related_work_parser, ctx)
def work_relation_parser(obj):
if obj['@type'] in REVERSE_RELATIONS:
return MODSReverseWorkRelation
return MODSWorkRelation
#### Agent-work relations ####
def agent_parser(name):
name_parts = get_list(name, 'mods:namePart')
split_name = any(isinstance(n, dict) and n.get('@type') in {'given', 'family'} for n in name_parts)
return MODSPersonSplitName if split_name else MODSAgent
class MODSAgentWorkRelation(Parser):
schema = 'AgentWorkRelation'
agent = tools.Delegate(agent_parser, ctx)
cited_as = tools.RunPython(force_text, tools.Try(ctx['mods:displayForm']))
class MODSHost(MODSAgentWorkRelation):
schema = 'Host'
class MODSFunder(MODSAgentWorkRelation):
schema = 'Funder'
class MODSContributor(MODSAgentWorkRelation):
schema = 'Contributor'
class MODSCreator(MODSContributor):
schema = 'Creator'
order_cited = ctx('index')
class MODSPublisher(MODSAgentWorkRelation):
schema = 'Publisher'
#### Works ####
class MODSCreativeWork(Parser):
default_type = 'CreativeWork'
type_map = None
role_map = None
schema = tools.RunPython(
'get_schema',
tools.OneOf(
tools.RunPython(force_text, ctx['mods:genre']),
tools.Static(None)
)
)
title = tools.RunPython('join_title_info', ctx)
# Abstracts have the optional attribute "shareable". Don't bother checking for it, because
# abstracts that are not shareable should not have been shared with SHARE.
description = tools.Join(tools.RunPython(force_text, tools.Try(ctx['mods:abstract']), '\n'))
identifiers = tools.Map(
tools.Delegate(MODSWorkIdentifier),
tools.Filter(
lambda obj: 'invalid' not in obj,
tools.Concat( | [
" tools.Try(ctx['mods:identifier']),"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for expression_generalization_metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
from neural_guided_symbolic_regression.utils import expression_generalization_metrics
class ExpressionGeneralizationMetricsTest(parameterized.TestCase):
def test_combine_list_values_in_dict(self):
expression_dict = {'x + 1': ['x + 1'],
'1': ['1', '(1)', '((1))']}
all_expressions = (
expression_generalization_metrics.combine_list_values_in_dict(
expression_dict))
self.assertCountEqual(all_expressions, ['x + 1', '1', '(1)', '((1))'])
@parameterized.parameters([
# 'x + 1' and '(x + 1)' should be semantically equivalent.
({'1': ['1'], 'x': ['x'], 'x + 1': ['x + 1'], 'x - 1': ['x - 1']},
{'1': ['1'], 'x': ['x'], 'x + 1': ['(x + 1)'], 'x - 1': ['(x - 1)']},
False,
['1', 'x'], ['x + 1', 'x - 1'],
['1', 'x', 'x + 1', 'x - 1'], []),
# All expressions are unseen.
({'2*x + 1': ['2*x + 1'], '3*x + 1': ['3*x + 1']},
{'1': ['1'], 'x': ['x']},
False,
[], ['2*x + 1', '3*x + 1'],
[], ['2*x + 1', '3*x + 1']),
# One additional expression in training_expressions should not affect the
# result.
({'1': ['1'], 'x': ['x']},
{'1': ['1'], 'x': ['x'], 'x + 1': ['(x + 1)']},
False,
['1', 'x'], [],
['1', 'x'], []),
# When training_expressions is empty.
({'1': ['1'], 'x': ['x']},
{},
False,
[], ['1', 'x'],
[], ['1', 'x']),
# When one simplified expression has multiple expression equivalences.
({'1': ['1', '1', '(1)', '((1))']},
{'x': ['x']},
False,
[], ['1', '1', '(1)', '((1))'],
[], ['1', '1', '(1)', '((1))']),
# When generated_expressions contains duplicates.
({'x': ['x', 'x', 'x']},
{'x': ['x']},
False,
['x', 'x', 'x'], [],
['x', 'x', 'x'], []),
# When all generated_expressions are syntactic novelty but not semantic
# novelty.
({'1': ['1', '(1)']},
{'1': ['((1))']},
False,
[], ['1', '(1)'],
['1', '(1)'], []),
# When generated_expressions and training_expressions are the same.
({'x': ['((x))']},
{'x': ['((x))']},
False,
['((x))'], [],
['((x))'], []),
# When sympy.simplify makes a mistake in computing simplified expressions
# for generated_expressions.
({'(x)': ['((x))']},
{'x': ['((x))']},
False,
['((x))'], [],
['((x))'], []),
# Test whether deduplicate works.
({'1': ['1', '1', '(1)', '(1)']},
{'x': ['x']},
True,
[], ['1', '(1)'],
[], ['1']),
# Test whether deduplicate works.
({'1': ['1', '1', '(1)', '(1)']},
{'1': ['1']},
True,
['1', '1'], ['(1)'],
['1', '1', '(1)', '(1)'], []),
])
def test_get_seen_and_unseen_expressions(
self,
generated_expressions,
training_expressions,
deduplicate_unseen,
expected_syntactic_seen_expressions,
expected_syntactic_unseen_expressions,
expected_semantic_seen_expressions,
expected_semantic_unseen_expressions):
seen_and_unseen_expressions = (
expression_generalization_metrics.get_seen_and_unseen_expressions(
generated_expressions, training_expressions, deduplicate_unseen))
# The ordering of the expressions does not matter.
self.assertCountEqual(
seen_and_unseen_expressions.syntactic_novelty[0],
expected_syntactic_seen_expressions)
self.assertCountEqual(
seen_and_unseen_expressions.syntactic_novelty[1],
expected_syntactic_unseen_expressions)
self.assertCountEqual(
seen_and_unseen_expressions.semantic_novelty[0],
expected_semantic_seen_expressions)
self.assertCountEqual(
seen_and_unseen_expressions.semantic_novelty[1],
expected_semantic_unseen_expressions)
@parameterized.parameters([
(['x + 1', 'x - 1'],
['1', 'x'],
expression_generalization_metrics.NoveltySummary(
num_seen=2,
num_unseen=2,
novelty_rate=0.5)),
(['x + 1', 'x - 1'],
[],
expression_generalization_metrics.NoveltySummary(
num_seen=2,
num_unseen=0,
novelty_rate=0)),
([],
['1', 'x'],
expression_generalization_metrics.NoveltySummary(
num_seen=0,
num_unseen=2,
novelty_rate=1)),
# With replicates.
(['x + 1', 'x - 1'],
['1', '1', 'x'],
expression_generalization_metrics.NoveltySummary(
num_seen=2,
num_unseen=3,
novelty_rate=0.6)),
])
def test_get_novelty_rate(
self,
seen_expressions,
unseen_expressions,
expected):
result = expression_generalization_metrics.get_novelty_rate(
seen_expressions, unseen_expressions)
self.assertEqual(result, expected)
| [
" def test_get_novelty_rate_raises(self):"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
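Every NoveltySummary expectation above follows one formula: novelty_rate = num_unseen / (num_seen + num_unseen). A one-function sketch reproducing just that field (the raise-on-empty behaviour is an assumption suggested by the truncated test name):

def novelty_rate(seen_expressions, unseen_expressions):
    """Fraction of generated expressions not found in the training set."""
    total = len(seen_expressions) + len(unseen_expressions)
    if total == 0:
        raise ValueError("no expressions to score")   # assumed behaviour
    return len(unseen_expressions) / float(total)

assert novelty_rate(['x + 1', 'x - 1'], ['1', 'x']) == 0.5
assert novelty_rate(['x + 1', 'x - 1'], []) == 0.0
assert novelty_rate([], ['1', 'x']) == 1.0
assert novelty_rate(['x + 1', 'x - 1'], ['1', '1', 'x']) == 0.6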
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper classes for tensor shape inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
from tensorflow.core.framework import tensor_shape_pb2
class Dimension(object):
"""Represents the value of one dimension in a TensorShape."""
def __init__(self, value):
"""Creates a new Dimension with the given value."""
if value is None:
self._value = None
else:
self._value = int(value)
if self._value < 0:
raise ValueError("Dimension %d must be >= 0" % self._value)
def __repr__(self):
return "Dimension(%s)" % repr(self._value)
def __eq__(self, other):
"""Returns true if `other` has the same known value as this Dimension."""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
return self._value == other.value
def __ne__(self, other):
"""Returns true if `other` has a different known value from `self`."""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
return self._value != other.value
def __int__(self):
return self._value
def __index__(self):
# Allow use in Python 3 range
return self._value
@property
def value(self):
"""The value of this dimension, or None if it is unknown."""
return self._value
def is_compatible_with(self, other):
"""Returns true if `other` is compatible with this Dimension.
Two known Dimensions are compatible if they have the same value.
An unknown Dimension is compatible with all other Dimensions.
Args:
other: Another Dimension.
Returns:
True if this Dimension and `other` are compatible.
"""
other = as_dimension(other)
return (self._value is None
or other.value is None
or self._value == other.value)
def assert_is_compatible_with(self, other):
"""Raises an exception if `other` is not compatible with this Dimension.
Args:
other: Another Dimension.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
"""
if not self.is_compatible_with(other):
raise ValueError("Dimensions %s and %s are not compatible"
% (self, other))
def merge_with(self, other):
"""Returns a Dimension that combines the information in `self` and `other`.
Dimensions are combined as follows:
Dimension(n) .merge_with(Dimension(n)) == Dimension(n)
Dimension(n) .merge_with(Dimension(None)) == Dimension(n)
Dimension(None).merge_with(Dimension(n)) == Dimension(n)
Dimension(None).merge_with(Dimension(None)) == Dimension(None)
Dimension(n) .merge_with(Dimension(m)) raises ValueError for n != m
Args:
other: Another Dimension.
Returns:
A Dimension containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
"""
other = as_dimension(other)
self.assert_is_compatible_with(other)
if self._value is None:
return Dimension(other.value)
else:
return Dimension(self._value)
def __add__(self, other):
"""Returns the sum of `self` and `other`.
Dimensions are summed as follows:
Dimension(m) + Dimension(n) == Dimension(m + n)
Dimension(m) + Dimension(None) == Dimension(None)
Dimension(None) + Dimension(n) == Dimension(None)
Dimension(None) + Dimension(None) == Dimension(None)
Args:
other: Another Dimension.
Returns:
A Dimension whose value is the sum of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value + other.value)
def __sub__(self, other):
"""Returns the subtraction of `other` from `self`.
Dimensions are subtracted as follows:
Dimension(m) - Dimension(n) == Dimension(m - n)
Dimension(m) - Dimension(None) == Dimension(None)
Dimension(None) - Dimension(n) == Dimension(None)
Dimension(None) - Dimension(None) == Dimension(None)
Args:
other: Another Dimension.
Returns:
A Dimension whose value is the subtraction of sum of `other` from `self`.
"""
other = as_dimension(other)
if self._value is None or other.value is None: | [
" return Dimension(None)"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
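The Dimension docstrings above fully specify merge_with and is_compatible_with: an unknown value (None) is compatible with everything, and merging two known values requires them to be equal. A tiny standalone model of those semantics using Optional[int] instead of the TensorFlow class:

from typing import Optional

def is_compatible(a: Optional[int], b: Optional[int]) -> bool:
    """None means 'unknown'; an unknown dimension is compatible with anything."""
    return a is None or b is None or a == b

def merge(a: Optional[int], b: Optional[int]) -> Optional[int]:
    """merge(n, None) == n; merge(n, m) raises for n != m."""
    if not is_compatible(a, b):
        raise ValueError("Dimensions %s and %s are not compatible" % (a, b))
    return a if a is not None else b

assert merge(3, None) == 3
assert merge(None, None) is None
assert merge(4, 4) == 4
try:
    merge(4, 5)
except ValueError:
    pass
else:
    raise AssertionError("conflicting known dimensions must not merge")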
"""Backend for distributed parameter evaluation."""
import os
import shutil
from psyrun.backend.base import Backend, JobSourceFile
from psyrun.jobs import Job, JobChain, JobArray
from psyrun.pspace import dict_concat, missing, Param
from psyrun.mapper import map_pspace_hdd_backed
from psyrun.store import DefaultStore
from psyrun.utils.doc import inherit_docs
@inherit_docs
class DistributeBackend(Backend):
"""Create subtasks for distributed parameter evaluation.
This will create one tasks that splits the parameter space in a number of
equal batches (at most *max_jobs*, but with at least *min_items* for each
batch). After processing all batches the results will be merged into a
single file.
This is similar to map-reduce processing.
Parameters
----------
task : `TaskDef`
Task definition to create subtasks for.
"""
@property
def resultfile(self):
"""File in which the results will be stored."""
if self.task.resultfile:
return self.task.resultfile
else:
return os.path.join(
self.workdir, 'result' + self.task.store.ext)
@property
def pspace_file(self):
"""File that will store the input parameters space."""
return os.path.join(self.workdir, 'pspace' + self.task.store.ext)
def _try_mv_to_out(self, filename):
try:
os.rename(
os.path.join(self.workdir, filename),
os.path.join(self.workdir, 'out', 'pre' + self.task.store.ext))
return True
except OSError:
return False
def create_job(self, cont=False):
if cont:
outdir = os.path.join(self.workdir, 'out')
if not self._try_mv_to_out('result' + self.task.store.ext):
Splitter.merge(
outdir, os.path.join(outdir, 'pre' + self.task.store.ext))
for filename in os.listdir(outdir):
if not filename.startswith('pre'):
os.remove(os.path.join(outdir, filename))
pspace = self.get_missing()
try:
indir = os.path.join(self.workdir, 'in')
shutil.rmtree(indir)
except OSError:
pass
else:
pspace = self.task.pspace
self.task.store.save(self.pspace_file, pspace.build())
splitter = Splitter(
self.workdir, pspace, self.task.max_jobs, self.task.min_items,
store=self.task.store)
split = self.create_split_job(splitter)
process = self.create_process_job(splitter)
merge = self.create_merge_job(splitter)
return JobChain(self.task.name, [split, process, merge])
def create_split_job(self, splitter):
code = '''
from psyrun.backend.distribute import Splitter
from psyrun.pspace import Param
pspace = Param(**task.store.load({pspace!r}))
Splitter(
{workdir!r}, pspace, {max_jobs!r}, {min_items!r},
store=task.store).split()
'''.format(
pspace=self.pspace_file,
workdir=splitter.workdir, max_jobs=self.task.max_jobs,
min_items=self.task.min_items)
file_dep = [os.path.join(os.path.dirname(self.task.path), f)
for f in self.task.file_dep]
return Job(
'split', self.submit_code, {'code': code},
[self.task.path] + file_dep,
[f for f, _ in splitter.iter_in_out_files()])
def create_process_job(self, splitter):
source_file = JobSourceFile(
os.path.join(self.workdir, self.task.name + ':process.py'),
self.task,
'''
import sys
from psyrun.backend.distribute import Worker
def execute(*args, **kwargs):
return task.execute(*args, **kwargs)
if __name__ == '__main__':
Worker(
int(sys.argv[3]), store=task.store,
exclude_from_result=task.exclude_from_result).start(
execute, sys.argv[1], sys.argv[2], pool_size={pool_size},
setup_fn=task.setup)
'''.format(pool_size=self.task.pool_size))
infile = os.path.join(splitter.indir, '%a' + splitter.store.ext)
outfile = os.path.join(splitter.outdir, '%a' + splitter.store.ext)
return JobArray(
splitter.n_splits, 'process', self.submit_array, self.submit_file,
{'job_source_file': source_file, 'args': [infile, outfile, '%a']},
[infile], [outfile])
def create_merge_job(self, splitter):
code = '''
from psyrun.backend.distribute import Splitter
Splitter.merge({outdir!r}, {filename!r}, append=False, store=task.store)
'''.format(outdir=splitter.outdir, filename=self.resultfile)
return Job(
'merge', self.submit_code, {'code': code},
[f for _, f in splitter.iter_in_out_files()], [self.resultfile])
def get_missing(self):
pspace = self.task.pspace
try:
missing_items = missing(
pspace, Param(**self.task.store.load(self.resultfile)))
except (IOError, OSError):
missing_items = pspace
try:
for filename in os.listdir(os.path.join(self.workdir, 'out')):
if os.path.splitext(filename)[1] != self.task.store.ext:
continue
outfile = os.path.join(self.workdir, 'out', filename)
try:
missing_items = missing(
missing_items,
Param(**self.task.store.load(outfile)))
except (IOError, OSError):
pass
except (IOError, OSError):
pass
return missing_items
def get_queued(self):
scheduler = self.task.scheduler
status = [scheduler.get_status(j) for j in scheduler.get_jobs()]
for s in status:
if s.status != 'D' and self.task.name + ':split' in s.name:
return Param(**self.task.store.load(self.pspace_file))
queued = Param()
for s in status:
if s.status != 'D' and self.task.name + ':process' in s.name:
num = s.name.rsplit(':', 1)[-1]
filename = os.path.join(
self.workdir, 'in', num + self.task.store.ext)
queued += Param(**self.task.store.load(filename))
return queued
def get_failed(self):
scheduler = self.task.scheduler
status = (scheduler.get_status(j) for j in scheduler.get_jobs())
queued = [s.name for s in status if s.status != 'D']
indir = os.path.join(self.workdir, 'in')
if (not os.path.exists(indir) or
self.task.name + ':split' in queued):
return []
elif not os.path.exists(indir) or len(os.listdir(indir)) == 0:
return [self.task.name + ':split']
failed = []
for filename in os.listdir(indir):
if not os.path.exists(os.path.join(self.workdir, 'out', filename)):
jobname = self.task.name + ':process:' + os.path.splitext(
filename)[0]
if jobname not in queued:
failed.append(jobname)
if len(failed) == 0:
if not os.path.exists(self.resultfile):
return [self.task.name + ':merge']
return failed
class Splitter(object):
"""Split a parameter space into multiple input files and merge results
after processing.
Parameters
----------
workdir : str
Working directory to create input files in and read output files from.
pspace : `ParameterSpace`
Parameter space to split up.
max_splits : int, optional
Maximum number of splits to perform.
min_items : int, optional
Minimum number of parameter sets in each split.
store : `Store`, optional
Input/output backend.
Attributes
----------
indir : str
Directory to store input files.
max_splits : int
Maximum number of splits to perform.
min_items : int
Minimum number of parameter sets in each split.
outdir : str
Directory to store output files.
pspace : `ParameterSpace`
Parameter space to split up.
store : `Store`
Input/output backend.
workdir : str
Working directory to create input files in and read output files from.
"""
def __init__(
self, workdir, pspace, max_splits=64, min_items=4,
store=DefaultStore()):
self.workdir = workdir
self.indir = self._get_indir(workdir)
self.outdir = self._get_outdir(workdir)
if not os.path.exists(self.indir):
os.makedirs(self.indir)
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
self.pspace = pspace
self.max_splits = max_splits
self.min_items = min_items
self.store = store
@property
def n_splits(self):
"""Number of total splits that will be generated."""
n_splits = (len(self.pspace) - 1) // self.min_items + 1
if self.max_splits is not None:
n_splits = min(self.max_splits, n_splits)
return n_splits
def split(self):
"""Perform splitting of parameters space and save input files for
processing."""
items_remaining = len(self.pspace)
param_iter = self.pspace.iterate()
for i, filename in enumerate(self._iter_filenames()):
split_size = self.min_items
if self.max_splits is not None:
split_size = max(
split_size, items_remaining // (self.max_splits - i))
items_remaining -= split_size
block = dict_concat(
[row for row in self._iter_n(param_iter, split_size)])
self.store.save(os.path.join(self.indir, filename), block)
@classmethod
def merge(cls, outdir, merged_filename, append=True, store=DefaultStore()):
"""Merge processed files together.
Parameters
----------
outdir : str
Directory with the output files.
merged_filename : str
Filename of file to save with the merged results.
append : bool, optional
If True the merged data will be appended, otherwise the file
will be overwritten with the merged data.
store : `Store`, optional
Input/output backend.
"""
if not append:
store.save(merged_filename, {})
for filename in os.listdir(outdir):
if os.path.splitext(filename)[1] != store.ext:
continue
infile = os.path.join(outdir, filename)
store.append(merged_filename, store.load(infile))
def iter_in_out_files(self):
"""Return generator returning tuples of corresponding input and output
filenames."""
return ((os.path.join(self.indir, f), os.path.join(self.outdir, f))
for f in self._iter_filenames())
def _iter_filenames(self):
return (str(i) + self.store.ext for i in range(self.n_splits))
@staticmethod
def _iter_n(it, n):
for _ in range(n):
yield next(it)
@classmethod
def _get_indir(cls, workdir):
return os.path.join(workdir, 'in')
@classmethod
def _get_outdir(cls, workdir):
return os.path.join(workdir, 'out')
class Worker(object):
"""Maps a function to the parameter space loaded from a file and writes the
result to an output file.
Parameters
----------
proc_id : int
Worker ID.
store : `Store`, optional
Input/output backend.
exclude_from_result : sequence, optional
Keys of items to exclude from the result.
Attributes
----------
proc_id : int
Worker ID. | [
" store : `Store`"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
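Splitter above balances len(pspace) items into at most max_splits batches of at least min_items each, growing the batch size as items_remaining // (max_splits - i). A small arithmetic sketch of that schedule; min(size, remaining) stands in for the real split(), which simply stops when the parameter iterator runs out:

def split_sizes(n_items, max_splits=64, min_items=4):
    """Batch sizes a Splitter-style schedule would produce (sketch)."""
    n_splits = min(max_splits, (n_items - 1) // min_items + 1)
    sizes, remaining = [], n_items
    for i in range(n_splits):
        size = max(min_items, remaining // (max_splits - i))
        size = min(size, remaining)   # the real split() just exhausts its iterator
        sizes.append(size)
        remaining -= size
    return sizes

assert split_sizes(100, max_splits=4) == [25, 25, 25, 25]   # evenly balanced
assert split_sizes(10, max_splits=64) == [4, 4, 2]          # min_items dominates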
#!/usr/bin/env python3
import sys
import os.path
import glob
import re
import shlex
import subprocess
import errno
import string
from itertools import product
from functools import lru_cache
from time import time
from hashlib import md5
DEPS = {}
fmt = string.Formatter()
class Error(Exception): pass
def patsub(frompat, topat, items):
frompat = frompat.replace('%', '(.+?)')
topat = topat.replace('%', r'\1')
return [re.sub(frompat, topat, it) for it in items]
def allfiles(root):
result = []
for r, _, files in os.walk(root):
result.extend(os.path.join(r, it) for it in files)
return result
@lru_cache(None)
def makedirs(dirname):
os.makedirs(dirname, exist_ok=True)
@lru_cache(None)
def get_mtime(fname):
try:
return os.path.getmtime(fname)
except OSError as e: # pragma: no cover
if e.errno != errno.ENOENT:
raise
return 0
class fset(dict):
def __init__(self, match, frompat, topat):
if isinstance(match, (list, tuple, set)):
self.src = match
else:
self.src = glob.glob(match, recursive=True)
self.dest = patsub(frompat, topat, self.src)
dict.__init__(self, zip(self.dest, self.src))
assert not (set(self.src) & set(self.dest)), 'Source and dest files have similar items'
class Dep(object):
def __init__(self):
self.reqs = []
self.deps = []
self.order = []
self.rule = None
self.phony = False
def iter_reqs(self):
for r in self.reqs:
yield r
for r in self.deps:
yield r
for r in self.order:
yield r
@lru_cache(None)
def parse_cmd(cmd):
parts = shlex.split(cmd)
result = []
for p in parts:
flist = []
elist = []
for prefix, expr, _spec, _cnv in fmt.parse(p):
flist.append(prefix)
if expr:
flist.append('{}')
elist.append(compile(expr, expr, 'eval'))
result.append((''.join(flist), elist))
return result
def eval_cmd(cmd, globs=None, locs=None):
result = []
for f, elist in parse_cmd(cmd):
if not elist:
result.append(f)
continue
vals = []
for e in elist:
vals.append(flatten(eval(e, globs, locs)))
for va in product(*vals):
result.append(f.format(*va))
return result
def execute(cmd, globs=None, locs=None, depth=1):
if not globs and not locs:
frame = sys._getframe(depth)
globs = frame.f_globals
locs = frame.f_locals
cmd = eval_cmd(cmd, globs, locs)
subprocess.check_call(cmd)
class Rule(object):
def __init__(self, cmd, params, depth=1):
if type(cmd) == str:
cmd = [cmd]
self.cmd = cmd
self.params = params or {}
self.globals = sys._getframe(depth).f_globals
def execute(self, target, dep):
if callable(self.cmd):
print(self.cmd.__name__, dep.reqs, '->', target)
self.cmd(self, target, dep)
else:
l = {'target': target, 'reqs': dep.reqs,
'req': dep.reqs and dep.reqs[0]}
l.update(self.params)
for cmd in self.cmd:
ecmd = eval_cmd(cmd, self.globals, l)
print(' '.join(map(shlex.quote, ecmd)))
subprocess.check_call(ecmd)
def get_hash(self, target, dep):
if callable(self.cmd):
data = self.cmd.__code__.co_code
else:
l = {'target': target, 'reqs': dep.reqs,
'req': dep.reqs and dep.reqs[0]}
l.update(self.params)
data = []
for cmd in self.cmd:
data.append(' '.join(eval_cmd(cmd, self.globals, l)))
data = '|'.join(data).encode()
return md5(data).hexdigest()
class RuleHolder(object):
def __init__(self, tmap, params, depth):
self.tmap = tmap
self.params = params
self.depth = depth
def __call__(self, fn):
rule = Rule(fn, self.params, self.depth+1) | [
" for t in self.tmap:"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
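patsub in the build tool above treats '%' as a make-style wildcard by rewriting it into a regex group and backreference. A quick self-contained check of that rewriting (the function body is copied from the snippet above, plus an assertion):

import re

def patsub(frompat, topat, items):
    """Rewrite each item by treating '%' as a wildcard, make-style."""
    frompat = frompat.replace('%', '(.+?)')
    topat = topat.replace('%', r'\1')
    return [re.sub(frompat, topat, it) for it in items]

assert patsub('src/%.c', 'build/%.o', ['src/a.c', 'src/b.c']) == ['build/a.o', 'build/b.o']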
import os
import operator
import tensorflow as tf
import models
import time
import numpy as np
from datetime import datetime
from tensorflow.examples.tutorials.mnist import input_data
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_string('train_dir', './multigpu-trained',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('batch_size', 1024, """Number of images to process in a batch.""")
tf.app.flags.DEFINE_integer('epochs', 40, """Max epochs for training.""")
tf.app.flags.DEFINE_integer('log_step', 10, """Log step""")
tf.app.flags.DEFINE_integer('eval_step', 1, """Evaluate step of epoch""")
tf.app.flags.DEFINE_string('device_ids', '', """Device ids. split by comma, e.g. 0,1""")
#tf.app.flags.DEFINE_string('data_dir', '/home/comp/csshshi/data/tensorflow/MNIST_data/',
tf.app.flags.DEFINE_string('data_dir', os.environ['HOME']+'/data/tensorflow/MNIST_data/',
#tf.app.flags.DEFINE_string('data_dir', '/home/comp/pengfeixu/Data/tensorflow/MNIST_data/',
"""Path to the data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
tf.app.flags.DEFINE_boolean('log_device_placement', True,
"""Whether to log device placement.""")
tf.app.flags.DEFINE_integer('num_gpus', 2, """How many GPUs to use.""")
tf.app.flags.DEFINE_string('local_ps_device', 'GPU', """Local parameter server device: 'GPU' if GPUs are peered, otherwise 'CPU'; try both.""")
tf.app.flags.DEFINE_boolean('use_dataset', False,
"""Whether to use datasets vs. feed_dict.""")
tf.app.flags.DEFINE_boolean('xla', False,
"""True to use XLA, which has to be compiled in.""")
EPOCH_SIZE = 60000
TEST_SIZE = 10000
def createFakeData(count, featureDim, labelDim):
features = np.random.randn(count, featureDim)
labels = np.random.randint(0, labelDim, size=(count, 1))
return features, labels
features, labels = createFakeData(1024, 32*32*3, 10)
def getFakeMinibatch(minibatchSize, labelDim):
feat = features[:minibatchSize]
l = labels[:minibatchSize]
lab = np.zeros((minibatchSize, labelDim))
for i in range(lab.shape[0]):
lab[i][l[i]] = 1
return feat, lab
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
def get_real_batch_data(batch_size, label_dim):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
return batch_xs, batch_ys
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for single_grads in zip(*tower_grads):
grads = [g for g, _ in single_grads]
grad = tf.add_n(grads)
grad = tf.multiply(grad, 1.0/len(grads))
v = single_grads[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
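# --- Illustrative usage sketch (added for clarity, not part of the original
# --- script): average_gradients collapses per-tower (grad, var) lists into a
# --- single list that apply_gradients can consume. The two fake towers below
# --- share one variable and use made-up gradient values.
def _example_average_gradients():
    shared_var = tf.Variable(0.0, name='example_shared_var')
    tower_grads = [[(tf.constant(2.0), shared_var)],
                   [(tf.constant(4.0), shared_var)]]
    averaged = average_gradients(tower_grads)
    # averaged is [(grad, shared_var)] where grad evaluates to (2.0 + 4.0) / 2
    return averaged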
def train(model='fcn5'):
config = tf.ConfigProto(allow_soft_placement=True,log_device_placement=FLAGS.log_device_placement)
if FLAGS.xla:
# Turns on XLA. XLA is not included in the standard build. For single GPU this shows ~5% improvement
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
with tf.Graph().as_default(), tf.device("/" + FLAGS.local_ps_device + ":0"):
global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
device_ids = FLAGS.device_ids
if not device_ids:
device_ids = [str(i) for i in range(FLAGS.num_gpus)]
else:
device_ids = device_ids.split(',')
lr = 0.05
#optimizer = tf.train.GradientDescentOptimizer(lr)
optimizer = tf.train.MomentumOptimizer(lr, 0.9)
def assign_to_device(device, ps_device=FLAGS.local_ps_device):
worker_device = device
ps_sizes = [0]
            if FLAGS.local_ps_device.lower() == 'gpu':
ps_sizes = [0] * FLAGS.num_gpus
def _assign(op):
if op.device:
return op.device
if op.type not in ['Variable', 'VariableV2']:
return worker_device
device_index, _ = min(enumerate(
ps_sizes), key=operator.itemgetter(1))
device_name = '/' + FLAGS.local_ps_device +':' + str(device_index)
var_size = op.outputs[0].get_shape().num_elements()
ps_sizes[device_index] += var_size
return device_name
return _assign
images = None
labels = None
if FLAGS.use_dataset:
with tf.device('/CPU:0'):
d_features = mnist.train.images
d_labels = mnist.train.labels
dataset = tf.contrib.data.Dataset.from_tensor_slices((d_features, d_labels))
dataset = dataset.shuffle(buffer_size=60000)
dataset = dataset.repeat()
dataset = dataset.batch(FLAGS.batch_size)
# Trick to get datasets to buffer the next epoch. This is needed because
                # the data loading is occurring outside DataSets in python. Normally preprocessing
# would occur in DataSets and this odd looking line is not needed.
dataset = dataset.map(lambda x,y:(x,y),
num_threads=FLAGS.num_gpus,
output_buffer_size=FLAGS.num_gpus)
iterator = dataset.make_initializable_iterator()
images,labels = iterator.get_next()
tower_grads = []
feed_vars = []
average_loss_tensor = []
reuse_variables = False
accuracy = None
for i in xrange(FLAGS.num_gpus):
with tf.device(assign_to_device('/gpu:%s'%device_ids[i])):
with tf.name_scope('%s_%s' % ('TOWER', device_ids[i])) as scope:
if not FLAGS.use_dataset:
feature_dim = models.feature_dim
label_dim = models.label_dim
images = tf.placeholder(tf.float32, [None, feature_dim], name='images')
labels = tf.placeholder(tf.int64, [None, label_dim], name='labels')
feed_vars.append((images, labels))
with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
logits = models.model_fcn5(images)
if i == 0:
# Prediction only on GPU:0
predictionCorrectness = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(predictionCorrectness, "float"))
loss = models.loss(logits, labels)
reuse_variables = True
average_loss_tensor.append(loss)
grads = optimizer.compute_gradients(loss)
tower_grads.append(grads)
grads = average_gradients(tower_grads)
apply_gradient_op = optimizer.apply_gradients(grads, global_step=global_step)
train_op = apply_gradient_op
average_op = tf.reduce_mean(average_loss_tensor)
saver = tf.train.Saver(tf.global_variables())
init = tf.global_variables_initializer()
sess = tf.Session(config=config)
sess.run(init)
if FLAGS.use_dataset:
sess.run(iterator.initializer)
real_batch_size = FLAGS.batch_size * FLAGS.num_gpus
num_batches_per_epoch = int((EPOCH_SIZE + real_batch_size - 1)/ real_batch_size)
iterations = FLAGS.epochs * num_batches_per_epoch
average_batch_time = 0.0
epochs_info = []
step = 0 | [
" average_loss = 0.0"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
"""This module contains a collection of unit tests which
validate the ..tor_async_google_pubsub module.
"""
import httplib
import unittest
import uuid
import mock
from . import AsyncHTTPClientPatcher
from .. import tor_async_google
from .. import tor_async_google_pubsub
class AsyncActionTestCase(unittest.TestCase):
pass
class AsyncGeneratePubSubAccessTokenTestCase(unittest.TestCase):
def test_ctr_without_async_state(self):
credentials_filename = uuid.uuid4().hex
async_action = tor_async_google_pubsub.AsyncGeneratePubSubAccessToken(
credentials_filename)
self.assertEqual(async_action.credentials_filename, credentials_filename)
self.assertIsNotNone(async_action.scope)
self.assertIsNone(async_action.async_state)
def test_ctr_with_async_state(self):
credentials_filename = uuid.uuid4().hex
async_state = uuid.uuid4().hex
async_action = tor_async_google_pubsub.AsyncGeneratePubSubAccessToken(
credentials_filename,
async_state)
self.assertEqual(async_action.credentials_filename, credentials_filename)
self.assertIsNotNone(async_action.scope)
self.assertEqual(async_action.async_state, async_state)
class AsyncGetTopicTestCase(unittest.TestCase):
def test_ctr_without_async_state(self):
access_token = uuid.uuid4().hex
topic = uuid.uuid4().hex
agt = tor_async_google_pubsub.AsyncGetTopic(
access_token,
topic)
self.assertEqual(agt.access_token, access_token)
self.assertEqual(agt.topic, topic)
self.assertIsNone(agt.async_state)
self.assertIsNone(agt.get_failure_detail)
def test_ctr_with_async_state(self):
access_token = uuid.uuid4().hex
topic = uuid.uuid4().hex
async_state = uuid.uuid4().hex
agt = tor_async_google_pubsub.AsyncGetTopic(
access_token,
topic,
async_state)
self.assertEqual(agt.access_token, access_token)
self.assertEqual(agt.topic, topic)
self.assertEqual(agt.async_state, async_state)
self.assertIsNone(agt.get_failure_detail)
def test_http_get_ok(self):
response = mock.Mock(
code=httplib.OK,
headers={},
time_info={},
request_time=0.042,
request=mock.Mock(method='GET'))
with AsyncHTTPClientPatcher([response]):
access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex)
topic = uuid.uuid4().hex
agt = tor_async_google_pubsub.AsyncGetTopic(
access_token,
topic)
callback = mock.Mock()
agt.get(callback)
callback.assert_called_once_with(True, True, agt)
self.assertEqual(agt.get_failure_detail, type(agt).GFD_OK)
def test_http_get_not_found(self):
response = mock.Mock(
code=httplib.NOT_FOUND,
headers={},
time_info={},
request_time=0.042,
request=mock.Mock(method='GET'))
with AsyncHTTPClientPatcher([response]):
access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex)
topic = uuid.uuid4().hex
agt = tor_async_google_pubsub.AsyncGetTopic(
access_token,
topic)
callback = mock.Mock()
agt.get(callback)
callback.assert_called_once_with(True, False, agt)
self.assertEqual(agt.get_failure_detail, type(agt).GFD_OK)
def test_http_get_error(self):
response = mock.Mock(
code=httplib.INTERNAL_SERVER_ERROR,
headers={},
time_info={},
request_time=0.042,
request=mock.Mock(method='GET'))
with AsyncHTTPClientPatcher([response]):
access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex)
topic = uuid.uuid4().hex
agt = tor_async_google_pubsub.AsyncGetTopic(
access_token,
topic)
callback = mock.Mock()
agt.get(callback)
callback.assert_called_once_with(False, None, agt)
self.assertEqual(agt.get_failure_detail, type(agt).GFD_ERROR_GETTING_TOPIC)
class AsyncCreateTopicTestCase(unittest.TestCase):
def test_ctr_without_async_state(self):
access_token = uuid.uuid4().hex
topic = uuid.uuid4().hex
act = tor_async_google_pubsub.AsyncCreateTopic(
access_token,
topic)
self.assertEqual(act.access_token, access_token)
self.assertEqual(act.topic, topic)
self.assertIsNone(act.async_state)
self.assertIsNone(act.create_failure_detail)
def test_ctr_with_async_state(self):
access_token = uuid.uuid4().hex
topic = uuid.uuid4().hex
async_state = uuid.uuid4().hex
act = tor_async_google_pubsub.AsyncCreateTopic(
access_token,
topic,
async_state)
self.assertEqual(act.access_token, access_token)
self.assertEqual(act.topic, topic)
self.assertEqual(act.async_state, async_state)
self.assertIsNone(act.create_failure_detail)
def test_http_put_error(self):
response = mock.Mock(
code=httplib.INTERNAL_SERVER_ERROR,
headers={},
time_info={},
request_time=0.042,
request=mock.Mock(method='PUT'))
with AsyncHTTPClientPatcher([response]):
access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex)
topic = uuid.uuid4().hex
act = tor_async_google_pubsub.AsyncCreateTopic(
access_token,
topic)
callback = mock.Mock()
act.create(callback)
callback.assert_called_once_with(False, act)
self.assertEqual(act.create_failure_detail, type(act).CFD_ERROR_CREATING_TOPIC)
def test_happy_path(self):
response = mock.Mock(
code=httplib.OK,
headers={},
time_info={},
request_time=0.042,
request=mock.Mock(method='PUT'))
with AsyncHTTPClientPatcher([response]):
access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex)
topic = uuid.uuid4().hex
act = tor_async_google_pubsub.AsyncCreateTopic(
access_token,
topic)
callback = mock.Mock()
act.create(callback)
callback.assert_called_once_with(True, act)
self.assertEqual(act.create_failure_detail, type(act).CFD_OK)
class AsyncDeleteTopicTestCase(unittest.TestCase):
def test_ctr_without_async_state(self):
access_token = uuid.uuid4().hex
topic = uuid.uuid4().hex
adt = tor_async_google_pubsub.AsyncDeleteTopic(
access_token,
topic)
self.assertEqual(adt.access_token, access_token)
self.assertEqual(adt.topic, topic)
self.assertIsNone(adt.async_state)
self.assertIsNone(adt.delete_failure_detail)
def test_ctr_with_async_state(self):
access_token = uuid.uuid4().hex
topic = uuid.uuid4().hex
async_state = uuid.uuid4().hex
adt = tor_async_google_pubsub.AsyncDeleteTopic(
access_token,
topic,
async_state)
self.assertEqual(adt.access_token, access_token)
self.assertEqual(adt.topic, topic)
self.assertEqual(adt.async_state, async_state)
self.assertIsNone(adt.delete_failure_detail)
def test_http_delete_error(self):
response = mock.Mock(
code=httplib.INTERNAL_SERVER_ERROR,
headers={},
time_info={},
request_time=0.042,
request=mock.Mock(method='DELETE'))
with AsyncHTTPClientPatcher([response]):
access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex)
topic = uuid.uuid4().hex
adt = tor_async_google_pubsub.AsyncDeleteTopic(
access_token,
topic)
callback = mock.Mock()
adt.delete(callback)
callback.assert_called_once_with(False, adt)
self.assertEqual(adt.delete_failure_detail, type(adt).DFD_ERROR_DELETING_TOPIC)
def test_happy_path(self):
response = mock.Mock(
code=httplib.OK,
headers={},
time_info={},
request_time=0.042,
request=mock.Mock(method='DELETE'))
with AsyncHTTPClientPatcher([response]):
access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex)
topic = uuid.uuid4().hex
adt = tor_async_google_pubsub.AsyncDeleteTopic(
access_token,
topic)
callback = mock.Mock()
adt.delete(callback)
callback.assert_called_once_with(True, adt)
self.assertEqual(adt.delete_failure_detail, type(adt).DFD_OK)
class AsyncGetSubscriptionTopicTestCase(unittest.TestCase):
def test_ctr_without_async_state(self):
access_token = uuid.uuid4().hex
subscription = uuid.uuid4().hex
ags = tor_async_google_pubsub.AsyncGetSubscription(
access_token,
subscription)
self.assertEqual(ags.access_token, access_token)
self.assertEqual(ags.subscription, subscription)
self.assertIsNone(ags.async_state)
self.assertIsNone(ags.get_failure_detail)
def test_ctr_with_async_state(self):
access_token = uuid.uuid4().hex
subscription = uuid.uuid4().hex
async_state = uuid.uuid4().hex
ags = tor_async_google_pubsub.AsyncGetSubscription(
access_token,
subscription,
async_state)
self.assertEqual(ags.access_token, access_token)
self.assertEqual(ags.subscription, subscription)
self.assertEqual(ags.async_state, async_state)
self.assertIsNone(ags.get_failure_detail)
def test_http_get_ok(self):
response = mock.Mock(
code=httplib.OK,
headers={},
time_info={},
request_time=0.042,
request=mock.Mock(method='GET'))
with AsyncHTTPClientPatcher([response]):
access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex)
subscription = uuid.uuid4().hex
ags = tor_async_google_pubsub.AsyncGetSubscription(
access_token,
subscription)
callback = mock.Mock()
ags.get(callback)
callback.assert_called_once_with(True, True, ags)
self.assertEqual(ags.get_failure_detail, type(ags).GFD_OK)
def test_http_get_not_found(self):
response = mock.Mock(
code=httplib.NOT_FOUND,
headers={},
time_info={},
request_time=0.042,
request=mock.Mock(method='GET'))
with AsyncHTTPClientPatcher([response]):
access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex)
subscription = uuid.uuid4().hex
ags = tor_async_google_pubsub.AsyncGetSubscription(
access_token,
subscription)
callback = mock.Mock()
ags.get(callback)
callback.assert_called_once_with(True, False, ags)
self.assertEqual(ags.get_failure_detail, type(ags).GFD_OK)
def test_http_get_error(self):
response = mock.Mock(
code=httplib.INTERNAL_SERVER_ERROR,
headers={},
time_info={},
request_time=0.042,
request=mock.Mock(method='GET'))
with AsyncHTTPClientPatcher([response]):
access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex)
subscription = uuid.uuid4().hex
ags = tor_async_google_pubsub.AsyncGetSubscription(
access_token,
subscription)
callback = mock.Mock()
ags.get(callback)
callback.assert_called_once_with(False, None, ags)
self.assertEqual(ags.get_failure_detail, type(ags).GFD_ERROR_GETTING_SUBSCRIPTION)
class AsyncCreatePushSubscriptionTestCase(unittest.TestCase):
def test_ctr_without_async_state(self):
access_token = uuid.uuid4().hex
topic = uuid.uuid4().hex
subscription = uuid.uuid4().hex
ack_deadline_in_seconds = uuid.uuid4().hex
push_endpoint = uuid.uuid4().hex
acs = tor_async_google_pubsub.AsyncCreatePushSubscription(
access_token,
topic,
subscription,
ack_deadline_in_seconds,
push_endpoint)
self.assertEqual(acs.access_token, access_token)
self.assertEqual(acs.topic, topic)
self.assertEqual(acs.subscription, subscription)
self.assertEqual(acs.ack_deadline_in_seconds, ack_deadline_in_seconds)
self.assertEqual(acs.push_endpoint, push_endpoint)
self.assertIsNone(acs.async_state)
self.assertIsNone(acs.create_failure_detail)
def test_ctr_with_async_state(self):
access_token = uuid.uuid4().hex
topic = uuid.uuid4().hex
subscription = uuid.uuid4().hex
ack_deadline_in_seconds = uuid.uuid4().hex
push_endpoint = uuid.uuid4().hex
async_state = uuid.uuid4().hex
acs = tor_async_google_pubsub.AsyncCreatePushSubscription(
access_token,
topic,
subscription,
ack_deadline_in_seconds,
push_endpoint,
async_state)
self.assertEqual(acs.access_token, access_token)
self.assertEqual(acs.topic, topic)
self.assertEqual(acs.subscription, subscription)
self.assertEqual(acs.ack_deadline_in_seconds, ack_deadline_in_seconds)
self.assertEqual(acs.push_endpoint, push_endpoint)
self.assertEqual(acs.async_state, async_state)
self.assertIsNone(acs.create_failure_detail)
def test_http_error(self):
response = mock.Mock(
code=httplib.INTERNAL_SERVER_ERROR,
headers={},
time_info={},
request_time=0.042, | [
" request=mock.Mock(method='PUT'))"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
from __future__ import print_function
import sys
sys.path.insert(0, '../')
import unittest
import os
import screenTestRunner
EXPECTED_DIR = './expected/'
screenTestCases = [{
'name': 'simpleLoadAndQuit',
}, {
'name': 'tallLoadAndQuit',
'screenConfig': {
'maxX': 140,
'maxY': 60,
},
}, {
'name': 'selectFirst',
'inputs': ['f'],
}, {
'name': 'selectFirstWithDown',
'inputs': ['F'],
}, {
'name': 'selectDownSelect',
'inputs': ['f', 'j', 'f'],
}, {
'name': 'selectWithDownSelect',
'inputs': ['F', 'f'],
}, {
'name': 'selectDownSelectInverse',
'inputs': ['f', 'j', 'f', 'A'],
}, {
'name': 'selectWithDownSelectInverse',
'inputs': ['F', 'F', 'A'],
}, {
'name': 'selectTwoCommandMode',
'input': 'absoluteGitDiff.txt',
'inputs': ['f', 'j', 'f', 'c'],
'pastScreen': 3
}, {
'name': 'selectCommandWithPassedCommand',
'input': 'absoluteGitDiff.txt',
# the last key "a" is so we quit from command mode
# after seeing the warning
'withAttributes': True,
'inputs': ['f', 'c', 'a'],
'pastScreen': 1,
'args': ["-c 'git add'"]
}, {
'name': 'simpleWithAttributes',
'withAttributes': True
}, {
'name': 'simpleSelectWithAttributes',
'withAttributes': True,
'inputs': ['f', 'j'],
}, {
'name': 'simpleSelectWithColor',
'input': 'gitDiffColor.txt',
'withAttributes': True,
'inputs': ['f', 'j'],
'screenConfig': {
'maxX': 200,
'maxY': 40,
},
}, {
'name': 'gitDiffWithScroll',
'input': 'gitDiffNoStat.txt',
'inputs': ['f', 'j'],
}, {
'name': 'gitDiffWithScrollUp',
'input': 'gitLongDiff.txt',
'inputs': ['k', 'k'],
}, {
'name': 'gitDiffWithPageDown',
'input': 'gitLongDiff.txt',
'inputs': [' ', ' '],
}, {
'name': 'gitDiffWithPageDownColor',
'input': 'gitLongDiffColor.txt',
'inputs': [' ', ' '],
'withAttributes': True,
}, {
'name': 'gitDiffWithValidation',
'input': 'gitDiffSomeExist.txt',
'validateFileExists': True,
'withAttributes': True,
}, {
'name': 'longFileNames',
'input': 'longFileNames.txt',
'validateFileExists': False,
'withAttributes': False,
'screenConfig': {
'maxX': 20,
'maxY': 30,
}
}, {
'name': 'dontWipeChrome',
'input': 'gitDiffColor.txt',
'withAttributes': True,
    'validateFileExists': False,
'inputs': ['DOWN', 'f', 'f', 'f', 'UP'],
'screenConfig': {
'maxX': 201,
'maxY': 40 | [
" },"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=
"""Parallelization utility optimizer."""
__all__ = ['split_data', 'split_and_load', 'clip_global_norm',
'check_sha1', 'download']
import os
import hashlib
import warnings
try:
import requests
except ImportError:
class requests_failed_to_import(object):
pass
requests = requests_failed_to_import
import numpy as np
from .. import ndarray
def split_data(data, num_slice, batch_axis=0, even_split=True):
"""Splits an NDArray into `num_slice` slices along `batch_axis`.
Usually used for data parallelism where each slices is sent
to one device (i.e. GPU).
Parameters
----------
data : NDArray
A batch of data.
num_slice : int
Number of desired slices.
batch_axis : int, default 0
The axis along which to slice.
even_split : bool, default True
Whether to force all slices to have the same number of elements.
If `True`, an error will be raised when `num_slice` does not evenly
divide `data.shape[batch_axis]`.
Returns
-------
list of NDArray
Return value is a list even if `num_slice` is 1.
"""
size = data.shape[batch_axis]
if size < num_slice:
raise ValueError(
"Too many slices for data with shape %s. Arguments are " \
"num_slice=%d and batch_axis=%d."%(str(data.shape), num_slice, batch_axis))
if even_split and size % num_slice != 0:
raise ValueError(
"data with shape %s cannot be evenly split into %d slices along axis %d. " \
"Use a batch size that's multiple of %d or set even_split=False to allow " \
"uneven partitioning of data."%(
str(data.shape), num_slice, batch_axis, num_slice))
step = size // num_slice
if batch_axis == 0:
slices = [data[i*step:(i+1)*step] if i < num_slice - 1 else data[i*step:size]
for i in range(num_slice)]
elif even_split:
slices = ndarray.split(data, num_outputs=num_slice, axis=batch_axis)
else:
slices = [ndarray.slice_axis(data, batch_axis, i*step, (i+1)*step)
if i < num_slice - 1 else
ndarray.slice_axis(data, batch_axis, i*step, size)
for i in range(num_slice)]
return slices
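# Hedged usage sketch (added for illustration, not part of the original
# module): split a batch of four rows into two even slices along axis 0.
# The array values are made up.
def _example_split_data():
    data = ndarray.arange(12).reshape((4, 3))
    halves = split_data(data, num_slice=2, batch_axis=0)
    # halves is a list of two NDArrays, each of shape (2, 3)
    return [part.shape for part in halves]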
def split_and_load(data, ctx_list, batch_axis=0, even_split=True):
"""Splits an NDArray into `len(ctx_list)` slices along `batch_axis` and loads
each slice to one context in `ctx_list`.
Parameters
----------
data : NDArray
A batch of data.
ctx_list : list of Context
A list of Contexts.
batch_axis : int, default 0
The axis along which to slice.
even_split : bool, default True
Whether to force all slices to have the same number of elements.
Returns
-------
list of NDArray
Each corresponds to a context in `ctx_list`.
"""
if not isinstance(data, ndarray.NDArray):
data = ndarray.array(data, ctx=ctx_list[0])
if len(ctx_list) == 1:
return [data.as_in_context(ctx_list[0])]
slices = split_data(data, len(ctx_list), batch_axis, even_split)
return [i.as_in_context(ctx) for i, ctx in zip(slices, ctx_list)]
def clip_global_norm(arrays, max_norm):
"""Rescales NDArrays so that the sum of their 2-norm is smaller than `max_norm`.
"""
assert len(arrays) > 0
ctx = arrays[0].context
total_norm = ndarray.add_n(*[ndarray.dot(x, x).as_in_context(ctx)
for x in (arr.reshape((-1,)) for arr in arrays)])
total_norm = ndarray.sqrt(total_norm).asscalar()
if not np.isfinite(total_norm):
warnings.warn(UserWarning('nan or inf is detected. Clipping results will be undefined.'),
stacklevel=2)
scale = max_norm / (total_norm + 1e-8)
if scale < 1.0:
for arr in arrays:
arr *= scale
return total_norm
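# Hedged usage sketch (added for illustration, not part of the original
# module): rescale two gradient arrays in place so that their joint 2-norm
# does not exceed 1.0. The array contents are made up.
def _example_clip_global_norm():
    grads = [ndarray.array([3.0, 4.0]), ndarray.array([0.0])]
    pre_clip_norm = clip_global_norm(grads, max_norm=1.0)
    # pre_clip_norm is ~5.0 and grads were rescaled to a joint 2-norm of ~1.0
    return pre_clip_norm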
def _indent(s_, numSpaces):
"""Indent string
"""
s = s_.split('\n')
if len(s) == 1:
return s_
first = s.pop(0)
s = [first] + [(numSpaces * ' ') + line for line in s]
s = '\n'.join(s) | [
" return s"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
import io
import os
import tempfile
import numpy as np
import pyvips
from imageio import imwrite
from PIL import Image
from histomicstk.annotations_and_masks.annotation_and_mask_utils import (
get_image_from_htk_response, get_scale_factor_and_appendStr)
from histomicstk.annotations_and_masks.annotations_to_masks_handler import \
_visualize_annotations_on_rgb
from histomicstk.annotations_and_masks.annotations_to_object_mask_handler import \
get_all_rois_from_slide_v2
from histomicstk.annotations_and_masks.masks_to_annotations_handler import \
get_annotation_documents_from_contours
from histomicstk.workflows.workflow_runner import (Slide_iterator,
Workflow_runner)
# %============================================================================
# CONSTANTS
# source: https://libvips.github.io/libvips/API/current/Examples.md.html
# source 2: https://libvips.github.io/libvips/API/current/Examples.md.html
# source 3: https://github.com/libvips/pyvips/issues/109
# source 4: https://github.com/libvips/libvips/issues/1254
# map np dtypes to vips
DTYPE_TO_FORMAT = {
'uint8': 'uchar',
'int8': 'char',
'uint16': 'ushort',
'int16': 'short',
'uint32': 'uint',
'int32': 'int',
'float32': 'float',
'float64': 'double',
'complex64': 'complex',
'complex128': 'dpcomplex',
}
# map vips formats to np dtypes
FORMAT_TO_DTYPE = {
'uchar': np.uint8,
'char': np.int8,
'ushort': np.uint16,
'short': np.int16,
'uint': np.uint32,
'int': np.int32,
'float': np.float32,
'double': np.float64,
'complex': np.complex64,
'dpcomplex': np.complex128,
}
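# Hedged illustration (added for clarity, not part of the original module):
# the two maps above support a typical numpy <-> pyvips round trip; the
# array size below is arbitrary.
def _example_numpy_vips_roundtrip():
    arr = np.zeros((16, 16, 3), dtype=np.uint8)
    height, width, bands = arr.shape
    vim = pyvips.Image.new_from_memory(
        arr.tobytes(), width, height, bands, DTYPE_TO_FORMAT[str(arr.dtype)])
    back = np.frombuffer(
        vim.write_to_memory(), dtype=FORMAT_TO_DTYPE[vim.format]
    ).reshape(vim.height, vim.width, vim.bands)
    return back.shape == arr.shape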
# %============================================================================
def get_all_rois_from_folder_v2(
gc, folderid, get_all_rois_kwargs, monitor=''):
"""Get all rois in a girder folder using get_all_rois_from_slide_v2().
Parameters
----------
gc : girder_client.Girder_Client
authenticated girder client
folderid : str
girder id of folder
get_all_rois_kwargs : dict
kwargs to pass to get_all_rois_from_slide_v2()
monitor : str
monitor prefix
Returns
-------
None
"""
def _get_all_rois(slide_id, monitorPrefix, **kwargs):
sld = gc.get('/item/%s' % slide_id)
if "." not in sld['name']:
sld['name'] += "."
sldname = sld['name'][:sld['name'].find('.')].replace('/', '_#_')
return get_all_rois_from_slide_v2(
slide_id=slide_id, monitorprefix=monitorPrefix,
# encoding slide id makes things easier later
slide_name="%s_id-%s" % (sldname, slide_id),
**kwargs)
# update with params
get_all_rois_kwargs['gc'] = gc
# pull annotations for each slide in folder
workflow_runner = Workflow_runner(
slide_iterator=Slide_iterator(
gc, source_folder_id=folderid,
keep_slides=None,
),
workflow=_get_all_rois,
workflow_kwargs=get_all_rois_kwargs,
monitorPrefix=monitor
)
workflow_runner.run()
# %============================================================================
| [
"def _get_visualization_zoomout("
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Need to figure out why this only fails on travis
# pylint: disable=bad-continuation
"""Bootstraps starting a test job.
The following should already be done:
git checkout http://k8s.io/test-infra
cd $WORKSPACE
test-infra/jenkins/bootstrap.py <--repo=R || --bare> <--job=J> <--pull=P || --branch=B>
The bootstrapper now does the following:
# Note start time
# read test-infra/jenkins/$JOB.json
# check out repos defined in $JOB.json
# note job started
# call runner defined in $JOB.json
# upload artifacts (this will change later)
# upload build-log.txt
# note job ended
The contract with the runner is as follows:
* Runner must exit non-zero if job fails for any reason.
"""
import argparse
import contextlib
import json
import logging
import os
import pipes
import random
import re
import select
import signal
import socket
import subprocess
import sys
import tempfile
import time
ORIG_CWD = os.getcwd() # Checkout changes cwd
def read_all(end, stream, append):
"""Read all buffered lines from a stream."""
while not end or time.time() < end:
line = stream.readline()
if not line:
return True # Read everything
# Strip \n at the end if any. Last line of file may not have one.
append(line.rstrip('\n'))
# Is there more on the buffer?
ret = select.select([stream.fileno()], [], [], 0.1)
if not ret[0]:
return False # Cleared buffer but not at the end
return False # Time expired
def elapsed(since):
"""Return the number of minutes elapsed since a time."""
return (time.time() - since) / 60
def terminate(end, proc, kill):
"""Terminate or kill the process after end."""
if not end or time.time() <= end:
return False
if kill: # Process will not die, kill everything
pgid = os.getpgid(proc.pid)
logging.info(
'Kill %d and process group %d', proc.pid, pgid)
os.killpg(pgid, signal.SIGKILL)
proc.kill()
return True
logging.info(
'Terminate %d on timeout', proc.pid)
proc.terminate()
return True
def _call(end, cmd, stdin=None, check=True, output=None):
"""Start a subprocess."""
logging.info('Call: %s', ' '.join(pipes.quote(c) for c in cmd))
begin = time.time()
if end:
end = max(end, time.time() + 60) # Allow at least 60s per command
proc = subprocess.Popen(
cmd,
stdin=subprocess.PIPE if stdin is not None else None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=os.setsid,
)
if stdin:
proc.stdin.write(stdin)
proc.stdin.close()
out = []
code = None
timeout = False
reads = {
proc.stderr.fileno(): (proc.stderr, logging.warning),
proc.stdout.fileno(): (
proc.stdout, (out.append if output else logging.info)), | [
" }"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
#!/usr/bin/env python3
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Generates flags needed for an ARM build using clang.
Using clang on Cortex-M cores isn't intuitive as the end-to-end experience isn't
quite completely in LLVM. LLVM doesn't yet provide compatible C runtime
libraries or C/C++ standard libraries. To work around this, this script pulls
the missing bits from an arm-none-eabi-gcc compiler on the system path. This
lets clang do the heavy lifting while only relying on some headers provided by
newlib/arm-none-eabi-gcc in addition to a small assortment of needed libraries.
To use this script, specify what flags you want from the script, and run with
the required architecture flags like you would with gcc:
python -m pw_toolchain.clang_arm_toolchain --cflags -- -mthumb -mcpu=cortex-m3
The script will then print out the additional flags you need to pass to clang to
get a working build.
"""
import argparse
import sys
import os
import subprocess
from pathlib import Path
from typing import List, Dict, Tuple
_ARM_COMPILER_PREFIX = 'arm-none-eabi'
_ARM_COMPILER_NAME = _ARM_COMPILER_PREFIX + '-gcc'
def _parse_args() -> argparse.Namespace:
"""Parses arguments for this script, splitting out the command to run."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--gn-scope',
action='store_true',
help=("Formats the output like a GN scope so it can be ingested by "
"exec_script()"))
parser.add_argument('--cflags',
action='store_true',
help=('Include necessary C flags in the output'))
parser.add_argument('--ldflags',
action='store_true',
help=('Include necessary linker flags in the output'))
parser.add_argument(
'clang_flags',
nargs=argparse.REMAINDER,
help='Flags to pass to clang, which can affect library/include paths',
)
parsed_args = parser.parse_args()
assert parsed_args.clang_flags[0] == '--', 'arguments not correctly split'
parsed_args.clang_flags = parsed_args.clang_flags[1:]
return parsed_args
def _compiler_info_command(print_command: str, cflags: List[str]) -> str:
command = [_ARM_COMPILER_NAME]
command.extend(cflags)
command.append(print_command)
result = subprocess.run(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
result.check_returncode()
return result.stdout.decode().rstrip()
def get_gcc_lib_dir(cflags: List[str]) -> Path:
return Path(_compiler_info_command('-print-libgcc-file-name',
cflags)).parent
def get_compiler_info(cflags: List[str]) -> Dict[str, str]:
compiler_info: Dict[str, str] = {}
compiler_info['gcc_libs_dir'] = os.path.relpath(
str(get_gcc_lib_dir(cflags)), ".")
compiler_info['sysroot'] = os.path.relpath(
_compiler_info_command('-print-sysroot', cflags), ".")
compiler_info['version'] = _compiler_info_command('-dumpversion', cflags)
compiler_info['multi_dir'] = _compiler_info_command(
'-print-multi-directory', cflags)
return compiler_info
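# Hedged illustration (added for clarity, not part of the original script):
# on a typical arm-none-eabi-gcc install the dictionary returned above looks
# roughly like the placeholder values below (paths and version are made up).
#
#   get_compiler_info(['-mthumb', '-mcpu=cortex-m3'])
#   # -> {'gcc_libs_dir': '.../lib/gcc/arm-none-eabi/10.3.1/thumb/v7-m/nofp',
#   #     'sysroot': '.../arm-none-eabi',
#   #     'version': '10.3.1',
#   #     'multi_dir': 'thumb/v7-m/nofp'}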
def get_cflags(compiler_info: Dict[str, str]):
# TODO(amontanez): Make newlib-nano optional.
cflags = [
# TODO(amontanez): For some reason, -stdlib++-isystem and
# -isystem-after work, but emit unused argument errors. This is the only
# way to let the build succeed.
'-Qunused-arguments',
# Disable all default libraries.
"-nodefaultlibs",
'--target=arm-none-eabi'
]
# Add sysroot info.
cflags.extend((
'--sysroot=' + compiler_info['sysroot'],
'-isystem' +
str(Path(compiler_info['sysroot']) / 'include' / 'newlib-nano'),
# This must be included after Clang's builtin headers.
'-isystem-after' + str(Path(compiler_info['sysroot']) / 'include'),
'-stdlib++-isystem' + str(
Path(compiler_info['sysroot']) / 'include' / 'c++' /
compiler_info['version']),
'-isystem' + str(
Path(compiler_info['sysroot']) / 'include' / 'c++' /
compiler_info['version'] / _ARM_COMPILER_PREFIX /
compiler_info['multi_dir']),
))
return cflags
def get_crt_objs(compiler_info: Dict[str, str]) -> Tuple[str, ...]:
return (
str(Path(compiler_info['gcc_libs_dir']) / 'crtfastmath.o'),
str(Path(compiler_info['gcc_libs_dir']) / 'crti.o'),
str(Path(compiler_info['gcc_libs_dir']) / 'crtn.o'),
str(
Path(compiler_info['sysroot']) / 'lib' /
compiler_info['multi_dir'] / 'crt0.o'),
)
def get_ldflags(compiler_info: Dict[str, str]) -> List[str]:
ldflags: List[str] = [
'-lnosys',
# Add library search paths.
'-L' + compiler_info['gcc_libs_dir'],
'-L' + str(
Path(compiler_info['sysroot']) / 'lib' /
compiler_info['multi_dir']),
# Add libraries to link.
'-lc_nano',
'-lm', | [
" '-lgcc',"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
#!/usr/bin/env python
#
# Merge multiple JavaScript source code files into one.
#
# Usage:
# This script requires source files to have dependencies specified in them.
#
# Dependencies are specified with a comment of the form:
#
# // @requires <file path>
#
# e.g.
#
# // @requires Geo/DataSource.js
#
# This script should be executed like so:
#
# mergejs.py <output.js> <directory> [...]
#
# e.g.
#
# mergejs.py openlayers.js Geo/ CrossBrowser/
#
# This example will cause the script to walk the `Geo` and
# `CrossBrowser` directories--and subdirectories thereof--and import
# all `*.js` files encountered. The dependency declarations will be extracted
# and then the source code from imported files will be output to
# a file named `openlayers.js` in an order which fulfils the dependencies
# specified.
#
#
# Note: This is a very rough initial version of this code.
#
# -- Copyright 2005-2012 OpenLayers contributors / OpenLayers project --
#
# TODO: Allow files to be excluded. e.g. `Crossbrowser/DebugMode.js`?
# TODO: Report error when dependency can not be found rather than KeyError.
import re
import os
import sys
SUFFIX_JAVASCRIPT = ".js"
RE_REQUIRE = r"@requires?:?\s+(\S*)\s*\n" # TODO: Ensure in comment?
class MissingImport(Exception):
"""Exception raised when a listed import is not found in the lib."""
class SourceFile:
"""
Represents a Javascript source code file.
"""
def __init__(self, filepath, source, cfgExclude):
"""
"""
self.filepath = filepath
self.source = source
self.excludedFiles = []
self.requiredFiles = []
auxReq = re.findall(RE_REQUIRE, self.source)
for filename in auxReq:
if undesired(filename, cfgExclude):
self.excludedFiles.append(filename)
else:
self.requiredFiles.append(filename)
self.requiredBy = []
def _getRequirements(self):
"""
Extracts the dependencies specified in the source code and returns
a list of them.
"""
return self.requiredFiles
requires = property(fget=_getRequirements, doc="")
def usage(filename):
"""
Displays a usage message.
"""
print("%s [-c <config file>] <output.js> <directory> [...]" % filename)
class Config:
"""
Represents a parsed configuration file.
A configuration file should be of the following form:
[first]
3rd/prototype.js
core/application.js
core/params.js
# A comment
[last]
core/api.js # Another comment
    [include]
    [exclude]
3rd/logger.js
exclude/this/dir
All headings are required.
The files listed in the `first` section will be forced to load
*before* all other files (in the order listed). The files in `last`
section will be forced to load *after* all the other files (in the
order listed).
    If the `include` section is non-empty it acts as a whitelist: only the
    files listed there (plus those in the `first` section) are imported.
    The files listed in the `exclude` section will not be imported.
Any text appearing after a # symbol indicates a comment.
"""
def __init__(self, filename):
"""
Parses the content of the named file and stores the values.
"""
lines = [re.sub("#.*?$", "", line).strip() # Assumes end-of-line character is present
for line in open(filename)
if line.strip() and not line.strip().startswith("#")] # Skip blank lines and comments
self.forceFirst = lines[lines.index("[first]") + 1:lines.index("[last]")]
self.forceLast = lines[lines.index("[last]") + 1:lines.index("[include]")]
self.include = lines[lines.index("[include]") + 1:lines.index("[exclude]")]
self.exclude = lines[lines.index("[exclude]") + 1:]
def undesired(filepath, excludes):
# exclude file if listed
exclude = filepath in excludes
if not exclude:
# check if directory is listed
for excludepath in excludes:
if not excludepath.endswith("/"):
excludepath += "/"
if filepath.startswith(excludepath):
exclude = True
break
return exclude
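# Hedged illustration (added for clarity, not part of the original script):
# undesired() filters both exact file matches and files living under an
# excluded directory. The paths below are made up.
def _example_undesired():
    excludes = ['3rd/logger.js', 'exclude/this/dir']
    return (undesired('3rd/logger.js', excludes),                  # True
            undesired('exclude/this/dir/deep/file.js', excludes))  # True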
def getNames (sourceDirectory, configFile = None):
return run(sourceDirectory, None, configFile, True)
def run (sourceDirectory, outputFilename = None, configFile = None,
returnAsListOfNames = False):
cfg = None
if configFile:
cfg = Config(configFile)
allFiles = []
## Find all the Javascript source files
for root, dirs, files in os.walk(sourceDirectory):
for filename in files:
if filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[len(sourceDirectory)+1:]
filepath = filepath.replace("\\", "/")
if cfg and cfg.include:
if filepath in cfg.include or filepath in cfg.forceFirst:
allFiles.append(filepath)
elif (not cfg) or (not undesired(filepath, cfg.exclude)):
allFiles.append(filepath)
## Header inserted at the start of each file in the output
HEADER = "/* " + "=" * 70 + "\n %s\n" + " " + "=" * 70 + " */\n\n"
files = {}
## Import file source code
## TODO: Do import when we walk the directories above?
for filepath in allFiles:
print("Importing: %s" % filepath)
fullpath = os.path.join(sourceDirectory, filepath).strip()
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content, cfg.exclude) # TODO: Chop path?
print
from toposort import toposort
complete = False
resolution_pass = 1
while not complete:
complete = True
## Resolve the dependencies
print("Resolution pass %s... " % resolution_pass)
resolution_pass += 1
for filepath, info in files.items():
for path in info.requires:
if not path in files:
complete = False
fullpath = os.path.join(sourceDirectory, path).strip()
if os.path.exists(fullpath):
print("Importing: %s" % path)
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[path] = SourceFile(path, content, cfg.exclude) # TODO: Chop path?
else:
raise MissingImport("File '%s' not found (required by '%s')." % (path, filepath))
# create dictionary of dependencies
dependencies = {}
for filepath, info in files.items():
dependencies[filepath] = info.requires
print("Sorting...")
order = toposort(dependencies) #[x for x in toposort(dependencies)]
## Move forced first and last files to the required position
if cfg:
print("Re-ordering files...")
order = cfg.forceFirst + [item
for item in order
if ((item not in cfg.forceFirst) and
(item not in cfg.forceLast))] + cfg.forceLast
print
## Output the files in the determined order
result = []
# Return as a list of filenames
if returnAsListOfNames:
for fp in order:
fName = os.path.normpath(os.path.join(sourceDirectory, fp)).replace("\\","/")
print("Append: ", fName) | [
" f = files[fp]"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Backup driver for IBM Tivoli Storage Manager (TSM).
Implementation of a backup service that uses IBM Tivoli Storage Manager (TSM)
as the backend. The driver uses TSM command line dsmc utility to
run the backup and restore operations.
This version supports backup of block devices, e.g, FC, iSCSI, local as well as
regular files.
A prerequisite for using the IBM TSM backup service is configuring the
Cinder host for using TSM.
"""
import json
import os
import stat
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from cinder.backup import driver
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import utils
LOG = logging.getLogger(__name__)
tsm_opts = [
cfg.StrOpt('backup_tsm_volume_prefix',
default='backup',
help='Volume prefix for the backup id when backing up to TSM'),
cfg.StrOpt('backup_tsm_password',
default='password',
help='TSM password for the running username',
secret=True),
cfg.BoolOpt('backup_tsm_compression',
default=True,
help='Enable or Disable compression for backups'),
]
CONF = cfg.CONF
CONF.register_opts(tsm_opts)
VALID_BACKUP_MODES = ['image', 'file']
def _get_backup_metadata(backup, operation):
"""Return metadata persisted with backup object."""
try:
svc_dict = json.loads(backup.service_metadata)
backup_path = svc_dict.get('backup_path')
backup_mode = svc_dict.get('backup_mode')
except TypeError:
# for backwards compatibility
vol_prefix = CONF.backup_tsm_volume_prefix
backup_id = backup['id']
backup_path = utils.make_dev_path('%s-%s' %
(vol_prefix, backup_id))
backup_mode = 'image'
if backup_mode not in VALID_BACKUP_MODES:
volume_id = backup['volume_id']
backup_id = backup['id']
err = (_('%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. '
'Backup object has unexpected mode. Image or file '
'backups supported, actual mode is %(vol_mode)s.')
% {'op': operation,
'bck_id': backup_id,
'vol_id': volume_id,
'vol_mode': backup_mode})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
return backup_path, backup_mode
def _image_mode(backup_mode):
"""True if backup is image type."""
return backup_mode == 'image'
def _make_link(volume_path, backup_path, vol_id):
"""Create a hard link for the volume block device.
The IBM TSM client performs an image backup on a block device.
The name of the block device is the backup prefix plus the backup id
:param volume_path: real device path name for volume
:param backup_path: path name TSM will use as volume to backup
:param vol_id: id of volume to backup (for reporting)
:raises: InvalidBackup
"""
try:
utils.execute('ln', volume_path, backup_path,
run_as_root=True,
check_exit_code=True)
except processutils.ProcessExecutionError as exc:
err = (_('backup: %(vol_id)s failed to create device hardlink '
'from %(vpath)s to %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': vol_id,
'vpath': volume_path,
'bpath': backup_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
def _create_unique_device_link(backup_id, volume_path, volume_id, bckup_mode):
"""Create a consistent hardlink for the volume block device.
Create a consistent hardlink using the backup id so TSM
will be able to backup and restore to the same block device.
:param backup_id: the backup id
:param volume_path: real path of the backup/restore device
:param volume_id: Volume id for backup or as restore target
:param bckup_mode: TSM backup mode, either 'image' or 'file'
:raises: InvalidBackup
:returns: str -- hardlink path of the volume block device
"""
if _image_mode(bckup_mode):
hardlink_path = utils.make_dev_path('%s-%s' %
(CONF.backup_tsm_volume_prefix,
backup_id))
else:
dir, volname = os.path.split(volume_path)
hardlink_path = ('%s/%s-%s' %
(dir,
CONF.backup_tsm_volume_prefix,
backup_id))
_make_link(volume_path, hardlink_path, volume_id)
return hardlink_path
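# Hedged illustration (added for clarity, not part of the original driver):
# with the default 'backup' volume prefix and backup id 42, an image-mode
# volume at /dev/sdb is linked as /dev/backup-42, while a file-mode volume at
# /mnt/nfs/vol is linked next to it as /mnt/nfs/backup-42. The device names
# here are made up.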
def _check_dsmc_output(output, check_attrs, exact_match=True):
"""Check dsmc command line utility output.
Parse the output of the dsmc command and make sure that a given
attribute is present, and that it has the proper value.
TSM attribute has the format of "text : value".
:param output: TSM output to parse
:param check_attrs: text to identify in the output
:param exact_match: if True, the check will pass only if the parsed
value is equal to the value specified in check_attrs. If false, the
check will pass if the parsed value is greater than or equal to the
value specified in check_attrs. This is needed because for file
backups, the parent directories may also be included the first a
volume is backed up.
:returns: bool -- indicate if requited output attribute found in output
"""
parsed_attrs = {}
for line in output.split('\n'):
        # parse TSM output: look for "msg : value"
key, sep, val = line.partition(':')
if sep is not None and key is not None and len(val.strip()) > 0:
parsed_attrs[key] = val.strip()
for ckey, cval in check_attrs.items():
if ckey not in parsed_attrs:
return False
elif exact_match and parsed_attrs[ckey] != cval:
return False
elif not exact_match and int(parsed_attrs[ckey]) < int(cval):
return False
return True
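# Hedged illustration (added for clarity, not part of the original driver):
# _check_dsmc_output() matches "key : value" lines, so a made-up dsmc
# transcript like the one below satisfies a check for one backed-up object.
def _example_check_dsmc_output():
    sample_output = ('Total number of objects backed up:        1\n'
                     'Total number of bytes transferred:        4,096')
    return _check_dsmc_output(sample_output,
                              {'Total number of objects backed up': '1'})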
def _get_volume_realpath(volume_file, volume_id):
"""Get the real path for the volume block device.
If the volume is not a block device or a regular file issue an
InvalidBackup exception.
:param volume_file: file object representing the volume
:param volume_id: Volume id for backup or as restore target
:raises: InvalidBackup
:returns: str -- real path of volume device
:returns: str -- backup mode to be used
"""
try:
# Get real path
volume_path = os.path.realpath(volume_file.name)
# Verify that path is a block device
volume_mode = os.stat(volume_path).st_mode
if stat.S_ISBLK(volume_mode):
backup_mode = 'image'
elif stat.S_ISREG(volume_mode):
backup_mode = 'file'
else:
err = (_('backup: %(vol_id)s failed. '
'%(path)s is unexpected file type. Block or regular '
'files supported, actual file mode is %(vol_mode)s.')
% {'vol_id': volume_id,
'path': volume_path,
'vol_mode': volume_mode})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except AttributeError:
err = (_('backup: %(vol_id)s failed. Cannot obtain real path '
'to volume at %(path)s.')
% {'vol_id': volume_id,
'path': volume_file})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except OSError:
err = (_('backup: %(vol_id)s failed. '
'%(path)s is not a file.')
% {'vol_id': volume_id,
'path': volume_path})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
return volume_path, backup_mode
def _cleanup_device_hardlink(hardlink_path, volume_path, volume_id):
"""Remove the hardlink for the volume block device.
:param hardlink_path: hardlink to the volume block device
:param volume_path: real path of the backup/restore device
:param volume_id: Volume id for backup or as restore target
"""
try:
utils.execute('rm',
'-f',
hardlink_path,
run_as_root=True)
except processutils.ProcessExecutionError as exc:
LOG.error('backup: %(vol_id)s failed to remove backup hardlink '
'from %(vpath)s to %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s.',
{'vol_id': volume_id,
'vpath': volume_path,
'bpath': hardlink_path,
'out': exc.stdout,
'err': exc.stderr})
@interface.backupdriver
class TSMBackupDriver(driver.BackupDriver):
"""Provides backup, restore and delete of volumes backup for TSM."""
DRIVER_VERSION = '1.0.0'
def __init__(self, context, db=None):
super(TSMBackupDriver, self).__init__(context, db)
self.tsm_password = CONF.backup_tsm_password
self.volume_prefix = CONF.backup_tsm_volume_prefix
def check_for_setup_error(self):
required_flags = ['backup_share']
for flag in required_flags:
val = getattr(CONF, flag, None)
if not val:
raise exception.InvalidConfigurationValue(option=flag,
value=val)
def _do_backup(self, backup_path, vol_id, backup_mode):
"""Perform the actual backup operation.
:param backup_path: volume path
:param vol_id: volume id
:param backup_mode: file mode of source volume; 'image' or 'file'
:raises: InvalidBackup
"""
backup_attrs = {'Total number of objects backed up': '1'}
compr_flag = 'yes' if CONF.backup_tsm_compression else 'no'
backup_cmd = ['dsmc', 'backup']
if _image_mode(backup_mode):
backup_cmd.append('image')
backup_cmd.extend(['-quiet',
'-compression=%s' % compr_flag,
'-password=%s' % self.tsm_password,
backup_path])
out, err = utils.execute(*backup_cmd,
run_as_root=True,
check_exit_code=False)
success = _check_dsmc_output(out, backup_attrs, exact_match=False)
if not success:
err = (_('backup: %(vol_id)s failed to obtain backup '
'success notification from server.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': vol_id,
'out': out,
'err': err})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
def _do_restore(self, backup_path, restore_path, vol_id, backup_mode):
"""Perform the actual restore operation.
:param backup_path: the path the backup was created from, this
identifies the backup to tsm
:param restore_path: volume path to restore into
:param vol_id: volume id
:param backup_mode: mode used to create the backup ('image' or 'file')
:raises: InvalidBackup
"""
restore_attrs = {'Total number of objects restored': '1'}
restore_cmd = ['dsmc', 'restore']
if _image_mode(backup_mode):
restore_cmd.append('image')
restore_cmd.append('-noprompt') # suppress prompt
else:
restore_cmd.append('-replace=yes') # suppress prompt
restore_cmd.extend(['-quiet',
'-password=%s' % self.tsm_password,
backup_path])
if restore_path != backup_path:
restore_cmd.append(restore_path)
out, err = utils.execute(*restore_cmd,
run_as_root=True,
check_exit_code=False)
success = _check_dsmc_output(out, restore_attrs)
if not success:
err = (_('restore: %(vol_id)s failed.\n'
'stdout: %(out)s\n stderr: %(err)s.')
% {'vol_id': vol_id,
'out': out,
'err': err})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
def backup(self, backup, volume_file, backup_metadata=False):
"""Backup the given volume to TSM.
TSM performs a backup of a volume. The volume_file is used
to determine the path of the block device that TSM will back-up.
:param backup: backup information for volume
:param volume_file: file object representing the volume
:param backup_metadata: whether or not to backup volume metadata
:raises InvalidBackup:
"""
# TODO(dosaboy): this needs implementing (see backup.drivers.ceph for
# an example)
if backup_metadata:
msg = _("Volume metadata backup requested but this driver does "
"not yet support this feature.")
raise exception.InvalidBackup(reason=msg)
volume_path, backup_mode = _get_volume_realpath(volume_file,
backup.volume_id)
LOG.debug('Starting backup of volume: %(volume_id)s to TSM,'
' volume path: %(volume_path)s, mode: %(mode)s.',
{'volume_id': backup.volume_id,
'volume_path': volume_path,
'mode': backup_mode})
backup_path = _create_unique_device_link(backup.id,
volume_path,
backup.volume_id,
backup_mode)
service_metadata = {'backup_mode': backup_mode,
'backup_path': backup_path}
backup.service_metadata = json.dumps(service_metadata)
backup.save()
try:
self._do_backup(backup_path, backup.volume_id, backup_mode)
except processutils.ProcessExecutionError as exc:
err = (_('backup: %(vol_id)s failed to run dsmc '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': backup.volume_id,
'bpath': backup_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except exception.Error as exc:
err = (_('backup: %(vol_id)s failed to run dsmc '
'due to invalid arguments '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': backup.volume_id,
'bpath': backup_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
finally:
_cleanup_device_hardlink(backup_path, volume_path,
backup.volume_id)
LOG.debug('Backup %s finished.', backup.id)
def restore(self, backup, volume_id, volume_file):
"""Restore the given volume backup from TSM server.
:param backup: backup information for volume
:param volume_id: volume id
:param volume_file: file object representing the volume
:raises: InvalidBackup
"""
# backup_path is the path that was originally backed up.
backup_path, backup_mode = _get_backup_metadata(backup, 'restore')
LOG.debug('Starting restore of backup from TSM '
'to volume %(volume_id)s, '
'backup: %(backup_id)s, '
'mode: %(mode)s.',
{'volume_id': volume_id,
'backup_id': backup.id,
'mode': backup_mode})
# volume_path is the path to restore into. This may
# be different than the original volume.
volume_path, unused = _get_volume_realpath(volume_file,
volume_id)
restore_path = _create_unique_device_link(backup.id,
volume_path,
volume_id,
backup_mode)
try:
self._do_restore(backup_path, restore_path, volume_id, backup_mode)
except processutils.ProcessExecutionError as exc:
err = (_('restore: %(vol_id)s failed to run dsmc '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'bpath': restore_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except exception.Error as exc:
err = (_('restore: %(vol_id)s failed to run dsmc '
'due to invalid arguments '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'bpath': restore_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
finally:
_cleanup_device_hardlink(restore_path, volume_path, volume_id)
LOG.debug('Restore %(backup_id)s to %(volume_id)s finished.',
{'backup_id': backup.id,
'volume_id': volume_id})
def delete_backup(self, backup):
"""Delete the given backup from TSM server.
:param backup: backup information for volume
:raises: InvalidBackup
"""
delete_attrs = {'Total number of objects deleted': '1'}
delete_path, backup_mode = _get_backup_metadata(backup, 'restore')
LOG.debug('Delete started for backup: %(backup)s, mode: %(mode)s.',
{'backup': backup.id,
'mode': backup_mode})
try:
out, err = utils.execute('dsmc',
'delete',
'backup',
'-quiet',
'-noprompt',
'-objtype=%s' % backup_mode, | [
" '-password=%s' % self.tsm_password,"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
"""
Mapping between vt100 key sequences, the prompt_toolkit key constants and the
Pymux namings. (Those namings are kept compatible with tmux.)
"""
from __future__ import unicode_literals
from prompt_toolkit.keys import Keys
from prompt_toolkit.input.vt100_parser import ANSI_SEQUENCES
| [
"__all__ = ("
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
"""
Import the given MODIS tile into the provided NDVI and QA worldgrid.
Assert that the MODIS tile contains (at least) the requested dates.
Note that this require a .csv file with NDVI dates to import. This file
can be create with the ``ndvi_collect_dates.py`` script.
Example invocation::
python rastercube/scripts/create_ndvi_worldgrid.py
--tile=h10v09
--worldgrid=hdfs:///user/test/
--dates_csv=$RASTERCUBE_TEST_DATA/1_manual/ndvi_dates.2.csv
"""
import os
import sys
import time
import argparse
import warnings
import ctypes
import numpy as np
import multiprocessing.sharedctypes
import rastercube.utils as utils
import rastercube.datasources.modis as modis
import rastercube.jgrid as jgrid
import rastercube.worldgrid.grids as grids
parser = argparse.ArgumentParser(description="Create a new NDVI worldgrid")
parser.add_argument('--tile', type=str, required=True,
help='tile name (e.g. h17v07)')
parser.add_argument('--noconfirm', action='store_true',
help='Skip confirmation')
parser.add_argument('--modis_dir', type=str, required=False,
help='directory where input MODIS files are stored')
parser.add_argument('--worldgrid', type=str, required=True,
help='worldgrid root')
# If we have fractions of 400x400x50 and store int16, we get
# 400 * 400 * 50 * 2 / (1024 * 1024.) = 15MB
parser.add_argument('--frac_ndates', type=int, default=50,
help='Size of a chunk along the time axis')
parser.add_argument('--nworkers', type=int, default=5,
help='Number of workers (if using multiprocessing)')
parser.add_argument('--dates_csv', type=str, default=None,
help='The dates that must be included in the grid, '
'see scripts/ndvi_collect_dates.py')
parser.add_argument('--test_limit_fractions', type=int, default=None,
help='(TESTING ONLY) : Only create the first n fractions')
def collect_hdf_files(tilename, hdf_dir):
# hdf_files contains (full path, timestamp_ms)
hdf_files = modis.ndvi_hdf_for_tile(tilename, hdf_dir)
assert len(hdf_files) > 0, 'No matching HDF files found'
print len(hdf_files), ' HDF files in srcdir'
return hdf_files
# ------------------------------------- Shared multiprocessing globals
# Global variables initialized by _mp_init
_mp_ndvi = None
_mp_qa = None
def _mp_init(shared_ndvi, shared_qa):
global _mp_ndvi, _mp_qa
_mp_ndvi = shared_ndvi
_mp_qa = shared_qa
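# --- Editor's illustrative sketch (not part of the original script) ----------
# The initializer above is meant to be handed to a multiprocessing.Pool so the
# shared ctypes buffers become visible in every worker process.  The helper
# below shows one plausible wiring; its name and the way the buffer sizes are
# derived are assumptions for illustration, not code from this file.
def _example_make_worker_pool(grid_w, grid_h, frac_ndates, nworkers):
    """Hypothetical helper: allocate shared NDVI/QA buffers and build a Pool."""
    shared_ndvi = multiprocessing.sharedctypes.RawArray(
        ctypes.c_int16, grid_h * grid_w * frac_ndates)
    shared_qa = multiprocessing.sharedctypes.RawArray(
        ctypes.c_uint16, grid_h * grid_w * frac_ndates)
    pool = multiprocessing.Pool(nworkers, initializer=_mp_init,
                                initargs=(shared_ndvi, shared_qa))
    return pool, shared_ndvi, shared_qa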
# ------------------------------------- Multiprocess HDF processing
def _real_mp_process_hdf(hdf_file, frac_ti, grid_w, grid_h, frac_ndates):
"""
Args:
frac_ti: The time index of the hdf_file in the current frac array
"""
# ignore the PEP 3118 buffer warning
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
s_ndvi = np.ctypeslib.as_array(_mp_ndvi)
s_ndvi.shape = (grid_h, grid_w, frac_ndates)
s_ndvi.dtype = np.int16
s_qa = np.ctypeslib.as_array(_mp_qa)
s_qa.shape = (grid_h, grid_w, frac_ndates)
s_qa.dtype = np.uint16
_start = time.time()
modhdf = modis.ModisHDF(hdf_file)
# -- ndvi
_ndvi_start = time.time()
ds = modhdf.load_gdal_dataset(modis.MODIS_NDVI_DATASET_NAME)
ds.ReadAsArray(buf_obj=s_ndvi[:, :, frac_ti])
_ndvi_elapsed = time.time() - _ndvi_start
del ds
# -- qa
_qa_start = time.time()
ds = modhdf.load_gdal_dataset(modis.MODIS_QA_DATASET_NAME)
ds.ReadAsArray(buf_obj=s_qa[:, :, frac_ti])
_qa_elapsed = time.time() - _qa_start
del ds
print 'Loading ', os.path.basename(hdf_file),\
'took %.02f [s] (%.02f ndvi read, %.02f qa)' % (
time.time() - _start, _ndvi_elapsed, _qa_elapsed)
sys.stdout.flush()
def _mp_process_hdf(args):
"""
Wrapper around _real_mp_process_hdf that correctly handles keyboard
interrupt.
"""
# TODO: This is supposed to make CTRL-C work but it doesn't
try:
_real_mp_process_hdf(*args)
except (KeyboardInterrupt, SystemExit):
print "Worker interrupted, exiting..."
return False
# ------------------------------------- Multiprocess fractions writing
def _real_mp_write_frac(frac_id, grid_w, grid_h, frac_ndates):
# ignore the PEP 3118 buffer warning
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
s_ndvi = np.ctypeslib.as_array(_mp_ndvi)
s_ndvi.shape = (grid_h, grid_w, frac_ndates)
s_ndvi.dtype = np.int16
s_qa = np.ctypeslib.as_array(_mp_qa)
s_qa.shape = (grid_h, grid_w, frac_ndates)
s_qa.dtype = np.uint16
frac_num, frac_d = frac_id
i_range, j_range = modgrid.get_cell_indices_in_tile(
frac_num, tile_h, tile_v)
frac_ndvi = s_ndvi[i_range[0]:i_range[1], j_range[0]:j_range[1], :]
frac_qa = s_qa[i_range[0]:i_range[1], j_range[0]:j_range[1], :]
ndvi_header.write_frac(frac_id, frac_ndvi)
qa_header.write_frac(frac_id, frac_qa)
def _mp_write_frac(args):
try:
_real_mp_write_frac(*args)
except (KeyboardInterrupt, SystemExit):
print "Worker interrupted, exiting..."
return False
if __name__ == '__main__':
# Print help if no arguments are provided
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
tilename = args.tile
modis_dir = args.modis_dir
if modis_dir is None:
modis_dir = utils.get_modis_hdf_dir()
test_limit_fractions = args.test_limit_fractions
nworkers = args.nworkers
worldgrid = args.worldgrid
ndvi_grid_root = os.path.join(worldgrid, 'ndvi')
qa_grid_root = os.path.join(worldgrid, 'qa')
if not jgrid.Header.exists(ndvi_grid_root): | [
" assert args.dates_csv is not None"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.geometry.primitives import Primitive
from compas.geometry.primitives import Point
class Line(Primitive):
"""A line is defined by two points.
Parameters
----------
p1 : [float, float, float] | :class:`~compas.geometry.Point`
The first point.
p2 : [float, float, float] | :class:`~compas.geometry.Point`
The second point.
Attributes
----------
start : :class:`~compas.geometry.Point`
The start point of the line.
end : :class:`~compas.geometry.Point`
The end point of the line.
vector : :class:`~compas.geometry.Vector`, read-only
A vector pointing from start to end.
length : float, read-only
The length of the vector from start to end.
direction : :class:`~compas.geometry.Vector`, read-only
A unit vector pointing from start to end.
midpoint : :class:`~compas.geometry.Point`, read-only
The midpoint between start and end.
Examples
--------
>>> line = Line([0, 0, 0], [1, 1, 1])
>>> line
Line(Point(0.000, 0.000, 0.000), Point(1.000, 1.000, 1.000))
>>> line.start
Point(0.000, 0.000, 0.000)
>>> line.midpoint
Point(0.500, 0.500, 0.500)
>>> line.length == math.sqrt(1 + 1 + 1)
True
>>> line.direction
Vector(0.577, 0.577, 0.577)
"""
__slots__ = ['_start', '_end']
def __init__(self, p1, p2, **kwargs):
super(Line, self).__init__(**kwargs)
self._start = None
self._end = None
self.start = p1
self.end = p2
# ==========================================================================
# data
# ==========================================================================
@property
def DATASCHEMA(self):
""":class:`schema.Schema` : Schema of the data representation."""
from schema import Schema
return Schema({
'start': Point.DATASCHEMA.fget(None),
'end': Point.DATASCHEMA.fget(None)
})
@property
def JSONSCHEMANAME(self):
"""str : Name of the schema of the data representation in JSON format."""
return 'line'
@property
def data(self):
"""dict : The data dictionary that represents the line."""
return {'start': self.start.data, 'end': self.end.data}
@data.setter
def data(self, data):
self.start = Point.from_data(data['start'])
self.end = Point.from_data(data['end'])
@classmethod
def from_data(cls, data):
Construct a line from a data dict.
Parameters
----------
data : dict
The data dictionary.
Examples
--------
>>> line = Line.from_data({'start': [0.0, 0.0, 0.0], 'end': [1.0, 0.0, 0.0]})
>>> line.end
Point(1.000, 0.000, 0.000)
"""
return cls(Point.from_data(data['start']), Point.from_data(data['end']))
# ==========================================================================
# properties
# ==========================================================================
@property
def start(self):
return self._start
@start.setter
def start(self, point):
self._start = Point(*point)
@property
def end(self):
return self._end
@end.setter
def end(self, point):
self._end = Point(*point)
@property
def vector(self):
return self.end - self.start
@property
def length(self):
return self.vector.length
@property
def direction(self):
return self.vector * (1 / self.length)
@property
def midpoint(self):
v = self.direction * (0.5 * self.length)
return self.start + v
# ==========================================================================
# customization
# ==========================================================================
def __repr__(self):
return 'Line({0!r}, {1!r})'.format(self.start, self.end)
def __len__(self):
return 2
def __getitem__(self, key):
if key == 0:
return self.start
if key == 1:
return self.end
raise KeyError
def __setitem__(self, key, value):
if key == 0:
self.start = value
return
if key == 1:
self.end = value
return
raise KeyError
def __iter__(self):
return iter([self.start, self.end])
def __eq__(self, other):
try:
other_start = other[0]
other_end = other[1]
except: # noqa: E722
return False
return self.start == other_start and self.end == other_end
# ==========================================================================
# constructors
# ==========================================================================
# ==========================================================================
# static
# ==========================================================================
@staticmethod
def transform_collection(collection, X):
"""Transform a collection of Line objects.
Parameters
----------
collection : list[[point, point] | :class:`~compas.geometry.Line`]
The collection of lines.
X : :class:`~compas.geometry.Transformation`
The transformation to apply to the lines.
Returns
-------
None
The lines are modified in-place.
Examples
--------
>>> from math import radians
>>> from compas.geometry import Point
>>> from compas.geometry import Vector
>>> from compas.geometry import Rotation
>>> R = Rotation.from_axis_and_angle(Vector.Zaxis(), radians(90))
>>> a = Line(Point(0.0, 0.0, 0.0), Point(1.0, 0.0, 0.0))
>>> lines = [a]
>>> Line.transform_collection(lines, R)
>>> b = lines[0]
>>> b.end
Point(0.000, 1.000, 0.000)
>>> a is b
True
"""
points = [line.start for line in collection] + [line.end for line in collection]
Point.transform_collection(points, X)
@staticmethod
def transformed_collection(collection, X):
"""Create a collection of transformed Line objects.
Parameters
----------
collection : list[[point, point] | :class:`~compas.geometry.Line`]
The collection of lines.
X : :class:`~compas.geometry.Transformation`
The transformation to apply to the copied lines.
Returns
-------
list[:class:`~compas.geometry.Line`]
The transformed lines.
Examples
--------
>>> from math import radians
>>> from compas.geometry import Vector
>>> from compas.geometry import Point
>>> from compas.geometry import Rotation
>>> R = Rotation.from_axis_and_angle(Vector.Zaxis(), radians(90))
>>> a = Line(Point(0.0, 0.0, 0.0), Point(1.0, 0.0, 0.0))
>>> lines = [a]
>>> lines = Line.transformed_collection(lines, R)
>>> b = lines[0]
>>> b.end
Point(0.000, 1.000, 0.000)
>>> a is b
False
""" | [
" lines = [line.copy() for line in collection]"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |
|
#!/usr/bin/env python
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
PURPOSE:
create a set of data in text format delimited by tabs
:Parameters:
LINE_NUM
the number of lines to be generated
DATATYPE
datatype to be generated
currently supported datatypes:all:regression:time:timestamp:date:bigint:int:smallest:real:float:boolean:varchar:bpchar:numeric:text
TODO: random columns generator, matching table creation statement
LAST MODIFIED:
------------------------------------------------------------------------------
"""
############################################################################
# Set up some globals, and import gptest
# [YOU DO NOT NEED TO CHANGE THESE]
#
import sys
import string
import os
import subprocess
import signal
import time
import getopt
if len(sys.argv) < 3:
print "usage: createData.py <number of lines> <datatype>"
print " or usage: createData.py <number of lines> custom,<datatypelist separated by ','>\n"
print "datatype:"
print "all:regression:time:timestamp:date:bigint:int:smallest:real:float:boolean:varchar:bpchar:numeric:text\n"
sys.exit()
LINE_NUM=int(sys.argv[1])
DATATYPE=sys.argv[2]
custom = 'false'
def print_time_str(x, datatype):
HH=str(x % 24).zfill(2)
MM=str(x % 60).zfill(2)
SS=str(x % 60).zfill(2)
sss=str(x % 999)
h=str(x % 12).zfill(2)
ampm=x % 2
ampmlist= ['AM','PM']
timezone= x % 5
#timezonelist= ['ACDT','ACT','PST','ADT','ACWST','GMT0','EST5EDT','zulu']
timezonelist= ['ACT','PST','ADT','GMT0','zulu']
year = str((x % 1000) + 1).zfill(4)
month = str((x % 12) + 1).zfill(2)
monthindex = x%24
monthlist = ['January','Jan','February','Feb','March','Mar','April','Apr','May','May','June','Jun','July','Jul','August','Aug','September','Sept','October','Oct','November','Nov','December','Dec']
day = str((x % 30) + 1).zfill(2)
daynofill = str((x % 30) + 1)
if (datatype == 'time'):
#col1 - HH:MM:SS
col1 = HH+ ':' +MM+ ':' +SS
#col2 - HH:MM:SS.sss
col2 = col1+ '.' +sss
#col3 - HHMMSS
col3 = HH+MM+SS
#col4 - HH:MM AM/PM
col4 = h+ ':' +MM+ ' ' +ampmlist[ampm]
#col5 - HH:MM:SS.sss-h (timeoffset)
col5 = col2+ '-' +str(timezone)
#col6 - HH:MM:SS-HH:MM(timeoffset)
col6 = col1+ '-' +h+ ':00'
#col7 - HH:MM-HH:MM(timeoffset)
col7 = HH+':'+MM+ '-' +h+ ':00'
#col8 - HHMMSS-HH(timeoffset)
col8 = col3+ '-' +h
#col9 - HH:MM:SS XXX(timezone)
col9 = col1+ " " +timezonelist[timezone]
if (custom== 'true'):
return col1
else:
return col1+'\t'+col2+'\t'+col3+'\t'+col4+'\t'+col5+'\t'+col6+'\t'+col7+'\t'+col8+'\t'+col9+'\t\\N'
elif (datatype == 'timestamp'):
#1999-01-08 04:05:06
col1 = year+'-' +month+ '-' +day+ ' ' +HH+ ':' +MM+ ':' +SS
#1999-01-08 04:05:06 -8:00
col2 = col1+ ' -' +str(timezone)+ ':00'
#January 8 04:05:06 1999 PST
col3 = monthlist[monthindex]+ ' ' +daynofill+ ' ' +HH+ ':' +MM+ ':' +SS+ ' ' +year+ ' ' +timezonelist[timezone]
if (custom== 'true'):
return col1
else:
return col1+'\t'+col2+'\t'+col3+'\t\\N'
elif (datatype == 'date'):
#1900-01-01
col1 = year+ '-' +month+ '-' +day
#September 01, 1999
col2 = monthlist[monthindex]+ ' ' +day+ ', ' +year
#1/8/1999
col3 = month+ '/' +day+ '/' +year
#1999-Jan-08
col4 = year+ '-' +monthlist[monthindex]+ '-' +day
#Jan-08-1999
col5 = monthlist[monthindex]+ '-' +month+ '-' +year
#08-Jan-1999
col6 = month+ '-' +monthlist[monthindex]+ '-' +year
#January 8, 99 BC
col7 = monthlist[monthindex]+' ' +month+ ', ' +year+ ' BC'
if (custom== 'true'):
return col1
else:
return col1+'\t'+col2+'\t'+col3+'\t'+col4+'\t'+col5+'\t'+col6+'\t'+col7+'\t\\N'
def regression(x):
numRecipes = str(1664525 * x + 1013904223)
Borland = str(22695477 * x + 1)
glibc = str(1103515245 * x + 12345)
appCarbonLib = str((16807 * x) % 2147483647)
vax = str(69069 * x + 1)
javaRandomClass = str(25214903917 * x + 11)
return str(x)+'\t'+str(hex(x))+'\t'+numRecipes+'\t'+Borland+'\t'+glibc+'\t'+appCarbonLib+'\t'+vax+'\t'+javaRandomClass
def print_int_str(x, max, min):
if (x < max):
m = x
else:
m = 0
maxsubx = max - m
minplusx = min + m
if (custom== 'true'):
return str(maxsubx)
else:
return str(max)+'\t'+str(min)+'\t'+str(m)+'\t'+str(maxsubx)+'\t'+str(minplusx)+'\t\\N'
def print_float_str(x,max,min):
pi = float(22)/float(7)
pimulti = pi*x
if (custom== 'true'):
return str(pimulti)
else:
return str(max)+'\t'+str(min)+'\t'+str(pi)+'\t'+str(pimulti)+'\t\\N'
def print_bool(x):
n = x % 2
if n == 0:
if (custom== 'true'):
return 'true'
else:
return 'true\t\\N'
else:
if (custom== 'true'):
return 'false'
else:
return 'false\t\\N'
def print_char(x):
strx = ''
currentchar = x%128
for m in range(currentchar):
strx = strx+chr(currentchar)
if (currentchar == 9 or currentchar == 13 or currentchar == 10):
strx = 'skip'
if (custom== 'true'):
return strx
else:
return str(x)+'\t'+strx+'\t\\N'
def get_custom_type():
thelist = DATATYPE.split(',')
#currentstr = DATATYPE+ '\t' +str(x)
currentstr = ''
currentcolcnt = 1
for y in (thelist):
if (y == 'time'):
currentstr += print_time_str(x,'time')
elif (y == 'timestamp'):
currentstr += print_time_str(x,'timestamp')
elif (y == 'date'):
currentstr += print_time_str(x,'date')
elif (y == 'bigint'):
currentstr += print_int_str(x,9223372036854775807,-9223372036854775808)
elif (y == 'int'):
currentstr += print_int_str(x,2147483647,-2147483648)
elif (y == 'smallint'):
currentstr += print_int_str(x,32767,-32768)
elif (y == 'real'):
currentstr += print_float_str(x, 3.4028235E+38, -3.4028234E+38)
elif (y == 'float'):
currentstr += print_float_str(x,+1.797693134862315E+308, -1.797693134862315E+308)
elif (y == 'boolean'):
currentstr += print_bool(x)
elif (y == 'numeric'):
currentstr += print_int_str(x, 9223372036854775807000, -9223372036854775808000)
elif (y != 'custom'):
currentstr += print_char(x)
if (y != 'custom'):
currentcolcnt += 1
if currentcolcnt < len(thelist): | [
" currentstr += '\\t'"
]
| lcc | Please complete the code given below.
{context}
Next line of code: |