Dataset columns:
- repository_name: string, length 7 to 107
- function_path: string, length 4 to 190
- function_identifier: string, length 1 to 236
- language: string, 1 distinct value
- function: string, length 9 to 647k
- docstring: string, length 5 to 488k
- function_url: string, length 71 to 285
- context: string, length 0 to 2.51M
- license: string, 5 distinct values
facebookresearch/detectron2
projects/PointSup/point_sup/detection_utils.py
annotations_to_instances
python
def annotations_to_instances(annos, image_size, sample_points=0):
    target = base_annotations_to_instances(annos, image_size)

    assert ("point_coords" in annos[0]) == ("point_labels" in annos[0])

    if len(annos) and "point_labels" in annos[0]:
        point_coords = []
        point_labels = []
        for i, _ in enumerate(annos):
            point_coords_wrt_image = np.array(annos[i]["point_coords"])
            point_labels_wrt_image = np.array(annos[i]["point_labels"])

            if sample_points > 0:
                random_indices = np.random.choice(
                    point_coords_wrt_image.shape[0],
                    sample_points,
                    replace=point_coords_wrt_image.shape[0] < sample_points,
                ).astype(int)
                point_coords_wrt_image = point_coords_wrt_image[random_indices]
                point_labels_wrt_image = point_labels_wrt_image[random_indices]
                assert point_coords_wrt_image.shape[0] == point_labels_wrt_image.size

            point_coords.append(point_coords_wrt_image)
            point_labels.append(point_labels_wrt_image)

        point_coords = torch.stack([torch.from_numpy(x) for x in point_coords])
        point_labels = torch.stack([torch.from_numpy(x) for x in point_labels])
        target.gt_point_coords = point_coords
        target.gt_point_labels = point_labels

    return target
Create an :class:`Instances` object used by the models, from instance annotations in the dataset dict.

Args:
    annos (list[dict]): a list of instance annotations in one image, each element for one instance.
    image_size (tuple): height, width
    sample_points (int): subsample points at each iteration

Returns:
    Instances: It will contain fields "gt_boxes", "gt_classes", "gt_point_coords", "gt_point_labels", if they can be obtained from `annos`. This is the format that builtin models with point supervision expect.
https://github.com/facebookresearch/detectron2/blob/2a1cec4c647dbe8a05f2d5b599d7d4c7cf9f04ad/projects/PointSup/point_sup/detection_utils.py#L16-L63
import numpy as np
import torch

from detectron2.data.detection_utils import annotations_to_instances as base_annotations_to_instances
from detectron2.data.detection_utils import transform_instance_annotations as base_transform_instance_annotations
Apache License 2.0
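A hedged usage sketch for annotations_to_instances above: the annotation dict, the box mode value, and the point labels are invented for illustration, and detectron2 plus the PointSup project must be importable so that base_annotations_to_instances resolves.

from point_sup.detection_utils import annotations_to_instances  # assumes PointSup is on the path

annos = [
    {
        # Standard detectron2 instance keys (values made up for this example).
        "bbox": [10.0, 10.0, 50.0, 80.0],
        "bbox_mode": 0,  # BoxMode.XYXY_ABS
        "category_id": 3,
        # Point-supervision keys consumed by this function.
        "point_coords": [[12.0, 15.0], [30.0, 40.0], [45.0, 70.0]],
        "point_labels": [1, 0, 1],
    }
]
# Subsample two points per instance; the result gains gt_point_coords / gt_point_labels.
instances = annotations_to_instances(annos, image_size=(100, 100), sample_points=2)
print(instances.gt_point_coords.shape)  # torch.Size([1, 2, 2])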
morepath/morepath
morepath/directive.py
TemplateRenderAction.__init__
python
def __init__(self, extension):
    self.extension = extension
Register a template engine.

:param extension: the template file extension (``.pt``, etc) we want this template engine to handle.

The decorated function gets ``loader``, ``name`` and ``original_render`` arguments. It should return a ``callable`` that is a view ``render`` function: take a ``content`` and ``request`` object and return a :class:`morepath.Response` instance. This render callable should render the return value of the view with the template supplied through its ``template`` argument.
https://github.com/morepath/morepath/blob/09972904229f807da75c75d8825af1495057acdc/morepath/directive.py#L599-L613
import os import dectate from reg import methodify from .authentication import Identity, NoIdentity from .view import render_view, render_json, render_html, View from .traject import Path from .converter import ConverterRegistry from .tween import TweenRegistry from .template import TemplateEngineRegistry from .predicate import PredicateRegistry from .path import PathRegistry from .settings import SettingRegistry from .mapply import mapply def isbaseclass(a, b): return issubclass(b, a) class SettingAction(dectate.Action): config = {"setting_registry": SettingRegistry} def __init__(self, section, name): self.section = section self.name = name def identifier(self, setting_registry): return self.section, self.name def perform(self, obj, setting_registry): setting_registry.register_setting(self.section, self.name, obj) class SettingValue: def __init__(self, value): self.value = value def __call__(self): return self.value class SettingSectionAction(dectate.Composite): query_classes = [SettingAction] def __init__(self, section): self.section = section def actions(self, obj): section = obj() for name, value in section.items(): yield ( SettingAction(section=self.section, name=name), SettingValue(value), ) class PredicateFallbackAction(dectate.Action): config = {"predicate_registry": PredicateRegistry} depends = [SettingAction] filter_convert = { "dispatch": dectate.convert_dotted_name, "func": dectate.convert_dotted_name, } def __init__(self, dispatch, func): self.dispatch = dispatch self.func = func def identifier(self, predicate_registry): return self.dispatch, self.func def perform(self, obj, predicate_registry): predicate_registry.register_predicate_fallback( self.dispatch, self.func, obj ) class PredicateAction(dectate.Action): config = {"predicate_registry": PredicateRegistry} depends = [SettingAction, PredicateFallbackAction] filter_convert = { "dispatch": dectate.convert_dotted_name, "index": dectate.convert_dotted_name, "before": dectate.convert_dotted_name, "after": dectate.convert_dotted_name, } filter_name = {"before": "_before", "after": "_after"} def __init__(self, dispatch, name, default, index, before=None, after=None): self.dispatch = dispatch self.name = name self.default = default self.index = index self._before = before self._after = after def identifier(self, predicate_registry): return self.dispatch, self._before, self._after def perform(self, obj, predicate_registry): predicate_registry.register_predicate( obj, self.dispatch, self.name, self.default, self.index, self._before, self._after, ) @staticmethod def after(predicate_registry): predicate_registry.install_predicates() class MethodAction(dectate.Action): config = {} depends = [SettingAction, PredicateAction, PredicateFallbackAction] def filter_get_value(self, name): return self.key_dict.get(name, dectate.NOT_FOUND) app_class_arg = True filter_convert = {"dispatch_method": dectate.convert_dotted_name} def __init__(self, dispatch_method, **kw): self.dispatch_method = dispatch_method self.key_dict = kw def identifier(self, app_class): return ( self.dispatch_method, self.dispatch_method.by_predicates(**self.key_dict).key, ) def perform(self, obj, app_class): getattr(app_class, self.dispatch_method.__name__).register( obj, **self.key_dict ) class ConverterAction(dectate.Action): config = {"converter_registry": ConverterRegistry} depends = [SettingAction] filter_convert = {"type": dectate.convert_dotted_name} def __init__(self, type): self.type = type def identifier(self, converter_registry): return ("converter", self.type) 
def perform(self, obj, converter_registry): converter_registry.register_converter(self.type, obj()) class PathAction(dectate.Action): config = { "path_registry": PathRegistry, } depends = [SettingAction, ConverterAction] filter_compare = { "model": isbaseclass, "path": lambda a, b: Path(a).discriminator() == Path(b).discriminator(), } def __init__( self, path, model=None, variables=None, converters=None, required=None, get_converters=None, absorb=False, ): self.model = model self.path = path self.variables = variables self.converters = converters self.required = required self.get_converters = get_converters self.absorb = absorb def identifier(self, path_registry): return ("path", Path(self.path).discriminator()) def discriminators(self, path_registry): return [("model", self.model)] def perform(self, obj, path_registry): path_registry.register_path( self.model, self.path, self.variables, self.converters, self.required, self.get_converters, self.absorb, self.code_info, obj, ) class PathCompositeAction(dectate.Composite): filter_convert = { "model": dectate.convert_dotted_name, "variables": dectate.convert_dotted_name, "get_converters": dectate.convert_dotted_name, "absorb": dectate.convert_bool, } query_classes = [PathAction] def __init__( self, path, model=None, variables=None, converters=None, required=None, get_converters=None, absorb=False, ): self.model = model self.path = path self.variables = variables self.converters = converters self.required = required self.get_converters = get_converters self.absorb = absorb def actions(self, obj): model = self.model if isinstance(obj, type): if model is not None: raise dectate.DirectiveError( "@path decorates class so cannot " "have explicit model: %s" % model ) model = obj if model is None: raise dectate.DirectiveError( "@path does not decorate class and has no explicit model" ) yield PathAction( self.path, model, self.variables, self.converters, self.required, self.get_converters, self.absorb, ), obj class PermissionRuleAction(dectate.Action): config = {} filter_convert = { "model": dectate.convert_dotted_name, "permission": dectate.convert_dotted_name, "identity": dectate.convert_dotted_name, } filter_compare = { "model": isbaseclass, "permission": issubclass, "identity": issubclass, } app_class_arg = True depends = [SettingAction] def __init__(self, model, permission, identity=Identity): self.model = model self.permission = permission if identity is None: identity = NoIdentity self.identity = identity def identifier(self, app_class): return (self.model, self.permission, self.identity) def perform(self, obj, app_class): app_class._permits.register( methodify(obj, selfname="app"), identity=self.identity, obj=self.model, permission=self.permission, ) template_directory_id = 0 class TemplateDirectoryAction(dectate.Action): config = {"template_engine_registry": TemplateEngineRegistry} depends = [SettingAction] filter_name = {"after": "_after", "before": "_before"} filter_convert = { "after": dectate.convert_dotted_name, "before": dectate.convert_dotted_name, } def __init__(self, after=None, before=None, name=None): global template_directory_id self._after = after self._before = before if name is None: name = "template_directory_%s" % template_directory_id template_directory_id += 1 self.name = name def identifier(self, template_engine_registry): return self.name def perform(self, obj, template_engine_registry): directory = obj() if not os.path.isabs(directory): directory = os.path.join( os.path.dirname(self.code_info.path), directory ) 
template_engine_registry.register_template_directory_info( obj, directory, self._before, self._after, self.directive.configurable, ) class TemplateLoaderAction(dectate.Action): config = {"template_engine_registry": TemplateEngineRegistry} depends = [TemplateDirectoryAction] def __init__(self, extension): self.extension = extension def identifier(self, template_engine_registry): return self.extension def perform(self, obj, template_engine_registry): template_engine_registry.initialize_template_loader(self.extension, obj) class TemplateRenderAction(dectate.Action): config = {"template_engine_registry": TemplateEngineRegistry} depends = [SettingAction, TemplateLoaderAction]
BSD 3-Clause New or Revised License
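A hedged sketch of how a directive backed by TemplateRenderAction is typically used, based on the docstring above: the app class, the ".pt" extension, and the loader.load call are assumptions, since the loader object is whatever the matching template_loader directive returned.

import morepath


class App(morepath.App):
    pass


@App.template_render('.pt')
def get_render(loader, name, original_render):
    # `loader.load` is a hypothetical loader API; adapt to your template_loader's return value.
    template = loader.load(name)

    def render(content, request):
        # Render the view's return value with the template, then let the original
        # render callable wrap it in a morepath.Response.
        return original_render(template.render(**content), request)

    return render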
docusign/docusign-python-client
docusign_esign/models/user_signature_definition.py
UserSignatureDefinition.phonetic_name
python
def phonetic_name(self, phonetic_name):
    self._phonetic_name = phonetic_name
Sets the phonetic_name of this UserSignatureDefinition.  # noqa: E501

:param phonetic_name: The phonetic_name of this UserSignatureDefinition.  # noqa: E501
:type: str
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/user_signature_definition.py#L281-L290
import pprint import re import six from docusign_esign.client.configuration import Configuration class UserSignatureDefinition(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'date_stamp_properties': 'DateStampProperties', 'disallow_user_resize_stamp': 'str', 'external_id': 'str', 'image_type': 'str', 'is_default': 'str', 'nrds_id': 'str', 'nrds_last_name': 'str', 'phonetic_name': 'str', 'signature_font': 'str', 'signature_id': 'str', 'signature_initials': 'str', 'signature_name': 'str', 'signature_type': 'str', 'stamp_format': 'str', 'stamp_size_mm': 'str' } attribute_map = { 'date_stamp_properties': 'dateStampProperties', 'disallow_user_resize_stamp': 'disallowUserResizeStamp', 'external_id': 'externalID', 'image_type': 'imageType', 'is_default': 'isDefault', 'nrds_id': 'nrdsId', 'nrds_last_name': 'nrdsLastName', 'phonetic_name': 'phoneticName', 'signature_font': 'signatureFont', 'signature_id': 'signatureId', 'signature_initials': 'signatureInitials', 'signature_name': 'signatureName', 'signature_type': 'signatureType', 'stamp_format': 'stampFormat', 'stamp_size_mm': 'stampSizeMM' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._date_stamp_properties = None self._disallow_user_resize_stamp = None self._external_id = None self._image_type = None self._is_default = None self._nrds_id = None self._nrds_last_name = None self._phonetic_name = None self._signature_font = None self._signature_id = None self._signature_initials = None self._signature_name = None self._signature_type = None self._stamp_format = None self._stamp_size_mm = None self.discriminator = None setattr(self, "_{}".format('date_stamp_properties'), kwargs.get('date_stamp_properties', None)) setattr(self, "_{}".format('disallow_user_resize_stamp'), kwargs.get('disallow_user_resize_stamp', None)) setattr(self, "_{}".format('external_id'), kwargs.get('external_id', None)) setattr(self, "_{}".format('image_type'), kwargs.get('image_type', None)) setattr(self, "_{}".format('is_default'), kwargs.get('is_default', None)) setattr(self, "_{}".format('nrds_id'), kwargs.get('nrds_id', None)) setattr(self, "_{}".format('nrds_last_name'), kwargs.get('nrds_last_name', None)) setattr(self, "_{}".format('phonetic_name'), kwargs.get('phonetic_name', None)) setattr(self, "_{}".format('signature_font'), kwargs.get('signature_font', None)) setattr(self, "_{}".format('signature_id'), kwargs.get('signature_id', None)) setattr(self, "_{}".format('signature_initials'), kwargs.get('signature_initials', None)) setattr(self, "_{}".format('signature_name'), kwargs.get('signature_name', None)) setattr(self, "_{}".format('signature_type'), kwargs.get('signature_type', None)) setattr(self, "_{}".format('stamp_format'), kwargs.get('stamp_format', None)) setattr(self, "_{}".format('stamp_size_mm'), kwargs.get('stamp_size_mm', None)) @property def date_stamp_properties(self): return self._date_stamp_properties @date_stamp_properties.setter def date_stamp_properties(self, date_stamp_properties): self._date_stamp_properties = date_stamp_properties @property def disallow_user_resize_stamp(self): return self._disallow_user_resize_stamp @disallow_user_resize_stamp.setter def disallow_user_resize_stamp(self, disallow_user_resize_stamp): self._disallow_user_resize_stamp = 
disallow_user_resize_stamp @property def external_id(self): return self._external_id @external_id.setter def external_id(self, external_id): self._external_id = external_id @property def image_type(self): return self._image_type @image_type.setter def image_type(self, image_type): self._image_type = image_type @property def is_default(self): return self._is_default @is_default.setter def is_default(self, is_default): self._is_default = is_default @property def nrds_id(self): return self._nrds_id @nrds_id.setter def nrds_id(self, nrds_id): self._nrds_id = nrds_id @property def nrds_last_name(self): return self._nrds_last_name @nrds_last_name.setter def nrds_last_name(self, nrds_last_name): self._nrds_last_name = nrds_last_name @property def phonetic_name(self): return self._phonetic_name @phonetic_name.setter
MIT License
galaxyproject/galaxy-language-server
server/galaxyls/services/completion.py
XmlCompletionService._build_node_completion_item
python
def _build_node_completion_item(self, node: XsdNode, order: int = 0) -> CompletionItem:
    return CompletionItem(
        label=node.name,
        kind=CompletionItemKind.Class,
        documentation=node.get_doc(),
        sort_text=str(order).zfill(2),
    )
Generates a completion item with the information about the given node definition.

Args:
    node (XsdNode): The node definition used to build the completion item.
    order (int): The position for ordering this item.

Returns:
    CompletionItem: The completion item with the basic information about the node.
https://github.com/galaxyproject/galaxy-language-server/blob/2076440615876edd4164c0f61c7787294525b54f/server/galaxyls/services/completion.py#L183-L201
from typing import Optional, cast from galaxyls.services.definitions import DocumentDefinitionsProvider from galaxyls.services.xml.nodes import XmlCDATASection, XmlElement from pygls.lsp.types import ( CompletionContext, CompletionItem, CompletionItemKind, CompletionList, CompletionTriggerKind, InsertTextFormat, Position, Range, ) from ..config import CompletionMode from ..types import AutoCloseTagResult from .context import XmlContext from .xsd.parser import XsdAttribute, XsdNode, XsdTree class XmlCompletionService: def __init__(self, xsd_tree: XsdTree, definitions_provider: DocumentDefinitionsProvider): self.xsd_tree: XsdTree = xsd_tree self.definitions_provider = definitions_provider def get_completion_at_context( self, context: XmlContext, completion_context: CompletionContext, mode: CompletionMode = CompletionMode.AUTO ) -> Optional[CompletionList]: if isinstance(context.node, XmlCDATASection): return None triggerKind = completion_context.trigger_kind if mode == CompletionMode.AUTO and triggerKind == CompletionTriggerKind.TriggerCharacter and not context.is_attribute: if completion_context.trigger_character == "<": return self.get_node_completion(context) if completion_context.trigger_character == " ": return self.get_attribute_completion(context) elif triggerKind == CompletionTriggerKind.Invoked: if context.is_inside_attribute_value: return self.get_attribute_value_completion(context) if context.is_attribute_key: return self.get_attribute_completion(context) if context.is_tag and not context.is_closing_tag and not context.is_at_end: if context.is_valid_tag() and not context.is_tag_name: return self.get_attribute_completion(context) return self.get_node_completion(context) return None def get_node_completion(self, context: XmlContext) -> CompletionList: result = [] if context.is_empty or context.is_root: result.append(self._build_node_completion_item(self.xsd_tree.root)) elif context.xsd_element: for child in context.xsd_element.children: if not context.has_reached_max_occurs(child): result.append(self._build_node_completion_item(child, len(result))) result.append(self._build_node_completion_item(self.xsd_tree.expand_element, len(result))) return CompletionList(items=result, is_incomplete=False) def get_attribute_completion(self, context: XmlContext) -> CompletionList: result = [] if ( context.is_empty or context.is_content or context.is_attribute_value or context.is_closing_tag or not context.node.name ): return CompletionList(items=result, is_incomplete=False) if context.xsd_element: existing_attr_names = context.node.get_attribute_names() for attr_name in context.xsd_element.attributes: if attr_name in existing_attr_names: continue attr = context.xsd_element.attributes[attr_name] result.append(self._build_attribute_completion_item(attr, len(result))) if context.node.name == "expand": element = cast(XmlElement, context.node) macro_name = element.get_attribute("macro") if macro_name: token_params = self.definitions_provider.macro_definitions_provider.get_macro_token_params( context.xml_document, macro_name ) for token in token_params: if token.param_name in existing_attr_names: continue result.append( CompletionItem( label=token.param_name, kind=CompletionItemKind.Variable, insert_text=f'{token.param_name}="${{1:{token.default_value}}}"', insert_text_format=InsertTextFormat.Snippet, sort_text=str(len(result)).zfill(2), ) ) return CompletionList(items=result, is_incomplete=False) def get_attribute_value_completion(self, context: XmlContext) -> CompletionList: if 
context.attribute_name: attribute = context.xsd_element.attributes.get(context.attribute_name) if attribute and attribute.enumeration: result = [CompletionItem(label=item, kind=CompletionItemKind.Value) for item in attribute.enumeration] return CompletionList(items=result, is_incomplete=False) if attribute and attribute.name == "macro": macro_names = self.definitions_provider.macro_definitions_provider.get_macro_names(context.xml_document) result = [CompletionItem(label=item, kind=CompletionItemKind.Value) for item in macro_names] return CompletionList(items=result, is_incomplete=False) return CompletionList(items=[], is_incomplete=False) def get_auto_close_tag(self, context: XmlContext, trigger_character: str) -> Optional[AutoCloseTagResult]: if ( isinstance(context.node, XmlCDATASection) or context.is_closing_tag or context.node.is_closed or (context.is_attribute and not context.is_attribute_end) or context.characted_at_position == ">" ): return None tag = context.xsd_element.name snippet = f"$0</{tag}>" replace_range = None is_self_closing = trigger_character == "/" if is_self_closing: start = Position(line=context.position.line, character=context.position.character + 1) end_character = context.position.character + 2 if len(context.line_text) > end_character and context.line_text[end_character] == ">": end_character = end_character + 1 end = Position(line=context.position.line, character=end_character) replace_range = Range(start=start, end=end) if not context.is_content: snippet = "/>$0" elif context.is_content: return None return AutoCloseTagResult(snippet, replace_range)
Apache License 2.0
yelp/pyleus
pyleus/cli/virtualenv_proxy.py
VirtualenvProxy.execute_module
python
def execute_module(self, module, args=None, cwd=None):
    cmd = [os.path.join(self.path, "bin", "python"), "-m", module]
    if args:
        cmd += args

    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            cwd=cwd)
    out_data, err_data = proc.communicate()
    if proc.returncode != 0:
        raise VirtualenvError("Failed to execute Python module: {0}."
                              " Error: {1}".format(module, err_data))
    return out_data
Call "virtualenv/interpreter -m" to execute a python module.
https://github.com/yelp/pyleus/blob/8ab87e2d18b8b6a7e0471ceefdbb3ff23a576cce/pyleus/cli/virtualenv_proxy.py#L100-L115
from __future__ import absolute_import import os import subprocess from pyleus.exception import VirtualenvError def _exec_shell_cmd(cmd, stdout, stderr, err_msg): proc = subprocess.Popen(cmd, stdout=stdout, stderr=stderr) out_data, _ = proc.communicate() if proc.returncode != 0: raise VirtualenvError(err_msg) return out_data class VirtualenvProxy(object): def __init__(self, path, system_site_packages=False, pypi_index_url=None, use_wheel=True, python_interpreter=None, verbose=False): self.path = path self._system_site_packages = system_site_packages self._pypi_index_url = pypi_index_url self._use_wheel = use_wheel self._python_interpreter = python_interpreter self._verbose = verbose self._out_stream = None if not self._verbose: self._out_stream = open(os.devnull, "w") self._err_stream = subprocess.STDOUT self._create_virtualenv() def _create_virtualenv(self): cmd = ["virtualenv", self.path] if self._system_site_packages: cmd.append("--system-site-packages") if self._python_interpreter: cmd.extend(["--python", self._python_interpreter]) _exec_shell_cmd(cmd, stdout=self._out_stream, stderr=self._err_stream, err_msg="Failed to create virtualenv: {0}". format(self.path)) def install_package(self, package): cmd = [os.path.join(self.path, "bin", "pip"), "install", package] if self._pypi_index_url is not None: cmd += ["-i", self._pypi_index_url] if self._use_wheel: cmd += ['--use-wheel'] _exec_shell_cmd( cmd, stdout=self._out_stream, stderr=self._err_stream, err_msg="Failed to install {0} package." " Run with --verbose for detailed info.".format(package)) def install_from_requirements(self, req): cmd = [os.path.join(self.path, "bin", "pip"), "install", "-r", req] if self._pypi_index_url is not None: cmd += ["-i", self._pypi_index_url] if self._use_wheel: cmd += ['--use-wheel'] _exec_shell_cmd( cmd, stdout=self._out_stream, stderr=self._err_stream, err_msg="Failed to install dependencies for this topology." " Run with --verbose for detailed info.")
Apache License 2.0
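A hedged usage sketch for execute_module above: the virtualenv path is arbitrary, and constructing VirtualenvProxy actually shells out to the virtualenv command, so virtualenv must be installed for this to run.

from pyleus.cli.virtualenv_proxy import VirtualenvProxy

venv = VirtualenvProxy("/tmp/pyleus-venv", verbose=True)  # creates the virtualenv on disk
out = venv.execute_module("pip", ["--version"])           # runs <venv>/bin/python -m pip --version
print(out)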
laiguokun/funnel-transformer
tensorflow/create_pretrain_data.py
_get_boundary_indices
python
def _get_boundary_indices(tokenizer, seg, reverse=False):
    seg_len = len(seg)
    if reverse:
        seg = np.flip(seg, 0)

    boundary_indices = []
    for idx, token_id in enumerate(seg.tolist()):
        if tokenizer.is_start_id(token_id) and not tokenizer.is_func_id(token_id):
            boundary_indices.append(idx)
    boundary_indices.append(seg_len)

    if reverse:
        boundary_indices = [seg_len - idx for idx in boundary_indices]

    return boundary_indices
Get all boundary indices of whole words.
https://github.com/laiguokun/funnel-transformer/blob/1085523bc768e499d8c55edf6af0d70cb1cd27d2/tensorflow/create_pretrain_data.py#L263-L278
from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os import random from absl import flags import absl.logging as _logging import numpy as np import tensorflow.compat.v1 as tf import data_utils import tokenization tf.disable_v2_behavior() FLAGS = flags.FLAGS flags.DEFINE_integer("min_doc_len", 1, help="Minimum document length allowed.") flags.DEFINE_integer("seq_len", 512, help="Sequence length.") flags.DEFINE_string("input_glob", "data/example/*.txt", help="Input file glob.") flags.DEFINE_string("save_dir", "proc_data/example", help="Directory for saving the processed data.") flags.DEFINE_enum("split", "train", ["train", "dev", "test"], help="Save the data as which split.") flags.DEFINE_integer("pass_id", 0, help="ID of the current pass." "Different passes sample different negative segment.") flags.DEFINE_integer("num_task", 1, help="Number of total tasks.") flags.DEFINE_integer("task", 0, help="The Task ID. This value is used when " "using multiple workers to identify each worker.") def _int64_feature(values): return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def _float_feature(values): return tf.train.Feature(float_list=tf.train.FloatList(value=values)) def _tfrecord_path(save_dir): data_prefix = "data.{}.pass-{}".format(FLAGS.split, FLAGS.pass_id) data_suffix = "tfrecord-{:05d}-of-{:05d}".format(FLAGS.task, FLAGS.num_task) tfrecord_name = data_utils.format_filename( prefix=data_prefix, suffix=data_suffix, seq_len=FLAGS.seq_len, uncased=FLAGS.uncased, ) tfrecord_path = os.path.join(save_dir, tfrecord_name) return tfrecord_path def _meta_path(save_dir): meta_prefix = "meta.{}.pass-{}".format(FLAGS.split, FLAGS.pass_id) meta_suffix = "json-{:05d}-of-{:05d}".format(FLAGS.task, FLAGS.num_task) meta_name = data_utils.format_filename( prefix=meta_prefix, suffix=meta_suffix, seq_len=FLAGS.seq_len, uncased=FLAGS.uncased, ) meta_path = os.path.join(save_dir, meta_name) return meta_path def create_pretrain_data(input_paths, tokenizer): input_shards = [] input_data, sent_ids = [], [] end_of_doc = False doc_length = [] total_num_tok = 0 for input_path in input_paths: sent_id, line_cnt = True, 0 tf.logging.info("Start processing %s", input_path) for line in tf.io.gfile.GFile(input_path): if line_cnt % 100000 == 0: tf.logging.info("Loading line %d", line_cnt) if not line.strip(): end_of_doc = True cur_sent = [] else: cur_sent = tokenizer.convert_text_to_ids(line.strip()) if cur_sent: input_data.extend(cur_sent) sent_ids.extend([sent_id] * len(cur_sent)) sent_id = not sent_id if end_of_doc: doc_length.append(len(input_data)) if len(input_data) >= max(FLAGS.min_doc_len, 1): input_data = np.array(input_data, dtype=np.int64) sent_ids = np.array(sent_ids, dtype=np.bool) input_shards.append((input_data, sent_ids)) total_num_tok += len(input_data) input_data, sent_ids = [], [] end_of_doc = False line_cnt += 1 tf.logging.info("Finish %s with %d lines.", input_path, line_cnt) tf.logging.info("[Task %d] Total number tokens: %d", FLAGS.task, total_num_tok) hist, bins = np.histogram(doc_length, bins=[0, 64, 128, 256, 512, 1024, 2048, 102400]) percent = hist / np.sum(hist) tf.logging.info("***** Doc length histogram *****") for pct, l, r in zip(percent, bins[:-1], bins[1:]): tf.logging.info(" - [%d, %d]: %.4f", l, r, pct) np.random.seed(100 * FLAGS.task + FLAGS.pass_id) perm_indices = np.random.permutation(len(input_shards)) input_data_list, sent_ids_list = [], [] prev_sent_id = None for perm_idx in perm_indices: 
input_data, sent_ids = input_shards[perm_idx] tf.logging.debug("Idx %d: data %s sent %s", perm_idx, input_data.shape, sent_ids.shape) if prev_sent_id is not None and sent_ids[0] == prev_sent_id: sent_ids = np.logical_not(sent_ids) input_data_list.append(input_data) sent_ids_list.append(sent_ids) prev_sent_id = sent_ids[-1] input_data = np.concatenate(input_data_list) sent_ids = np.concatenate(sent_ids_list) create_tfrecords( save_dir=FLAGS.save_dir, data=[input_data, sent_ids], tokenizer=tokenizer, ) def main(_): tokenizer = tokenization.get_tokenizer() data_utils.setup_special_ids(tokenizer) if not tf.io.gfile.exists(FLAGS.save_dir): tf.io.gfile.makedirs(FLAGS.save_dir) file_paths = sorted(tf.io.gfile.glob(FLAGS.input_glob)) tf.logging.info("Use glob: %s", FLAGS.input_glob) tf.logging.info("Find %d files: %s", len(file_paths), file_paths) task_file_paths = file_paths[FLAGS.task::FLAGS.num_task] if not task_file_paths: tf.logging.info("Exit: task %d has no file to process.", FLAGS.task) return tf.logging.info("Task %d process %d files: %s", FLAGS.task, len(task_file_paths), task_file_paths) create_pretrain_data(task_file_paths, tokenizer) def _split_a_and_b(data, sent_ids, begin_idx, tot_len): data_len = data.shape[0] if begin_idx + tot_len >= data_len: tf.logging.info("Not enough data: " "begin_idx %d + tot_len %d >= data_len %d", begin_idx, tot_len, data_len) return None end_idx = begin_idx + 1 cut_points = [] while end_idx < data_len: if sent_ids[end_idx] != sent_ids[end_idx - 1]: if end_idx - begin_idx >= tot_len: break cut_points.append(end_idx) end_idx += 1 a_begin = begin_idx if not cut_points or random.random() < 0.5: label = 0 if not cut_points: a_end = end_idx else: a_end = random.choice(cut_points) b_len = max(1, tot_len - (a_end - a_begin)) b_begin = random.randint(0, data_len - b_len) b_end = b_begin + b_len while b_begin > 0 and sent_ids[b_begin - 1] == sent_ids[b_begin]: b_begin -= 1 while b_end < data_len and sent_ids[b_end - 1] == sent_ids[b_end]: b_end += 1 new_begin = a_end else: label = 1 a_end = random.choice(cut_points) b_begin = a_end b_end = end_idx new_begin = b_end while a_end - a_begin + b_end - b_begin > tot_len: if a_end - a_begin > b_end - b_begin: a_end -= 1 else: if random.random() < 0.5: b_end -= 1 else: b_begin += 1 ret = [data[a_begin: a_end], data[b_begin: b_end], label, new_begin] return ret
MIT License
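A self-contained sketch of the boundary logic above: it assumes _get_boundary_indices from the row is in scope and uses a stub tokenizer invented for this example (only is_start_id and is_func_id are exercised).

import numpy as np


class StubTokenizer:
    # Hypothetical rule: ids >= 100 start a new word, ids < 10 are functional tokens.
    def is_start_id(self, token_id):
        return token_id >= 100

    def is_func_id(self, token_id):
        return token_id < 10


seg = np.array([1, 100, 57, 103, 42, 2])  # func, word-start, subword, word-start, subword, func
print(_get_boundary_indices(StubTokenizer(), seg))                 # [1, 3, 6]
print(_get_boundary_indices(StubTokenizer(), seg, reverse=True))   # [4, 2, 0]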
julien6387/supvisors
supvisors/address.py
AddressStatus.add_process
python
def add_process(self, process):
    self.processes[process.namespec] = process
Add a new process to the process list.
https://github.com/julien6387/supvisors/blob/4e32bce566dec2cf9e9a213a3698178030eb869b/supvisors/address.py#L122-L124
from typing import Dict from supervisor.loggers import Logger from supervisor.xmlrpc import capped_int from .process import ProcessStatus from .ttypes import AddressStates, InvalidTransition class AddressStatus(object): INACTIVITY_TIMEOUT = 10 def __init__(self, node_name: str, logger: Logger): self.logger: Logger = logger self.node_name: str = node_name self._state: AddressStates = AddressStates.UNKNOWN self.sequence_counter: int = 0 self.remote_time: int = 0 self.local_time: int = 0 self.processes: Dict[str, ProcessStatus] = {} def reset(self): if self.state in [AddressStates.CHECKING, AddressStates.RUNNING]: self._state = AddressStates.UNKNOWN self.remote_time = 0 self.local_time = 0 @property def state(self): return self._state @state.setter def state(self, new_state): if self._state != new_state: if self.check_transition(new_state): self._state = new_state self.logger.info('AddressStatus.state: {} is {}'.format(self.node_name, self.state.name)) else: raise InvalidTransition('AddressStatus.state: {} transition rejected from {} to {}' .format(self.node_name, self.state.name, new_state.name)) def serial(self): return {'address_name': self.node_name, 'statecode': self.state.value, 'statename': self.state.name, 'sequence_counter': self.sequence_counter, 'remote_time': capped_int(self.remote_time), 'local_time': capped_int(self.local_time), 'loading': self.get_loading()} def inactive(self, current_time: float): return (self.state in [AddressStates.CHECKING, AddressStates.RUNNING] and (current_time - self.local_time) > self.INACTIVITY_TIMEOUT) def in_isolation(self): return self.state in [AddressStates.ISOLATING, AddressStates.ISOLATED] def update_times(self, sequence_counter: int, remote_time: int, local_time: int): self.sequence_counter = sequence_counter self.remote_time = remote_time self.local_time = local_time for process in self.processes.values(): process.update_times(self.node_name, remote_time) def check_transition(self, new_state): return new_state in self._Transitions[self.state]
Apache License 2.0
dtuwindenergy/pywake
py_wake/deficit_models/selfsimilarity.py
SelfSimilarityDeficit2020.inter_gamma_fac
python
def inter_gamma_fac(self, x_ijlk):
    finter_ijlk = np.abs(self.a0f(x_ijlk) - self.a0f(-1.)) / np.ptp(self.a0f(np.array([-6, -1])))
    finter_ijlk[x_ijlk < -6] = 1.
    finter_ijlk[x_ijlk > -1] = 0.
    return finter_ijlk
Interpolation coefficient between near- and far-field gamma(CT)
https://github.com/dtuwindenergy/pywake/blob/ab02a41b5b4ebe7d17877e265ae64d2902324298/py_wake/deficit_models/selfsimilarity.py#L146-L153
import numpy as np from numpy import newaxis as na from py_wake.deficit_models.no_wake import NoWakeDeficit from py_wake.deficit_models import BlockageDeficitModel class SelfSimilarityDeficit(BlockageDeficitModel): args4deficit = ['WS_ilk', 'D_src_il', 'dw_ijlk', 'cw_ijlk', 'ct_ilk'] def __init__(self, ss_gamma=1.1, ss_lambda=0.587, ss_eta=1.32, ss_alpha=8. / 9., ss_beta=np.sqrt(2), limiter=1e-10, superpositionModel=None): super().__init__(superpositionModel=superpositionModel) self.ss_gamma = ss_gamma self.ss_lambda = ss_lambda self.ss_eta = ss_eta self.ss_alpha = ss_alpha self.ss_beta = ss_beta self.a0p = np.array([0.2460, 0.0586, 0.0883]) self.limiter = limiter def r12(self, x_ijlk): r12_ijlk = np.sqrt(self.ss_lambda * (self.ss_eta + x_ijlk ** 2)) return r12_ijlk def gamma(self, x_ijlk, ct_ilk): return self.ss_gamma * np.ones_like(x_ijlk) def f_eps(self, x_ijlk, cw_ijlk, R_ijl): r12_ijlk = self.r12(x_ijlk) with np.warnings.catch_warnings(): np.warnings.filterwarnings('ignore', r'overflow encountered in cosh') feps_ijlk = (1 / np.cosh(self.ss_beta * cw_ijlk / (R_ijl[..., na] * r12_ijlk))) ** self.ss_alpha return feps_ijlk def a0f(self, x_ijlk): a0f_ijlk = (1. + x_ijlk / np.sqrt(1. + x_ijlk**2)) return a0f_ijlk def a0(self, x_ijlk, ct_ilk): gamma_ct_ijlk = self.gamma(x_ijlk, ct_ilk) * ct_ilk[:, na] a0_ijlk = self.a0p[2] * gamma_ct_ijlk**3 + self.a0p[1] * gamma_ct_ijlk**2 + self.a0p[0] * gamma_ct_ijlk return a0_ijlk def calc_deficit(self, WS_ilk, D_src_il, dw_ijlk, cw_ijlk, ct_ilk, **_): R_ijl = (D_src_il / 2)[:, na] x_ijlk = dw_ijlk / R_ijl[..., na] feps_ijlk = self.f_eps(x_ijlk, cw_ijlk, R_ijl) a0x_ijlk = self.a0(x_ijlk, ct_ilk) * self.a0f(x_ijlk) return WS_ilk[:, na] * (x_ijlk < -self.limiter) * a0x_ijlk * feps_ijlk class SelfSimilarityDeficit2020(SelfSimilarityDeficit): def __init__(self, ss_alpha=8. / 9., ss_beta=np.sqrt(2), r12p=np.array([-0.672, 0.4897]), ngp=np.array([-1.381, 2.627, -1.524, 1.336]), fgp=np.array([-0.06489, 0.4911, 1.116, -0.1577]), limiter=1e-10, superpositionModel=None): BlockageDeficitModel.__init__(self, superpositionModel=superpositionModel) self.ss_alpha = ss_alpha self.ss_beta = ss_beta self.r12p = r12p self.ngp = ngp self.fgp = fgp self.a0p = np.array([0.2460, 0.0586, 0.0883]) self.limiter = limiter def r12(self, x_ijlk): r12_ijlk = self.r12p[0] * x_ijlk + self.r12p[1] return r12_ijlk def far_gamma(self, ct_ilk): fg_ilk = self.fgp[0] * np.sin((ct_ilk - self.fgp[1]) / self.fgp[3]) + self.fgp[2] return fg_ilk def near_gamma(self, ct_ilk): fn_ilk = self.ngp[0] * ct_ilk**3 + self.ngp[1] * ct_ilk**2 + self.ngp[2] * ct_ilk + self.ngp[3] return fn_ilk
MIT License
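A hedged check of inter_gamma_fac above: py_wake must be installed, the x values are arbitrary distances in rotor radii, and the rounded outputs follow from a0f(x) = 1 + x / sqrt(1 + x**2) defined in the context column.

import numpy as np
from py_wake.deficit_models.selfsimilarity import SelfSimilarityDeficit2020

model = SelfSimilarityDeficit2020()
x = np.array([-8.0, -6.0, -3.0, -1.0, 0.5])
w = model.inter_gamma_fac(x)
# The weight blends from 1 far upstream (x <= -6) to 0 near the rotor (x >= -1).
print(np.round(w, 3))  # approximately [1.    1.    0.865 0.    0.   ]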
facebookresearch/pytorchvideo
pytorchvideo/transforms/transforms.py
ConvertUint8ToFloat.forward
python
def forward(self, x: torch.Tensor) -> torch.Tensor:
    assert x.dtype == torch.uint8, "image must have dtype torch.uint8"
    return self.convert_func(x)
Args:
    x (torch.Tensor): video tensor with shape (C, T, H, W).
https://github.com/facebookresearch/pytorchvideo/blob/832a6bc683257f07e74c95a1f9441ebaa64d95d8/pytorchvideo/transforms/transforms.py#L183-L189
from typing import Callable, Dict, List, Optional, Tuple import pytorchvideo.transforms.functional import torch import torchvision.transforms class ApplyTransformToKey: def __init__(self, key: str, transform: Callable): self._key = key self._transform = transform def __call__(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: x[self._key] = self._transform(x[self._key]) return x class RemoveKey(torch.nn.Module): def __init__(self, key: str): self._key = key def __call__(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: if self._key in x: del x[self._key] return x class UniformTemporalSubsample(torch.nn.Module): def __init__(self, num_samples: int): super().__init__() self._num_samples = num_samples def forward(self, x: torch.Tensor) -> torch.Tensor: return pytorchvideo.transforms.functional.uniform_temporal_subsample( x, self._num_samples ) class UniformTemporalSubsampleRepeated(torch.nn.Module): def __init__(self, frame_ratios: Tuple[int]): super().__init__() self._frame_ratios = frame_ratios def forward(self, x: torch.Tensor): return pytorchvideo.transforms.functional.uniform_temporal_subsample_repeated( x, self._frame_ratios ) class ShortSideScale(torch.nn.Module): def __init__(self, size: int): super().__init__() self._size = size def forward(self, x: torch.Tensor) -> torch.Tensor: return pytorchvideo.transforms.functional.short_side_scale(x, self._size) class RandomShortSideScale(torch.nn.Module): def __init__(self, min_size: int, max_size: int): super().__init__() self._min_size = min_size self._max_size = max_size def forward(self, x: torch.Tensor) -> torch.Tensor: size = torch.randint(self._min_size, self._max_size + 1, (1,)).item() return pytorchvideo.transforms.functional.short_side_scale(x, size) class UniformCropVideo(torch.nn.Module): def __init__( self, size: int, video_key: str = "video", aug_index_key: str = "aug_index" ): super().__init__() self._size = size self._video_key = video_key self._aug_index_key = aug_index_key def __call__(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: x[self._video_key] = pytorchvideo.transforms.functional.uniform_crop( x[self._video_key], self._size, x[self._aug_index_key] ) return x class Normalize(torchvision.transforms.Normalize): def forward(self, x: torch.Tensor) -> torch.Tensor: vid = x.permute(1, 0, 2, 3) vid = super().forward(vid) vid = vid.permute(1, 0, 2, 3) return vid class ConvertUint8ToFloat(torch.nn.Module): def __init__(self): super().__init__() self.convert_func = torchvision.transforms.ConvertImageDtype(torch.float32)
Apache License 2.0
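A hedged usage sketch for ConvertUint8ToFloat above: the tensor sizes are arbitrary, and torchvision's ConvertImageDtype scales uint8 values into [0, 1] floats.

import torch
from pytorchvideo.transforms.transforms import ConvertUint8ToFloat

transform = ConvertUint8ToFloat()
video = torch.randint(0, 256, (3, 8, 32, 32), dtype=torch.uint8)  # (C, T, H, W)
out = transform(video)
print(out.dtype, float(out.max()) <= 1.0)  # torch.float32 True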
kuri65536/python-for-android
python-modules/twisted/twisted/protocols/ftp.py
FTP.ftp_PASV
python
def ftp_PASV(self):
    if self.dtpFactory is not None:
        self.cleanupDTP()
    self.dtpFactory = DTPFactory(pi=self)
    self.dtpFactory.setTimeout(self.dtpTimeout)
    self.dtpPort = self.getDTPPort(self.dtpFactory)

    host = self.transport.getHost().host
    port = self.dtpPort.getHost().port
    self.reply(ENTERING_PASV_MODE, encodeHostPort(host, port))
    return self.dtpFactory.deferred.addCallback(lambda ign: None)
Request for a passive connection from the rfc::

    This command requests the server-DTP to "listen" on a data port (which is not its default data port) and to wait for a connection rather than initiate one upon receipt of a transfer command. The response to this command includes the host and port address this server is listening on.
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-modules/twisted/twisted/protocols/ftp.py#L835-L858
import os import time import re import operator import stat import errno import fnmatch import warnings try: import pwd, grp except ImportError: pwd = grp = None from zope.interface import Interface, implements from twisted import copyright from twisted.internet import reactor, interfaces, protocol, error, defer from twisted.protocols import basic, policies from twisted.python import log, failure, filepath from twisted.python.compat import reduce from twisted.cred import error as cred_error, portal, credentials, checkers RESTART_MARKER_REPLY = "100" SERVICE_READY_IN_N_MINUTES = "120" DATA_CNX_ALREADY_OPEN_START_XFR = "125" FILE_STATUS_OK_OPEN_DATA_CNX = "150" CMD_OK = "200.1" TYPE_SET_OK = "200.2" ENTERING_PORT_MODE = "200.3" CMD_NOT_IMPLMNTD_SUPERFLUOUS = "202" SYS_STATUS_OR_HELP_REPLY = "211" DIR_STATUS = "212" FILE_STATUS = "213" HELP_MSG = "214" NAME_SYS_TYPE = "215" SVC_READY_FOR_NEW_USER = "220.1" WELCOME_MSG = "220.2" SVC_CLOSING_CTRL_CNX = "221" GOODBYE_MSG = "221" DATA_CNX_OPEN_NO_XFR_IN_PROGRESS = "225" CLOSING_DATA_CNX = "226" TXFR_COMPLETE_OK = "226" ENTERING_PASV_MODE = "227" ENTERING_EPSV_MODE = "229" USR_LOGGED_IN_PROCEED = "230.1" GUEST_LOGGED_IN_PROCEED = "230.2" REQ_FILE_ACTN_COMPLETED_OK = "250" PWD_REPLY = "257.1" MKD_REPLY = "257.2" USR_NAME_OK_NEED_PASS = "331.1" GUEST_NAME_OK_NEED_EMAIL = "331.2" NEED_ACCT_FOR_LOGIN = "332" REQ_FILE_ACTN_PENDING_FURTHER_INFO = "350" SVC_NOT_AVAIL_CLOSING_CTRL_CNX = "421.1" TOO_MANY_CONNECTIONS = "421.2" CANT_OPEN_DATA_CNX = "425" CNX_CLOSED_TXFR_ABORTED = "426" REQ_ACTN_ABRTD_FILE_UNAVAIL = "450" REQ_ACTN_ABRTD_LOCAL_ERR = "451" REQ_ACTN_ABRTD_INSUFF_STORAGE = "452" SYNTAX_ERR = "500" SYNTAX_ERR_IN_ARGS = "501" CMD_NOT_IMPLMNTD = "502" BAD_CMD_SEQ = "503" CMD_NOT_IMPLMNTD_FOR_PARAM = "504" NOT_LOGGED_IN = "530.1" AUTH_FAILURE = "530.2" NEED_ACCT_FOR_STOR = "532" FILE_NOT_FOUND = "550.1" PERMISSION_DENIED = "550.2" ANON_USER_DENIED = "550.3" IS_NOT_A_DIR = "550.4" REQ_ACTN_NOT_TAKEN = "550.5" FILE_EXISTS = "550.6" IS_A_DIR = "550.7" PAGE_TYPE_UNK = "551" EXCEEDED_STORAGE_ALLOC = "552" FILENAME_NOT_ALLOWED = "553" RESPONSE = { RESTART_MARKER_REPLY: '110 MARK yyyy-mmmm', SERVICE_READY_IN_N_MINUTES: '120 service ready in %s minutes', DATA_CNX_ALREADY_OPEN_START_XFR: '125 Data connection already open, starting transfer', FILE_STATUS_OK_OPEN_DATA_CNX: '150 File status okay; about to open data connection.', CMD_OK: '200 Command OK', TYPE_SET_OK: '200 Type set to %s.', ENTERING_PORT_MODE: '200 PORT OK', CMD_NOT_IMPLMNTD_SUPERFLUOUS: '202 Command not implemented, superfluous at this site', SYS_STATUS_OR_HELP_REPLY: '211 System status reply', DIR_STATUS: '212 %s', FILE_STATUS: '213 %s', HELP_MSG: '214 help: %s', NAME_SYS_TYPE: '215 UNIX Type: L8', WELCOME_MSG: "220 %s", SVC_READY_FOR_NEW_USER: '220 Service ready', GOODBYE_MSG: '221 Goodbye.', DATA_CNX_OPEN_NO_XFR_IN_PROGRESS: '225 data connection open, no transfer in progress', CLOSING_DATA_CNX: '226 Abort successful', TXFR_COMPLETE_OK: '226 Transfer Complete.', ENTERING_PASV_MODE: '227 Entering Passive Mode (%s).', ENTERING_EPSV_MODE: '229 Entering Extended Passive Mode (|||%s|).', USR_LOGGED_IN_PROCEED: '230 User logged in, proceed', GUEST_LOGGED_IN_PROCEED: '230 Anonymous login ok, access restrictions apply.', REQ_FILE_ACTN_COMPLETED_OK: '250 Requested File Action Completed OK', PWD_REPLY: '257 "%s"', MKD_REPLY: '257 "%s" created', 'userotp': '331 Response to %s.', USR_NAME_OK_NEED_PASS: '331 Password required for %s.', GUEST_NAME_OK_NEED_EMAIL: '331 Guest login ok, type your email address 
as password.', REQ_FILE_ACTN_PENDING_FURTHER_INFO: '350 Requested file action pending further information.', SVC_NOT_AVAIL_CLOSING_CTRL_CNX: '421 Service not available, closing control connection.', TOO_MANY_CONNECTIONS: '421 Too many users right now, try again in a few minutes.', CANT_OPEN_DATA_CNX: "425 Can't open data connection.", CNX_CLOSED_TXFR_ABORTED: '426 Transfer aborted. Data connection closed.', REQ_ACTN_ABRTD_LOCAL_ERR: '451 Requested action aborted. Local error in processing.', SYNTAX_ERR: "500 Syntax error: %s", SYNTAX_ERR_IN_ARGS: '501 syntax error in argument(s) %s.', CMD_NOT_IMPLMNTD: "502 Command '%s' not implemented", BAD_CMD_SEQ: '503 Incorrect sequence of commands: %s', CMD_NOT_IMPLMNTD_FOR_PARAM: "504 Not implemented for parameter '%s'.", NOT_LOGGED_IN: '530 Please login with USER and PASS.', AUTH_FAILURE: '530 Sorry, Authentication failed.', NEED_ACCT_FOR_STOR: '532 Need an account for storing files', FILE_NOT_FOUND: '550 %s: No such file or directory.', PERMISSION_DENIED: '550 %s: Permission denied.', ANON_USER_DENIED: '550 Anonymous users are forbidden to change the filesystem', IS_NOT_A_DIR: '550 Cannot rmd, %s is not a directory', FILE_EXISTS: '550 %s: File exists', IS_A_DIR: '550 %s: is a directory', REQ_ACTN_NOT_TAKEN: '550 Requested action not taken: %s', EXCEEDED_STORAGE_ALLOC: '552 Requested file action aborted, exceeded file storage allocation', FILENAME_NOT_ALLOWED: '553 Requested action not taken, file name not allowed' } class InvalidPath(Exception): def toSegments(cwd, path): if path.startswith('/'): segs = [] else: segs = cwd[:] for s in path.split('/'): if s == '.' or s == '': continue elif s == '..': if segs: segs.pop() else: raise InvalidPath(cwd, path) elif '\0' in s or '/' in s: raise InvalidPath(cwd, path) else: segs.append(s) return segs def errnoToFailure(e, path): if e == errno.ENOENT: return defer.fail(FileNotFoundError(path)) elif e == errno.EACCES or e == errno.EPERM: return defer.fail(PermissionDeniedError(path)) elif e == errno.ENOTDIR: return defer.fail(IsNotADirectoryError(path)) elif e == errno.EEXIST: return defer.fail(FileExistsError(path)) elif e == errno.EISDIR: return defer.fail(IsADirectoryError(path)) else: return defer.fail() class FTPCmdError(Exception): def __init__(self, *msg): Exception.__init__(self, *msg) self.errorMessage = msg def response(self): return RESPONSE[self.errorCode] % self.errorMessage class FileNotFoundError(FTPCmdError): errorCode = FILE_NOT_FOUND class AnonUserDeniedError(FTPCmdError): def __init__(self): FTPCmdError.__init__(self, None) errorCode = ANON_USER_DENIED class PermissionDeniedError(FTPCmdError): errorCode = PERMISSION_DENIED class IsNotADirectoryError(FTPCmdError): errorCode = IS_NOT_A_DIR class FileExistsError(FTPCmdError): errorCode = FILE_EXISTS class IsADirectoryError(FTPCmdError): errorCode = IS_A_DIR class CmdSyntaxError(FTPCmdError): errorCode = SYNTAX_ERR class CmdArgSyntaxError(FTPCmdError): errorCode = SYNTAX_ERR_IN_ARGS class CmdNotImplementedError(FTPCmdError): errorCode = CMD_NOT_IMPLMNTD class CmdNotImplementedForArgError(FTPCmdError): errorCode = CMD_NOT_IMPLMNTD_FOR_PARAM class FTPError(Exception): pass class PortConnectionError(Exception): pass class BadCmdSequenceError(FTPCmdError): errorCode = BAD_CMD_SEQ class AuthorizationError(FTPCmdError): errorCode = AUTH_FAILURE def debugDeferred(self, *_): log.msg('debugDeferred(): %s' % str(_), debug=True) _months = [ None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] class DTP(object, 
protocol.Protocol): implements(interfaces.IConsumer) isConnected = False _cons = None _onConnLost = None _buffer = None def connectionMade(self): self.isConnected = True self.factory.deferred.callback(None) self._buffer = [] def connectionLost(self, reason): self.isConnected = False if self._onConnLost is not None: self._onConnLost.callback(None) def sendLine(self, line): self.transport.write(line + '\r\n') def _formatOneListResponse(self, name, size, directory, permissions, hardlinks, modified, owner, group): def formatMode(mode): return ''.join([mode & (256 >> n) and 'rwx'[n % 3] or '-' for n in range(9)]) def formatDate(mtime): now = time.gmtime() info = { 'month': _months[mtime.tm_mon], 'day': mtime.tm_mday, 'year': mtime.tm_year, 'hour': mtime.tm_hour, 'minute': mtime.tm_min } if now.tm_year != mtime.tm_year: return '%(month)s %(day)02d %(year)5d' % info else: return '%(month)s %(day)02d %(hour)02d:%(minute)02d' % info format = ('%(directory)s%(permissions)s%(hardlinks)4d ' '%(owner)-9s %(group)-9s %(size)15d %(date)12s ' '%(name)s') return format % { 'directory': directory and 'd' or '-', 'permissions': formatMode(permissions), 'hardlinks': hardlinks, 'owner': owner[:8], 'group': group[:8], 'size': size, 'date': formatDate(time.gmtime(modified)), 'name': name} def sendListResponse(self, name, response): self.sendLine(self._formatOneListResponse(name, *response)) def registerProducer(self, producer, streaming): return self.transport.registerProducer(producer, streaming) def unregisterProducer(self): self.transport.unregisterProducer() self.transport.loseConnection() def write(self, data): if self.isConnected: return self.transport.write(data) raise Exception("Crap damn crap damn crap damn") def _conswrite(self, bytes): try: self._cons.write(bytes) except: self._onConnLost.errback() def dataReceived(self, bytes): if self._cons is not None: self._conswrite(bytes) else: self._buffer.append(bytes) def _unregConsumer(self, ignored): self._cons.unregisterProducer() self._cons = None del self._onConnLost return ignored def registerConsumer(self, cons): assert self._cons is None self._cons = cons self._cons.registerProducer(self, True) for chunk in self._buffer: self._conswrite(chunk) self._buffer = None if self.isConnected: self._onConnLost = d = defer.Deferred() d.addBoth(self._unregConsumer) return d else: self._cons.unregisterProducer() self._cons = None return defer.succeed(None) def resumeProducing(self): self.transport.resumeProducing() def pauseProducing(self): self.transport.pauseProducing() def stopProducing(self): self.transport.stopProducing() class DTPFactory(protocol.ClientFactory): _IN_PROGRESS = object() _FAILED = object() _FINISHED = object() _state = _IN_PROGRESS peerCheck = False def __init__(self, pi, peerHost=None, reactor=None): self.pi = pi self.peerHost = peerHost self.deferred = defer.Deferred() self.delayedCall = None if reactor is None: from twisted.internet import reactor self._reactor = reactor def buildProtocol(self, addr): log.msg('DTPFactory.buildProtocol', debug=True) if self._state is not self._IN_PROGRESS: return None self._state = self._FINISHED self.cancelTimeout() p = DTP() p.factory = self p.pi = self.pi self.pi.dtpInstance = p return p def stopFactory(self): log.msg('dtpFactory.stopFactory', debug=True) self.cancelTimeout() def timeoutFactory(self): log.msg('timed out waiting for DTP connection') if self._state is not self._IN_PROGRESS: return self._state = self._FAILED d = self.deferred self.deferred = None d.errback( 
PortConnectionError(defer.TimeoutError("DTPFactory timeout"))) def cancelTimeout(self): if self.delayedCall is not None and self.delayedCall.active(): log.msg('cancelling DTP timeout', debug=True) self.delayedCall.cancel() def setTimeout(self, seconds): log.msg('DTPFactory.setTimeout set to %s seconds' % seconds) self.delayedCall = self._reactor.callLater(seconds, self.timeoutFactory) def clientConnectionFailed(self, connector, reason): if self._state is not self._IN_PROGRESS: return self._state = self._FAILED d = self.deferred self.deferred = None d.errback(PortConnectionError(reason)) class ASCIIConsumerWrapper(object): def __init__(self, cons): self.cons = cons self.registerProducer = cons.registerProducer self.unregisterProducer = cons.unregisterProducer assert os.linesep == "\r\n" or len(os.linesep) == 1, "Unsupported platform (yea right like this even exists)" if os.linesep == "\r\n": self.write = cons.write def write(self, bytes): return self.cons.write(bytes.replace(os.linesep, "\r\n")) class FileConsumer(object): implements(interfaces.IConsumer) def __init__(self, fObj): self.fObj = fObj def registerProducer(self, producer, streaming): self.producer = producer assert streaming def unregisterProducer(self): self.producer = None self.fObj.close() def write(self, bytes): self.fObj.write(bytes) class FTPOverflowProtocol(basic.LineReceiver): def connectionMade(self): self.sendLine(RESPONSE[TOO_MANY_CONNECTIONS]) self.transport.loseConnection() class FTP(object, basic.LineReceiver, policies.TimeoutMixin): disconnected = False UNAUTH, INAUTH, AUTHED, RENAMING = range(4) dtpTimeout = 10 portal = None shell = None dtpFactory = None dtpPort = None dtpInstance = None binary = True passivePortRange = xrange(0, 1) listenFactory = reactor.listenTCP def reply(self, key, *args): msg = RESPONSE[key] % args self.sendLine(msg) def connectionMade(self): self.state = self.UNAUTH self.setTimeout(self.timeOut) self.reply(WELCOME_MSG, self.factory.welcomeMessage) def connectionLost(self, reason): if self.dtpFactory: self.cleanupDTP() self.setTimeout(None) if hasattr(self.shell, 'logout') and self.shell.logout is not None: self.shell.logout() self.shell = None self.transport = None def timeoutConnection(self): self.transport.loseConnection() def lineReceived(self, line): self.resetTimeout() self.pauseProducing() def processFailed(err): if err.check(FTPCmdError): self.sendLine(err.value.response()) elif (err.check(TypeError) and err.value.args[0].find('takes exactly') != -1): self.reply(SYNTAX_ERR, "%s requires an argument." 
% (cmd,)) else: log.msg("Unexpected FTP error") log.err(err) self.reply(REQ_ACTN_NOT_TAKEN, "internal server error") def processSucceeded(result): if isinstance(result, tuple): self.reply(*result) elif result is not None: self.reply(result) def allDone(ignored): if not self.disconnected: self.resumeProducing() spaceIndex = line.find(' ') if spaceIndex != -1: cmd = line[:spaceIndex] args = (line[spaceIndex + 1:],) else: cmd = line args = () d = defer.maybeDeferred(self.processCommand, cmd, *args) d.addCallbacks(processSucceeded, processFailed) d.addErrback(log.err) from twisted.internet import reactor reactor.callLater(0, d.addBoth, allDone) def processCommand(self, cmd, *params): cmd = cmd.upper() if self.state == self.UNAUTH: if cmd == 'USER': return self.ftp_USER(*params) elif cmd == 'PASS': return BAD_CMD_SEQ, "USER required before PASS" else: return NOT_LOGGED_IN elif self.state == self.INAUTH: if cmd == 'PASS': return self.ftp_PASS(*params) else: return BAD_CMD_SEQ, "PASS required after USER" elif self.state == self.AUTHED: method = getattr(self, "ftp_" + cmd, None) if method is not None: return method(*params) return defer.fail(CmdNotImplementedError(cmd)) elif self.state == self.RENAMING: if cmd == 'RNTO': return self.ftp_RNTO(*params) else: return BAD_CMD_SEQ, "RNTO required after RNFR" def getDTPPort(self, factory): for portn in self.passivePortRange: try: dtpPort = self.listenFactory(portn, factory) except error.CannotListenError: continue else: return dtpPort raise error.CannotListenError('', portn, "No port available in range %s" % (self.passivePortRange,)) def ftp_USER(self, username): if not username: return defer.fail(CmdSyntaxError('USER requires an argument')) self._user = username self.state = self.INAUTH if self.factory.allowAnonymous and self._user == self.factory.userAnonymous: return GUEST_NAME_OK_NEED_EMAIL else: return (USR_NAME_OK_NEED_PASS, username) def ftp_PASS(self, password): if self.factory.allowAnonymous and self._user == self.factory.userAnonymous: creds = credentials.Anonymous() reply = GUEST_LOGGED_IN_PROCEED else: creds = credentials.UsernamePassword(self._user, password) reply = USR_LOGGED_IN_PROCEED del self._user def _cbLogin((interface, avatar, logout)): assert interface is IFTPShell, "The realm is busted, jerk." self.shell = avatar self.logout = logout self.workingDirectory = [] self.state = self.AUTHED return reply def _ebLogin(failure): failure.trap(cred_error.UnauthorizedLogin, cred_error.UnhandledCredentials) self.state = self.UNAUTH raise AuthorizationError d = self.portal.login(creds, None, IFTPShell) d.addCallbacks(_cbLogin, _ebLogin) return d
Apache License 2.0
graphql-python/graphql-core
src/graphql/execution/values.py
get_argument_values
python
def get_argument_values(
    type_def: Union[GraphQLField, GraphQLDirective],
    node: Union[FieldNode, DirectiveNode],
    variable_values: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    coerced_values: Dict[str, Any] = {}
    arg_node_map = {arg.name.value: arg for arg in node.arguments or []}

    for name, arg_def in type_def.args.items():
        arg_type = arg_def.type
        argument_node = arg_node_map.get(name)

        if argument_node is None:
            if arg_def.default_value is not Undefined:
                coerced_values[arg_def.out_name or name] = arg_def.default_value
            elif is_non_null_type(arg_type):
                raise GraphQLError(
                    f"Argument '{name}' of required type '{arg_type}'"
                    " was not provided.",
                    node,
                )
            continue

        value_node = argument_node.value
        is_null = isinstance(argument_node.value, NullValueNode)

        if isinstance(value_node, VariableNode):
            variable_name = value_node.name.value
            if variable_values is None or variable_name not in variable_values:
                if arg_def.default_value is not Undefined:
                    coerced_values[arg_def.out_name or name] = arg_def.default_value
                elif is_non_null_type(arg_type):
                    raise GraphQLError(
                        f"Argument '{name}' of required type '{arg_type}'"
                        f" was provided the variable '${variable_name}'"
                        " which was not provided a runtime value.",
                        value_node,
                    )
                continue
            is_null = variable_values[variable_name] is None

        if is_null and is_non_null_type(arg_type):
            raise GraphQLError(
                f"Argument '{name}' of non-null type '{arg_type}' must not be null.",
                value_node,
            )

        coerced_value = value_from_ast(value_node, arg_type, variable_values)
        if coerced_value is Undefined:
            raise GraphQLError(
                f"Argument '{name}' has invalid value {print_ast(value_node)}.",
                value_node,
            )
        coerced_values[arg_def.out_name or name] = coerced_value

    return coerced_values
Get coerced argument values based on provided definitions and nodes. Prepares a dict of argument values given a list of argument definitions and list of argument AST nodes. For internal use only.
https://github.com/graphql-python/graphql-core/blob/6f51c70d1fa5f61cceca9ed1bce4a0f57c30151e/src/graphql/execution/values.py#L147-L212
from typing import Any, Callable, Dict, List, Optional, Union, cast from ..error import GraphQLError from ..language import ( DirectiveNode, EnumValueDefinitionNode, ExecutableDefinitionNode, FieldNode, FieldDefinitionNode, InputValueDefinitionNode, NullValueNode, SchemaDefinitionNode, SelectionNode, TypeDefinitionNode, TypeExtensionNode, VariableDefinitionNode, VariableNode, print_ast, ) from ..pyutils import inspect, print_path_list, FrozenList, Undefined from ..type import ( GraphQLDirective, GraphQLField, GraphQLInputType, GraphQLSchema, is_input_type, is_non_null_type, ) from ..utilities.coerce_input_value import coerce_input_value from ..utilities.type_from_ast import type_from_ast from ..utilities.value_from_ast import value_from_ast __all__ = ["get_variable_values", "get_argument_values", "get_directive_values"] CoercedVariableValues = Union[List[GraphQLError], Dict[str, Any]] def get_variable_values( schema: GraphQLSchema, var_def_nodes: FrozenList[VariableDefinitionNode], inputs: Dict[str, Any], max_errors: Optional[int] = None, ) -> CoercedVariableValues: errors: List[GraphQLError] = [] def on_error(error: GraphQLError) -> None: if max_errors is not None and len(errors) >= max_errors: raise GraphQLError( "Too many errors processing variables," " error limit reached. Execution aborted." ) errors.append(error) try: coerced = coerce_variable_values(schema, var_def_nodes, inputs, on_error) if not errors: return coerced except GraphQLError as e: errors.append(e) return errors def coerce_variable_values( schema: GraphQLSchema, var_def_nodes: FrozenList[VariableDefinitionNode], inputs: Dict[str, Any], on_error: Callable[[GraphQLError], None], ) -> Dict[str, Any]: coerced_values: Dict[str, Any] = {} for var_def_node in var_def_nodes: var_name = var_def_node.variable.name.value var_type = type_from_ast(schema, var_def_node.type) if not is_input_type(var_type): var_type_str = print_ast(var_def_node.type) on_error( GraphQLError( f"Variable '${var_name}' expected value of type '{var_type_str}'" " which cannot be used as an input type.", var_def_node.type, ) ) continue var_type = cast(GraphQLInputType, var_type) if var_name not in inputs: if var_def_node.default_value: coerced_values[var_name] = value_from_ast( var_def_node.default_value, var_type ) elif is_non_null_type(var_type): var_type_str = inspect(var_type) on_error( GraphQLError( f"Variable '${var_name}' of required type '{var_type_str}'" " was not provided.", var_def_node, ) ) continue value = inputs[var_name] if value is None and is_non_null_type(var_type): var_type_str = inspect(var_type) on_error( GraphQLError( f"Variable '${var_name}' of non-null type '{var_type_str}'" " must not be null.", var_def_node, ) ) continue def on_input_value_error( path: List[Union[str, int]], invalid_value: Any, error: GraphQLError ) -> None: invalid_str = inspect(invalid_value) prefix = f"Variable '${var_name}' got invalid value {invalid_str}" if path: prefix += f" at '{var_name}{print_path_list(path)}'" on_error( GraphQLError( prefix + "; " + error.message, var_def_node, original_error=error.original_error, ) ) coerced_values[var_name] = coerce_input_value( value, var_type, on_input_value_error ) return coerced_values
MIT License
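A minimal usage sketch for get_argument_values above, assuming graphql-core 3 is installed; the schema, query and printed result are illustrative only.

from graphql import build_schema, parse
from graphql.execution.values import get_argument_values

schema = build_schema('''
type Query {
  greet(name: String = "world", shout: Boolean!): String
}
''')
doc = parse('{ greet(shout: true) }')
field_node = doc.definitions[0].selection_set.selections[0]  # FieldNode for "greet"
field_def = schema.query_type.fields["greet"]                # GraphQLField definition
print(get_argument_values(field_def, field_node))
# expected: {'name': 'world', 'shout': True} -- the default fills in for the missing "name"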
rajammanabrolu/worldgeneration
evennia-engine/evennia/evennia/contrib/turnbattle/tb_items.py
TBItemsCharacter.apply_turn_conditions
python
def apply_turn_conditions(self): if "Regeneration" in self.db.conditions: to_heal = randint(REGEN_RATE[0], REGEN_RATE[1]) if self.db.hp + to_heal > self.db.max_hp: to_heal = self.db.max_hp - self.db.hp self.db.hp += to_heal self.location.msg_contents("%s regains %i HP from Regeneration." % (self, to_heal)) if "Poisoned" in self.db.conditions: to_hurt = randint(POISON_RATE[0], POISON_RATE[1]) apply_damage(self, to_hurt) self.location.msg_contents("%s takes %i damage from being Poisoned." % (self, to_hurt)) if self.db.hp <= 0: at_defeat(self) if is_in_combat(self) and "Haste" in self.db.conditions: self.db.combat_actionsleft += 1 self.msg("You gain an extra action this turn from Haste!") if is_in_combat(self) and "Paralyzed" in self.db.conditions: self.db.combat_actionsleft = 0 self.location.msg_contents("%s is Paralyzed, and can't act this turn!" % self) self.db.combat_turnhandler.turn_end_check(self)
Applies the effect of conditions that occur at the start of each turn in combat, or every 30 seconds out of combat.
https://github.com/rajammanabrolu/worldgeneration/blob/5e97df013399e1a401d0a7ec184c4b9eb3100edd/evennia-engine/evennia/evennia/contrib/turnbattle/tb_items.py#L569-L600
from random import randint from evennia import DefaultCharacter, Command, default_cmds, DefaultScript from evennia.commands.default.muxcommand import MuxCommand from evennia.commands.default.help import CmdHelp from evennia.prototypes.spawner import spawn from evennia import TICKER_HANDLER as tickerhandler TURN_TIMEOUT = 30 ACTIONS_PER_TURN = 1 NONCOMBAT_TURN_TIME = 30 REGEN_RATE = (4, 8) POISON_RATE = (4, 8) ACC_UP_MOD = 25 ACC_DOWN_MOD = -25 DMG_UP_MOD = 5 DMG_DOWN_MOD = -5 DEF_UP_MOD = 15 DEF_DOWN_MOD = -15 def roll_init(character): return randint(1, 1000) def get_attack(attacker, defender): attack_value = randint(1, 100) if "Accuracy Up" in attacker.db.conditions: attack_value += ACC_UP_MOD if "Accuracy Down" in attacker.db.conditions: attack_value += ACC_DOWN_MOD return attack_value def get_defense(attacker, defender): defense_value = 50 if "Defense Up" in defender.db.conditions: defense_value += DEF_UP_MOD if "Defense Down" in defender.db.conditions: defense_value += DEF_DOWN_MOD return defense_value def get_damage(attacker, defender): damage_value = randint(15, 25) if "Damage Up" in attacker.db.conditions: damage_value += DMG_UP_MOD if "Damage Down" in attacker.db.conditions: damage_value += DMG_DOWN_MOD return damage_value def apply_damage(defender, damage): defender.db.hp -= damage if defender.db.hp <= 0: defender.db.hp = 0 def at_defeat(defeated): defeated.location.msg_contents("%s has been defeated!" % defeated) def resolve_attack( attacker, defender, attack_value=None, defense_value=None, damage_value=None, inflict_condition=[], ): if not attack_value: attack_value = get_attack(attacker, defender) if not defense_value: defense_value = get_defense(attacker, defender) if attack_value < defense_value: attacker.location.msg_contents("%s's attack misses %s!" % (attacker, defender)) else: if not damage_value: damage_value = get_damage(attacker, defender) attacker.location.msg_contents( "%s hits %s for %i damage!" % (attacker, defender, damage_value) ) apply_damage(defender, damage_value) for condition in inflict_condition: add_condition(defender, attacker, condition[0], condition[1]) if defender.db.hp <= 0: at_defeat(defender) def combat_cleanup(character): for attr in character.attributes.all(): if attr.key[:7] == "combat_": character.attributes.remove(key=attr.key) def is_in_combat(character): return bool(character.db.combat_turnhandler) def is_turn(character): turnhandler = character.db.combat_turnhandler currentchar = turnhandler.db.fighters[turnhandler.db.turn] return bool(character == currentchar) def spend_action(character, actions, action_name=None): if action_name: character.db.combat_lastaction = action_name if actions == "all": character.db.combat_actionsleft = 0 else: character.db.combat_actionsleft -= actions if character.db.combat_actionsleft < 0: character.db.combat_actionsleft = 0 character.db.combat_turnhandler.turn_end_check(character) def spend_item_use(item, user): item.db.item_uses -= 1 if item.db.item_uses > 0: user.msg("%s has %i uses remaining." % (item.key.capitalize(), item.db.item_uses)) else: if not item.db.item_consumable: user.msg("%s has no uses remaining." % item.key.capitalize()) else: if item.db.item_consumable == True: user.msg("%s has been consumed." % item.key.capitalize()) item.delete() else: residue = spawn({"prototype": item.db.item_consumable})[0] residue.location = item.location user.msg("After using %s, you are left with %s." 
% (item, residue)) item.delete() def use_item(user, item, target): if item.db.item_selfonly and target == None: target = user if item.db.item_selfonly and user != target: user.msg("%s can only be used on yourself." % item) return kwargs = {} if item.db.item_kwargs: kwargs = item.db.item_kwargs try: item_func = ITEMFUNCS[item.db.item_func] except KeyError: user.msg("ERROR: %s not defined in ITEMFUNCS" % item.db.item_func) return if item_func(item, user, target, **kwargs) == False: return if item.db.item_uses: spend_item_use(item, user) if is_in_combat(user): spend_action(user, 1, action_name="item") def condition_tickdown(character, turnchar): for key in character.db.conditions: condition_duration = character.db.conditions[key][0] condition_turnchar = character.db.conditions[key][1] if not condition_duration is True: if condition_turnchar == turnchar: character.db.conditions[key][0] -= 1 if character.db.conditions[key][0] <= 0: character.location.msg_contents( "%s no longer has the '%s' condition." % (str(character), str(key)) ) del character.db.conditions[key] def add_condition(character, turnchar, condition, duration): character.db.conditions.update({condition: [duration, turnchar]}) character.location.msg_contents("%s gains the '%s' condition." % (character, condition)) class TBItemsCharacter(DefaultCharacter): def at_object_creation(self): self.db.max_hp = 100 self.db.hp = self.db.max_hp self.db.conditions = {} tickerhandler.add(NONCOMBAT_TURN_TIME, self.at_update, idstring="update") def at_before_move(self, destination): if is_in_combat(self): self.msg("You can't exit a room while in combat!") return False if self.db.HP <= 0: self.msg("You can't move, you've been defeated!") return False return True def at_turn_start(self): self.msg("|wIt's your turn! You have %i HP remaining.|n" % self.db.hp) self.apply_turn_conditions()
MIT License
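A small self-contained sketch of the clamped regeneration roll used in apply_turn_conditions above; roll_regen is a hypothetical helper written for illustration and is not part of the contrib.

from random import randint

REGEN_RATE = (4, 8)  # same healing range as the module constant

def roll_regen(hp, max_hp, rate=REGEN_RATE):
    # Roll a heal amount, then clamp it so hp never exceeds max_hp.
    to_heal = randint(rate[0], rate[1])
    if hp + to_heal > max_hp:
        to_heal = max_hp - hp
    return to_heal

print(roll_regen(hp=97, max_hp=100))  # never more than 3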
shiyuechengineer/meraki-dashboard
meraki_v0/aio/api/cameras.py
AsyncCameras.updateDeviceCameraQualityAndRetentionSettings
python
async def updateDeviceCameraQualityAndRetentionSettings(self, serial: str, **kwargs): kwargs.update(locals()) if 'quality' in kwargs: options = ['Standard', 'High', 'Enhanced'] assert kwargs['quality'] in options, f'''"quality" cannot be "{kwargs['quality']}", & must be set to one of: {options}''' if 'resolution' in kwargs: options = ['1280x720', '1920x1080', '1080x1080', '2058x2058'] assert kwargs['resolution'] in options, f'''"resolution" cannot be "{kwargs['resolution']}", & must be set to one of: {options}''' if 'motionDetectorVersion' in kwargs: options = [1, 2] assert kwargs['motionDetectorVersion'] in options, f'''"motionDetectorVersion" cannot be "{kwargs['motionDetectorVersion']}", & must be set to one of: {options}''' metadata = { 'tags': ['Cameras'], 'operation': 'updateDeviceCameraQualityAndRetentionSettings', } resource = f'/devices/{serial}/camera/qualityAndRetentionSettings' body_params = ['profileId', 'motionBasedRetentionEnabled', 'audioRecordingEnabled', 'restrictedBandwidthModeEnabled', 'quality', 'resolution', 'motionDetectorVersion'] payload = {k.strip(): v for (k, v) in kwargs.items() if k.strip() in body_params} return await self._session.put(metadata, resource, payload)
**Update quality and retention settings for the given camera** https://developer.cisco.com/meraki/api/#!update-device-camera-quality-and-retention-settings - serial (string) - profileId (string): The ID of a quality and retention profile to assign to the camera. The profile's settings will override all of the per-camera quality and retention settings. If the value of this parameter is null, any existing profile will be unassigned from the camera. - motionBasedRetentionEnabled (boolean): Boolean indicating if motion-based retention is enabled(true) or disabled(false) on the camera - audioRecordingEnabled (boolean): Boolean indicating if audio recording is enabled(true) or disabled(false) on the camera - restrictedBandwidthModeEnabled (boolean): Boolean indicating if restricted bandwidth is enabled(true) or disabled(false) on the camera - quality (string): Quality of the camera. Can be one of 'Standard', 'High' or 'Enhanced'. Not all qualities are supported by every camera model. - resolution (string): Resolution of the camera. Can be one of '1280x720', '1920x1080', '1080x1080' or '2058x2058'. Not all resolutions are supported by every camera model. - motionDetectorVersion (integer): The version of the motion detector that will be used by the camera. Only applies to Gen 2 cameras. Defaults to v2.
https://github.com/shiyuechengineer/meraki-dashboard/blob/f00442acf762a94e7e446f80a2485d120e7090d5/meraki_v0/aio/api/cameras.py#L22-L58
class AsyncCameras: def __init__(self, session): super().__init__() self._session = session async def getDeviceCameraQualityAndRetentionSettings(self, serial: str): metadata = { 'tags': ['Cameras'], 'operation': 'getDeviceCameraQualityAndRetentionSettings', } resource = f'/devices/{serial}/camera/qualityAndRetentionSettings' return await self._session.get(metadata, resource)
MIT License
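A hedged illustration of the payload construction used in the method above: keyword arguments are filtered against the body_params whitelist before being sent; the serial value here is a made-up placeholder, not a real device.

kwargs = {'serial': 'Q2XX-XXXX-XXXX', 'quality': 'High', 'resolution': '1920x1080'}
body_params = ['profileId', 'motionBasedRetentionEnabled', 'audioRecordingEnabled',
               'restrictedBandwidthModeEnabled', 'quality', 'resolution',
               'motionDetectorVersion']
payload = {k.strip(): v for (k, v) in kwargs.items() if k.strip() in body_params}
print(payload)  # {'quality': 'High', 'resolution': '1920x1080'} -- 'serial' is filtered out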
nuagenetworks/vspk-python
vspk/v6/nugnmisession.py
NUGNMISession.subscription_state
python
def subscription_state(self, value): self._subscription_state = value
Set subscription_state value. Notes: Status of gnmi subscriptions to device from Netconf Manager This attribute is named `subscriptionState` in VSD API.
https://github.com/nuagenetworks/vspk-python/blob/375cce10ae144ad6017104e57fcd3630898cc2a6/vspk/v6/nugnmisession.py#L529-L539
from .fetchers import NUMetadatasFetcher from .fetchers import NUGlobalMetadatasFetcher from bambou import NURESTObject class NUGNMISession(NURESTObject): __rest_name__ = "gnmisession" __resource_name__ = "gnmisessions" CONST_SUBSCRIPTION_STATE_IN_PROGRESS = "IN_PROGRESS" CONST_STATUS_CONNECTED = "CONNECTED" CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL" CONST_GATEWAY_VENDOR_NOKIA = "NOKIA" CONST_SUBSCRIPTION_STATE_INIT = "INIT" CONST_GATEWAY_VENDOR_CISCO = "CISCO" CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE" CONST_SUBSCRIPTION_STATE_COMPLETED = "COMPLETED" CONST_SUBSCRIPTION_STATE_FAILED = "FAILED" CONST_STATUS_DISCONNECTED = "DISCONNECTED" def __init__(self, **kwargs): super(NUGNMISession, self).__init__() self._last_updated_by = None self._last_updated_date = None self._gateway_model = None self._gateway_vendor = None self._gateway_version = None self._embedded_metadata = None self._entity_scope = None self._creation_date = None self._associated_gateway_id = None self._associated_gateway_name = None self._status = None self._subscription_error = None self._subscription_failure_reason = None self._subscription_failure_retry_count = None self._subscription_state = None self._owner = None self._external_id = None self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="gateway_model", remote_name="gatewayModel", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="gateway_vendor", remote_name="gatewayVendor", attribute_type=str, is_required=False, is_unique=False, choices=[u'CISCO', u'NOKIA']) self.expose_attribute(local_name="gateway_version", remote_name="gatewayVersion", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="associated_gateway_id", remote_name="associatedGatewayID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="associated_gateway_name", remote_name="associatedGatewayName", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="status", remote_name="status", attribute_type=str, is_required=False, is_unique=False, choices=[u'CONNECTED', u'DISCONNECTED']) self.expose_attribute(local_name="subscription_error", remote_name="subscriptionError", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="subscription_failure_reason", remote_name="subscriptionFailureReason", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="subscription_failure_retry_count", remote_name="subscriptionFailureRetryCount", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="subscription_state", remote_name="subscriptionState", attribute_type=str, is_required=False, is_unique=False, choices=[u'COMPLETED', u'FAILED', u'IN_PROGRESS', u'INIT']) 
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True) self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self._compute_args(**kwargs) @property def last_updated_by(self): return self._last_updated_by @last_updated_by.setter def last_updated_by(self, value): self._last_updated_by = value @property def last_updated_date(self): return self._last_updated_date @last_updated_date.setter def last_updated_date(self, value): self._last_updated_date = value @property def gateway_model(self): return self._gateway_model @gateway_model.setter def gateway_model(self, value): self._gateway_model = value @property def gateway_vendor(self): return self._gateway_vendor @gateway_vendor.setter def gateway_vendor(self, value): self._gateway_vendor = value @property def gateway_version(self): return self._gateway_version @gateway_version.setter def gateway_version(self, value): self._gateway_version = value @property def embedded_metadata(self): return self._embedded_metadata @embedded_metadata.setter def embedded_metadata(self, value): self._embedded_metadata = value @property def entity_scope(self): return self._entity_scope @entity_scope.setter def entity_scope(self, value): self._entity_scope = value @property def creation_date(self): return self._creation_date @creation_date.setter def creation_date(self, value): self._creation_date = value @property def associated_gateway_id(self): return self._associated_gateway_id @associated_gateway_id.setter def associated_gateway_id(self, value): self._associated_gateway_id = value @property def associated_gateway_name(self): return self._associated_gateway_name @associated_gateway_name.setter def associated_gateway_name(self, value): self._associated_gateway_name = value @property def status(self): return self._status @status.setter def status(self, value): self._status = value @property def subscription_error(self): return self._subscription_error @subscription_error.setter def subscription_error(self, value): self._subscription_error = value @property def subscription_failure_reason(self): return self._subscription_failure_reason @subscription_failure_reason.setter def subscription_failure_reason(self, value): self._subscription_failure_reason = value @property def subscription_failure_retry_count(self): return self._subscription_failure_retry_count @subscription_failure_retry_count.setter def subscription_failure_retry_count(self, value): self._subscription_failure_retry_count = value @property def subscription_state(self): return self._subscription_state @subscription_state.setter
BSD 3-Clause New or Revised License
shadowmint/nwidget
lib/cocos2d-0.5.5/cocos/layer/scrolling.py
ScrollingManager.add
python
def add(self, child, z=0, name=None): super(ScrollingManager, self).add(child, z=z, name=name) self.set_focus(self.fx, self.fy, force=True)
Add the child and then update the manager's focus / viewport.
https://github.com/shadowmint/nwidget/blob/c0561af51fdf3d207242fafc8e72c6cd333860bc/lib/cocos2d-0.5.5/cocos/layer/scrolling.py#L233-L239
__docformat__ = 'restructuredtext' from cocos.director import director from cocos.layer.base_layers import Layer import pyglet from pyglet.gl import * class ScrollableLayer(Layer): view_x, view_y = 0, 0 view_w, view_h = 0, 0 origin_x = origin_y = origin_z = 0 def __init__(self, parallax=1): super(ScrollableLayer,self).__init__() self.parallax = parallax self.transform_anchor_x = 0 self.transform_anchor_y = 0 self.batch = pyglet.graphics.Batch() def on_enter(self): director.push_handlers(self.on_cocos_resize) super(ScrollableLayer, self).on_enter() def on_exit(self): super(ScrollableLayer, self).on_exit() director.pop_handlers() def set_view(self, x, y, w, h, viewport_ox=0, viewport_oy=0): x *= self.parallax y *= self.parallax self.view_x, self.view_y = x, y self.view_w, self.view_h = w, h x -= self.origin_x y -= self.origin_y x -= viewport_ox y -= viewport_oy self.position = (-x, -y) def draw(self): super(ScrollableLayer, self).draw() glPushMatrix() self.transform() self.batch.draw() glPopMatrix() def set_dirty(self): pass def on_cocos_resize(self, usable_width, usable_height): self.set_dirty() class ScrollingManager(Layer): def __init__(self, viewport=None, do_not_scale=None): if do_not_scale is None: do_not_scale = director.do_not_scale_window self.autoscale = not do_not_scale and not director.do_not_scale_window self.viewport = viewport self.view_x, self.view_y = 0, 0 self.view_w, self.view_h = 1, 1 self.childs_ox = 0 self.childs_oy = 0 self.fx = self.fy = 0 super(ScrollingManager, self).__init__() self.transform_anchor_x = 0 self.transform_anchor_y = 0 def on_enter(self): super(ScrollingManager, self).on_enter() director.push_handlers(self.on_cocos_resize) self.update_view_size() self.refresh_focus() def on_exit(self): director.pop_handlers() super(ScrollingManager, self).on_exit() def update_view_size(self): if self.viewport is not None: self.view_w, self.view_h = self.viewport.width, self.viewport.height self.view_x, self.view_y = getattr(self.viewport, 'position', (0,0)) if director.do_not_scale_window: self._scissor_flat = (self.view_x, self.view_y, self.view_w, self.view_h) else: w, h = director.get_window_size() sx = director._usable_width/float(w) sy = director._usable_height/float(h) self._scissor_flat = (int(self.view_x * sx), int(self.view_y * sy), int(self.view_w * sx), int(self.view_h * sy)) elif self.autoscale: self.view_w, self.view_h = director.get_window_size() else: self.view_w = director._usable_width self.view_h = director._usable_height def on_cocos_resize(self, usable_width, usable_height): self.update_view_size() self.refresh_focus() def refresh_focus(self): if self.children: self._old_focus = None self.set_focus(self.fx, self.fy) _scale = 1.0 def set_scale(self, scale): self._scale = 1.0*scale self.refresh_focus() scale = property(lambda s: s._scale, set_scale)
Apache License 2.0
terrance/immp
immp/core/util.py
Watchable.unwrap
python
def unwrap(cls, obj): if isinstance(obj, MutableMapping): return {key: cls.unwrap(value) for key, value in obj.items()} elif isinstance(obj, MutableSequence): return [cls.unwrap(item) for item in obj] else: return obj
Recursively replace :class:`Watchable` subclasses with their native equivalents. Args: obj: Container type, or any value. Returns: Unwrapped container, or the original value.
https://github.com/terrance/immp/blob/e710648a3d3b47107fba7659c9b4b20823a23cb7/immp/core/util.py#L142-L158
from asyncio import Condition from collections.abc import MutableMapping, MutableSequence from enum import Enum from functools import reduce, wraps from importlib import import_module import logging import re import time from warnings import warn try: from aiohttp import ClientSession except ImportError: ClientSession = None from .error import ConfigError from .schema import Schema log = logging.getLogger(__name__) def resolve_import(path): module, class_ = path.rsplit(".", 1) return getattr(import_module(module), class_) def pretty_str(cls): def nest_str(obj): if isinstance(obj, dict): return "{{...{}...}}".format(len(obj)) if obj else "{}" elif isinstance(obj, list): return "[...{}...]".format(len(obj)) if obj else "[]" else: return str(obj) def __str__(self): if hasattr(self, "__dict__"): data = self.__dict__ elif hasattr(self, "__slots__"): data = {attr: getattr(self, attr) for attr in self.__slots__} else: raise TypeError("No __dict__ or __slots__ to collect attributes") args = "\n".join("{}: {}".format(k, nest_str(v).replace("\n", "\n" + " " * (len(k) + 2))) for k, v in data.items() if not k.startswith("_")) return "[{}]\n{}".format(self.__class__.__name__, args) cls.__str__ = __str__ return cls def _no_escape(char): return r"(?<!\\)(?:\\\\)*{}".format(char) def escape(raw, *chars): args = (raw, r"\\", *chars) return reduce(lambda current, char: current.replace(char, r"\{}".format(char)), args) def unescape(raw, *chars): args = (raw, *chars, r"\\") return reduce(lambda current, char: re.sub(_no_escape(r"\\{}".format(char)), char, current), args) class Watchable: _callback = None def __init__(self, watch): self._callback = watch def __call__(self): return self._callback() def _wrap_inline(self, obj): if isinstance(obj, MutableMapping): obj.update((key, self._wrap(value)) for key, value in obj.items()) elif isinstance(obj, MutableSequence): obj[:] = (self._wrap(value) for value in obj) else: raise TypeError def _wrap(self, obj): if isinstance(obj, Watchable): return obj elif isinstance(obj, MutableMapping): return WatchedDict(self, {key: self._wrap(value) for key, value in obj.items()}) elif isinstance(obj, MutableSequence): return WatchedList(self, [self._wrap(item) for item in obj]) else: return obj @classmethod
BSD 3-Clause New or Revised License
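A short usage sketch for Watchable.unwrap, assuming the immp package is installed; it rebuilds nested mutable containers as plain dicts and lists.

from immp.core.util import Watchable

nested = {"rooms": [{"name": "general", "users": ["alice", "bob"]}]}
plain = Watchable.unwrap(nested)
print(plain)             # same structure, rebuilt with native dict/list containers
print(plain is nested)   # False -- containers are copied, leaf values are reused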
rustychris/stompy
stompy/model/delft/io.py
parse_time0
python
def parse_time0(time0): try: time0=time0.decode() except AttributeError: pass m=re.match(r'T0:\s+(\S+)\s+\(scu=\s*(\d+)(\w+)\)',time0) dt = m.group(1) dt=dt.replace('-','T').replace('/','-') + "Z" origin=np.datetime64(dt) unit=np.timedelta64(int(m.group(2)),m.group(3)) return (origin, unit)
Return a np.datetime64 for the time stamp, and the time unit as a np.timedelta64 (almost always equal to 1 second). Input format is: b'T0: 2012/08/07-00:00:00 (scu= 1s)'
https://github.com/rustychris/stompy/blob/ef04d8b3ee9c9af827c87c72c7b50d365e5e567d/stompy/model/delft/io.py#L258-L275
import os import glob import subprocess import copy import datetime import io import numpy as np import pandas as pd import re import xarray as xr import six from collections import defaultdict from shapely import geometry import logging log=logging.getLogger('delft.io') from ... import utils def parse_his_file(fn): fp=open(fn,'rb') sim_descs=np.fromfile(fp,'S40',4) time0=sim_descs[3] n_fields,n_regions=np.fromfile(fp,'i4',2) fdtype=np.dtype( [ ('sub','S10'), ('proc','S10') ] ) fields=np.fromfile( fp, fdtype, n_fields) regions=np.fromfile(fp, [('num','i4'),('name','S20')], n_regions) frame_dtype=np.dtype( [('tsec','i4'), ('data','f4',(n_regions,n_fields))] ) frames=np.fromfile(fp,frame_dtype) return sim_descs,time0,regions,fields,frames def bal_his_file_dataframe(fn): sim_descs,time0,regions,fields,frames = parse_his_file(fn) n_regions=len(regions) n_fields=len(fields) cols=[] tuples=[] for ri,region in enumerate(regions): for fi,field in enumerate(fields): tuples.append( (region['name'].strip(), field['sub'].strip(), field['proc'].strip()) ) col_index=pd.MultiIndex.from_tuples(tuples,names=('region','sub','proc')) df=pd.DataFrame(data=frames['data'].reshape( (-1,n_regions*n_fields) ), index=frames['tsec'], columns=col_index) return df def his_file_xarray(fn,region_exclude=None,region_include=None): sim_descs,time_meta,regions,fields,frames = parse_his_file(fn) def decstrip(s): try: s=s.decode() except AttributeError: pass return s.strip() ds=xr.Dataset() ds['descs']=( ('n_desc',), [decstrip(s) for s in sim_descs]) time0,time_unit = parse_time0(time_meta) times=time0 + time_unit*frames['tsec'] ds['time']=( ('time',), times) ds['tsec']=( ('time',), frames['tsec']) region_names=[decstrip(s) for s in regions['name']] subs=[decstrip(s) for s in np.unique(fields['sub'])] procs=[decstrip(s) for s in np.unique(fields['proc'])] if region_include: region_mask=np.array( [bool(re.match(region_include,region)) for region in region_names] ) else: region_mask=np.ones(len(region_names),np.bool8) if region_exclude: skip=[bool(re.match(region_exclude,region)) for region in region_names] region_mask &= ~np.array(skip) sub_proc=[] for s,p in fields: if decstrip(p): sub_proc.append("%s,%s"%(decstrip(s),decstrip(p))) else: sub_proc.append(decstrip(s)) region_idxs=np.nonzero(region_mask)[0] ds['region']=( ('region',), [region_names[i] for i in region_idxs] ) ds['sub'] =( ('sub',), subs) ds['proc'] =( ('proc',), procs) ds['field']=( ('field',), sub_proc) ds['bal']=( ('time','region','field'), frames['data'][:,region_mask,:] ) return ds bal_his_file_xarray=his_file_xarray def mon_his_file_dataframe(fn): df=bal_his_file_dataframe(fn) df.columns=df.columns.droplevel(2) return df def inp_tok(fp,comment=';'): for line in fp: if comment in line: line=line[ : line.index(comment)] matches=re.findall(r'\s*((\'[^\']+\')|([-/:a-zA-Z_#+0-9\.]+))', line) for m in matches: yield m[0] def inp_tok_include(fp,fn,**kw): tokr=inp_tok(fp,**kw) while 1: tok=next(tokr) if tok.upper()!='INCLUDE': yield tok else: inc_fn=next(tokr) if inc_fn[0] in ["'",'"']: inc_fn=inc_fn.strip(inc_fn[0]) inc_path=os.path.join( os.path.dirname(fn), inc_fn ) with open(inc_path,'rt') as inc_fp: inc_tokr=inp_tok_include(inc_fp,inc_path,**kw) for tok in inc_tokr: yield tok def parse_inp_monitor_locations(inp_file): with open(inp_file,'rt') as fp: tokr=inp_tok(fp) while next(tokr)!='#1': continue for _ in range(4): next(tokr) for t in tokr: if re.match(r'[-_a-zA-Z]+',t): continue break for _ in range(3): next(tokr) areas={} if int(next(tokr)) == 1: nmon = 
int(next(tokr)) for imon in range(nmon): name, segcount=next(tokr),int(next(tokr)) segs=[int(next(tokr)) for iseg in range(segcount)] areas[name.strip("'")]=segs transects={} if int(next(tokr)) == 1: ntrans=int(next(tokr)) for itrans in range(ntrans): name,style,ecount = next(tokr),next(tokr),int(next(tokr)) exchs=[int(next(tokr)) for _ in range(ecount)] transects[name.strip("'")]=exchs return areas,transects def parse_inp_transects(inp_file): areas,transects=parse_inp_monitor_locations(inp_file) return transects
MIT License
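A usage sketch for parse_time0, assuming stompy and its dependencies are importable; the input is the format quoted in the docstring.

from stompy.model.delft.io import parse_time0

origin, unit = parse_time0(b'T0: 2012/08/07-00:00:00 (scu= 1s)')
print(origin)  # numpy.datetime64 for 2012-08-07T00:00:00
print(unit)    # numpy.timedelta64(1, 's')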
longnow/longview
lvutils.py
NowCellPNG.__init__
python
def __init__(self, filename, onCell, nowXCoord): self.filename = filename self.onCell = onCell self.nowXCoord = nowXCoord self.width = 25 self.height = 20 self.backgroundColor = (0xff, 0xff, 0xff) self.stippleColor = (0x99, 0x99, 0x99) self.borderColor = (0x00, 0x33, 0x66) self.nowBoxColor = (0x66, 0x66, 0x66) self.nowBoxWidth = 4 return
Initialize a NowCellPNG object filename -- filename to write cell to onCell -- is this the "on" version of the cell? nowXCoord -- the X coordinate of the now
https://github.com/longnow/longview/blob/9345faacec64f427eab43790abc165af6a572e3d/lvutils.py#L52-L80
from __future__ import absolute_import, division, with_statement import os import re import shutil from filecmp import dircmp import gd __author__ = "Dan Mosedale, James Home, and Ben Keating" __maintainer__ = "Ben Keating" __email__ = "[email protected]" __version__ = "1.1" __license__ = "BSD-style" __status__ = "Beta" class NowCellPNG:
BSD 2-Clause Simplified License
contentful/contentful-management.py
contentful_management/client.py
Client._contentful_user_agent
python
def _contentful_user_agent(self): header = {} from . import __version__ header['sdk'] = { 'name': 'contentful-management.py', 'version': __version__ } header['app'] = { 'name': self.application_name, 'version': self.application_version } header['integration'] = { 'name': self.integration_name, 'version': self.integration_version } header['platform'] = { 'name': 'python', 'version': platform.python_version() } os_name = platform.system() if os_name == 'Darwin': os_name = 'macOS' elif not os_name or os_name == 'Java': os_name = None elif os_name and os_name not in ['macOS', 'Windows']: os_name = 'Linux' header['os'] = { 'name': os_name, 'version': platform.release() } def format_header(key, values): header = "{0} {1}".format(key, values['name']) if values['version'] is not None: header = "{0}/{1}".format(header, values['version']) return "{0};".format(header) result = [] for k, values in header.items(): if not values['name']: continue result.append(format_header(k, values)) return ' '.join(result)
Sets the X-Contentful-User-Agent header.
https://github.com/contentful/contentful-management.py/blob/658341bc5af529b00fa317362c0b6cca221d76e4/contentful_management/client.py#L571-L618
import requests import platform from re import sub from .resource_builder import ResourceBuilder from .errors import get_error, RateLimitExceededError from .utils import ConfigurationException, retry_request from .users_proxy import UsersProxy from .roles_proxy import RolesProxy from .assets_proxy import AssetsProxy from .spaces_proxy import SpacesProxy from .entries_proxy import EntriesProxy from .locales_proxy import LocalesProxy from .uploads_proxy import UploadsProxy from .api_keys_proxy import ApiKeysProxy from .webhooks_proxy import WebhooksProxy from .snapshots_proxy import SnapshotsProxy from .environments_proxy import EnvironmentsProxy from .webhooks_call_proxy import WebhooksCallProxy from .ui_extensions_proxy import UIExtensionsProxy from .content_types_proxy import ContentTypesProxy from .organizations_proxy import OrganizationsProxy from .webhooks_health_proxy import WebhooksHealthProxy from .preview_api_keys_proxy import PreviewApiKeysProxy from .space_memberships_proxy import SpaceMembershipsProxy from .editor_interfaces_proxy import EditorInterfacesProxy from .space_periodic_usages_proxy import SpacePeriodicUsagesProxy from .personal_access_tokens_proxy import PersonalAccessTokensProxy from .organization_periodic_usages_proxy import OrganizationPeriodicUsagesProxy try: import multijson as json except ImportError: import json class Client(object): def __init__( self, access_token, api_url='api.contentful.com', uploads_api_url='upload.contentful.com', api_version=1, default_locale='en-US', https=True, raw_mode=False, gzip_encoded=True, raise_errors=True, proxy_host=None, proxy_port=None, proxy_username=None, proxy_password=None, max_rate_limit_retries=1, max_rate_limit_wait=60, application_name=None, application_version=None, integration_name=None, integration_version=None, additional_headers=None): self.access_token = access_token self.api_url = api_url self.uploads_api_url = uploads_api_url self.api_version = api_version self.default_locale = default_locale self.https = https self.raw_mode = raw_mode self.gzip_encoded = gzip_encoded self.raise_errors = raise_errors self.proxy_host = proxy_host self.proxy_port = proxy_port self.proxy_username = proxy_username self.proxy_password = proxy_password self.max_rate_limit_retries = max_rate_limit_retries self.max_rate_limit_wait = max_rate_limit_wait self.application_name = application_name self.application_version = application_version self.integration_name = integration_name self.integration_version = integration_version self.additional_headers = additional_headers or {} self._validate_configuration() def spaces(self): return SpacesProxy(self) def memberships(self, space_id): return SpaceMembershipsProxy(self, space_id) def organizations(self): return OrganizationsProxy(self) def organization_periodic_usages(self, organization_id): return OrganizationPeriodicUsagesProxy(self, organization_id) def space_periodic_usages(self, organization_id): return SpacePeriodicUsagesProxy(self, organization_id) def users(self): return UsersProxy(self) def content_types(self, space_id, environment_id): return ContentTypesProxy(self, space_id, environment_id) def entries(self, space_id, environment_id): return EntriesProxy(self, space_id, environment_id) def assets(self, space_id, environment_id): return AssetsProxy(self, space_id, environment_id) def locales(self, space_id, environment_id): return LocalesProxy(self, space_id, environment_id) def webhooks(self, space_id): return WebhooksProxy(self, space_id) def webhook_calls(self, space_id, 
webhook_id): return WebhooksCallProxy(self, space_id, webhook_id) def webhook_health(self, space_id, webhook_id): return WebhooksHealthProxy(self, space_id, webhook_id) def api_keys(self, space_id): return ApiKeysProxy(self, space_id) def preview_api_keys(self, space_id): return PreviewApiKeysProxy(self, space_id) def personal_access_tokens(self): return PersonalAccessTokensProxy(self) def roles(self, space_id): return RolesProxy(self, space_id) def ui_extensions(self, space_id, environment_id): return UIExtensionsProxy(self, space_id, environment_id) def editor_interfaces(self, space_id, environment_id, content_type_id): return EditorInterfacesProxy(self, space_id, environment_id, content_type_id) def snapshots(self, space_id, environment_id, resource_id, resource_kind='entries'): return SnapshotsProxy(self, space_id, environment_id, resource_id, resource_kind) def entry_snapshots(self, space_id, environment_id, entry_id): return SnapshotsProxy(self, space_id, environment_id, entry_id, 'entries') def content_type_snapshots(self, space_id, environment_id, content_type_id): return SnapshotsProxy(self, space_id, environment_id, content_type_id, 'content_types') def uploads(self, space_id): return UploadsProxy(self, space_id) def environments(self, space_id): return EnvironmentsProxy(self, space_id) def _validate_configuration(self): if not self.access_token: raise ConfigurationException( 'You will need to initialize a client with an Access Token' ) if not self.api_url: raise ConfigurationException( 'The client configuration needs to contain an API URL' ) if not self.default_locale: raise ConfigurationException( 'The client configuration needs to contain a Default Locale' ) if not self.api_version or self.api_version < 1: raise ConfigurationException( 'The API Version must be a positive number' )
MIT License
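The user-agent string can be inspected locally without any network call, since the method only gathers platform metadata; 'placeholder-token' below is a made-up value, not a real access token.

from contentful_management import Client

client = Client('placeholder-token', application_name='my-app', application_version='1.0')
print(client._contentful_user_agent())
# e.g. "sdk contentful-management.py/X.Y.Z; app my-app/1.0; platform python/3.x.y; os Linux/...;"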
tlc-pack/tenset
python/tvm/topi/rocm/dense.py
dense_rocblas
python
def dense_rocblas(cfg, data, weight, bias=None, out_dtype=None): if out_dtype is None: out_dtype = data.dtype assert out_dtype == data.dtype, "Mixed precision not supported." matmul = rocblas.matmul(data, weight, False, True) batch, in_dim = data.shape out_dim, _ = weight.shape cfg.add_flop(batch * in_dim * out_dim * 2) if bias is not None: matmul = te.compute( (batch, out_dim), lambda i, j: matmul[i, j] + bias[j], tag=tag.BROADCAST ) return matmul
Dense operator for rocm backend with rocBLAS. Parameters ---------- data : tvm.te.Tensor 2-D with shape [batch, in_dim] weight : tvm.te.Tensor 2-D with shape [out_dim, in_dim] bias : tvm.te.Tensor, optional 1-D with shape [out_dim] out_dtype : str The output type. This is used for mixed precision. Returns ------- output : tvm.te.Tensor 2-D with shape [batch, out_dim]
https://github.com/tlc-pack/tenset/blob/3f7ed0291df47331d43f43a064fffacdc2914b47/python/tvm/topi/rocm/dense.py#L104-L137
from tvm import te from tvm import autotvm from tvm.contrib import rocblas from .. import generic, nn from .. import tag from ..utils import traverse_inline @autotvm.register_topi_compute("dense.rocm") def dense(cfg, data, weight, bias=None, out_dtype=None): assert len(data.shape) == 2 and len(weight.shape) == 2, "only support 2-dim dense" if bias is not None: assert len(bias.shape) == 1 if out_dtype is None: out_dtype = data.dtype return nn.dense(data, weight, bias, out_dtype) @autotvm.register_topi_schedule("dense.rocm") def schedule_dense(cfg, outs): outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs s = te.create_schedule([x.op for x in outs]) def _callback(op): if op.tag == "dense": Dense = op.output(0) num_thread = 64 k = Dense.op.reduce_axis[0] ko, kf = s[Dense].split(k, factor=num_thread) DenseF = s.rfactor(Dense, kf) if Dense.op in s.outputs: Out = Dense else: Out = outs[0].op.output(0) s[Dense].compute_at(s[Out], s[Out].op.axis[1]) s[Out].bind(s[Out].op.axis[0], te.thread_axis("blockIdx.y")) s[Out].bind(s[Out].op.axis[1], te.thread_axis("blockIdx.x")) tx = s[Dense].op.reduce_axis[0] thread_x = te.thread_axis("threadIdx.x") s[Dense].bind(tx, thread_x) s[DenseF].compute_at(s[Dense], tx) s[Dense].set_store_predicate(thread_x.var.equal(0)) s[Out].set_store_predicate(thread_x.var.equal(0)) traverse_inline(s, outs[0].op, _callback) return s @autotvm.register_topi_compute("dense_rocblas.rocm")
Apache License 2.0
ceph/ceph-deploy
ceph_deploy/hosts/centos/__init__.py
choose_init
python
def choose_init(module): if module.normalized_release.int_major < 7: return 'sysvinit' if is_systemd(module.conn): return 'systemd' if not module.conn.remote_module.path_exists("/usr/lib/systemd/system/ceph.target"): return 'sysvinit' return 'systemd'
Select an init system. Returns the name of an init system (upstart, sysvinit ...).
https://github.com/ceph/ceph-deploy/blob/a16316fc4dd364135b11226df42d9df65c0c60a2/ceph_deploy/hosts/centos/__init__.py#L15-L31
from . import mon from .install import install, mirror_install, repo_install, repository_url_part, rpm_dist from .uninstall import uninstall from ceph_deploy.util import pkg_managers from ceph_deploy.util.system import is_systemd distro = None release = None codename = None
MIT License
cupy/cupy
cupy/_logic/comparison.py
isclose
python
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): a = cupy.asanyarray(a) b = cupy.asanyarray(b) if (a.dtype in [numpy.complex64, numpy.complex128]) or (b.dtype in [numpy.complex64, numpy.complex128]): return _is_close_complex(a, b, rtol, atol, equal_nan) else: return _is_close(a, b, rtol, atol, equal_nan)
Returns a boolean array where two arrays are equal within a tolerance. Two values in ``a`` and ``b`` are considered equal when the following equation is satisfied. .. math:: |a - b| \\le \\mathrm{atol} + \\mathrm{rtol} |b| Args: a (cupy.ndarray): Input array to compare. b (cupy.ndarray): Input array to compare. rtol (float): The relative tolerance. atol (float): The absolute tolerance. equal_nan (bool): If ``True``, NaN's in ``a`` will be considered equal to NaN's in ``b``. Returns: cupy.ndarray: A boolean array storing where ``a`` and ``b`` are equal. .. seealso:: :func:`numpy.isclose`
https://github.com/cupy/cupy/blob/a466b03ef0afd7c1ce1615e3f48da64ae38c1320/cupy/_logic/comparison.py#L100-L130
import numpy import cupy from cupy import _core from cupy._logic import content _is_close = _core.create_ufunc( 'cupy_is_close', ('eeee?->?', 'ffff?->?', 'dddd?->?'), ''' bool equal_nan = in4; if (isfinite(in0) && isfinite(in1)) { out0 = fabs(in0 - in1) <= in3 + in2 * fabs(in1); } else if (equal_nan) { out0 = (in0 == in1) || (isnan(in0) && isnan(in1)); } else { out0 = (in0 == in1); } ''' ) _is_close_complex = _core.create_ufunc( 'cupy_is_close_complex', ('FFff?->?', 'DDdd?->?'), ''' bool equal_nan = in4; if (isfinite(in0) && isfinite(in1)) { out0 = abs(in0 - in1) <= in3 + in2 * abs(in1); } else if (equal_nan) { out0 = (in0 == in1) || (isnan(in0) && isnan(in1)); } else { out0 = (in0 == in1); } ''' ) def array_equal(a1, a2, equal_nan=False): if a1.shape != a2.shape: return cupy.array(False) if not equal_nan: return (a1 == a2).all() a1nan, a2nan = content.isnan(a1), content.isnan(a2) if not (a1nan == a2nan).all(): return cupy.array(False) return (a1[~a1nan] == a2[~a1nan]).all() def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): return isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan).all()
MIT License
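A short usage sketch for isclose, assuming cupy and a CUDA-capable GPU are available.

import cupy

a = cupy.array([1.0, 1.0001, cupy.nan])
b = cupy.array([1.0, 1.0, cupy.nan])
print(cupy.isclose(a, b))                  # [ True False False]
print(cupy.isclose(a, b, equal_nan=True))  # [ True False  True]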
branislav1991/pytorchprojectframework
utils/visualizer.py
Visualizer.__init__
python
def __init__(self, configuration): self.configuration = configuration self.display_id = 0 self.name = configuration['name'] self.ncols = 0 self.vis = visdom.Visdom() if not self.vis.check_connection(): self.create_visdom_connections()
Initialize the Visualizer class. Input params: configuration -- stores all the configurations
https://github.com/branislav1991/pytorchprojectframework/blob/c2e2e9d391060a11f9151f021adc96c27ad8a894/utils/visualizer.py#L12-L25
import numpy as np import sys from subprocess import Popen, PIPE import utils import visdom class Visualizer():
MIT License
diana-hep/madminer
madminer/utils/ml/models/masks.py
create_masks
python
def create_masks(degrees): ms = [] for l, (d0, d1) in enumerate(zip(degrees[:-1], degrees[1:])): m = (d0[:, np.newaxis] <= d1).astype(np.float64) m = tensor(m) ms.append(m) mmp = (degrees[-1][:, np.newaxis] < degrees[0]).astype(np.float64) mmp = tensor(mmp) return ms, mmp
Creates the binary masks that make the connectivity autoregressive. Parameters ---------- degrees : a list of degrees for every layer Returns ------- type list of all masks, as torch tensors
https://github.com/diana-hep/madminer/blob/ce741d7558dee56ae6b3258f55f4032149388be7/madminer/utils/ml/models/masks.py#L72-L97
import logging import numpy as np import numpy.random as rng import torch.nn as nn from torch import tensor logger = logging.getLogger(__name__) def create_degrees(n_inputs, n_hiddens, input_order, mode): degrees = [] if isinstance(input_order, str): if input_order == "random": degrees_0 = np.arange(1, n_inputs + 1) rng.shuffle(degrees_0) elif input_order == "sequential": degrees_0 = np.arange(1, n_inputs + 1) else: raise ValueError("invalid input order") else: input_order = np.array(input_order) assert np.all(np.sort(input_order) == np.arange(1, n_inputs + 1)), "invalid input order" degrees_0 = input_order degrees.append(degrees_0) if mode == "random": for n in n_hiddens: min_prev_degree = min(np.min(degrees[-1]), n_inputs - 1) degrees_l = rng.randint(min_prev_degree, n_inputs, n) degrees.append(degrees_l) elif mode == "sequential": for n in n_hiddens: degrees_l = np.arange(n) % max(1, n_inputs - 1) + min(1, n_inputs - 1) degrees.append(degrees_l) else: raise ValueError("invalid mode") return degrees
MIT License
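A usage sketch for create_masks, assuming madminer, numpy and torch are installed; the degrees are built first with create_degrees from the same module.

from madminer.utils.ml.models.masks import create_degrees, create_masks

degrees = create_degrees(n_inputs=3, n_hiddens=[5, 5],
                         input_order="sequential", mode="sequential")
ms, mmp = create_masks(degrees)
print([m.shape for m in ms])  # [torch.Size([3, 5]), torch.Size([5, 5])]
print(mmp.shape)              # torch.Size([5, 3])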
tiramiseb/python-openevse
openevse.py
BaseOpenEVSE.elapsed
python
def elapsed(self): done, data1 = self._request('GS') if done: if data1[0] != '3': raise NotCharging done, data2 = self._request('GU') if done: return { 'seconds': int(data1[1]), 'Wh': float(data2[0])/3600 } raise EvseError
Get the elapsed time and energy used in the current charging session time is in seconds energy is in Watt-hour Returns a dictionary: { 'seconds': X, 'Wh': Y ... where X is an int and Y is a float If the charge state is not C (charging), raises NotCharging
https://github.com/tiramiseb/python-openevse/blob/393d966713ca65f1da6181215533bf9922203f55/openevse.py#L632-L656
import datetime import re try: import serial SERIAL = True except ImportError: SERIAL = False import threading import time import urllib.request import urllib.error import urllib.parse _version = '0.4' states = { 0: 'unknown', 1: 'not connected', 2: 'connected', 3: 'charging', 4: 'vent required', 5: 'diode check failed', 6: 'gfci fault', 7: 'no ground', 8: 'stuck relay', 9: 'gfci self-test failure', 10: 'over temperature', 254: 'sleeping', 255: 'disabled' } _lcd_colors = ['off', 'red', 'green', 'yellow', 'blue', 'violet', 'teal', 'white'] _status_functions = {'disable': 'FD', 'enable': 'FE', 'sleep': 'FS'} _lcd_types = ['monochrome', 'rgb'] _service_levels = ['A', '1', '2'] STANDARD_SERIAL_TIMEOUT = 0.5 RESET_SERIAL_TIMEOUT = 10 STATUS_SERIAL_TIMEOUT = 0 SYNC_SERIAL_TIMEOUT = 0.5 NEWLINE_MAX_AGE = 5 CORRECT_RESPONSE_PREFIXES = ('$OK', '$NK') class EvseError(Exception): pass class EvseTimeoutError(EvseError): pass class NoClock(EvseError): pass class NotCharging(EvseError): pass class BaseOpenEVSE: def _silent_request(self, *args): raise NotImplementedError def _request(self, *args): raise NotImplementedError def _reinitialize(self): return def _flags(self): done, data = self._request('GE') if done: flags = int(data[1], 16) else: raise EvseError return { 'service_level': (flags & 0x0001) + 1, 'diode_check': not flags & 0x0002, 'vent_required': not flags & 0x0004, 'ground_check': not flags & 0x0008, 'stuck_relay_check': not flags & 0x0010, 'auto_service_level': not flags & 0x0020, 'auto_start': not flags & 0x0040, 'serial_debug': not not flags & 0x0080, 'lcd_type': 'monochrome' if flags & 0x0100 else 'rgb', 'gfi_self_test': not flags & 0x0200 } def reset(self): self._silent_request('FR') self._reinitialize() time.sleep(1) def lcd_backlight_color(self, color='off'): colorcode = _lcd_colors.index(color) if self._request('FB', str(colorcode))[0]: return True raise EvseError def status(self, action=None): if action: function = _status_functions[action] done, data = self._request(function) if done: if data: return states[int(data[0], 16)] else: raise EvseError done, data = self._request('GS') if done: return states[int(data[0])] raise EvseError def display_text(self, x, y, text): if self._request('FP', str(x), str(y), str(text))[0]: return True raise EvseError def lcd_type(self, lcdtype=None): if lcdtype: typecode = _lcd_types.index(lcdtype) if self._request('S0', str(typecode))[0]: return lcdtype else: return self._flags()['lcd_type'] raise EvseError def time(self, the_datetime=None): if the_datetime: if self._request( 'S1', the_datetime.strftime('%y'), str(the_datetime.month), str(the_datetime.day), str(the_datetime.hour), str(the_datetime.minute), str(the_datetime.second) )[0]: return the_datetime else: done, data = self._request('GT') if done: if data == ['165', '165', '165', '165', '165', '85']: raise NoClock return datetime.datetime( year=int(data[0])+2000, month=int(data[1]), day=int(data[2]), hour=int(data[3]), minute=int(data[4]), second=int(data[5]) ) raise EvseError def ammeter_calibration(self, enabled=True): if self._request('S2', str(int(enabled)))[0]: return True raise EvseError def time_limit(self, limit=None): if limit is None: done, data = self._request('G3') if done: return int(data[0])*15 else: limit = int(round(limit/15.0)) if self._request('S3', str(limit))[0]: return limit raise EvseError def ammeter_settings(self, scalefactor=None, offset=None): if scalefactor is not None and offset is not None: if self._request('SA', str(scalefactor), str(offset))[0]: return scalefactor, 
offset else: done, data = self._request('GA') if done: return int(data[0]), int(data[1]) raise EvseError def current_capacity(self, capacity=None): if capacity: if self._request('SC', str(capacity))[0]: return capacity else: done, data = self._request('GE') if done: return int(data[0]) raise EvseError def diode_check(self, enabled=None): if enabled is None: return self._flags()['diode_check'] if self._request('FF', 'D', '1' if enabled else '0')[0]: return enabled raise EvseError def echo(self, enabled=True): if self._request('FF', 'E', '1' if enabled else '0')[0]: return True raise EvseError def gfi_self_test(self, enabled=None): if enabled is None: return self._flags()['gfi_self_test'] if self._request('FF', 'F', '1' if enabled else '0')[0]: return enabled raise EvseError def ground_check(self, enabled=None): if enabled is None: return self._flags()['ground_check'] if self._request('FF', 'G', '1' if enabled else '0')[0]: return enabled raise EvseError def charge_limit(self, limit=None): if limit is None: done, data = self._request('GH') if done: return int(data[0]) else: if self._request('SH', str(int(limit)))[0]: return limit raise EvseError def accumulated_wh(self, wh=None): if wh is None: done, data = self._request('GU') if done: return int(data[1]) else: if self._request('SK', str(int(wh)))[0]: return wh raise EvseError def service_level(self, level=None): if level is None: flags = self._flags() if flags['auto_service_level']: return 0 return flags['service_level'] else: if self._request('SL', _service_levels[level])[0]: return level raise EvseError def voltmeter_settings(self, scalefactor, offset): if scalefactor is not None and offset is not None: if self._request('SM', str(scalefactor), str(offset))[0]: return scalefactor, offset else: done, data = self._request('GM') if done: return int(data[0]), int(data[1]) raise EvseError def stuck_relay_check(self, enabled=True): if enabled is None: return self._flags()['stuck_relay_check'] if self._request('FF', 'R', '1' if enabled else '0')[0]: return enabled raise EvseError def timer(self, starthour=None, startminute=None, endhour=None, endminute=None): if starthour is None or startminute is None or endhour is None or endminute is None: done = self._request('ST', '0', '0', '0', '0')[0] else: done = self._request('ST', str(starthour), str(startminute), str(endhour), str(endminute))[0] if done: return True raise EvseError def vent_required(self, enabled=None): if enabled is None: return self._flags()['vent_required'] if self._request('FF', 'V', '1' if enabled else '0')[0]: return enabled raise EvseError def current_capacity_range(self): done, data = self._request('GC') if done: return int(data[0]), int(data[1]) raise EvseError def fault_counters(self): done, data = self._request('GF') if done: return { 'GFI self test': int(data[0], 16), 'Ground': int(data[1], 16), 'Stuck relay': int(data[2], 16) } raise EvseError def charging_current_and_voltage(self): done, data = self._request('GG') if done: milliamps = float(data[0]) millivolts = float(data[1]) return { 'amps': float(milliamps) / 1000 if milliamps > 0 else 0.0, 'volts': float(millivolts) / 1000 if millivolts > 0 else 0.0 } raise EvseError def temperature(self): done, data = self._request('GP') if done: return { 'ds3231temp': float(data[0])/10, 'mcp9808temp': float(data[1])/10, 'tmp007temp': float(data[2])/10 } raise EvseError
MIT License
khan/guacamole
mirt/mirt_util.py
get_normalized_time
python
def get_normalized_time(time, min_time=1, max_time=100, log_time=True): time[~np.isfinite(time)] = 1. time[time < min_time] = min_time time[time > max_time] = max_time if log_time: time = np.log(time) return time
Normalize a time vector to reasonable values (as defined by the caller). Input: A potentially messy vector of times taken Output: A normalized vector (probably with the log taken)
https://github.com/khan/guacamole/blob/3b905f498f1b921c18483c4d928196f8147dd264/mirt/mirt_util.py#L615-L633
import json import numpy as np import scipy import sys import warnings import multiprocessing import time from train_util.regression_util import sigmoid from train_util.model_training_util import FieldIndexer class Parameters(object): def __init__(self, num_abilities, num_exercises, vals=None, exercise_ind_dict=None): self.num_abilities = num_abilities self.num_exercises = num_exercises if vals is None: self.W_correct = np.zeros((num_exercises, num_abilities + 1)) self.W_time = np.zeros((num_exercises, num_abilities + 1)) self.sigma_time = np.zeros((num_exercises)) else: num_couplings = num_exercises * (num_abilities + 1) self.W_correct = vals[:num_couplings].copy().reshape( (-1, num_abilities + 1)) self.W_time = vals[num_couplings:2 * num_couplings].copy().reshape( (-1, num_abilities + 1)) self.sigma_time = vals[2 * num_couplings:].reshape((-1)) self.exercise_ind_dict = exercise_ind_dict def flat(self): return np.concatenate((self.W_correct.ravel(), self.W_time.ravel(), self.sigma_time.ravel())) def bias(self): bias_dict = {} for exercise, index in self.exercise_ind_dict.iteritems(): bias_dict[exercise] = self.W_correct[index][-1] def discriminations(self): bias_dict = {} for exercise, index in self.exercise_ind_dict.iteritems(): bias_dict[exercise] = self.W_correct[index][:-1] def get_params_for_exercise(self, exercise): index = self.exercise_ind_dict.get(exercise) return self.W_correct[index] class UserState(object): def __init__(self): self.correct = None self.log_time_taken = None self.abilities = None self.exercise_ind = None def add_data(self, lines, exercise_ind_dict, args): idx_pl = get_indexer(args) self.correct = np.asarray([line[idx_pl.correct] for line in lines] ).astype(int) self.log_time_taken = get_normalized_time(np.asarray( [line[idx_pl.time_taken] for line in lines]).astype(int)) self.exercises = [line[idx_pl.exercise] for line in lines] self.exercise_ind = [exercise_ind_dict[ex] for ex in self.exercises] self.exercise_ind = np.array(self.exercise_ind) self.abilities = np.random.randn(args.num_abilities, 1) _, idx = np.unique(self.exercise_ind, return_index=True) self.exercise_ind = self.exercise_ind[idx] self.correct = self.correct[idx] self.log_time_taken = self.log_time_taken[idx] def get_indexer(options): if options.data_format == 'simple': idx_pl = FieldIndexer(FieldIndexer.simple_fields) else: idx_pl = FieldIndexer(FieldIndexer.plog_fields) return idx_pl def get_exercise_ind(exercise_names, exercise_ind_dict): if isinstance(exercise_names, str) or isinstance(exercise_names, unicode): exercise_names = [exercise_names] inds = np.zeros(len(exercise_names), int) for i in range(len(exercise_names)): if exercise_names[i] in exercise_ind_dict: inds[i] = exercise_ind_dict.get(exercise_names[i]) else: print 'Warning: Unseen exercise %s' % exercise_names[i] inds[i] = -1 return inds def conditional_probability_correct(abilities, ex_parameters, exercise_ind): abilities = np.append(abilities.copy(), np.ones((1, 1)), axis=0) difficulties = ex_parameters.W_correct[exercise_ind, :] Z = sigmoid(np.dot(difficulties, abilities)) Z = np.reshape(Z, Z.size) return Z def conditional_energy_data( abilities, theta, exercise_ind, correct, log_time_taken): c_pred = conditional_probability_correct(abilities, theta, exercise_ind) p_data = c_pred * correct + (1 - c_pred) * (1 - correct) abilities = np.append(abilities.copy(), np.ones((1, 1)), axis=0) W_time = theta.W_time[exercise_ind, :] sigma_time = theta.sigma_time[exercise_ind] pred_time_taken = np.dot(W_time, abilities) err = 
pred_time_taken.ravel() - log_time_taken E_time_taken = (err.ravel() ** 2 / (2. * sigma_time.ravel() ** 2) + 0.5 * np.log(sigma_time ** 2)) E_time_taken = 0 E_observed = -np.log(p_data) + E_time_taken assert len(E_observed.shape) == 1 return E_observed def sample_abilities_diffusion_wrapper(args): theta, state, options, user_index = args id = multiprocessing.current_process()._identity if len(id) > 0: np.random.seed([id[0], time.time() * 1e9]) else: np.random.seed([time.time() * 1e9]) num_steps = options.sampling_num_steps abilities, Eabilities, _, _ = sample_abilities_diffusion( theta, state, num_steps=num_steps) return abilities, Eabilities, user_index def sample_abilities_diffusion(theta, state, num_steps=200, sampling_epsilon=.5): abilities_init = state.abilities correct = state.correct log_time_taken = state.log_time_taken exercise_ind = state.exercise_ind if abilities_init is None: abilities = np.random.randn(theta.num_abilities, 1) else: abilities = abilities_init E_abilities = 0.5 * np.dot(abilities.T, abilities) + np.sum( conditional_energy_data( abilities, theta, exercise_ind, correct, log_time_taken)) sample_chain = [] for _ in range(num_steps): proposal = abilities + sampling_epsilon * np.random.randn( theta.num_abilities, 1) E_proposal = 0.5 * np.dot(proposal.T, proposal) + np.sum( conditional_energy_data( proposal, theta, exercise_ind, correct, log_time_taken)) if E_abilities - E_proposal > 0.: p_accept = 1.0 else: p_accept = np.exp(E_abilities - E_proposal) if not np.isfinite(E_proposal): warnings.warn("Warning. Non-finite proposal energy.") p_accept = 0.0 if p_accept > np.random.rand(): abilities = proposal E_abilities = E_proposal sample_chain.append(abilities[:, 0].tolist()) sample_chain = np.asarray(sample_chain) mean_sample_abilities = np.mean(sample_chain, 0).reshape( theta.num_abilities, 1) stdev = np.std(sample_chain, 0).reshape(theta.num_abilities, 1) return abilities, E_abilities, mean_sample_abilities, stdev def L_dL_singleuser(arg): theta, state, options = arg abilities = state.abilities.copy() dL = Parameters(theta.num_abilities, theta.num_exercises) abilities = np.append(abilities.copy(), np.ones((1, abilities.shape[1])), axis=0) W_correct = theta.W_correct[state.exercise_ind, :] Y = np.dot(W_correct, abilities) Z = sigmoid(Y) Zt = state.correct.reshape(Z.shape) pdata = Zt * Z + (1. - Zt) * (1. - Z) dLdY = ((2. * Zt - 1.) * Z * (1. - Z)) / pdata L = -np.sum(np.log(pdata)) dL.W_correct = -np.dot(dLdY, abilities.T) if options.time: W_time = theta.W_time[state.exercise_ind, :] sigma = theta.sigma_time[state.exercise_ind].reshape((-1, 1)) Y = np.dot(W_time, abilities) err = (Y - state.log_time_taken.reshape((-1, 1))) L += np.sum(err ** 2 / sigma ** 2) / 2. dLdY = err / sigma ** 2 dL.W_time = np.dot(dLdY, abilities.T) dL.sigma_time = (-err ** 2 / sigma ** 3).ravel() L += np.sum(0.5 * np.log(sigma ** 2)) dL.sigma_time += 1. / sigma.ravel() return L, dL, state.exercise_ind def L_dL(theta_flat, user_states, num_exercises, options, pool): L = 0. theta = Parameters(options.num_abilities, num_exercises, vals=theta_flat.copy()) num_users = float(len(user_states)) L += options.regularization * num_users * np.sum(theta_flat ** 2) dL_flat = 2. * options.regularization * num_users * theta_flat dL = Parameters(theta.num_abilities, theta.num_exercises, vals=dL_flat) L += np.sum(options.regularization * num_users / theta.sigma_time ** 2) dL.sigma_time += (-2. 
* options.regularization * num_users / theta.sigma_time ** 3) if pool is None: rslts = map(L_dL_singleuser, [(theta, state, options) for state in user_states]) else: rslts = pool.map(L_dL_singleuser, [(theta, state, options) for state in user_states], chunksize=100) for r in rslts: Lu, dLu, exercise_indu = r L += Lu dL.W_correct[exercise_indu, :] += dLu.W_correct if options.time: dL.W_time[exercise_indu, :] += dLu.W_time dL.sigma_time[exercise_indu] += dLu.sigma_time if not options.time: dL.W_time[:, :] = 0. dL.sigma_time[:] = 0. dL_flat = dL.flat() L /= np.log(2.) * num_users dL_flat /= np.log(2.) * num_users return L, dL_flat class MirtModel(object): def __init__(self, options, num_exercises, exercise_ind_dict, user_states): self.theta = Parameters(options.num_abilities, num_exercises) self.theta.sigma_time[:] = 1. if options.resume_from_file: resume_from_model = json_to_data(options.resume_from_file) self.theta = resume_from_model['params'] exercise_ind_dict = self.theta.exercise_ind_dict sys.stderr.write("Loaded parameters from %s" % ( options.resume_from_file)) self.num_exercises = num_exercises self.pool = None if options.workers > 1: self.pool = multiprocessing.Pool(options.workers) self.options = options self.exercise_ind_dict = exercise_ind_dict self.user_states = user_states def get_sampling_results(self): if self.pool is None: results = [sample_abilities_diffusion_wrapper([ self.theta, self.user_states[ind], self.options, ind]) for ind in range(len(self.user_states))] else: results = self.pool.map( sample_abilities_diffusion_wrapper, [(self.theta, self.user_states[ind], self.options, ind) for ind in range(len(self.user_states))], chunksize=100) return results def run_em_step(self, epoch): sys.stderr.write("epoch %d, " % epoch) average_energy = 0. results = self.get_sampling_results() for result in results: abilities, El, ind = result self.user_states[ind].abilities = abilities.copy() average_energy += El / float(len(self.user_states)) sys.stderr.write("E joint log L + const %f, " % ( - average_energy / np.log(2.))) mn_a = 0. cov_a = 0. for state in self.user_states: mn_a += state.abilities[:, 0].T / float(len(self.user_states)) cov_a += (state.abilities[:, 0] ** 2).T / ( float(len(self.user_states))) sys.stderr.write("<abilities> " + str(mn_a)) sys.stderr.write(", <abilities^2>" + str(cov_a) + ", ") old_theta_flat = self.theta.flat() theta_flat, L, _ = scipy.optimize.fmin_l_bfgs_b( L_dL, self.theta.flat(), args=( self.user_states, self.num_exercises, self.options, self.pool), maxfun=self.options.max_pass_lbfgs, m=100) self.theta = Parameters(self.options.num_abilities, self.num_exercises, vals=theta_flat) if not self.options.time: self.theta.sigma_time[:] = 1. self.theta.W_time[:, :] = 0. 
sys.stderr.write("M conditional log L %f, " % (-L)) sys.stderr.write("reg penalty %f, " % ( self.options.regularization * np.sum(theta_flat ** 2))) sys.stderr.write("||couplings|| %f, " % ( np.sqrt(np.sum(self.theta.flat() ** 2)))) sys.stderr.write("||dcouplings|| %f\n" % ( np.sqrt(np.sum((theta_flat - old_theta_flat) ** 2)))) coupling_sign = np.sign(np.mean(self.theta.W_correct[:, :-1], axis=0)) coupling_sign = coupling_sign.reshape((1, -1)) self.theta.W_correct[:, :-1] *= coupling_sign self.theta.W_time[:, :-1] *= coupling_sign for user_state in self.user_states: user_state.abilities *= coupling_sign.T data_to_json( theta=self.theta, exercise_ind_dict=self.exercise_ind_dict, max_time_taken=self.options.max_time_taken, outfilename="%s_epoch=%d.json" % (self.options.output, epoch), ) self.write_csv(epoch, self.exercise_ind_dict) def write_csv(self, epoch, exercise_ind_dict): with open("%s_epoch=%d.csv" % ( self.options.output, epoch), 'w+') as outfile: exercises = sorted( exercise_ind_dict.keys(), key=lambda nm: self.theta.W_correct[exercise_ind_dict[nm], -1]) outfile.write('correct bias,') for coupling_index in range(self.options.num_abilities): outfile.write("correct coupling %d, " % coupling_index) outfile.write('time bias, ') for time_coupling_index in range(self.options.num_abilities): outfile.write("time coupling %d," % time_coupling_index) outfile.write('time variance, exercise name\n') for exercise in exercises: exercise_index = exercise_ind_dict[exercise] outfile.write(str( self.theta.W_correct[exercise_index, -1]) + ',') for index in range(self.options.num_abilities): outfile.write(str( self.theta.W_correct[exercise_index, index]) + ',') outfile.write( str(self.theta.W_time[exercise_index, -1]) + ',') for time_index in range(self.options.num_abilities): outfile.write(str( self.theta.W_time[exercise_index, time_index]) + ',') outfile.write(str(self.theta.sigma_time[exercise_index]) + ',') outfile.write(exercise + '\n')
MIT License
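A hedged usage sketch for the Parameters container and conditional_probability_correct shown in the context above. The `mirt.mirt_util` import path and its train_util dependencies are assumptions, and the module itself is Python 2 code.

# Hypothetical example; only the classes/functions visible in the context
# above are used, everything else (import path, installed deps) is assumed.
import numpy as np
from mirt import mirt_util

num_abilities, num_exercises = 2, 3
theta = mirt_util.Parameters(num_abilities, num_exercises)
theta.W_correct = np.random.randn(num_exercises, num_abilities + 1)

abilities = np.random.randn(num_abilities, 1)
# predicted probability of answering exercises 0 and 2 correctly
p_correct = mirt_util.conditional_probability_correct(
    abilities, theta, np.array([0, 2]))
print(p_correct)  # two values in (0, 1)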
wildmeorg/wildbook-ia
wbia/algo/graph/nx_edge_augmentation.py
weighted_one_edge_augmentation
python
def weighted_one_edge_augmentation(G, avail, weight=None, partial=False):
    avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=G)
    # Collapse each connected component of G into a single meta-node
    C = collapse(G, nx.connected_components(G))
    mapping = C.graph['mapping']
    # Use the lightest available edge between each pair of meta-nodes
    candidate_mapping = _lightest_meta_edges(mapping, avail_uv, avail_w)
    C.add_edges_from(
        (mu, mv, {'weight': w, 'generator': uv})
        for (mu, mv), uv, w in candidate_mapping)
    # A minimum spanning tree of the meta-graph selects the cheapest edges
    meta_mst = nx.minimum_spanning_tree(C)
    if not partial and not nx.is_connected(meta_mst):
        raise nx.NetworkXUnfeasible(
            'Not possible to connect G with available edges')
    for mu, mv, d in meta_mst.edges(data=True):
        if 'generator' in d:
            edge = d['generator']
            yield edge

Finds the minimum weight set of edges to connect G if one exists.

This is a variant of the weighted MST problem.

Example
-------
>>> G = nx.Graph([(1, 2), (2, 3), (4, 5)])
>>> G.add_nodes_from([6, 7, 8])
>>> # any edge not in avail has an implicit weight of infinity
>>> avail = [(1, 3), (1, 5), (4, 7), (4, 8), (6, 1), (8, 1), (8, 2)]
>>> sorted(weighted_one_edge_augmentation(G, avail))
[(1, 5), (4, 7), (6, 1), (8, 1)]
>>> # find another solution by giving large weights to edges in the
>>> # previous solution (note some of the old edges must be used)
>>> avail = [(1, 3), (1, 5, 99), (4, 7, 9), (6, 1, 99), (8, 1, 99), (8, 2)]
>>> sorted(weighted_one_edge_augmentation(G, avail))
[(1, 5), (4, 7), (6, 1), (8, 2)]
https://github.com/wildmeorg/wildbook-ia/blob/017057cfd3a2a7ea22f575842c9473e121c66ea4/wbia/algo/graph/nx_edge_augmentation.py#L484-L522
import logging import random import math import sys import itertools as it import networkx as nx from networkx.utils import not_implemented_for from collections import defaultdict, namedtuple from wbia.algo.graph import nx_edge_kcomponents as nx_ec import utool as ut print, rrr, profile = ut.inject2(__name__) logger = logging.getLogger('wbia') @not_implemented_for('directed') @not_implemented_for('multigraph') def is_k_edge_connected(G, k): if k < 1: raise ValueError('k must be positive, not {}'.format(k)) if G.number_of_nodes() < k + 1: return False elif any(d < k for n, d in G.degree()): return False else: if k == 1: return nx.is_connected(G) elif k == 2: return not nx.has_bridges(G) else: return nx.edge_connectivity(G) >= k @not_implemented_for('directed') @not_implemented_for('multigraph') def is_locally_k_edge_connected(G, s, t, k): if k < 1: raise ValueError('k must be positive, not {}'.format(k)) if G.degree(s) < k or G.degree(t) < k: return False else: if k == 1: return nx.has_path(G, s, t) else: localk = nx.connectivity.local_edge_connectivity(G, s, t, cutoff=k) return localk >= k @not_implemented_for('directed') @not_implemented_for('multigraph') def k_edge_augmentation(G, k, avail=None, weight=None, partial=False): try: if k <= 0: raise ValueError('k must be a positive integer, not {}'.format(k)) elif G.number_of_nodes() < k + 1: raise nx.NetworkXUnfeasible( ('impossible to {} connect in graph with less than {} ' 'nodes').format( k, k + 1 ) ) elif avail is not None and len(avail) == 0: if not is_k_edge_connected(G, k): raise nx.NetworkXUnfeasible('no available edges') aug_edges = [] elif k == 1: aug_edges = one_edge_augmentation( G, avail=avail, weight=weight, partial=partial ) elif k == 2: aug_edges = bridge_augmentation(G, avail=avail, weight=weight) else: aug_edges = greedy_k_edge_augmentation( G, k=k, avail=avail, weight=weight, seed=0 ) yield from list(aug_edges) except nx.NetworkXUnfeasible: if partial: if avail is None: aug_edges = complement_edges(G) else: aug_edges = partial_k_edge_augmentation( G, k=k, avail=avail, weight=weight ) yield from aug_edges else: raise def partial_k_edge_augmentation(G, k, avail, weight=None): def _edges_between_disjoint(H, only1, only2): only1_adj = {u: set(H.adj[u]) for u in only1} for u, neighbs in only1_adj.items(): neighbs12 = neighbs.intersection(only2) for v in neighbs12: yield (u, v) avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=G) H = G.copy() H.add_edges_from( ((u, v, {'weight': w, 'generator': (u, v)}) for (u, v), w in zip(avail, avail_w)) ) k_edge_subgraphs = list(nx_ec.k_edge_subgraphs(H, k=k)) for nodes in k_edge_subgraphs: if len(nodes) > 1: C = H.subgraph(nodes).copy() sub_avail = { d['generator']: d['weight'] for (u, v, d) in C.edges(data=True) if 'generator' in d } C.remove_edges_from(sub_avail.keys()) yield from k_edge_augmentation(C, k=k, avail=sub_avail) for cc1, cc2 in it.combinations(k_edge_subgraphs, 2): for (u, v) in _edges_between_disjoint(H, cc1, cc2): d = H.get_edge_data(u, v) edge = d.get('generator', None) if edge is not None: yield edge @not_implemented_for('multigraph') @not_implemented_for('directed') def one_edge_augmentation(G, avail=None, weight=None, partial=False): if avail is None: return unconstrained_one_edge_augmentation(G) else: return weighted_one_edge_augmentation( G, avail=avail, weight=weight, partial=partial ) @not_implemented_for('multigraph') @not_implemented_for('directed') def bridge_augmentation(G, avail=None, weight=None): if G.number_of_nodes() < 3: raise 
nx.NetworkXUnfeasible('impossible to bridge connect less than 3 nodes') if avail is None: return unconstrained_bridge_augmentation(G) else: return weighted_bridge_augmentation(G, avail, weight=weight) def _ordered(u, v): return (u, v) if u < v else (v, u) def _unpack_available_edges(avail, weight=None, G=None): if weight is None: weight = 'weight' if isinstance(avail, dict): avail_uv = list(avail.keys()) avail_w = list(avail.values()) else: def _try_getitem(d): try: return d[weight] except TypeError: return d avail_uv = [tup[0:2] for tup in avail] avail_w = [1 if len(tup) == 2 else _try_getitem(tup[-1]) for tup in avail] if G is not None: flags = [not G.has_edge(u, v) for u, v in avail_uv] avail_uv = list(it.compress(avail_uv, flags)) avail_w = list(it.compress(avail_w, flags)) return avail_uv, avail_w MetaEdge = namedtuple('MetaEdge', ('meta_uv', 'uv', 'w')) def _lightest_meta_edges(mapping, avail_uv, avail_w): grouped_wuv = defaultdict(list) for w, (u, v) in zip(avail_w, avail_uv): meta_uv = _ordered(mapping[u], mapping[v]) grouped_wuv[meta_uv].append((w, u, v)) for (mu, mv), choices_wuv in grouped_wuv.items(): if mu != mv: w, u, v = min(choices_wuv) yield MetaEdge((mu, mv), (u, v), w) def unconstrained_one_edge_augmentation(G): ccs1 = list(nx.connected_components(G)) C = collapse(G, ccs1) meta_nodes = list(C.nodes()) meta_aug = list(zip(meta_nodes, meta_nodes[1:])) inverse = defaultdict(list) for k, v in C.graph['mapping'].items(): inverse[v].append(k) for mu, mv in meta_aug: yield (inverse[mu][0], inverse[mv][0])
Apache License 2.0
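A small usage sketch complementing the doctests above: `avail` may also be given as a dict mapping candidate edges to weights (handled by _unpack_available_edges). A working wildbook-ia install is assumed.

import networkx as nx
from wbia.algo.graph import nx_edge_augmentation as nx_aug

G = nx.Graph([(1, 2), (2, 3), (4, 5)])
G.add_nodes_from([6, 7])
avail = {(1, 5): 11, (4, 7): 1, (6, 1): 2}  # candidate edge -> weight
aug_edges = sorted(nx_aug.weighted_one_edge_augmentation(G, avail))
print(aug_edges)  # the cheapest available edges that make G connected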
google/python-spanner-orm
spanner_orm/table_apis.py
update
python
def update(transaction: spanner_transaction.Transaction,
           table_name: str,
           columns: Iterable[str],
           values: Iterable[Iterable[Any]]) -> None:
  _logger.debug('Update table=%s columns=%s values=%s', table_name, columns,
                values)
  transaction.update(table=table_name, columns=columns, values=values)

Updates rows in the given table based on the provided values.

Note that if a row is specified for which the primary key does not exist
in the table, an exception will be thrown and the update will be aborted.

Args:
  transaction: The Spanner transaction to execute the request on
  table_name: The Spanner table being modified
  columns: Which columns to write on the Spanner table
  values: A list of rows to write to the table. The order of the values in
    each sublist must match the order of the columns specified in the
    `columns` parameter.
https://github.com/google/python-spanner-orm/blob/2d73ce62e71459fc3499e1932023704fb35ffc08/spanner_orm/table_apis.py#L116-L134
import logging from typing import Any, Dict, Iterable, List, Sequence from google.cloud import spanner from google.cloud.spanner_v1 import transaction as spanner_transaction from google.cloud.spanner_v1.proto import type_pb2 _logger = logging.getLogger(__name__) def find(transaction: spanner_transaction.Transaction, table_name: str, columns: Iterable[str], keyset: spanner.KeySet) -> List[Sequence[Any]]: _logger.debug('Find table=%s columns=%s keys=%s', table_name, columns, keyset.keys) stream_results = transaction.read( table=table_name, columns=columns, keyset=keyset) return list(stream_results) def sql_query(transaction: spanner_transaction.Transaction, query: str, parameters: Dict[str, Any], parameter_types: Dict[str, type_pb2.Type]) -> List[Sequence[Any]]: _logger.debug('Executing SQL:\n%s\n%s\n%s', query, parameters, parameter_types) stream_results = transaction.execute_sql( query, params=parameters, param_types=parameter_types) return list(stream_results) def delete(transaction: spanner_transaction.Transaction, table_name: str, keyset: spanner.KeySet) -> None: _logger.debug('Delete table=%s keys=%s', table_name, keyset.keys) transaction.delete(table=table_name, keyset=keyset) def insert(transaction: spanner_transaction.Transaction, table_name: str, columns: Iterable[str], values: Iterable[Iterable[Any]]) -> None: _logger.debug('Insert table=%s columns=%s values=%s', table_name, columns, values) transaction.insert(table=table_name, columns=columns, values=values)
Apache License 2.0
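A hedged sketch of calling this helper inside a read-write transaction via the official Cloud Spanner client; the instance, database, table, and row values are placeholders.

from google.cloud import spanner
from spanner_orm import table_apis

client = spanner.Client()
database = client.instance('my-instance').database('my-db')

def txn_fn(transaction):
    # rows whose primary keys already exist in the 'users' table
    table_apis.update(
        transaction,
        table_name='users',
        columns=['user_id', 'name'],
        values=[['user-1', 'Ada'], ['user-2', 'Grace']])

database.run_in_transaction(txn_fn)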
jeroenzegers/nabu-msss
nabu/neuralnetworks/models/kmeans.py
DBLSTM._get_outputs
python
def _get_outputs(self, inputs, input_seq_length, is_training):
    blstm = layer.BLSTMLayer(
        num_units=int(self.conf['num_units']),
        layer_norm=self.conf['layer_norm'] == 'True',
        recurrent_dropout=float(self.conf['recurrent_dropout']))

    if len(inputs) > 1:
        # raising a bare string is invalid; raise an exception instance
        raise ValueError(
            'The implementation of DBLSTM expects 1 input and not %d'
            % len(inputs))
    else:
        inputs = inputs[0]

    with tf.variable_scope(self.scope):
        if is_training and float(self.conf['input_noise']) > 0:
            inputs = inputs + tf.random_normal(
                tf.shape(inputs),
                stddev=float(self.conf['input_noise']))

        logits = inputs
        for l in range(int(self.conf['num_layers'])):
            logits = blstm(logits, input_seq_length, 'layer' + str(l))
            if is_training and float(self.conf['dropout']) < 1:
                logits = tf.nn.dropout(logits, float(self.conf['dropout']))

        output = logits

    return output

Create the variables and do the forward computation

Args:
    inputs: the inputs to the neural network, this is a list of
        [batch_size x time x ...] tensors
    input_seq_length: The sequence lengths of the input utterances, this
        is a [batch_size] vector
    is_training: whether or not the network is in training mode

Returns:
    - output, which is a [batch_size x time x ...] tensor
https://github.com/jeroenzegers/nabu-msss/blob/5e862cbf846d45b8a317f87588533f3fde9f0726/nabu/neuralnetworks/models/kmeans.py#L11-L56
import tensorflow as tf import model from nabu.neuralnetworks.components import layer class DBLSTM(model.Model):
MIT License
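A hedged sketch of the configuration this model reads. The key names follow the `self.conf` lookups in the function above; the values and the commented call are illustrative only, since the `model.Model` constructor is not shown in this record.

# Hypothetical configuration section for the DBLSTM model
conf = {
    'num_units': '128',
    'num_layers': '2',
    'layer_norm': 'False',
    'recurrent_dropout': '0.0',
    'input_noise': '0.0',
    'dropout': '0.5',
}
# inputs is a list holding a single [batch_size x time x feature] tensor,
# input_seq_length is a [batch_size] vector of utterance lengths:
# output = dblstm._get_outputs([features], seq_lengths, is_training=True)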
utensor/utensor_cgen
utensor_cgen/utils.py
Configuration.__init__
python
def __init__(self, defaults=None, user_config=None):
    if defaults is None:
        defaults = {}
    if user_config is None:
        user_config = {}
    self._defaults = defaults
    self._user_config = user_config

Note
----
- any value that is in user_config should be in defaults
- any value that is not in defaults should not be in user_config
https://github.com/utensor/utensor_cgen/blob/eccd6859028d0b6a350dced25ea72ff02faaf9ad/utensor_cgen/utils.py#L510-L523
import importlib import os import re import types from ast import literal_eval from collections import deque from copy import deepcopy from functools import wraps from random import choice from string import ascii_letters, digits from time import time import attr import idx2numpy as idx2np import numpy as np from click.types import ParamType from toml import loads as _parse from utensor_cgen.logger import logger __all__ = ["save_idx", "save_consts", "save_graph", "log_graph", "NamescopedKWArgsParser", "NArgsParam", "MUST_OVERWRITE"] class LazyLoader(types.ModuleType): def __init__(self, module_name='utensor_cgen', submod_name=None): self._module_name = '{}{}'.format( module_name, submod_name and '.{}'.format(submod_name) or '' ) self._mod = None super(LazyLoader, self).__init__(self._module_name) def _load(self): if self._mod is None: self._mod = importlib.import_module( self._module_name ) return self._mod def __getattr__(self, attrb): return getattr(self._load(), attrb) def __dir__(self): return dir(self._load()) tf = LazyLoader('tensorflow') tf_python = LazyLoader('tensorflow', 'python.framework') class LazyAttrib(object): def __init__(self, obj, attr_name): self._obj = obj self._attr_name = attr_name def __getattr__(self, name): return getattr(self.attrib, name) def __call__(self, *args, **kwargs): return self.attrib(*args, **kwargs) @property def attrib(self): return getattr(self._obj, self._attr_name) def log_graph(graph_or_graph_def, logdir): from tensorflow.compat.v1 import GraphDef if isinstance(graph_or_graph_def, GraphDef): graph = tf.Graph() with graph.as_default(): tf.import_graph_def(graph_or_graph_def, name='') else: graph = graph_or_graph_def tf.summary.FileWriter(logdir, graph=graph).close() def save_idx(arr, fname): if arr.shape == (): arr = np.array([arr], dtype=arr.dtype) if arr.dtype in [np.int64]: logger.warning("unsupported int format for idx detected: %s, using int32 instead", arr.dtype) arr = arr.astype(np.int32) out_dir = os.path.dirname(fname) if out_dir and not os.path.exists(out_dir): os.makedirs(out_dir) with open(fname, "wb") as fid: idx2np.convert_to_file(fid, arr) logger.info("%s saved", fname) def save_consts(sess, out_dir="."): out_dir = os.path.expanduser(out_dir) if not os.path.exists(out_dir): os.makedirs(out_dir) graph = sess.graph graph_def = sess.graph.as_graph_def() for node in graph_def.node: if node.op == "Const": op = graph.get_operation_by_name(node.name) for out_tensor in op.outputs: arr = out_tensor.eval() tname = re.sub(r'[/:]', '_', out_tensor.name) idx_fname = os.path.join(out_dir, "{}.idx".format(tname)) save_idx(arr, idx_fname) def save_graph(graph, graph_name="graph", out_dir="."): out_dir = os.path.expanduser(out_dir) graph_fname = os.path.join(out_dir, "{}.pb".format(graph_name)) with tf.gfile.FastGFile(graph_fname, "wb") as fid: fid.write(graph.as_graph_def().SerializeToString()) logger.info("%s saved", graph_fname) def prepare_meta_graph(meta_graph_path, output_nodes, chkp_path=None): graph = tf.Graph() saver = tf.train.import_meta_graph(meta_graph_path, clear_devices=True, graph=graph) if chkp_path is None: chkp_path = meta_graph_path.replace(".meta", "") with tf.Session(graph=graph) as sess: saver.restore(sess, chkp_path) graph_def = tf_python.graph_util.remove_training_nodes(sess.graph_def) sub_graph_def = tf_python.graph_util.convert_variables_to_constants(sess=sess, input_graph_def=graph_def, output_node_names=output_nodes) return sub_graph_def def _sanitize_op_name(op_name): if op_name.startswith('^'): return op_name[1:] 
return op_name def parse_tensor_name(tname): components = tname.split(":") if len(components) == 2: op_name = _sanitize_op_name(components[0]) try: output_index = int(components[1]) except ValueError: raise ValueError("invalid output index: {}".format(tname)) return (op_name, output_index) elif len(components) == 1: op_name = _sanitize_op_name(components[0]) return (op_name, 0) else: raise ValueError("invalid tensor name: {}".format(tname)) class NamescopedKWArgsParser: def __init__(self, namespace, kwargs): ns_pattern = re.compile(r'^([^\d\W][\w\d_]*)__([^\d\W][\w\d_]*)') self._namespace = namespace self._private_kwargs = {} self._shared_kwargs = {} for key, value in kwargs.items(): match = ns_pattern.match(key) if match: ns = match.group(1) argname = match.group(2) if ns == self._namespace: self._private_kwargs[argname] = value else: self._shared_kwargs[key] = value def get(self, argname, default=None): try: return self._private_kwargs[argname] except KeyError: return self._shared_kwargs.get(argname, default) def __repr__(self): d = dict(('%s__%s' % (self._namespace, k), v) for k, v in self._private_kwargs.items()) repr_str = ('KWArgsParser(' + '%s, ' % self._namespace + '%s)' % d) return repr_str def __getitem__(self, argname): try: return self._private_kwargs[argname] except KeyError: return self._shared_kwargs[argname] class NArgsParam(ParamType): def __init__(self, sep=','): self._sep = sep def convert(self, value, param, ctx): value = str(value) args = value.split(self._sep) aug_args = [arg for arg in args if arg[0] in ['+', '-']] if aug_args: final_args = param.default.split(self._sep) for arg in aug_args: if arg[0] == '+': final_args.append(arg[1:]) elif arg[0] == '-' and arg[1:] in final_args: final_args.remove(arg[1:]) else: final_args = args return final_args class _MustOverwrite(object): _obj = None def __new__(cls, *args, **kwargs): if cls._obj is None: cls._obj = object.__new__(cls, *args, **kwargs) return cls._obj MUST_OVERWRITE = _MustOverwrite() def topologic_order_graph(ugraph): ugraph.topo_order = get_topologic_order(ugraph, ugraph.output_nodes)[::-1] def get_topologic_order(ugraph, init_nodes=None): if init_nodes is None: init_nodes = ugraph.output_nodes queue = deepcopy(init_nodes) visited = set() perm_visit = set() ops_torder = [] def visit(node_name): if node_name in perm_visit: return if node_name in visited: raise ValueError("Input graph is not a DAG") visited.add(node_name) op_info = ugraph.ops_info.get(node_name, None) if not op_info: return for t_info in op_info.input_tensors: op_name = t_info.op_name visit(op_name) perm_visit.add(node_name) ops_torder.insert(0, node_name) while queue: node_name = queue.pop(0) visit(node_name) return ops_torder def ops_bfs_queue(ugraph, init_nodes=None): if init_nodes is None: init_nodes = [ ugraph.ops_info[name] for name in ugraph.output_nodes ] queue = deque(init_nodes) visited = set() bfs_deck = deque([]) while queue: op = queue.popleft() if op is None or op.name in visited: continue visited.add(op.name) queue.extend(op.input_nodes) bfs_deck.append(op) return bfs_deck def prune_graph(ugraph, output_nodes=None): new_ugraph = deepcopy(ugraph) if output_nodes is None: output_nodes = ugraph.output_nodes[:] else: new_ugraph.output_nodes = output_nodes[:] ops_in_need = set(output_nodes) queue = [name for name in output_nodes] visited = set([]) while queue: op_name = queue.pop(0) tensors_in = set([t.name for t in ugraph.ops_info[op_name].input_tensors]) in_ops = set() for it_node in ugraph.ops_info: if it_node == op_name: continue 
it_tensors_out = [t.name for t in ugraph.ops_info[it_node].output_tensors] if not tensors_in.isdisjoint(it_tensors_out): in_ops.add(it_node) queue.extend([name for name in in_ops if name not in visited]) visited.update(in_ops) ops_in_need.update(in_ops) ops_to_remove = set([]) for op_name in new_ugraph.ops_info.keys(): if op_name not in ops_in_need: ops_to_remove.add(op_name) for op_name in ops_to_remove: new_ugraph.ops_info.pop(op_name) topologic_order_graph(new_ugraph) return new_ugraph def random_str(length=8): letters = ascii_letters+digits chars = [choice(letters) for _ in range(length)] return ''.join(chars) def parse_toml(file_or_path): if isinstance(file_or_path, str): fid = open(file_or_path, 'r') doc = _parse(fid.read()) fid.close() return doc def timed(func): @wraps(func) def wrapped(*args, **kwargs): start_time = time() ret = func(*args, **kwargs) end_time = time() logger.info('collapsed time of calling %s: %0.4f seconds', func.__name__, end_time - start_time) return ret return wrapped def is_abstract(func): if isinstance(func, types.MethodType): func = func.__func__ return getattr(func, '__isabstractmethod__', False) class class_property(object): def __init__(self, getter): self._getter = getter def __get__(self, obj, objtype=None): if objtype is None: return self._getter(obj) return self._getter(objtype) @attr.s class Pipeline(object): _funcs = attr.ib(factory=list) def __call__(self, *args, **kwargs): result = None for func in self._funcs: if result is None: result = func(*args, **kwargs) else: result = func(*result) return result def __getitem__(self, slice_obj): cls = type(self) return cls(funcs=self._funcs[slice_obj]) class Configuration(object):
Apache License 2.0
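A minimal sketch of the defaults/user-override pattern implied by the docstring, assuming utensor_cgen and its dependencies are installed; only behaviour visible in __init__ (the stored dicts) is relied on, the key names are invented.

from utensor_cgen.utils import Configuration

defaults = {'batch_size': 32, 'optimizer': 'adam'}
user_config = {'batch_size': 8}  # every user key must exist in defaults
config = Configuration(defaults=defaults, user_config=user_config)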
ufosc/swampymud
swampymud/entity.py
Entity.__str__
python
def __str__(self):
    return self.classname
Return a simple representation of this item. By default, str(item) returns the name of the entity's class.
https://github.com/ufosc/swampymud/blob/2e28f9db1f0f4e1c4aafccdf7f58bf2a22b82366/swampymud/entity.py#L46-L50
import inspect from swampymud.util import camel_to_space import swampymud.character as character class EntityClass(type): def __init__(self, cls, bases, namespace): if "classname" not in namespace: self.classname = camel_to_space(cls) if "description" not in namespace: if self.__doc__ is not None: self.description = inspect.cleandoc(self.__doc__) else: self.description = "[No description provided.]" self._local_commands = {} for value in namespace.values(): if isinstance(value, character.Command): value.label = "Equipped" self._local_commands[str(value)] = value self._commands = {} for base in reversed(self.__mro__): if not isinstance(base, EntityClass): continue self._commands.update(base._local_commands) self._commands.update(self._local_commands) def __str__(cls): return cls.classname class Entity(metaclass=EntityClass): def __init__(self): self.location = None
MIT License
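A short sketch of how the metaclass-generated classname feeds __str__, assuming swampymud is installed; the GoldenKey class is invented for illustration.

from swampymud.entity import Entity

class GoldenKey(Entity):
    """A small golden key that opens the vault."""

key = GoldenKey()
print(str(key))               # classname derived from "GoldenKey" via camel_to_space
print(GoldenKey.description)  # cleaned-up docstring set by the metaclass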
google/har-sanitizer
harsanitizer/harsanitizer.py
HarSanitizer.scrub
python
def scrub(
        self,
        har,
        wordlist=None,
        content_list=None,
        all_cookies=False,
        all_headers=False,
        all_params=False,
        all_content_mimetypes=False):
    if not isinstance(har, Har):
        raise TypeError("'har' must be a Har object")

    if WORDLIST_PATH[:4] == "http":
        wordlist_json = json.loads(urllib2.urlopen(WORDLIST_PATH).read())
        scrub_wordlist = self.load_wordlist(wordlist=wordlist_json)
    else:
        scrub_wordlist = self.load_wordlist(wordlist_path=WORDLIST_PATH)

    if isinstance(wordlist, list):
        if all(isinstance(word, basestring) for word in wordlist):
            scrub_wordlist.extend(wordlist)
        else:
            raise TypeError("All words in wordlist must be strings")

    cond_table = {}
    if all_cookies:
        pattern = self.gen_hartype_names_pattern(har, "cookies")
        cond_table.update(pattern)
    if all_headers:
        pattern = self.gen_hartype_names_pattern(har, "headers")
        cond_table.update(pattern)
    if all_params:
        url_pattern = self.gen_hartype_names_pattern(har, "queryString")
        postdata_pattern = self.gen_hartype_names_pattern(har, "params")
        cond_table.update(url_pattern)
        cond_table.update(postdata_pattern)

    if all_content_mimetypes:
        content_patterns = self.gen_all_mimetypes_scrub_pattern()
    elif content_list:
        mimetypes = self.get_mimetypes(har).keys()
        content_list_trimmed = [mimetype for mimetype in content_list
                                if mimetype in mimetypes]
        content_patterns = self.gen_content_type_scrub_patterns(
            content_list=content_list_trimmed)
    else:
        content_patterns = self.gen_content_type_scrub_patterns()
    cond_table.update(content_patterns)

    iter_har_dict = self.iter_eval_exec(
        my_iter=har.har_dict,
        cond_table=cond_table)
    har = Har(har=iter_har_dict)
    har_clean = self.scrub_generic(har)
    har = har_clean

    if all_cookies:
        scrub_wordlist.extend(self.har.category["cookies"].keys())
    if all_headers:
        scrub_wordlist.extend(self.har.category["headers"].keys())
    if all_params:
        scrub_wordlist.extend(self.har.category["queryString"].keys())
        if self.har.category["params"]:
            scrub_wordlist.extend(self.har.category["params"].keys())

    har_sanitized = self.scrub_wordlist(har, scrub_wordlist)
    return har_sanitized

Full scrub/redaction of sensitive HAR fields.

Args:
  har: a Har() object
  wordlist=None, (list of strs) appends to default wordlist
  content_list=None, (list of strs) appends to default content_list
  all_cookies=False, (Boolean) Redacts all cookies
  all_headers=False, (Boolean) Redacts all headers
  all_params=False, (Boolean) Redacts all URLQuery/POSTData parameters
  all_content_mimetypes=False (Boolean) Redacts all content mimeTypes

Returns:
  har: scrubbed har

Typical usage:
  har = Har(har=har_json)
  hs = HarSanitizer()
  har_redacted = hs.scrub(har, all_cookies=True, content_list=['image/gif'])
https://github.com/google/har-sanitizer/blob/cc4599dd749ce983634669c458b8cb2a65686b33/harsanitizer/harsanitizer.py#L574-L669
import os import json import re import urllib2 CURRENT_DIR = os.path.abspath("./") try: with open("./config.json", "r") as config: STATIC_FOLDER = json.load(config)["static_folder"] except IOError: raise IOError( "'config.json' not found in '{}'. Please ensure that script is " "being run from root './har-sanitizer/' directory.".format(CURRENT_DIR)) except KeyError: raise KeyError("KeyError: 'STATIC_FOLDER' key not found in config.json") WORDLIST_PATH = "{}/wordlist.json".format(STATIC_FOLDER) MIMETYPES_PATH = "{}/mimetypesScrubList.json".format(STATIC_FOLDER) class Har(object): def __init__(self, har=None, har_path=None): super(Har, self).__init__() self.load_har(har=har, har_path=har_path) self.category = {} def load_har(self, har=None, har_path=None): try: if isinstance(har, dict): self.har_str = json.dumps(har) self.har_dict = har elif isinstance(har, basestring): self.har_dict = json.loads(har) self.har_str = har else: raise ValueError assert("request" in self.har_dict["log"]["entries"][0]) except (TypeError, ValueError, AssertionError, KeyError, IndexError): raise ValueError("Missing/Invalid HAR: Requires valid [har] (str or dict)") except Exception: raise class HarSanitizer(object): valid_hartypes = ["cookies", "headers", "queryString", "params"] def __init__(self, har=None): super(HarSanitizer, self).__init__() if isinstance(har, Har): self.har = Har def load_wordlist(self, wordlist=None, wordlist_path=None): if not ( (isinstance(wordlist, list) and all(isinstance(s, basestring) for s in wordlist)) or isinstance(wordlist_path, basestring)): raise TypeError( "Requires either wordlist_path (str of wordlist file path), " "or wordlist (list of strs).") elif isinstance(wordlist_path, basestring): try: with open(wordlist_path, "r") as wordlist_f: wordlist = json.load(wordlist_f) except IOError: raise IOError("Cannot open wordlist file at path: {}".format(wordlist_path)) return wordlist def trim_wordlist(self, har, wordlist): if not isinstance(har, Har): raise TypeError("'har' must be a Har() object") trimmedlist = [word for word in wordlist if word.lower() in har.har_str.lower()] return trimmedlist def gen_regex(self, word="word"): regex_patterns = { "single_use": { r"""(\://[\w+-.%!*()`~']*?\:)""" r"""(?P<capture>[\w+-.%!*()`~']+)(@)""": r"\g<1>[password redacted]\g<3>", }, "word_patterns": { r"""([\s";,&?]+{}=)""" r"""(?P<capture>[\w+-_/=#|.%&:!*()`~'"]+?)""" r"""(&|",|"\s|"}}|;){{1}}""".format(word): r"\g<1>[{} redacted]\g<3>".format(word), r"""("name": "{}",[\s\w+:"-\\%!*()`~'.#]*?"value": ")""" r"""(?P<capture>[\w+-_:&\+=#~/$()\\.\,\*\!|%"\s;]+?)""" r"""("[\s,}}]+){{1}}""".format(word): r"\g<1>[{} redacted]\g<3>".format(word), r"""("value": ")""" r"""(?P<capture>[\w+-_:&\+=#$~/()\\.\,\*\!|%"\s;]+){{1}}""" r"""("[\s,}}]+){{1}}""" r"""([\s\w+:"-\\%!*()`~'#.]*"name": "{}"){{1}}""".format(word): r"\g<1>[{} redacted]\g<3>\g<4>".format(word), } } return regex_patterns def iter_eval_exec(self, my_iter, cond_table): if isinstance(my_iter, dict): for key, value in my_iter.iteritems(): if any([eval(cond) for cond in cond_table.keys()]): for cond, callback in cond_table.iteritems(): if eval(cond): callback(self, my_iter, key, value) elif isinstance(value, (dict, list)): self.iter_eval_exec( value, cond_table) elif isinstance(my_iter, list): for value in my_iter: self.iter_eval_exec( value, cond_table) return my_iter def gen_hartype_names_pattern(self, har, hartype): if not isinstance(har, Har): raise TypeError("'har' must be a Har() object") if hartype not in self.valid_hartypes: raise 
ValueError( "'hartype' must be one of the following: {}" .format(self.valid_hartypes)) def outer_callback(self, my_iter, key, value): def inner_callback(self, my_iter, key, value): if value in har.category[hartype]: har.category[hartype][value] += 1 else: har.category[hartype][value] = 1 self.iter_eval_exec( value, {"key == 'name'": inner_callback} ) har.category[hartype] = {} cond_table = { "key == '{}'".format(hartype): outer_callback } return cond_table def get_hartype_names(self, har, hartype): if not isinstance(har, Har): raise TypeError("'har' must be a Har() object") namelist = [] pattern = self.gen_hartype_names_pattern(har, hartype) self.iter_eval_exec(my_iter=har.har_dict, cond_table=pattern) namelist = har.category[hartype] return namelist def load_keyvalue_conds(self, keyvalues): cond_table = {} table = keyvalues def callback(self, my_iter, key, value): my_iter[keyvalues["key_to_redact"]] = "[{} redacted]".format( keyvalues["value_to_match"]) cond_table.update({ "key == '{}' and '{}' in value and '{}' in my_iter.keys()" .format( keyvalues["key_to_match"], keyvalues["value_to_match"], keyvalues["key_to_redact"]): callback }) return cond_table def get_mimetypes(self, har): if not isinstance(har, Har): raise TypeError("'har' must be a Har object") def callback(self, my_iter, key, value): if value in self.har.category["mimetypes"]: self.har.category["mimetypes"][value] += 1 else: self.har.category["mimetypes"][value] = 1 namelist = [] self.har = har self.har.category["mimetypes"] = {} cond_table = { "key == 'mimeType'": callback } self.iter_eval_exec(my_iter=har.har_dict, cond_table=cond_table) namelist = har.category["mimetypes"] return namelist def gen_all_mimetypes_scrub_pattern(self): def callback(self, my_iter, key, value): value["text"] = "[{} redacted]".format(value["mimeType"]) cond_table = { "key == 'content' and 'text' in value.keys()": callback } return cond_table def gen_content_type_scrub_patterns(self, content_list=None): content_scrub_list = self.default_content_scrub_list[:] with open(MIMETYPES_PATH, "r") as mimetypes_file: default_mimetypes = json.load(mimetypes_file) if content_list: content_list = [obj for obj in content_list if isinstance(obj,basestring)] new_scrub_list = [{ "key_to_match": "mimeType", "value_to_match": mimetype, "key_to_redact": "text" } for mimetype in content_list if mimetype not in default_mimetypes] content_scrub_list.extend(new_scrub_list) cond_table = {} for table in content_scrub_list: cond_table.update(self.load_keyvalue_conds(table)) return cond_table def scrub_generic(self, har): if not isinstance(har, Har): raise TypeError("'har' must be a Har object") patterns = self.gen_regex()["single_use"] scrubbed_str = har.har_str for pattern, redacted in patterns.iteritems(): scrubbed_str = re.sub(pattern, redacted, scrubbed_str) clean_har = Har(har=scrubbed_str) return clean_har def scrub_wordlist(self, har, wordlist): if not isinstance(har, Har): raise TypeError("'har' must be a Har object") trimmedlist = self.trim_wordlist(har=har, wordlist=wordlist) har_str_scrubbed = har.har_str for word in trimmedlist: wordpatterns = self.gen_regex(word)["word_patterns"] for pattern, redacted in wordpatterns.iteritems(): har_str_scrubbed = re.sub( pattern, redacted, har_str_scrubbed, flags=re.I) clean_har = Har(har=har_str_scrubbed) return clean_har
Apache License 2.0
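A hedged end-to-end sketch following the docstring's "Typical usage". The HAR file paths are placeholders, the package must be run from a checkout that provides config.json and the wordlist, and the module is Python 2 code (urllib2, basestring).

import json
from harsanitizer.harsanitizer import Har, HarSanitizer

with open('capture.har', 'r') as f:
    har = Har(har=json.load(f))

hs = HarSanitizer()
redacted = hs.scrub(har, all_cookies=True, content_list=['image/gif'])

with open('capture_redacted.har', 'w') as f:
    f.write(redacted.har_str)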
nuthouse01/pmx-vmd-scripting-tools
mmd_scripting/core/nuthouse01_io.py
write_list_to_txtfile
python
def write_list_to_txtfile(dest_path: str, content: List[str], use_jis_encoding=False, quiet=False) -> None:
    writeme = "\n".join(content)
    write_str_to_txtfile(dest_path, writeme, use_jis_encoding=use_jis_encoding, quiet=quiet)
    return None

WRITE a list of strings from memory into a TEXT file.

:param dest_path: destination file path, as a string, relative from CWD or absolute
:param content: list of lines, each line is a string
:param use_jis_encoding: by default, assume utf-8 encoding. if this=True, use shift_jis instead.
:param quiet: by default, print the absolute path being written to. if this=True, don't do this.
https://github.com/nuthouse01/pmx-vmd-scripting-tools/blob/21a397db92c5f8034494dfcc41c7814096d3dd50/mmd_scripting/core/nuthouse01_io.py#L226-L237
import csv import json import os import stat import sys from os import path from typing import Any, List import mmd_scripting.core.nuthouse01_core as core _SCRIPT_VERSION = "Script version: Nuthouse01 - v1.07.02 - 7/30/2021" MY_APP_NAME = "nuthouse01_mmd_tools" MY_JSON_NAME = "persist.txt" def get_persistent_storage_json(key:str) -> Any: persist_path = _get_persistent_storage_path(MY_JSON_NAME) str_data = read_txtfile_to_list(src_path=persist_path, use_jis_encoding=False, quiet=True) str_data_joined = "\n".join(str_data) if str_data_joined == "": return None data = json.loads(str_data_joined) try: return data[key] except KeyError: return None def write_persistent_storage_json(key:str, newval:Any) -> None: persist_path = _get_persistent_storage_path(MY_JSON_NAME) str_data = read_txtfile_to_list(src_path=persist_path, use_jis_encoding=False, quiet=True) str_data_joined = "\n".join(str_data) if str_data_joined == "": data = {key: newval} else: data = json.loads(str_data_joined) data[key] = newval str_data = json.dumps(data, ensure_ascii=False, indent="\t") write_str_to_txtfile(dest_path=persist_path, content=str_data, use_jis_encoding=False, quiet=True) return None def _get_persistent_storage_path(filename="") -> str: if sys.platform == 'win32': appdata = path.join(os.getenv('APPDATA'), MY_APP_NAME) else: appdata = path.expanduser(path.join("~", "." + MY_APP_NAME)) if not path.exists(appdata): os.makedirs(appdata) if filename: retme = path.join(appdata, filename) if not path.exists(retme): write_str_to_txtfile(retme, "", quiet=True) return retme return appdata def write_csvlist_to_file(dest_path:str, content:List[List[Any]], use_jis_encoding=False, quiet=False) -> None: buildme = [] for line in content: newline = [] if isinstance(line, str): newline_str = line else: for item in line: if isinstance(item, str): newstr = item newstr.replace('"', '""') if ('"' in newstr) or (',' in newstr) or (len(newstr) > 0 and (newstr[0].isspace() or newstr[-1].isspace())): newstr = '"%s"' % newstr newline.append(newstr) else: newline.append(str(item)) newline_str = ",".join(newline) buildme.append(newline_str) write_list_to_txtfile(dest_path, buildme, use_jis_encoding=use_jis_encoding, quiet=quiet) return None def read_file_to_csvlist(src_path:str, use_jis_encoding=False, quiet=False) -> List[List[Any]]: rb_list = read_txtfile_to_list(src_path, use_jis_encoding=use_jis_encoding, quiet=quiet) reader = csv.reader(rb_list, delimiter=',', quoting=csv.QUOTE_ALL) csv_content = [] try: for row in reader: csv_content.append(row) except csv.Error as e: core.MY_PRINT_FUNC(e.__class__.__name__, e) core.MY_PRINT_FUNC("ERROR: malformed CSV format in the text file prevented parsing from text to list form, check your commas") core.MY_PRINT_FUNC("file '{}', line #{}".format(src_path, reader.line_num)) core.MY_PRINT_FUNC("input line = '{}'".format(rb_list[reader.line_num])) raise data = [] for row in csv_content: newrow = [] for item in row: try: newrow.append(int(item)) continue except ValueError: pass try: newrow.append(float(item)) continue except ValueError: pass if item.lower() == "true": newrow.append(True) continue if item.lower() == "false": newrow.append(False) continue if item == "None": newrow.append(None) continue newrow.append(item) data.append(newrow) return data
MIT License
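A minimal sketch of writing a list of lines to a UTF-8 text file with this helper, assuming the mmd_scripting package is importable; the file name and lines are placeholders.

from mmd_scripting.core import nuthouse01_io as io

lines = ["model rename log", "右腕 -> arm_R", "左腕 -> arm_L"]
io.write_list_to_txtfile("rename_log.txt", lines, use_jis_encoding=False)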
afmurillo/dhalsim
dhalsim/python2/entities/attack.py
TriggerBetweenAttack.apply
python
def apply(self, plc):
    sensor_value = plc.get_tag(self.sensor)
    if self.lower_value < sensor_value < self.upper_value:
        plc.set_attack_flag(True, self.name)
        plc.logger.debug(self.__str__())
        plc.set_tag(self.actuator, self.command)
    else:
        plc.set_attack_flag(False, self.name)
Applies the TriggerAttack when necessary :param plc: The PLC that will apply the action
https://github.com/afmurillo/dhalsim/blob/16071598bb21f6125159678f2ac42169a4e2613d/dhalsim/python2/entities/attack.py#L167-L179
from abc import ABCMeta, abstractmethod class Attack: __metaclass__ = ABCMeta def __init__(self, name, actuator, command): self.name = name self.actuator = actuator self.command = command @abstractmethod def __str__(self): return "{type} \"{name}\" commencing, executing command {command} on actuator" " {actuator}.".format(type=self.__class__.__name__, name=self.name, command=self.command, actuator=self.actuator) @abstractmethod def apply(self, plc): pass class TimeAttack(Attack): def __init__(self, name, actuator, command, start, end): super(TimeAttack, self).__init__(name, actuator, command) self.start = start self.end = end def __str__(self): return super(TimeAttack, self).__str__() + " Triggered because {start} iterations have" " occured. Ends at {end} iterations.".format( start=self.start, end=self.end) def apply(self, plc): curr_time = plc.get_master_clock() if self.start <= curr_time <= self.end: plc.set_attack_flag(True, self.name) plc.logger.debug(self.__str__()) plc.set_tag(self.actuator, self.command) else: plc.set_attack_flag(False, self.name) class TriggerBelowAttack(Attack): def __init__(self, name, actuator, command, sensor, value): super(TriggerBelowAttack, self).__init__(name, actuator, command) self.sensor = sensor self.value = value def __str__(self): return super(TriggerBelowAttack, self).__str__() + " Triggered because sensor {sensor}" " fell below {value}." .format(sensor=self.sensor, value=self.value) def apply(self, plc): sensor_value = plc.get_tag(self.sensor) if sensor_value < self.value: plc.set_attack_flag(True, self.name) plc.logger.debug(self.__str__()) plc.set_tag(self.actuator, self.command) else: plc.set_attack_flag(False, self.name) class TriggerAboveAttack(Attack): def __init__(self, name, actuator, command, sensor, value): super(TriggerAboveAttack, self).__init__(name, actuator, command) self.sensor = sensor self.value = value def __str__(self): return super(TriggerAboveAttack, self).__str__() + " Triggered because sensor {sensor}" " fell above {value}." .format(sensor=self.sensor, value=self.value) def apply(self, plc): sensor_value = plc.get_tag(self.sensor) if sensor_value > self.value: plc.set_attack_flag(True, self.name) plc.logger.debug(self.__str__()) plc.set_tag(self.actuator, self.command) else: plc.set_attack_flag(False, self.name) class TriggerBetweenAttack(Attack): def __init__(self, name, actuator, command, sensor, lower_value, upper_value): super(TriggerBetweenAttack, self).__init__(name, actuator, command) self.sensor = sensor self.lower_value = lower_value self.upper_value = upper_value def __str__(self): return super(TriggerBetweenAttack, self).__str__() + " Triggered because sensor {sensor}" " fell between {lower} and {upper}" .format(sensor=self.sensor, lower=self.lower_value, upper=self.upper_value)
MIT License
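A hedged sketch that drives the attack with a stand-in PLC object. The PLC interface (get_tag, set_tag, set_attack_flag, logger) is inferred from the calls made in apply(); the tag and actuator names are invented.

import logging
from dhalsim.python2.entities.attack import TriggerBetweenAttack

class FakePLC(object):
    logger = logging.getLogger('fake_plc')
    tags = {'T1': 2.5}
    def get_tag(self, tag): return self.tags[tag]
    def set_tag(self, tag, value): self.tags[tag] = value
    def set_attack_flag(self, flag, name): self.flag = (flag, name)

attack = TriggerBetweenAttack(
    name='close_pump', actuator='PUMP1', command=0,
    sensor='T1', lower_value=1.0, upper_value=3.0)
attack.apply(FakePLC())  # 1.0 < 2.5 < 3.0, so PUMP1 is set to 0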
buysdb/singlecellmultiomics
singlecellmultiomics/molecule/fourthiouridine.py
FourThiouridine.obtain_conversions
python
def obtain_conversions(self, classifier=None):
    aligned_reference_positions = {}
    for read in self.iter_reads():
        for read_pos, ref_pos, ref_base in read.get_aligned_pairs(
                with_seq=True, matches_only=True):
            aligned_reference_positions[(
                read.reference_name, ref_pos)] = ref_base.upper()

    try:
        consensus = self.get_consensus(classifier=classifier)
    except ValueError:
        raise ValueError(
            'Cannot obtain a safe consensus for this molecule')

    self.converted_bases = 0
    conversions = {}
    for location, reference_base in aligned_reference_positions.items():
        if location not in consensus:
            continue
        if ((not self.strand and reference_base == 'T' and
                consensus[location] == 'C') or
                (self.strand and reference_base == 'A' and
                 consensus[location] in 'G')):
            conversions[location] = {
                'ref': reference_base, 'obs': consensus[location]}
            self.converted_bases += 1

    self.set_meta('4U', self.converted_bases)

This method obtains the amount of converted bases and stores them to
self.converted_bases and the 4U tag

Args:
    classifier : classifier used for consensus determination

returns:
    None
https://github.com/buysdb/singlecellmultiomics/blob/8fc8455ef3109a423188cb0323dd1a185dc17b41/singlecellmultiomics/molecule/fourthiouridine.py#L42-L77
from singlecellmultiomics.molecule import Molecule class FourThiouridine(Molecule): def __init__(self, fragments=None, classifier=None, **kwargs): Molecule.__init__(self, fragments=fragments, **kwargs) self.classifier = classifier self.gene = None def __finalise__(self): super().__finalise__() self.obtain_conversions(self.classifier) for frag in self: if frag.gene is not None: self.gene = frag.gene def is_valid(self, set_rejection_reasons=False): if not super().is_valid(set_rejection_reasons=set_rejection_reasons): return False try: consensus = self.get_consensus() except ValueError: if set_rejection_reasons: self.set_rejection_reason('no_consensus') return False except TypeError: if set_rejection_reasons: self.set_rejection_reason('getPairGenomicLocations_failed') return False return True
MIT License
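A hedged sketch of where obtain_conversions fits: molecules yielded by the package's MoleculeIterator are finalised, so the 4U count is ready to read. MoleculeIterator, the Fragment class, their keyword names, and the tagged BAM input are assumptions not shown in this record.

import pysam
from singlecellmultiomics.molecule import MoleculeIterator
from singlecellmultiomics.molecule.fourthiouridine import FourThiouridine
from singlecellmultiomics.fragment import Fragment

with pysam.AlignmentFile('tagged.bam') as alignments:
    for molecule in MoleculeIterator(
            alignments=alignments,
            molecule_class=FourThiouridine,
            fragment_class=Fragment):
        # __finalise__ has already called obtain_conversions()
        print(molecule.converted_bases)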
locationlabs/mockredis
mockredis/sortedset.py
SortedSet.rank
python
def rank(self, member):
    score = self._members.get(member)
    if score is None:
        return None
    return bisect_left(self._scores, (score, member))
Get the rank (index of a member).
https://github.com/locationlabs/mockredis/blob/1a8d82f44d217fc9c64143940a59f2f6af403fb8/mockredis/sortedset.py#L108-L115
from bisect import bisect_left, bisect_right class SortedSet(object): def __init__(self): self._scores = [] self._members = {} def clear(self): self.__init__() def __len__(self): return len(self._members) def __contains__(self, member): return member in self._members def __str__(self): return self.__repr__() def __repr__(self): return "SortedSet({})".format(self._scores) def __eq__(self, other): return self._scores == other._scores and self._members == other._members def __ne__(self, other): return not self == other def __setitem__(self, member, score): self.insert(member, score) def __delitem__(self, member): self.remove(member) def __getitem__(self, member): if isinstance(member, slice): raise TypeError("Slicing not supported") return self._members[member] def __iter__(self): return self._scores.__iter__() def __reversed__(self): return self._scores.__reversed__() def insert(self, member, score): found = self.remove(member) index = bisect_left(self._scores, (score, member)) self._scores.insert(index, (score, member)) self._members[member] = score return not found def remove(self, member): if member not in self: return False score = self._members[member] score_index = bisect_left(self._scores, (score, member)) del self._scores[score_index] del self._members[member] return True def score(self, member): return self._members.get(member)
Apache License 2.0
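A short usage sketch for rank(), assuming mockredis is installed: members are ordered by (score, member), so rank is the index within that ordering.

from mockredis.sortedset import SortedSet

zset = SortedSet()
zset.insert('a', 3.0)
zset.insert('b', 1.0)
zset.insert('c', 2.0)
print(zset.rank('a'))        # 2 (highest score, last index)
print(zset.rank('missing'))  # None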
dwavesystems/dimod
dimod/higherorder/polynomial.py
BinaryPolynomial.relabel_variables
python
def relabel_variables(self, mapping, inplace=True):
    if not inplace:
        return self.copy().relabel_variables(mapping, inplace=True)

    for submap in iter_safe_relabels(mapping, self.variables):
        for oldterm, bias in list(self.items()):
            newterm = frozenset((submap.get(v, v) for v in oldterm))

            if newterm != oldterm:
                self[newterm] = bias
                del self[oldterm]

    return self

Relabel variables of a binary polynomial as specified by mapping.

Args:
    mapping (dict):
        Dict mapping current variable labels to new ones. If an
        incomplete mapping is provided, unmapped variables retain their
        current labels.

    inplace (bool, optional, default=True):
        If True, the binary polynomial is updated in-place; otherwise, a
        new binary polynomial is returned.

Returns:
    :class:`.BinaryPolynomial`: A binary polynomial with the variables
    relabeled. If `inplace` is set to True, returns itself.
https://github.com/dwavesystems/dimod/blob/af5a722f96250034a9099043927bf8ebc5294e40/dimod/higherorder/polynomial.py#L232-L262
from __future__ import division import itertools import collections.abc as abc from numbers import Number import numpy as np from dimod.decorators import vartype_argument from dimod.sampleset import as_samples from dimod.utilities import iter_safe_relabels from dimod.vartypes import Vartype __all__ = 'BinaryPolynomial', def asfrozenset(term): return term if isinstance(term, frozenset) else frozenset(term) class BinaryPolynomial(abc.MutableMapping): @vartype_argument('vartype') def __init__(self, poly, vartype): if isinstance(poly, abc.Mapping): poly = poly.items() self._terms = terms = {} for term, bias in poly: fsterm = asfrozenset(term) if len(fsterm) < len(term) and vartype is Vartype.SPIN: new = set() term = tuple(term) for v in fsterm: if term.count(v) % 2: new.add(v) fsterm = frozenset(new) if fsterm in terms: terms[fsterm] += bias else: terms[fsterm] = bias self.vartype = vartype def __contains__(self, term): return asfrozenset(term) in self._terms def __delitem__(self, term): del self._terms[asfrozenset(term)] def __eq__(self, other): if not isinstance(other, BinaryPolynomial): try: other = type(self)(other, self.vartype) except Exception: return False self_terms = self._terms other_terms = other._terms return ( self.vartype == other.vartype and all( (not bias or other_terms.get(term, 0.) == bias) for term, bias in self.items() ) and all( (not bias or self_terms.get(term, 0.) == bias) for term, bias in other.items() ) ) def __ne__(self, other): return not (self == other) def __getitem__(self, term): return self._terms[asfrozenset(term)] def __iter__(self): return iter(self._terms) def __len__(self): return len(self._terms) def __setitem__(self, term, bias): self._terms[asfrozenset(term)] = bias def __repr__(self): return '{!s}({!r}, {!r})'.format(self.__class__.__name__, self._terms, self.vartype.name) @property def variables(self): return set().union(*self._terms) @property def degree(self): if len(self) == 0: return 0 return max(map(len, self._terms)) def copy(self): return type(self)(self, self.vartype) def energy(self, sample_like, dtype=float): energy, = self.energies(sample_like, dtype=dtype) return energy def energies(self, samples_like, dtype=float): samples, labels = as_samples(samples_like) if labels: idx, label = zip(*enumerate(labels)) labeldict = dict(zip(label, idx)) else: labeldict = {} num_samples = samples.shape[0] energies = np.zeros(num_samples, dtype=dtype) for term, bias in self.items(): if len(term) == 0: energies += bias else: energies += np.prod([samples[:, labeldict[v]] for v in term], axis=0) * bias return energies
Apache License 2.0
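A short usage sketch, assuming the dimod package is installed; terms are given as tuples and stored internally as frozensets.

import dimod

poly = dimod.BinaryPolynomial({('a', 'b'): -1.0, ('b', 'c', 'd'): 0.5}, dimod.SPIN)
poly.relabel_variables({'a': 0, 'b': 1, 'c': 2, 'd': 3}, inplace=True)
print(poly.variables)  # the four new integer labels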
kristerw/spirv-tools
spirv_tools/write_il.py
output_basic_block
python
def output_basic_block(stream, module, basic_block):
    stream.write(str(basic_block.inst.result_id) + ':\n')
    for inst in basic_block.insts:
        output_instruction(stream, module, inst, False)
Output one basic block.
https://github.com/kristerw/spirv-tools/blob/ed2a5aca2100d28f2c61d471c42bc3dc2603ea4f/spirv_tools/write_il.py#L249-L253
import re from spirv_tools import ext_inst from spirv_tools import spirv from spirv_tools import ir def id_name(module, operand): if operand in module.id_to_symbol_name: return module.id_to_symbol_name[operand] elif operand in module.type_id_to_name: return module.type_id_to_name[operand] else: return str(operand) def format_mask(kind, mask_list): if not mask_list: return [val for val in spirv.spv[kind] if spirv.spv[kind][val] == 0][0] separator = ' | ' def stringify_mask_entry(e): if isinstance(e, tuple): return "%s(%s)" % (e[0], ", ".join(str(i) for i in e[1:])) else: return e return separator.join(stringify_mask_entry(e) for e in mask_list) def output_extinst_instruction(stream, module, inst, is_raw_mode, indent=' '): assert inst.op_name == 'OpExtInst' line = indent if inst.result_id is not None: result_id = inst.result_id if result_id in module.id_to_symbol_name: result_id = module.id_to_symbol_name[result_id] line = line + str(result_id) + ' = ' line = line + inst.op_name if inst.type_id is not None: line = line + ' ' + module.type_id_to_name[inst.type_id] if not is_raw_mode: line = line + format_decorations_for_inst(inst) operand = inst.operands[0] assert operand.inst.op_name == 'OpExtInstImport' import_name = operand.inst.operands[0] if is_raw_mode: line = line + ' ' + id_name(module, operand) + ', ' else: line = line + ' "' + import_name + '", ' operand = inst.operands[1] if import_name in ext_inst.EXT_INST: line = line + ext_inst.EXT_INST[import_name][operand]['name'] + ', ' else: line = line + str(operand) + ', ' operands = inst.operands[2:] for operand in operands: line = line + id_name(module, operand) + ', ' line = line[:-2] stream.write(line + '\n') def output_instruction(stream, module, inst, is_raw_mode, indent=' '): if inst.op_name == 'OpExtInst': output_extinst_instruction(stream, module, inst, is_raw_mode, indent=indent) return line = indent if inst.result_id is not None: result_id = inst.result_id if result_id in module.id_to_symbol_name: result_id = module.id_to_symbol_name[result_id] line = line + str(result_id) + ' = ' line = line + inst.op_name if inst.type_id is not None: line = line + ' ' + module.type_id_to_name[inst.type_id] if not is_raw_mode: line = line + format_decorations_for_inst(inst) op_format = ir.INST_FORMAT[inst.op_name] kind = None if inst.operands: line = line + ' ' operand_kind = list(zip(inst.operands, op_format['operands'])) while operand_kind: operand, kind = operand_kind[0] if kind == 'Id' or kind == 'OptionalId': line = line + id_name(module, operand) + ', ' elif kind == 'LiteralNumber' or kind == 'OptionalLiteralNumber': line = line + str(operand) + ', ' elif kind in ir.MASKS: line = line + format_mask(kind, operand) + ', ' elif kind == 'LiteralString' or kind == 'OptionalLiteralString': line = line + '"' + operand + '"' + ', ' elif kind[:8] == 'Optional' and kind[-4:] == 'Mask': line = line + format_mask(kind[8:], operand) + ', ' elif kind[:8] == 'Variable': break elif kind in spirv.spv: line = line + operand + ', ' else: raise Exception('Unhandled kind ' + kind) operand_kind = operand_kind[1:] while operand_kind: operand, kind = operand_kind.pop(0) if kind == 'VariableIdLiteralPair': operands = inst.operands[(len(op_format['operands'])-1):] while operands: line = line + id_name(module, operands.pop(0)) + ', ' line = line + str(operands.pop(0)) + ', ' elif kind == 'VariableId': operands = inst.operands[(len(op_format['operands'])-1):] for operand in operands: line = line + id_name(module, operand) + ', ' elif kind == 'VariableLiteralIdPair': 
operands = inst.operands[(len(op_format['operands'])-1):] while operands: line = line + str(operands.pop(0)) + ', ' line = line + id_name(module, operands.pop(0)) + ', ' elif kind == 'VariableLiteralNumber': operands = inst.operands[(len(op_format['operands'])-1):] for operand in operands: line = line + str(operand) + ', ' else: raise Exception('Unhandled kind ' + kind) line = line[:-2] stream.write(line + '\n') def get_symbol_name(module, symbol_id): if symbol_id in module.id_to_symbol_name: return module.id_to_symbol_name[symbol_id] for inst in module.global_instructions.name_insts: if inst.op_name == 'OpName' and inst.operands[0] == symbol_id: name = inst.operands[1] regex = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*') match = regex.match(name) if match is None: return '%' + str(symbol_id.value) new_name = match.group(0) symbol_name = '%' + new_name if symbol_name in module.symbol_name_to_id: symbol_name = '%' + str(symbol_id.value) break else: symbol_name = '%' + str(symbol_id.value) module.id_to_symbol_name[symbol_id] = symbol_name module.symbol_name_to_id[symbol_name] = symbol_id return symbol_name def format_decoration(decoration_inst): res = decoration_inst.operands[1] if decoration_inst.operands[2:]: res = res + '(' for param in decoration_inst.operands[2:]: res = res + str(param) + ', ' res = res[:-2] + ')' return res def format_decorations_for_inst(inst): line = '' if inst.result_id is not None: decorations = inst.get_decorations() for decoration in decorations: line = line + ' ' + format_decoration(decoration) return line def add_type_if_needed(module, inst, needed_types): if inst not in needed_types: if inst.op_name != 'OpTypeFunction': if module.type_id_to_name[inst.result_id] == str(inst.result_id): needed_types.add(inst) for operand in inst.operands: if isinstance(operand, ir.Id): if operand.inst.op_name in ir.TYPE_DECLARATION_INSTRUCTIONS: add_type_if_needed(module, operand.inst, needed_types) def get_needed_types(module): needed_types = set() for inst in module.instructions(): if inst.type_id is not None: add_type_if_needed(module, inst.type_id.inst, needed_types) return needed_types def output_instructions(stream, module, insts, is_raw_mode, newline=True): if insts and newline: stream.write('\n') for inst in insts: output_instruction(stream, module, inst, is_raw_mode, indent='')
MIT License
google/cloud-forensics-utils
libcloudforensics/providers/kubernetes/netpol.py
K8sDenyAllNetworkPolicy._spec
python
def _spec(self) -> client.V1NetworkPolicySpec:
    return client.V1NetworkPolicySpec(
        pod_selector=client.V1LabelSelector(match_labels=self.labels),
        policy_types=[
            'Ingress',
            'Egress',
        ])
Override of abstract property.
https://github.com/google/cloud-forensics-utils/blob/38142cf3e00f70d976aa42aa2f9a1981c0240b19/libcloudforensics/providers/kubernetes/netpol.py#L99-L106
import abc import random import string from typing import Dict from kubernetes import client from libcloudforensics.providers.kubernetes import base class K8sNetworkPolicy(base.K8sNamespacedResource): def Delete(self, cascade: bool = True) -> None: api = self._Api(client.NetworkingV1Api) api.delete_namespaced_network_policy(self.name, self.namespace) def Read(self) -> client.V1NetworkPolicy: api = self._Api(client.NetworkingV1Api) return api.read_namespaced_network_policy(self.name, self.namespace) class K8sNetworkPolicyWithSpec(K8sNetworkPolicy, metaclass=abc.ABCMeta): @property @abc.abstractmethod def _spec(self) -> client.V1NetworkPolicySpec: @property def _metadata(self) -> client.V1ObjectMeta: return client.V1ObjectMeta(namespace=self.namespace, name=self.name) @property def _policy(self) -> client.V1NetworkPolicy: return client.V1NetworkPolicy(spec=self._spec, metadata=self._metadata) def Create(self) -> None: api = self._Api(client.NetworkingV1Api) api.create_namespaced_network_policy(self.namespace, self._policy) class K8sDenyAllNetworkPolicy(K8sNetworkPolicyWithSpec): def __init__(self, api_client: client.ApiClient, namespace: str) -> None: self._GenerateTag() name = 'cfu-netpol-{0:s}'.format(self._tag) super().__init__(api_client, name, namespace) def _GenerateTag(self) -> None: chars = random.choices(string.ascii_lowercase + string.digits, k=16) self._tag = ''.join(chars) @property def labels(self) -> Dict[str, str]: return {'quarantineId': self._tag} @property
Apache License 2.0
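Usage sketch for the K8sDenyAllNetworkPolicy record above (illustrative only): it assumes a reachable cluster and a kubeconfig the kubernetes Python client can load; the namespace name is invented.

from kubernetes import client, config
from libcloudforensics.providers.kubernetes.netpol import K8sDenyAllNetworkPolicy

config.load_kube_config()                      # assumes a local kubeconfig
api_client = client.ApiClient()

# The generated policy selects pods carrying the random quarantineId label
# and denies both Ingress and Egress for them.
policy = K8sDenyAllNetworkPolicy(api_client, 'default')
policy.Create()
print(policy.labels)                           # e.g. {'quarantineId': 'a1b2...'}

# Remove the policy once the investigation is finished.
policy.Delete()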
cleverhans-lab/cleverhans
cleverhans_v3.1.0/examples/multigpu_advtrain/evaluator.py
Evaluator.log_value
python
def log_value(self, tag, val, desc=""):
    logging.info("%s (%s): %.4f" % (desc, tag, val))
    self.summary.value.add(tag=tag, simple_value=val)
Log values to standard output and Tensorflow summary.

:param tag: summary tag.
:param val: (required float or numpy array) value to be logged.
:param desc: (optional) additional description to be printed.
https://github.com/cleverhans-lab/cleverhans/blob/4aed4be702be5ce13d5017b8a3c6a2cdc4fc0009/cleverhans_v3.1.0/examples/multigpu_advtrain/evaluator.py#L140-L149
import logging import tensorflow as tf from cleverhans.utils_tf import model_eval from cleverhans.attacks import FastGradientMethod from cleverhans.attacks import MadryEtAl from attacks_multigpu import MadryEtAlMultiGPU def create_adv_by_name(model, x, attack_type, sess, dataset, y=None, **kwargs): attack_names = { "FGSM": FastGradientMethod, "MadryEtAl": MadryEtAl, "MadryEtAl_y": MadryEtAl, "MadryEtAl_multigpu": MadryEtAlMultiGPU, "MadryEtAl_y_multigpu": MadryEtAlMultiGPU, } if attack_type not in attack_names: raise Exception("Attack %s not defined." % attack_type) attack_params_shared = { "mnist": { "eps": 0.3, "eps_iter": 0.01, "clip_min": 0.0, "clip_max": 1.0, "nb_iter": 40, }, "cifar10": { "eps": 8.0 / 255, "eps_iter": 0.01, "clip_min": 0.0, "clip_max": 1.0, "nb_iter": 20, }, } with tf.variable_scope(attack_type): attack_class = attack_names[attack_type] attack = attack_class(model, sess=sess) fd_kwargs = attack.feedable_kwargs.keys() + attack.structural_kwargs params = attack_params_shared[dataset].copy() params.update({k: v for k, v in kwargs.items() if v is not None}) params = {k: v for k, v in params.items() if k in fd_kwargs} if "_y" in attack_type: params["y"] = y logging.info(params) adv_x = attack.generate(x, **params) return adv_x class Evaluator(object): def __init__( self, sess, model, batch_size, x_pre, x, y, data, writer, hparams=None ): if hparams is None: hparams = {} model.set_training(False) self.preds = model.get_probs(x) self.sess = sess self.batch_size = batch_size self.x_pre = x_pre self.x = x self.y = y self.X_train, self.Y_train, self.X_test, self.Y_test = data self.writer = writer self.hparams = hparams self.eval_params = {"batch_size": batch_size} self.epoch = 0 self.attack_type_train = hparams.attack_type_train self.attack_type_test = [] for att_type in hparams.attack_type_test.split(","): if att_type == "": continue self.attack_type_test += [att_type] self.attacks = {} for att_type in self.attack_type_test: logging.info("Intializing attack %s" % att_type) adv_x = create_adv_by_name( model, x, att_type, sess, dataset=hparams.dataset, y=y ) model.set_training(False) preds_adv = model.get_probs(adv_x) self.attacks[att_type] = (adv_x, preds_adv) tf.summary.image(att_type, adv_x, max_outputs=10) self.sum_op = tf.summary.merge_all()
MIT License
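To make the record concrete, the standalone sketch below mirrors what log_value does internally, using the TensorFlow 1.x summary protobuf; the tag, value and description are invented.

import logging
import tensorflow as tf

logging.basicConfig(level=logging.INFO)

summary = tf.Summary()
tag, val, desc = 'acc_adv_FGSM', 0.8312, 'Adversarial accuracy'

logging.info("%s (%s): %.4f" % (desc, tag, val))    # goes to standard output
summary.value.add(tag=tag, simple_value=val)        # becomes a Tensorboard scalar

# A tf.summary.FileWriter can then persist the summary for an epoch:
# writer.add_summary(summary, global_step=epoch)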
jaantollander/crowddynamics
crowddynamics/simulation/field.py
Field.sample_spawn
python
def sample_spawn(self, spawn_index: int, radius: float = 0.3):
    return self._samples(self.spawns[spawn_index], self.obstacles, radius)
Generator for sampling points inside spawn without overlapping with obstacles
https://github.com/jaantollander/crowddynamics/blob/a5858c02c06ed72f49b7bd6aaabd7cf16b3054c3/crowddynamics/simulation/field.py#L126-L129
from functools import lru_cache import numpy as np from shapely.geometry import Polygon from shapely.geometry.base import BaseGeometry from traitlets import Instance, List, validate from crowddynamics.core.geometry import union from crowddynamics.core.sampling import polygon_sample from crowddynamics.core.steering.obstacle_handling import direction_map_obstacles, obstacle_handling from crowddynamics.core.steering.quickest_path import meshgrid, shortest_path from crowddynamics.exceptions import ValidationError, CrowdDynamicsException, InvalidType from crowddynamics.simulation.base import FieldBase class Field(FieldBase): domain = Instance( Polygon, allow_none=True, help='Domain') obstacles = Instance( BaseGeometry, allow_none=True, help='Obstacles') targets = List( Instance(BaseGeometry), help='List of targets') spawns = List( Instance(BaseGeometry), help='List of spawns') @validate('domain') def _valid_domain(self, proposal): value = proposal['value'] if not value.is_valid: raise ValidationError('{} should not be invalid'.format(value)) if value.is_empty: raise ValidationError('{} should not empty'.format(value)) return value @validate('obstacles') def _valid_obstacles(self, proposal): value = proposal['value'] if not value.is_valid: raise ValidationError('{} should not be invalid'.format(value)) if value.is_empty: raise ValidationError('{} should not empty'.format(value)) return value def convex_hull(self): field = BaseGeometry() if self.obstacles: field |= self.obstacles if self.targets: field |= union(*self.targets) if self.spawns: field |= union(*self.spawns) return field.convex_hull @staticmethod def _samples(spawn, obstacles, radius=0.3): geom = spawn - obstacles.buffer(radius) if obstacles else spawn vertices = np.asarray(geom.convex_hull.exterior) return polygon_sample(vertices)
MIT License
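Illustrative sketch of drawing points from the generator returned by sample_spawn; the square spawn polygon and the radius are invented, and no obstacles are set.

import itertools
from shapely.geometry import Polygon
from crowddynamics.simulation.field import Field

field = Field(
    domain=Polygon([(0, 0), (10, 0), (10, 10), (0, 10)]),
    spawns=[Polygon([(1, 1), (4, 1), (4, 4), (1, 4)])],
)

samples = field.sample_spawn(0, radius=0.3)     # generator of sample points
print(list(itertools.islice(samples, 3)))       # first three samples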
tune-archive/freight_forwarder
freight_forwarder/container/host_config.py
HostConfig._convert_volume_from
python
def _convert_volume_from(self, volume_from):
    if ':' in volume_from:
        container, permissions = volume_from.split(':')
    else:
        container = volume_from
        permissions = 'rw'

    if permissions not in ('ro', 'rw'):
        raise ValueError("only permissions supported for volumes_from are rw and ro.")

    return "{0}:{1}".format(container, permissions)
:param volume_from: :return:
https://github.com/tune-archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/container/host_config.py#L605-L619
from __future__ import unicode_literals import six from freight_forwarder.utils import capitalize_keys, is_valid_hostname, is_valid_ip VALID_CAPABILITIES = ( 'ALL', 'SETPCAP', 'SYS_MODULE', 'SYS_RAWIO', 'SYS_PACCT', 'SYS_ADMIN', 'SYS_NICE', 'SYS_RESOURCE', 'SYS_TIME', 'SYS_TTY_CONFIG', 'MKNOD' 'AUDIT_WRITE', 'AUDIT_CONTROL', 'MAC_OVERRIDE', 'MAC_ADMIN', 'NET_ADMIN', 'SYSLOG' 'CHOWN', 'NET_RAW', 'DAC_OVERRIDE', 'FOWNER', 'DAC_READ_SEARCH', 'FSETID' 'KILL', 'SETGID', 'SETUID', 'LINUX_IMMUTABLE' 'NET_BIND_SERVICE' 'NET_BROADCAST', 'IPC_LOCK', 'IPC_OWNER', 'SYS_CHROOT', 'SYS_PTRACE', 'SYS_BOOT', 'LEASE', 'SETFCAP', 'WAKE_ALARM', 'BLOCK_SUSPEND' ) VALID_NETWORK_MODES = ( 'bridge', 'container', 'host', "default", '', None ) VALID_LOG_DRIVER_TYPES = ( 'journald', 'json-file', 'syslog', 'none' ) class HostConfig(object): def __init__(self, properties={}): self.binds = properties.get('binds', ['/dev/log:/dev/log:rw']) self.cap_add = properties.get('cap_add', None) self.cap_drop = properties.get('cap_drop', None) self.devices = properties.get('devices', None) self.links = properties.get('links', []) self.lxc_conf = properties.get('lxc_conf', []) self.readonly_root_fs = properties.get('readonly_root_fs') or properties.get('readonly_rootfs', False) self.security_opt = properties.get('security_opt', None) self.memory = properties.get('memory', 0) self.memory_swap = properties.get('memory_swap', 0) self.cpu_shares = properties.get('cpu_shares', 0) self.port_bindings = properties.get('port_bindings') or properties.get('ports', {}) self.publish_all_ports = properties.get('publish_all_ports', False) self.privileged = properties.get('privileged', False) self.dns = properties.get('dns', None) self.dns_search = properties.get('dns_search', None) self.extra_hosts = properties.get('extra_hosts', []) self.network_mode = properties.get('network_mode', 'bridge') self.volumes_from = properties.get('volumes_from', []) self.cgroup_parent = properties.get('cgroup_parent', '') self.log_config = properties.get( 'log_config', {"config": {'max-size': '100m', 'max-file': '2'}, "type": "json-file"} ) self.ulimits = properties.get('ulimits', []) self.restart_policy = properties.get('restart_policy', {}) def to_dict(self): return dict([(name, getattr(self, name)) for name in dir(self) if not name.startswith('_') and not callable(getattr(self, name))]) def docker_py_dict(self): return { 'binds': self._binds, 'port_bindings': capitalize_keys(self._port_bindings) if self._port_bindings else self._port_bindings, 'lxc_conf': self._lxc_conf, 'publish_all_ports': self._publish_all_ports, 'links': [link.split(':') for link in self._links] if self._links else self._links, 'privileged': self._privileged, 'dns': self._dns, 'dns_search': self._dns_search, 'volumes_from': self._volumes_from, 'network_mode': self._network_mode, 'restart_policy': self._restart_policy, 'cap_add': self._cap_add, 'cap_drop': self._cap_drop, 'devices': self._devices, 'extra_hosts': self._extra_hosts, 'read_only': self._readonly_root_fs, 'security_opt': self._security_opt, 'ulimits': self._ulimits, 'log_config': self._log_config, 'mem_limit': self._memory, 'memswap_limit': self._memory_swap } @property def binds(self): return self._binds @binds.setter def binds(self, value): self._binds = self._convert_binds(value) @property def cap_add(self): return self._cap_add @cap_add.setter def cap_add(self, value): if value is None: self._cap_add = value else: self._cap_add = self._create_capabilities_list(value) for capability in self._cap_add: if hasattr(self, 
'_cap_drop'): if capability in self._cap_drop: raise ValueError( "circular reference in cap_add. please remove {0} from either cap_add or cap_drop".format(capability) ) @property def cap_drop(self): return self._cap_drop @cap_drop.setter def cap_drop(self, value): if value is None: self._cap_drop = value else: self._cap_drop = self._create_capabilities_list(value) for capability in self._cap_drop: if hasattr(self, '_cap_add'): if capability in self._cap_add: raise ValueError( "circular reference in cap_add. please remove {0} from either cap_add or cap_drop".format(capability) ) @property def cgroup_parent(self): return self._cgroup_parent @cgroup_parent.setter def cgroup_parent(self, value): if not isinstance(value, six.string_types): raise TypeError("cgroup parent must be a string. {0} was passed".format(value)) self._cgroup_parent = value @property def cpu_shares(self): return self._cpu_shares @cpu_shares.setter def cpu_shares(self, value): if not isinstance(value, int): raise TypeError("cpu shares must be an int. {0} was passed".format(value)) self._cpu_shares = value @property def devices(self): return self._devices @devices.setter def devices(self, value): if value is None: self._devices = None elif isinstance(value, list): results = [] delimiter = ':' for device in value: if not isinstance(device, six.string_types): raise TypeError("each device must be a str. {0} was passed".format(device)) occurrences = device.count(delimiter) permissions = 'rwm' if occurrences is 0: path_on_host = device path_in_container = device elif occurrences is 1: path_on_host, path_in_container = device.split(delimiter) elif occurrences is 2: path_on_host, path_in_container, permissions = device.split(delimiter) if permissions not in 'rwm': raise ValueError("only permissions supported for devices are any combination of 'r' 'w' 'm'.") else: raise ValueError( """When passing devices they must be in one of the following formats: path_on_host, path_on_host:path_in_container, or path_on_host:path_in_container:permissions""" ) results.append("{0}:{1}:{2}".format(path_on_host, path_in_container, permissions)) self._devices = results else: raise TypeError("devices must be a list or None.") @property def dns(self): return self._dns @dns.setter def dns(self, value): self._dns = self._create_dns_list(value) @property def dns_search(self): return self._dns_search @dns_search.setter def dns_search(self, value): self._dns_search = self._create_dns_list(value) @property def log_config(self): return self._log_config @log_config.setter def log_config(self, value): if not isinstance(value, dict): raise TypeError("log_config must be a dict. 
{0} was passed".format(value)) config = value.get('config') driver_type = value.get('type') if driver_type not in VALID_LOG_DRIVER_TYPES: raise ValueError("type must be one of the support drivers {0}".format(", ".join(VALID_LOG_DRIVER_TYPES))) if config and not isinstance(config, dict): raise ValueError("log_config.config must be a dict.") if driver_type == 'syslog': config = { 'syslog-facility': config.get('syslog_facility', config.get('syslog-facility')), 'syslog-tag': config.get('syslog_tag', config.get('syslog-tag')) } self._log_config = {'type': driver_type, 'config': config or {}} @property def links(self): return self._links @links.setter def links(self, value): if value is None: self._links = value elif isinstance(value, list): self._links = [] for link in value: if not isinstance(link, six.string_types): raise TypeError("links must be a string.") if ':' not in link: self._links.append(link) continue if link.count(':') is 1: self._links.append(link) else: raise AttributeError( "links must be in one of the following formats: dependency or container_name:alias" ) else: raise TypeError("links must be a list or None.") @property def lxc_conf(self): return self._lxc_conf @lxc_conf.setter def lxc_conf(self, value): self._lxc_conf = [] if value is None: return elif isinstance(value, (list, dict)): self._lxc_conf = value else: raise TypeError("lxc conf must be a dict, list, or None") @property def memory(self): return self._memory @memory.setter def memory(self, value): self._memory = value @property def memory_swap(self): return self._memory_swap @memory_swap.setter def memory_swap(self, value): self._memory_swap = value @property def network_mode(self): return self._network_mode @network_mode.setter def network_mode(self, value): if value == 'bridged': value = 'bridge' if value is None: pass elif value not in VALID_NETWORK_MODES: raise ValueError( "network mode must be one of the following values: {0}".format(VALID_NETWORK_MODES) ) self._network_mode = value @property def port_bindings(self): return self._port_bindings @port_bindings.setter def port_bindings(self, value): if isinstance(value, (list, dict)): self._port_bindings = self._convert_port_bindings(value) elif value is None: self._port_bindings = None else: raise TypeError('port bindings must be a dict, list, or None. 
{0} was passed.'.format(type(value))) @property def ports(self): return self.port_bindings @ports.setter def ports(self, value): self.port_bindings = value @property def privileged(self): return self._privileged @privileged.setter def privileged(self, value): if not isinstance(value, bool): raise TypeError("privileged must be a bool: {0} was passed".format(value)) self._privileged = value @property def publish_all_ports(self): return self._publish_all_ports @publish_all_ports.setter def publish_all_ports(self, value): if not isinstance(value, bool): raise TypeError("publish all ports must be a bool: {0} was passed".format(value)) self._publish_all_ports = value @property def readonly_root_fs(self): return self._readonly_root_fs @readonly_root_fs.setter def readonly_root_fs(self, value): if not isinstance(value, bool): raise TypeError("readonly_root_fs is required to be a bool.") self._readonly_root_fs = value @property def restart_policy(self): return self._restart_policy @restart_policy.setter def restart_policy(self, value): self._restart_policy = value @property def ulimits(self): return self._ulimits @ulimits.setter def ulimits(self, value): if value is None: pass elif isinstance(value, list): if value: for ulimit in value: if not isinstance(ulimit, dict): raise TypeError('each ulimit must be a dict: { "name": "nofile", "soft": 1024, "hard", 2048 }}') name = ulimit.get('name') hard = ulimit.get('hard') soft = ulimit.get('soft') if not isinstance(name, six.string_types): raise ValueError("ulimit.name must be a string: {0}".format(ulimit)) if soft and not isinstance(soft, int): raise ValueError("ulimit.soft must be an integer: {0}") if hard and not isinstance(hard, int): raise ValueError("ulimit.hard must be an integer: {0}".format(ulimit)) else: raise TypeError('ulimits most be a list or None.') self._ulimits = value @property def extra_hosts(self): return self._extra_hosts @extra_hosts.setter def extra_hosts(self, value): if value is None: self._extra_hosts = value elif isinstance(value, list): self._extra_hosts = value elif isinstance(value, dict): converted_extra_hosts = [] for k, v in sorted(six.iteritems(value)): if not is_valid_hostname(k): raise ValueError("each key in extra hosts is required to be a valid hostname. {0} was passed".format(k)) if not is_valid_ip(v): raise ValueError("each value in extra hosts is required to be a valid ip address. {0} was passed".format(v)) converted_extra_hosts.append('{0}:{1}'.format(k, v)) self._extra_hosts = converted_extra_hosts else: raise TypeError("extra hosts must be a dict, list, or None. {0} was passed".format(value)) @property def security_opt(self): return self._security_opt @security_opt.setter def security_opt(self, value): if value is None: self._security_opt = value elif not isinstance(value, list): raise TypeError('security_opt must be a list') self._security_opt = value @property def volumes_from(self): return self._volumes_from @volumes_from.setter def volumes_from(self, value): volumes_from = [] if isinstance(value, list): for volume_from in value: if not isinstance(volume_from, six.string_types): raise TypeError("each bind must be a str. 
{0} was passed".format(volume_from)) volumes_from.append(self._convert_volume_from(volume_from)) elif isinstance(value, six.string_types): volumes_from.append(self._convert_volume_from(value)) elif value is None: pass else: raise ValueError( """When passing binds they must be in one of the following formats: container_path, host_path:container_path, or host_path:container_path:permissions""" ) self._volumes_from = volumes_from @classmethod def allowed_config_attributes(cls): return tuple(six.text_type(name) for name in dir(cls) if not name.startswith('_') and not callable(getattr(cls, name)))
MIT License
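A short sketch of how the volumes_from setter exercises _convert_volume_from; the container names are invented and the freight_forwarder package is assumed to be importable.

from freight_forwarder.container.host_config import HostConfig

cfg = HostConfig({'volumes_from': ['data_container', 'log_container:ro']})
print(cfg.volumes_from)     # ['data_container:rw', 'log_container:ro']

# Anything other than ro/rw permissions is rejected, e.g.
# HostConfig({'volumes_from': 'data:rwx'}) raises ValueError.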
duerrp/pyexperiment
pyexperiment/utils/sentinel.py
create
python
def create(name, description=''):
    if description == '':
        description = "Sentinel '%s'" % name

    class Sentinel(object):
        def __init__(self):
            self.name = str(name)
            self.__name__ = self.name
            self.__class__.__name__ = self.name
            self.__slots__ = ()
            self.__class__.__module__ = inspect.stack()[2][0].f_globals['__name__']

        def __repr__(self):
            return description

        def __copy__(self):
            return self

        def __deepcopy__(self, _):
            return self

    sentinel = Sentinel()
    del Sentinel
    return sentinel
Creates a new sentinel
https://github.com/duerrp/pyexperiment/blob/c426565d870d944bd5b9712629d8f1ba2527c67f/pyexperiment/utils/sentinel.py#L17-L57
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import

import inspect
MIT License
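Usage sketch for the sentinel factory above; the sentinel name and the lookup helper are invented.

import copy
from pyexperiment.utils.sentinel import create

NOT_SET = create('NOT_SET', "Sentinel for values that were never assigned")

print(repr(NOT_SET))                       # Sentinel for values that were never assigned
print(copy.copy(NOT_SET) is NOT_SET)       # True, copies return the same object
print(copy.deepcopy(NOT_SET) is NOT_SET)   # True

def lookup(config, key, default=NOT_SET):
    value = config.get(key, default)
    if value is NOT_SET:                   # distinguishes "missing" from a stored None
        raise KeyError(key)
    return value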
imageio/imageio
imageio/core/functions.py
imwrite
python
def imwrite(uri, im, format=None, **kwargs):
    imt = type(im)
    im = np.asanyarray(im)
    if not np.issubdtype(im.dtype, np.number):
        raise ValueError("Image is not numeric, but {}.".format(imt.__name__))
    elif im.ndim == 2:
        pass
    elif im.ndim == 3 and im.shape[2] in [1, 3, 4]:
        pass
    else:
        raise ValueError("Image must be 2D (grayscale, RGB, or RGBA).")

    with imopen()(uri, "wi", format=format) as file:
        return file.write(im, **kwargs)
imwrite(uri, im, format=None, **kwargs)

Write an image to the specified file.

Parameters
----------
uri : {str, pathlib.Path, file}
    The resource to write the image to, e.g. a filename, pathlib.Path
    or file object, see the docs for more info.
im : numpy.ndarray
    The image data. Must be NxM, NxMx3 or NxMx4.
format : str
    The format to use to write the file. By default imageio selects
    the appropriate for you based on the filename and its contents.
kwargs : ...
    Further keyword arguments are passed to the writer. See :func:`.help`
    to see what arguments are available for a particular format.
https://github.com/imageio/imageio/blob/a5ce49f1604b15f9566e89e476b3198ef57c964f/imageio/core/functions.py#L162-L195
from numbers import Number import re import numpy as np from .. import formats from .imopen import imopen MEMTEST_DEFAULT_MIM = "256MB" MEMTEST_DEFAULT_MVOL = "1GB" mem_re = re.compile(r"^(\d+\.?\d*)\s*([kKMGTPEZY]?i?)B?$") sizes = {"": 1, None: 1} for i, si in enumerate([""] + list("kMGTPEZY")): sizes[si] = 1000 ** i if si: sizes[si.upper() + "i"] = 1024 ** i def to_nbytes(arg, default=None): if not arg: return None if arg is True: return default if isinstance(arg, Number): return arg match = mem_re.match(arg) if match is None: raise ValueError( "Memory size could not be parsed " "(is your capitalisation correct?): {}".format(arg) ) num, unit = match.groups() try: return float(num) * sizes[unit] except KeyError: raise ValueError( "Memory size unit not recognised " "(is your capitalisation correct?): {}".format(unit) ) def help(name=None): if not name: print(formats) else: print(formats[name]) def get_reader(uri, format=None, mode="?", **kwargs): image_file = imopen()(uri, "r" + mode, format=format) return image_file.legacy_get_reader(**kwargs) def get_writer(uri, format=None, mode="?", **kwargs): image_file = imopen()(uri, "w" + mode, format=format) return image_file.legacy_get_writer(**kwargs) def imread(uri, format=None, **kwargs): if "mode" in kwargs: raise TypeError( 'Invalid keyword argument "mode", ' 'perhaps you mean "pilmode"?' ) with imopen()(uri, "ri", format=format) as file: return file.read(index=0, **kwargs)
BSD 2-Clause Simplified License
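Round-trip sketch for imwrite; the file name and the random RGB image are invented.

import numpy as np
import imageio

im = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)   # RGB test image
imageio.imwrite('example.png', im)        # plugin chosen from the .png extension

back = imageio.imread('example.png')
print(back.shape, back.dtype)             # (64, 64, 3) uint8

# A wrongly shaped array is rejected, e.g. a (2, 2, 7) array raises
# ValueError("Image must be 2D (grayscale, RGB, or RGBA).")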
georgepar/slp
slp/plbind/dm.py
PLDataModuleFromCorpus.add_argparse_args
python
def add_argparse_args(cls, parent_parser):
    parser = super(PLDataModuleFromCorpus, cls).add_argparse_args(parent_parser)

    parser.add_argument(
        "--tokenizer",
        dest="data.tokenizer",
        type=str.lower,
        choices=cls.accepted_tokenizers,
        default="spacy",
        help="Token type. The tokenization will happen at this level.",
    )
    parser.add_argument(
        "--limit-vocab",
        dest="data.limit_vocab_size",
        type=int,
        default=-1,
        help="Limit vocab size. -1 means use the whole vocab. Applicable only when --tokenizer=spacy",
    )
    parser.add_argument(
        "--embeddings-file",
        dest="data.embeddings_file",
        type=dir_path,
        default=None,
        help="Path to file with pretrained embeddings. Applicable only when --tokenizer=spacy",
    )
    parser.add_argument(
        "--embeddings-dim",
        dest="data.embeddings_dim",
        type=int,
        default=50,
        help="Embedding dim of pretrained embeddings. Applicable only when --tokenizer=spacy",
    )
    parser.add_argument(
        "--lang",
        dest="data.lang",
        type=str,
        default="en_core_web_md",
        help="Language for spacy tokenizer, e.g. en_core_web_md. Applicable only when --tokenizer=spacy",
    )
    parser.add_argument(
        "--no-add-specials",
        dest="data.add_special_tokens",
        action="store_false",
        help="Do not add special tokens for hugging face tokenizers",
    )
    parser.add_argument(
        "--lower",
        dest="data.lower",
        action="store_true",
        help="Convert to lowercase.",
    )
    parser.add_argument(
        "--prepend-bos",
        dest="data.prepend_bos",
        action="store_true",
        help="Prepend [BOS] token",
    )
    parser.add_argument(
        "--append-eos",
        dest="data.append_eos",
        action="store_true",
        help="Append [EOS] token",
    )
    parser.add_argument(
        "--max-sentence-length",
        dest="data.max_len",
        type=int,
        default=-1,
        help="Maximum allowed sentence length. -1 means use the whole sentence",
    )

    return parser
Augment input parser with arguments for data loading and corpus processing

Args:
    parent_parser (argparse.ArgumentParser): Parser created by the user

Returns:
    argparse.ArgumentParser: Augmented parser
https://github.com/georgepar/slp/blob/ac55154f063245e0e4ed584c59f16370d228d8a7/slp/plbind/dm.py#L614-L704
import argparse from typing import Any, Callable, List, Optional, Union import numpy as np import pytorch_lightning as pl import torch from loguru import logger from sklearn.model_selection import train_test_split from torch.utils.data import BatchSampler, DataLoader, Dataset, Sampler, random_split from transformers import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP from slp.data.corpus import HfCorpus, TokenizedCorpus, WordCorpus from slp.data.datasets import CorpusDataset, CorpusLMDataset from slp.data.transforms import ToTensor from slp.util.types import dir_path DatasetType = Union[Dataset, List[Any]] def split_data(dataset, test_size, seed): train, test = None, None if isinstance(dataset, torch.utils.data.Dataset): test_len = int(test_size * len(dataset)) train_len = len(dataset) - test_len seed_generator = None if seed is not None: seed_generator = torch.Generator().manual_seed(seed) train, test = random_split( dataset, [train_len, test_len], generator=seed_generator ) else: train, test = train_test_split(dataset, test_size=test_size, random_state=seed) return train, test class PLDataModuleFromDatasets(pl.LightningDataModule): def __init__( self, train: Dataset, val: Dataset = None, test: Dataset = None, val_percent: float = 0.2, test_percent: float = 0.2, batch_size: int = 1, batch_size_eval: Optional[int] = None, seed: Optional[int] = None, num_workers: int = 1, pin_memory: bool = True, drop_last: bool = False, sampler_train: Sampler = None, sampler_val: Sampler = None, sampler_test: Sampler = None, batch_sampler_train: BatchSampler = None, batch_sampler_val: BatchSampler = None, batch_sampler_test: BatchSampler = None, shuffle_eval: bool = False, collate_fn: Optional[Callable[..., Any]] = None, no_test_set: bool = False, ): super(PLDataModuleFromDatasets, self).__init__() self.setup_has_run = False if batch_sampler_train is not None and sampler_train is not None: raise ValueError( "You provided both a sampler and a batch sampler for the train set. These are mutually exclusive" ) if batch_sampler_val is not None and sampler_val is not None: raise ValueError( "You provided both a sampler and a batch sampler for the validation set. These are mutually exclusive" ) if batch_sampler_test is not None and sampler_test is not None: raise ValueError( "You provided both a sampler and a batch sampler for the test set. 
These are mutually exclusive" ) self.val_percent = val_percent self.test_percent = test_percent self.sampler_train = sampler_train self.sampler_val = sampler_val self.sampler_test = sampler_test self.batch_sampler_train = batch_sampler_train self.batch_sampler_val = batch_sampler_val self.batch_sampler_test = batch_sampler_test self.num_workers = num_workers self.pin_memory = pin_memory self.drop_last = drop_last self.shuffle_eval = shuffle_eval self.collate_fn = collate_fn self.batch_size = batch_size self.seed = seed if batch_size_eval is None: batch_size_eval = self.batch_size self.no_test_set = no_test_set self.batch_size_eval = batch_size_eval self.train = train self.val = val self.test = test def prepare_data(self): return None def setup(self, stage=None): if self.val is not None: logger.info("Using provided validation set") if self.test is not None: logger.info("Using provided test set") if self.test is None and self.val is None and not self.no_test_set: assert ( self.val_percent is not None and self.val_percent > 0 ), "You should either provide a validation set or a val set percentage" assert ( self.test_percent is not None and self.test_percent > 0 ), "You should either provide a test set or a test set percentage" testval_percent = self.test_percent + self.val_percent logger.info( f"No test or validation set provided. Creating random splits using {testval_percent * 100}% of training set with seed={self.seed}" ) self.train, testval = split_data( self.train, test_size=testval_percent, seed=self.seed ) test_percent = self.test_percent / testval_percent self.val, self.test = split_data( testval, test_size=test_percent, seed=self.seed ) if self.val is None: assert ( self.val_percent is not None and self.val_percent > 0 ), "You should either provide a validation set or a val set percentage" logger.info( f"No validation set provided. Creating random split using {self.val_percent * 100}% of training set with seed={self.seed}" ) self.train, self.val = split_data( self.train, test_size=self.val_percent, seed=self.seed ) if self.test is None and not self.no_test_set: assert ( test_percent is not None and test_percent > 0 ), "You should either provide a test set or a test set percentage" logger.info( f"No test set provided. 
Creating random split using {self.test_percent * 100}% of training set with seed={self.seed}" ) self.train, self.test = split_data( self.train, test_size=test_percent, seed=self.seed ) logger.info(f"Using {len(self.train)} samples for training") logger.info(f"Using {len(self.val)} samples for validation") if not self.no_test_set: logger.info(f"Using {len(self.test)} samples for testing") self.setup_has_run = True def train_dataloader(self) -> DataLoader: return DataLoader( self.train, batch_size=self.batch_size if self.batch_sampler_train is None else 1, num_workers=self.num_workers, pin_memory=self.pin_memory, drop_last=self.drop_last and (self.batch_sampler_train is None), sampler=self.sampler_train, batch_sampler=self.batch_sampler_train, shuffle=(self.batch_sampler_train is None) and (self.sampler_train is None), collate_fn=self.collate_fn, ) def val_dataloader(self): val = DataLoader( self.val, batch_size=self.batch_size_eval if self.batch_sampler_val is None else 1, num_workers=self.num_workers, pin_memory=self.pin_memory, drop_last=self.drop_last and (self.batch_sampler_val is None), sampler=self.sampler_val, batch_sampler=self.batch_sampler_val, shuffle=( self.shuffle_eval and (self.batch_sampler_val is None) and (self.sampler_val is None) ), collate_fn=self.collate_fn, ) return val def test_dataloader(self): return DataLoader( self.test, batch_size=self.batch_size_eval if self.batch_sampler_test is None else 1, num_workers=self.num_workers, pin_memory=self.pin_memory, drop_last=self.drop_last and (self.batch_sampler_test is None), sampler=self.sampler_test, batch_sampler=self.batch_sampler_test, shuffle=( self.shuffle_eval and (self.batch_sampler_test is None) and (self.sampler_test is None) ), collate_fn=self.collate_fn, ) @classmethod def add_argparse_args( cls, parent_parser: argparse.ArgumentParser ) -> argparse.ArgumentParser: parser = argparse.ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument( "--val-percent", dest="data.val_percent", type=float, default=0.2, help="Percent of validation data to be randomly split from the training set, if no validation set is provided", ) parser.add_argument( "--test-percent", dest="data.test_percent", type=float, default=0.2, help="Percent of test data to be randomly split from the training set, if no test set is provided", ) parser.add_argument( "--bsz", dest="data.batch_size", type=int, default=32, help="Training batch size", ) parser.add_argument( "--bsz-eval", dest="data.batch_size_eval", type=int, default=32, help="Evaluation batch size", ) parser.add_argument( "--num-workers", dest="data.num_workers", type=int, default=1, help="Number of workers to be used in the DataLoader", ) parser.add_argument( "--no-pin-memory", dest="data.pin_memory", action="store_false", help="Don't pin data to GPU memory when transferring", ) parser.add_argument( "--drop-last", dest="data.drop_last", action="store_true", help="Drop last incomplete batch", ) parser.add_argument( "--no-shuffle-eval", dest="data.shuffle_eval", action="store_false", help="Don't shuffle val & test sets", ) return parser class PLDataModuleFromCorpus(PLDataModuleFromDatasets): accepted_tokenizers: List[str] = ["tokenized", "spacy"] + list( ALL_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() ) def __init__( self, train: List, train_labels: Optional[List] = None, val: Optional[List] = None, val_labels: Optional[List] = None, test: Optional[List] = None, test_labels: Optional[List] = None, val_percent: float = 0.2, test_percent: float = 0.2, batch_size: int = 64, 
batch_size_eval: int = None, seed: int = None, num_workers: int = 1, pin_memory: bool = True, drop_last: bool = False, shuffle_eval: bool = False, sampler_train: Sampler = None, sampler_val: Sampler = None, sampler_test: Sampler = None, batch_sampler_train: BatchSampler = None, batch_sampler_val: BatchSampler = None, batch_sampler_test: BatchSampler = None, collate_fn: Optional[Callable[..., Any]] = None, language_model: bool = False, tokenizer: str = "spacy", no_test_set: bool = False, **corpus_args, ): self.language_model = language_model self.tokenizer = tokenizer self.corpus_args = corpus_args train_data, val_data, test_data = self._zip_corpus_and_labels( train, val, test, train_labels, val_labels, test_labels ) self.no_test_set = no_test_set super(PLDataModuleFromCorpus, self).__init__( train_data, val=val_data, test=test_data, val_percent=val_percent, test_percent=test_percent, batch_size=batch_size, batch_size_eval=batch_size_eval, seed=seed, num_workers=num_workers, pin_memory=pin_memory, drop_last=drop_last, shuffle_eval=shuffle_eval, sampler_train=sampler_train, sampler_val=sampler_val, sampler_test=sampler_test, batch_sampler_train=batch_sampler_train, batch_sampler_val=batch_sampler_val, batch_sampler_test=batch_sampler_test, collate_fn=collate_fn, no_test_set=no_test_set, ) def setup(self, stage=None): if self.setup_has_run: return super(PLDataModuleFromCorpus, self).setup(stage=stage) train_corpus, train_labels = zip(*self.train) val_corpus, val_labels = zip(*self.val) if not self.no_test_set: test_corpus, test_labels = zip(*self.test) self.train_corpus, self.val_corpus, self.test_corpus = self._create_corpora( train_corpus, val_corpus, test_corpus, self.corpus_args ) to_tensor = ToTensor(device="cpu") if self.language_model: self.train = CorpusLMDataset(self.train_corpus).map(to_tensor) self.val = CorpusLMDataset(self.val_corpus).map(to_tensor) if not self.no_test_set: self.test = CorpusLMDataset(self.test_corpus).map(to_tensor) else: self.train = CorpusDataset(self.train_corpus, train_labels).map(to_tensor) self.val = CorpusDataset(self.val_corpus, val_labels).map(to_tensor) if not self.no_test_set: self.test = CorpusDataset(self.test_corpus, test_labels).map(to_tensor) def _zip_corpus_and_labels( self, train, val, test, train_labels, val_labels, test_labels ): if not self.language_model and train_labels is None: raise ValueError( "You should provide train labels if not performing language modeling" ) if self.language_model: train_labels = train if val is not None: val_labels = val if test is not None: test_labels = test train_data = ( list(zip(train, train_labels)) if train_labels is not None else train ) val_data = None if val is not None: val_data = list(zip(val, val_labels)) if val_labels is not None else val test_data = None if test is not None: test_data = ( list(zip(test, test_labels)) if test_labels is not None else test ) return train_data, val_data, test_data def _select_corpus_cls(self, corpus_args): if self.tokenizer not in self.accepted_tokenizers: raise ValueError( f"tokenizer kwarg in {self.__class__.__name__} should be one of {self.accepted_tokenizers}" ) if self.tokenizer not in self.accepted_tokenizers: raise ValueError( f"tokenizer kwarg in {self.__class__.__name__} should be one of {self.accepted_tokenizers}" ) if self.tokenizer == "spacy": logger.info('Selecting WordCorpus because tokenizer="spacy" was provided') corpus_cls = WordCorpus elif self.tokenizer == "tokenized": logger.info( 'Selecting TokenizedCorpus because tokenizer="tokenized" was 
provided' ) corpus_cls = TokenizedCorpus else: logger.info( "Selecting HfCorpus because a huggingface tokenizer was provided" ) corpus_cls = HfCorpus corpus_args["tokenizer_model"] = self.tokenizer return corpus_cls, corpus_args def _force_train_vocab_on_val_and_test(self, corpus_args, train_corpus): if self.tokenizer in {"spacy", "tokenized"}: corpus_args["word2idx"] = train_corpus.word2idx if self.tokenizer == "spacy": corpus_args["embeddings"] = train_corpus.embeddings corpus_args["idx2word"] = train_corpus.word2idx logger.info( "Forcing vocabulary from training set for validation and test sets." ) return corpus_args def _create_corpora(self, train_corpus, val_corpus, test_corpus, corpus_args): corpus_cls, corpus_args = self._select_corpus_cls(corpus_args) train_corpus = corpus_cls(train_corpus, **corpus_args) corpus_args = self._force_train_vocab_on_val_and_test(corpus_args, train_corpus) val_corpus = corpus_cls(val_corpus, **corpus_args) if not self.no_test_set: test_corpus = corpus_cls(test_corpus, **corpus_args) else: test_corpus = None return train_corpus, val_corpus, test_corpus @property def embeddings(self) -> Optional[np.ndarray]: emb: Optional[np.ndarray] = self.train_corpus.embeddings return emb @property def vocab_size(self) -> int: vsz: int = self.train_corpus.vocab_size return vsz @classmethod
MIT License
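Sketch of wiring the class method above into an argparse parser; note that the dest values contain dots, so the parsed values are read back with getattr. The command-line values are invented.

import argparse
from slp.plbind.dm import PLDataModuleFromCorpus

parser = argparse.ArgumentParser("example")
parser = PLDataModuleFromCorpus.add_argparse_args(parser)

args = parser.parse_args(
    ["--tokenizer", "spacy", "--bsz", "16", "--max-sentence-length", "128"]
)
print(getattr(args, "data.tokenizer"))    # 'spacy'
print(getattr(args, "data.batch_size"))   # 16
print(getattr(args, "data.max_len"))      # 128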
mozilla/betafarm
vendor-local/lib/python/fudge/__init__.py
Fake.is_a_stub
python
def is_a_stub(self):
    self._is_a_stub = True
    return self
Turns this fake into a stub. When a stub, any method is allowed to be called on the Fake() instance and any attribute can be accessed. When an unknown attribute or call is made, a new Fake() is returned. You can of course override any of this with :meth:`Fake.expects` and the other methods.
https://github.com/mozilla/betafarm/blob/9b0669be88933257c7e6463a913928829dff1166/vendor-local/lib/python/fudge/__init__.py#L798-L807
__version__ = '1.0.3' import os import re import sys import thread import warnings from fudge.exc import FakeDeclarationError from fudge.patcher import * from fudge.util import wraps, fmt_val, fmt_dict_vals __all__ = ['Fake', 'patch', 'test', 'clear_calls', 'verify', 'clear_expectations'] class Registry(object): def __init__(self): self.expected_calls = {} self.expected_call_order = {} self.call_stacks = [] def __contains__(self, obj): return obj in self.get_expected_calls() def clear_actual_calls(self): for exp in self.get_expected_calls(): exp.was_called = False def clear_all(self): self.clear_actual_calls() self.clear_expectations() def clear_calls(self): self.clear_actual_calls() for stack in self.call_stacks: stack.reset() for fake, call_order in self.get_expected_call_order().items(): call_order.reset_calls() def clear_expectations(self): c = self.get_expected_calls() c[:] = [] d = self.get_expected_call_order() d.clear() def expect_call(self, expected_call): c = self.get_expected_calls() c.append(expected_call) call_order = self.get_expected_call_order() if expected_call.fake in call_order: this_call_order = call_order[expected_call.fake] this_call_order.add_expected_call(expected_call) def get_expected_calls(self): self.expected_calls.setdefault(thread.get_ident(), []) return self.expected_calls[thread.get_ident()] def get_expected_call_order(self): self.expected_call_order.setdefault(thread.get_ident(), {}) return self.expected_call_order[thread.get_ident()] def remember_expected_call_order(self, expected_call_order): ordered_fakes = self.get_expected_call_order() fake = expected_call_order.fake ordered_fakes.setdefault(fake, expected_call_order) def register_call_stack(self, call_stack): self.call_stacks.append(call_stack) def verify(self): try: for exp in self.get_expected_calls(): exp.assert_called() exp.assert_times_called() for fake, call_order in self.get_expected_call_order().items(): call_order.assert_order_met(finalize=True) finally: self.clear_calls() registry = Registry() def clear_calls(): registry.clear_calls() def verify(): registry.verify() def start(): warnings.warn( "fudge.start() has been deprecated. Use fudge.clear_calls() instead", DeprecationWarning, 3) clear_calls() def stop(): warnings.warn( "fudge.stop() has been deprecated. 
Use fudge.verify() instead", DeprecationWarning, 3) verify() def clear_expectations(): registry.clear_expectations() def with_fakes(method): @wraps(method) def apply_clear_and_verify(*args, **kw): clear_calls() method(*args, **kw) verify() return apply_clear_and_verify def test(method): @wraps(method) def clear_and_verify(*args, **kw): clear_expectations() clear_calls() try: v = method(*args, **kw) verify() finally: clear_expectations() return v return clear_and_verify test.__test__ = False class Call(object): def __init__(self, fake, call_name=None, index=None, callable=False, call_order=None): self.fake = fake self.call_name = call_name self.call_replacement = None self.expected_arg_count = None self.expected_kwarg_count = None self.expected_args = None self.expected_kwargs = None self.expected_matching_args = None self.expected_matching_kwargs = None self.index = index self.exception_to_raise = None self.return_val = None self.was_called = False self.expected_times_called = None self.actual_times_called = 0 self.callable = callable self.call_order = call_order def __call__(self, *args, **kwargs): self.was_called = True self.actual_times_called += 1 if self.call_order: self.call_order.add_actual_call(self) self.call_order.assert_order_met(finalize=False) if self.expected_times_called is not None and self.actual_times_called > self.expected_times_called: raise AssertionError( '%s was called %s time(s). Expected %s.' % ( self, self.actual_times_called, self.expected_times_called)) return_val = None replacement_return = None if self.call_replacement: replacement_return = self.call_replacement(*args, **kwargs) if self.return_val is not None: return_value = self.return_val else: return_value = replacement_return with_args = (self.expected_args or self.expected_kwargs) if with_args: if self.expected_kwargs is None: self.expected_kwargs = {} if self.expected_kwargs != kwargs: raise AssertionError( "%s was called unexpectedly with args %s" % ( self, self._repr_call(args, kwargs, shorten_long_vals=False))) if self.expected_args is None: self.expected_args = tuple([]) if self.expected_args != args: raise AssertionError( "%s was called unexpectedly with args %s" % ( self, self._repr_call(args, kwargs, shorten_long_vals=False))) if self.expected_matching_kwargs: for expected_arg, expected_value in self.expected_matching_kwargs.items(): if expected_arg in kwargs: if expected_value != kwargs[expected_arg]: raise AssertionError( "%s was called unexpectedly with args %s" % ( self, self._repr_call(args, {expected_arg: kwargs[expected_arg]}, shorten_long_vals=False)) ) if self.expected_matching_args: if self.expected_matching_args != args: raise AssertionError( "%s was called unexpectedly with args %s" % ( self, self._repr_call(args, kwargs, shorten_long_vals=False))) with_arg_counts = (self.expected_arg_count is not None or self.expected_kwarg_count is not None) if with_arg_counts: if self.expected_arg_count is None: self.expected_arg_count = 0 if len(args) != self.expected_arg_count: raise AssertionError( "%s was called with %s arg(s) but expected %s" % ( self, len(args), self.expected_arg_count)) if self.expected_kwarg_count is None: self.expected_kwarg_count = 0 if len(kwargs.keys()) != self.expected_kwarg_count: raise AssertionError( "%s was called with %s keyword arg(s) but expected %s" % ( self, len(kwargs.keys()), self.expected_kwarg_count)) if self.exception_to_raise is not None: raise self.exception_to_raise return return_value def _repr_call(self, expected_args, expected_kwargs, 
shorten_long_vals=True): args = [] if expected_args: args.extend([fmt_val(a, shorten=shorten_long_vals) for a in expected_args]) if expected_kwargs: args.extend(fmt_dict_vals(expected_kwargs, shorten=shorten_long_vals)) if args: call = "(%s)" % ", ".join(args) else: call = "()" return call def __repr__(self): cls_name = repr(self.fake) if self.call_name and not self.callable: call = "%s.%s" % (cls_name, self.call_name) else: call = "%s" % cls_name call = "%s%s" % (call, self._repr_call(self.expected_args, self.expected_kwargs)) if self.index is not None: call = "%s[%s]" % (call, self.index) return call def get_call_object(self): return self def assert_times_called(self): if self.expected_times_called is not None and self.actual_times_called != self.expected_times_called: raise AssertionError( '%s was called %s time(s). Expected %s.' % ( self, self.actual_times_called, self.expected_times_called)) class ExpectedCall(Call): def __init__(self, *args, **kw): super(ExpectedCall, self).__init__(*args, **kw) registry.expect_call(self) def assert_called(self): if not self.was_called: raise AssertionError("%s was not called" % (self)) class ExpectedCallOrder(object): def __init__(self, fake): self.fake = fake self._call_order = [] self._actual_calls = [] def __repr__(self): return "%r(%r)" % (self.fake, self._call_order) __str__ = __repr__ def _repr_call_list(self, call_list): if not len(call_list): return "no calls" else: stack = ["#%s %r" % (i+1,c) for i,c in enumerate(call_list)] stack.append("end") return ", ".join(stack) def add_expected_call(self, call): self._call_order.append(call) def add_actual_call(self, call): self._actual_calls.append(call) def assert_order_met(self, finalize=False): error = None actual_call_len = len(self._actual_calls) expected_call_len = len(self._call_order) if actual_call_len == 0: error = "Not enough calls were made" else: for i,call in enumerate(self._call_order): if actual_call_len < i+1: if not finalize: continue calls_made = len(self._actual_calls) if calls_made == 1: error = "Only 1 call was made" else: error = "Only %s calls were made" % calls_made break ac_call = self._actual_calls[i] if ac_call is not call: error = "Call #%s was %r" % (i+1, ac_call) break if not error: if actual_call_len > expected_call_len: error = "#%s %s was unexpected" % ( expected_call_len+1, self._actual_calls[expected_call_len] ) if error: msg = "%s; Expected: %s" % ( error, self._repr_call_list(self._call_order)) raise AssertionError(msg) def reset_calls(self): self._actual_calls[:] = [] class CallStack(object): def __init__(self, fake, initial_calls=None, expected=False, call_name=None): self.fake = fake self._pointer = 0 self._calls = [] if initial_calls is not None: for c in initial_calls: self.add_call(c) self.expected = expected self.call_name = call_name registry.register_call_stack(self) def __iter__(self): for c in self._calls: yield c def __repr__(self): return "<%s for %r>" % (self.__class__.__name__, self._calls) __str__ = __repr__ def add_call(self, call): self._calls.append(call) call.index = len(self._calls)-1 def get_call_object(self): return self._calls[len(self._calls)-1] def reset(self): self._pointer = 0 def __call__(self, *args, **kw): try: current_call = self._calls[self._pointer] except IndexError: raise AssertionError( "This attribute of %s can only be called %s time(s). " "Call reset() if necessary or fudge.clear_calls()." 
% ( self.fake, len(self._calls))) self._pointer += 1 return current_call(*args, **kw) class Fake(object): def __init__(self, name=None, allows_any_call=False, callable=False, expect_call=False): self._attributes = {} self._declared_calls = {} self._name = (name or self._guess_name()) self._last_declared_call_name = None self._is_a_stub = False if allows_any_call: warnings.warn('Fake(allows_any_call=True) is deprecated;' ' use Fake.is_a_stub()') self.is_a_stub() self._call_stack = None if expect_call: self.expects_call() elif callable or allows_any_call: self.is_callable() else: self._callable = None self._expected_call_order = None def __getattribute__(self, name): def g(n): return object.__getattribute__(self, n) if name in g('_declared_calls'): return g('_declared_calls')[name] elif name in g('_attributes'): return g('_attributes')[name] else: try: self_call = g(name) except AttributeError: pass else: return self_call if g('_is_a_stub'): stub = Fake(name=self._endpoint_name(name)).is_a_stub() self.has_attr(**{name: stub}) return getattr(self, name) raise AttributeError( "%s object does not allow call or attribute '%s' " "(maybe you want %s.is_a_stub() ?)" % ( self, name, self.__class__.__name__)) def __call__(self, *args, **kwargs): if '__init__' in self._declared_calls: call = self._declared_calls['__init__'] result = call(*args, **kwargs) if result is None: return self else: return result elif self._callable: return self._callable(*args, **kwargs) elif self._is_a_stub: self.is_callable().returns_fake().is_a_stub() return self.__call__(*args, **kwargs) else: raise RuntimeError( "%s object cannot be called (maybe you want " "%s.is_callable() ?)" % (self, self.__class__.__name__)) def __setattr__(self, name, val): if hasattr(self, '_attributes') and name in self._attributes: self._attributes[name] = val else: object.__setattr__(self, name, val) def __repr__(self): return "fake:%s" % (self._name or "unnamed") def _declare_call(self, call_name, call): self._declared_calls[call_name] = call _assignment = re.compile(r"\s*(?P<name>[a-zA-Z0-9_]+)\s*=\s*(fudge\.)?Fake\(.*") def _guess_asn_from_file(self, frame): if frame.f_code.co_filename: if os.path.exists(frame.f_code.co_filename): cofile = open(frame.f_code.co_filename,'r') try: for ln, line in enumerate(cofile): if ln==frame.f_lineno-1: possible_asn = line m = self._assignment.match(possible_asn) if m: return m.group('name') finally: cofile.close() def _guess_name(self): if not hasattr(sys, '_getframe'): return None if sys.platform.startswith('java'): return None frame = sys._getframe(2) if len(frame.f_code.co_varnames): co_names = frame.f_code.co_varnames else: co_names = frame.f_code.co_names candidates = [n for n in co_names if n not in frame.f_locals] if len(candidates)==0: return self._guess_asn_from_file(frame) elif len(candidates)==1: return candidates[0] else: return self._guess_asn_from_file(frame) def _get_current_call(self): if not self._last_declared_call_name: if not self._callable: raise FakeDeclarationError( "Call to a method that expects a predefined call but no such call exists. 
" "Maybe you forgot expects('method') or provides('method') ?") return self._callable.get_call_object() exp = self._declared_calls[self._last_declared_call_name].get_call_object() return exp def _endpoint_name(self, endpoint): p = [self._name or 'unnamed'] if endpoint != self._name: p.append(str(endpoint)) return '.'.join(p) def expects_call(self): self._callable = ExpectedCall(self, call_name=self._name, callable=True) return self def is_callable(self): self._callable = Call(self, call_name=self._name, callable=True) return self
BSD 3-Clause New or Revised License
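Small sketch of the stub behaviour described in the docstring; the attribute and method names are invented.

from fudge import Fake

db = Fake('db').is_a_stub()

# Any attribute access or call is allowed and yields another stubbed Fake:
cursor = db.connect().cursor()
cursor.execute("SELECT 1")
print(db.some_attribute)                  # fake:db.some_attribute

# Specific expectations can still be declared on top of a stub
# (normally checked later with fudge.verify()):
db = Fake('db').is_a_stub().expects('close')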
aliyun/aliyun-log-python-sdk
aliyun/log/getlogsresponse.py
GetLogsResponse.get_has_sql
python
def get_has_sql(self):
    return self.has_sql
Get whether has sql from the response

:return: has_sql, boolean
https://github.com/aliyun/aliyun-log-python-sdk/blob/49b7b92798729d962268252dbbae9d7c098e60f8/aliyun/log/getlogsresponse.py#L89-L94
from .logresponse import LogResponse from .queriedlog import QueriedLog from .logexception import LogException import six from .util import Util class GetLogsResponse(LogResponse): def __init__(self, resp, header): LogResponse.__init__(self, header, resp) try: self.progress = Util.h_v_t(header, 'x-log-progress') self.processed_rows = Util.h_v_td(header, 'x-log-processed-rows', '0') self.elapsed_mills = Util.h_v_td(header, 'x-log-elapsed-millisecond', '0') self.has_sql = Util.h_v_td(header, 'x-log-has-sql', 'False') self.where_query = Util.h_v_td(header, 'x-log-where-query', '') self.agg_query = Util.h_v_td(header, 'x-log-agg-query', '') self.cpu_sec = Util.h_v_td(header, 'x-log-cpu-sec', '0') self.cpu_cores = Util.h_v_td(header, 'x-log-cpu-cores', '0') self.logs = [] for data in resp: contents = {} source = "" if "__source__" in data: source = data['__source__'] for key in six.iterkeys(data): if key != '__time__' and key != '__source__': contents[key] = data[key] self.logs.append(QueriedLog(data['__time__'], source, contents)) except Exception as ex: raise LogException("InvalidResponse", "Failed to parse GetLogResponse, \nheader: " + str(header) + " \nBody:" + str(resp) + " \nOther: " + str(ex), resp_header=header, resp_body=resp) def get_count(self): return len(self.logs) def is_completed(self): return self.progress == 'Complete' def get_logs(self): return self.logs def get_processed_rows(self): return self.processed_rows def get_elapsed_mills(self): return self.elapsed_mills
MIT License
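Sketch of consulting the flag after a query; `client` is assumed to be an authenticated aliyun.log.LogClient and `request` a prepared GetLogsRequest for an analytic (SQL) query.

res = client.get_logs(request)

if res.is_completed():
    print('has sql part:', res.get_has_sql())            # from the x-log-has-sql header
    print('processed rows:', res.get_processed_rows())   # from x-log-processed-rows
    print('matched logs:', res.get_count())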
pfnet-research/chainer-graph-cnn
lib/coarsening.py
metis
python
def metis(W, levels):
    N, N = W.shape

    degree = W.sum(axis=0)
    rid = np.random.permutation(six.moves.range(N))
    parents = []
    pooling_inds = []
    graphs = []
    graphs.append(W)

    for _ in six.moves.range(levels):
        weights = degree
        weights = np.array(weights).squeeze()

        idx_row, idx_col, val = scipy.sparse.find(W)
        perm = np.argsort(idx_row)
        rr = idx_row[perm]
        cc = idx_col[perm]
        vv = val[perm]
        cluster_id, pooling_ind = metis_one_level(
            rr, cc, vv, rid, weights)
        parents.append(cluster_id)
        pooling_inds.append(pooling_ind)

        nrr = cluster_id[rr]
        ncc = cluster_id[cc]
        nvv = vv
        Nnew = cluster_id.max() + 1
        W = scipy.sparse.csr_matrix((nvv, (nrr, ncc)), shape=(Nnew, Nnew))
        W.eliminate_zeros()
        graphs.append(W)
        N, N = W.shape

        degree = W.sum(axis=0)

        ss = np.array(W.sum(axis=0)).squeeze()
        rid = np.argsort(ss)

    return graphs, parents, np.array(pooling_inds)
Coarsen a graph multiple times using the METIS algorithm.

INPUT
W: symmetric sparse weight (adjacency) matrix
levels: the number of coarsened graphs

OUTPUT
graph[0]: original graph of size N_1
graph[2]: coarser graph of size N_2 < N_1
graph[levels]: coarsest graph of Size N_levels < ... < N_2 < N_1
parents[i] is a vector of size N_i with entries ranging from 1 to N_{i+1}
which indicate the parents in the coarser graph[i+1]

NOTE
if "graph" is a list of length k, then parents will be a list of length k-1
https://github.com/pfnet-research/chainer-graph-cnn/blob/d7dc17d1a6e86624366a6f8ae993d8cbb2d1cc90/lib/coarsening.py#L22-L89
import numpy as np import scipy.sparse import six def coarsen(A, levels, self_connections=False): graphs, parents, pooling_inds, = metis(A, levels) return graphs, pooling_inds
MIT License
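A hedged usage sketch for the coarsening record above: it builds a tiny symmetric adjacency matrix and coarsens it twice via coarsen() from the same module. It assumes metis_one_level (called inside metis but not shown in the context) is available in that module, and the matrix values are arbitrary.

import numpy as np
import scipy.sparse

W = scipy.sparse.csr_matrix(np.array(
    [[0, 1, 1, 0],
     [1, 0, 1, 1],
     [1, 1, 0, 1],
     [0, 1, 1, 0]], dtype=np.float32))
graphs, pooling_inds = coarsen(W, levels=2)
print(len(graphs))         # levels + 1 graphs, finest to coarsest
print(pooling_inds.shape)  # pooling indices collected per level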
rapid7/vm-console-client-python
rapid7vmconsole/models/scan_template_web_spider_paths.py
ScanTemplateWebSpiderPaths.boostrap
python
def boostrap(self):
    return self._boostrap
Gets the boostrap of this ScanTemplateWebSpiderPaths.  # noqa: E501

Paths to bootstrap spidering with.  # noqa: E501

:return: The boostrap of this ScanTemplateWebSpiderPaths.  # noqa: E501
:rtype: str
https://github.com/rapid7/vm-console-client-python/blob/55e1f573967bce27cc9a2d10c12a949b1142c2b3/rapid7vmconsole/models/scan_template_web_spider_paths.py#L59-L67
import pprint import re import six class ScanTemplateWebSpiderPaths(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'boostrap': 'str', 'excluded': 'str', 'honor_robot_directives': 'bool' } attribute_map = { 'boostrap': 'boostrap', 'excluded': 'excluded', 'honor_robot_directives': 'honorRobotDirectives' } def __init__(self, boostrap=None, excluded=None, honor_robot_directives=None): self._boostrap = None self._excluded = None self._honor_robot_directives = None self.discriminator = None if boostrap is not None: self.boostrap = boostrap if excluded is not None: self.excluded = excluded if honor_robot_directives is not None: self.honor_robot_directives = honor_robot_directives @property
MIT License
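A minimal sketch of how the generated model above is used; it assumes the usual swagger-generated setters exist alongside the getter shown, and the path values are placeholders.

paths = ScanTemplateWebSpiderPaths(
    boostrap="/login",              # note: the generated attribute really is spelled "boostrap"
    excluded="/logout",
    honor_robot_directives=True,
)
print(paths.boostrap)                # "/login"
print(paths.honor_robot_directives)  # True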
tilezen/tilequeue
tilequeue/query/rawr.py
OsmRawrLookup.relations_using_rel
python
def relations_using_rel(self, rel_id):
    return self._relations_using_rel.get(rel_id, [])
Returns a list of relation IDs which contain the relation with that ID.
https://github.com/tilezen/tilequeue/blob/911e618a3162877aea1c13fb0e2632c264c8e724/tilequeue/query/rawr.py#L189-L195
from collections import namedtuple, defaultdict from shapely.geometry import box from shapely.geometry import MultiLineString from shapely.geometry import MultiPolygon from shapely.geometry.polygon import orient from shapely.wkb import loads as wkb_loads from tilequeue.query.common import layer_properties from tilequeue.query.common import is_station_or_stop from tilequeue.query.common import is_station_or_line from tilequeue.query.common import deassoc from tilequeue.query.common import mz_is_interesting_transit_relation from tilequeue.query.common import shape_type_lookup from tilequeue.query.common import name_keys from tilequeue.query.common import wkb_shape_type from tilequeue.query.common import ShapeType from tilequeue.transform import calculate_padded_bounds from tilequeue.utils import CoordsByParent from raw_tiles.tile import shape_tile_coverage from math import floor class Relation(object): def __init__(self, rel_id, way_off, rel_off, parts, members, tags): self.id = rel_id self.tags = deassoc(tags) self.node_ids = parts[0:way_off] self.way_ids = parts[way_off:rel_off] self.rel_ids = parts[rel_off:] class TilePyramid(namedtuple('TilePyramid', 'z x y max_z')): def tile(self): from raw_tiles.tile import Tile return Tile(self.z, self.x, self.y) def bounds(self): from ModestMaps.Core import Coordinate from tilequeue.tile import coord_to_mercator_bounds coord = Coordinate(zoom=self.z, column=self.x, row=self.y) bounds = coord_to_mercator_bounds(coord) return bounds def bbox(self): return box(*self.bounds()) def _match_type(values, types): if len(values) != len(types): return False for val, typ in zip(values, types): if not isinstance(val, typ): return False return True def _is_gate(props): return props.get('barrier') == 'gate' def _is_routeable(props): return props.get('whitewater') == 'portage_way' or 'highway' in props class OsmRawrLookup(object): def __init__(self): self.nodes = {} self.ways = {} self.relations = {} self._ways_using_node = defaultdict(list) self._relations_using_node = defaultdict(list) self._relations_using_way = defaultdict(list) self._relations_using_rel = defaultdict(list) def add_row(self, *args): num = (int, long) if _match_type(args, (num, (str, bytes), dict)): self.add_feature(*args) elif _match_type(args, (num, list, list)): self.add_way(*args) elif _match_type(args, (num, num, num, list, list, list)): self.add_relation(*args) else: raise Exception("Unknown row shape for OsmRawrLookup.add_row: %s" % (repr(map(type, args)),)) def add_feature(self, fid, shape_wkb, props): if fid < 0: return shape_type = wkb_shape_type(shape_wkb) if is_station_or_stop(fid, None, props) and shape_type == ShapeType.point: self.nodes[fid] = (fid, shape_wkb, props) elif _is_gate(props) and shape_type == ShapeType.point: self.nodes[fid] = (fid, shape_wkb, props) elif (is_station_or_line(fid, None, props) and shape_type != ShapeType.point): self.ways[fid] = (fid, shape_wkb, props) elif _is_routeable(props) and shape_type == ShapeType.line: self.ways[fid] = (fid, shape_wkb, props) def add_way(self, way_id, nodes, tags): for node_id in nodes: if node_id in self.nodes: if way_id in self.ways: self._ways_using_node[node_id].append(way_id) def add_relation(self, rel_id, way_off, rel_off, parts, members, tags): r = Relation(rel_id, way_off, rel_off, parts, members, tags) is_transit_relation = mz_is_interesting_transit_relation(r.tags) is_route = 'route' in r.tags and ('network' in r.tags or 'ref' in r.tags) if is_route or is_transit_relation: self.relations[r.id] = r for node_id in 
r.node_ids: if node_id in self.nodes: self._relations_using_node[node_id].append(rel_id) for way_id in r.way_ids: if way_id in self.ways: self._relations_using_way[way_id].append(rel_id) for member_rel_id in r.rel_ids: self._relations_using_rel[member_rel_id].append(rel_id) def relations_using_node(self, node_id): return self._relations_using_node.get(node_id, []) def relations_using_way(self, way_id): return self._relations_using_way.get(way_id, [])
MIT License
qxf2/makemework
page_objects/Main_Page.py
Main_Page.start
python
def start(self):
    url = ''
    self.open(url)
Use this method to go to specific URL -- if needed
https://github.com/qxf2/makemework/blob/57bc8679ef7748f2e18f51c55ed6410892aee9b8/page_objects/Main_Page.py#L16-L19
from .Base_Page import Base_Page from utils.Wrapit import Wrapit import conf.locators_conf as locators class Main_Page(Base_Page): TEMPERATURE_FIELD = locators.TEMPERATURE_FIELD BUY_BUTTON = locators.BUY_BUTTON PAGE_HEADING = locators.PAGE_HEADING
MIT License
googleapis/python-iot
google/cloud/iot_v1/services/device_manager/client.py
DeviceManagerClient.parse_common_location_path
python
def parse_common_location_path(path: str) -> Dict[str, str]:
    m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
    return m.groupdict() if m else {}
Parse a location path into its component segments.
https://github.com/googleapis/python-iot/blob/87df16600d419be91ae91dc9600e9d31c3b267f0/google/cloud/iot_v1/services/device_manager/client.py#L250-L253
from collections import OrderedDict from distutils import util import os import re from typing import Dict, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials as ga_credentials from google.auth.transport import mtls from google.auth.transport.grpc import SslCredentials from google.auth.exceptions import MutualTLSChannelError from google.oauth2 import service_account from google.cloud.iot_v1.services.device_manager import pagers from google.cloud.iot_v1.types import device_manager from google.cloud.iot_v1.types import resources from google.iam.v1 import iam_policy_pb2 from google.iam.v1 import policy_pb2 from google.protobuf import field_mask_pb2 from google.protobuf import timestamp_pb2 from google.rpc import status_pb2 from .transports.base import DeviceManagerTransport, DEFAULT_CLIENT_INFO from .transports.grpc import DeviceManagerGrpcTransport from .transports.grpc_asyncio import DeviceManagerGrpcAsyncIOTransport class DeviceManagerClientMeta(type): _transport_registry = OrderedDict() _transport_registry["grpc"] = DeviceManagerGrpcTransport _transport_registry["grpc_asyncio"] = DeviceManagerGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[DeviceManagerTransport]: if label: return cls._transport_registry[label] return next(iter(cls._transport_registry.values())) class DeviceManagerClient(metaclass=DeviceManagerClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "cloudiot.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): credentials = service_account.Credentials.from_service_account_info(info) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> DeviceManagerTransport: return self._transport @staticmethod def device_path(project: str, location: str, registry: str, device: str,) -> str: return "projects/{project}/locations/{location}/registries/{registry}/devices/{device}".format( project=project, location=location, registry=registry, device=device, ) @staticmethod def parse_device_path(path: str) -> Dict[str, str]: m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/registries/(?P<registry>.+?)/devices/(?P<device>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def registry_path(project: str, location: str, registry: str,) -> str: return "projects/{project}/locations/{location}/registries/{registry}".format( project=project, location=location, registry=registry, ) @staticmethod def parse_registry_path(path: str) -> Dict[str, str]: m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/registries/(?P<registry>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str,) -> str: return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str, str]: m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str,) -> str: return "folders/{folder}".format(folder=folder,) @staticmethod def parse_common_folder_path(path: str) -> Dict[str, str]: m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str,) -> str: return "organizations/{organization}".format(organization=organization,) @staticmethod def parse_common_organization_path(path: str) -> Dict[str, str]: m = re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_project_path(project: str,) -> str: return "projects/{project}".format(project=project,) @staticmethod def parse_common_project_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path(project: str, location: str,) -> str: return "projects/{project}/locations/{location}".format( project=project, location=location, ) @staticmethod
Apache License 2.0
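The two location-path helpers shown for DeviceManagerClient round-trip cleanly; a short sketch with placeholder project and location values:

path = DeviceManagerClient.common_location_path("my-project", "us-central1")
print(path)
# projects/my-project/locations/us-central1
print(DeviceManagerClient.parse_common_location_path(path))
# {'project': 'my-project', 'location': 'us-central1'}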
aparo/pyes
pyes/aggs.py
AggFactory.reset
python
def reset(self):
    self.aggs = []
Reset the aggs
https://github.com/aparo/pyes/blob/96965174760cb5aa5c92eac7ccff346fb5d53cf1/pyes/aggs.py#L14-L16
from .utils import EqualityComparableUsingAttributeDictionary from .filters import Filter, TermFilter, TermsFilter, ANDFilter, NotFilter class AggFactory(EqualityComparableUsingAttributeDictionary): def __init__(self): self.aggs = [] def add(self, agg): self.aggs.append(agg)
BSD 3-Clause New or Revised License
turner-townsend/flask-pydantic-spec
flask_pydantic_spec/utils.py
parse_request
python
def parse_request(func: Callable) -> Mapping[str, Any]:
    if hasattr(func, "body"):
        request_body = getattr(func, "body", None)
        if isinstance(request_body, RequestBase):
            result: Mapping[str, Any] = request_body.generate_spec()
        elif issubclass(request_body, BaseModel):
            result = Request(request_body).generate_spec()
        else:
            result = {}
        return result
    return {}
Generate spec from body parameter on the view function validation decorator
https://github.com/turner-townsend/flask-pydantic-spec/blob/3eb572219ab3ea4e10b30c2c1e5ef96efcecf335/flask_pydantic_spec/utils.py#L31-L44
import inspect import logging from typing import Callable, Mapping, Any, Tuple, Optional, List, Dict from werkzeug.datastructures import MultiDict from flask import Request as FlaskRequest from pydantic import BaseModel from .types import Response, RequestBase, Request logger = logging.getLogger(__name__) def parse_comments(func: Callable) -> Tuple[Optional[str], Optional[str]]: doc = inspect.getdoc(func) if doc is None: return None, None docs = doc.split("\n", 1) if len(docs) == 1: return docs[0], None return docs[0], docs[1].strip()
Apache License 2.0
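A hedged sketch for parse_request above: the library's validation decorator normally attaches a "body" attribute to the view function, so here it is attached by hand with a made-up pydantic model purely to show what the helper returns.

from pydantic import BaseModel

class CreateUser(BaseModel):   # hypothetical request model
    name: str
    age: int

def create_user_view():
    pass

create_user_view.body = CreateUser        # normally set by the validation decorator
print(parse_request(create_user_view))    # request-body spec fragment derived from the model
print(parse_request(lambda: None))        # {} when no body is attached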
wavefronthq/python-client
wavefront_api_client/models/stats_model_internal_use.py
StatsModelInternalUse.keys
python
def keys(self):
    return self._keys
Gets the keys of this StatsModelInternalUse.  # noqa: E501

:return: The keys of this StatsModelInternalUse.  # noqa: E501
:rtype: int
https://github.com/wavefronthq/python-client/blob/e410ce0dd8a2334e995456f4f3d44e0f04664a3a/wavefront_api_client/models/stats_model_internal_use.py#L314-L321
import pprint import re import six from wavefront_api_client.configuration import Configuration class StatsModelInternalUse(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'buffer_keys': 'int', 'cached_compacted_keys': 'int', 'compacted_keys': 'int', 'compacted_points': 'int', 'cpu_ns': 'int', 'distributions': 'int', 'edges': 'int', 'hosts_used': 'int', 'keys': 'int', 'latency': 'int', 'metrics': 'int', 'metrics_used': 'int', 'points': 'int', 'queries': 'int', 'query_tasks': 'int', 's3_keys': 'int', 'skipped_compacted_keys': 'int', 'spans': 'int', 'summaries': 'int' } attribute_map = { 'buffer_keys': 'buffer_keys', 'cached_compacted_keys': 'cached_compacted_keys', 'compacted_keys': 'compacted_keys', 'compacted_points': 'compacted_points', 'cpu_ns': 'cpu_ns', 'distributions': 'distributions', 'edges': 'edges', 'hosts_used': 'hosts_used', 'keys': 'keys', 'latency': 'latency', 'metrics': 'metrics', 'metrics_used': 'metrics_used', 'points': 'points', 'queries': 'queries', 'query_tasks': 'query_tasks', 's3_keys': 's3_keys', 'skipped_compacted_keys': 'skipped_compacted_keys', 'spans': 'spans', 'summaries': 'summaries' } def __init__(self, buffer_keys=None, cached_compacted_keys=None, compacted_keys=None, compacted_points=None, cpu_ns=None, distributions=None, edges=None, hosts_used=None, keys=None, latency=None, metrics=None, metrics_used=None, points=None, queries=None, query_tasks=None, s3_keys=None, skipped_compacted_keys=None, spans=None, summaries=None, _configuration=None): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._buffer_keys = None self._cached_compacted_keys = None self._compacted_keys = None self._compacted_points = None self._cpu_ns = None self._distributions = None self._edges = None self._hosts_used = None self._keys = None self._latency = None self._metrics = None self._metrics_used = None self._points = None self._queries = None self._query_tasks = None self._s3_keys = None self._skipped_compacted_keys = None self._spans = None self._summaries = None self.discriminator = None if buffer_keys is not None: self.buffer_keys = buffer_keys if cached_compacted_keys is not None: self.cached_compacted_keys = cached_compacted_keys if compacted_keys is not None: self.compacted_keys = compacted_keys if compacted_points is not None: self.compacted_points = compacted_points if cpu_ns is not None: self.cpu_ns = cpu_ns if distributions is not None: self.distributions = distributions if edges is not None: self.edges = edges if hosts_used is not None: self.hosts_used = hosts_used if keys is not None: self.keys = keys if latency is not None: self.latency = latency if metrics is not None: self.metrics = metrics if metrics_used is not None: self.metrics_used = metrics_used if points is not None: self.points = points if queries is not None: self.queries = queries if query_tasks is not None: self.query_tasks = query_tasks if s3_keys is not None: self.s3_keys = s3_keys if skipped_compacted_keys is not None: self.skipped_compacted_keys = skipped_compacted_keys if spans is not None: self.spans = spans if summaries is not None: self.summaries = summaries @property def buffer_keys(self): return self._buffer_keys @buffer_keys.setter def buffer_keys(self, buffer_keys): self._buffer_keys = buffer_keys @property def cached_compacted_keys(self): return self._cached_compacted_keys 
@cached_compacted_keys.setter def cached_compacted_keys(self, cached_compacted_keys): self._cached_compacted_keys = cached_compacted_keys @property def compacted_keys(self): return self._compacted_keys @compacted_keys.setter def compacted_keys(self, compacted_keys): self._compacted_keys = compacted_keys @property def compacted_points(self): return self._compacted_points @compacted_points.setter def compacted_points(self, compacted_points): self._compacted_points = compacted_points @property def cpu_ns(self): return self._cpu_ns @cpu_ns.setter def cpu_ns(self, cpu_ns): self._cpu_ns = cpu_ns @property def distributions(self): return self._distributions @distributions.setter def distributions(self, distributions): self._distributions = distributions @property def edges(self): return self._edges @edges.setter def edges(self, edges): self._edges = edges @property def hosts_used(self): return self._hosts_used @hosts_used.setter def hosts_used(self, hosts_used): self._hosts_used = hosts_used @property
Apache License 2.0
rebiocoder/bioforum
venv/Lib/site-packages/django/contrib/auth/base_user.py
BaseUserManager.normalize_email
python
def normalize_email(cls, email):
    email = email or ''
    try:
        email_name, domain_part = email.strip().rsplit('@', 1)
    except ValueError:
        pass
    else:
        email = email_name + '@' + domain_part.lower()
    return email
Normalize the email address by lowercasing the domain part of it.
https://github.com/rebiocoder/bioforum/blob/08c8ff2f07ae667d37ce343f537e878d78ac8fe2/venv/Lib/site-packages/django/contrib/auth/base_user.py#L19-L30
import unicodedata from django.contrib.auth import password_validation from django.contrib.auth.hashers import ( check_password, is_password_usable, make_password, ) from django.db import models from django.utils.crypto import get_random_string, salted_hmac from django.utils.translation import gettext_lazy as _ class BaseUserManager(models.Manager): @classmethod
MIT License
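The normalization behaviour of BaseUserManager.normalize_email is easy to see directly; it lowercases only the domain part and maps falsy input to an empty string:

print(BaseUserManager.normalize_email("Jane.Doe@EXAMPLE.COM"))
# Jane.Doe@example.com
print(repr(BaseUserManager.normalize_email(None)))
# ''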
bigmlcom/bigmler
bigmler/tests/test_01_predictions.py
setup_module
python
def setup_module():
    common_setup_module()
    test = TestPrediction()
    test.setup_scenario02()
    test.setup_scenario06()
Setup for the module
https://github.com/bigmlcom/bigmler/blob/91973ca1e752954302bf26bb22aa6874dc34ce69/bigmler/tests/test_01_predictions.py#L31-L38
from bigmler.tests.world import world, common_setup_module, common_teardown_module, teardown_class, show_doc import bigmler.tests.basic_tst_prediction_steps as test_pred
Apache License 2.0
ibm/spectrum-protect-sppmon
python/influx/database_tables.py
Table.retention_policy
python
def retention_policy(self) -> RetentionPolicy:
    return self.__retention_policy
retention policy associated with this table
https://github.com/ibm/spectrum-protect-sppmon/blob/189ee19ea63e80dee0068bf87ba263584a49666c/python/influx/database_tables.py#L234-L236
from __future__ import annotations from enum import Enum, unique import re import json from typing import Any, Dict, List, Set, Tuple, Union import influx.influx_queries as Queries from utils.execption_utils import ExceptionUtils from utils.influx_utils import InfluxUtils from utils.spp_utils import SppUtils @unique class Datatype(Enum): NONE = type(None) STRING = str BOOL = bool INT = int FLOAT = float TIMESTAMP = type(int) @staticmethod def get_auto_datatype(value: Any) -> Datatype: for enum in Datatype: if(enum is Datatype.TIMESTAMP): continue if(isinstance(value, enum.value)): return enum ExceptionUtils.error_message(f"No auto type found for {value}") return Datatype.NONE class RetentionPolicy: @property def name(self) -> str: return self.__name @property def database(self) -> Database: return self.__database @property def duration(self) -> str: return self.__duration @property def replication(self) -> int: return self.__replication @property def shard_duration(self) -> str: return self.__shard_duration @property def default(self) -> bool: return self.__default def __init__(self, name: str, database: Database, duration: str, replication: int = 1, shard_duration: str = "0s", default: bool = False) -> None: if(not name): raise ValueError("need retention policy name for creation") if(not database): raise ValueError("need retention policy database for creation") if(not duration): raise ValueError("need retention policy duration for creation") if(not replication): raise ValueError("need retention policy replication factor for creation") if(not shard_duration): raise ValueError("need retention policy shard duration for creation") if(default is None): raise ValueError("need retention policy default setting for creation") self.__name = name self.__database = database self.__replication = replication self.__shard_duration = shard_duration self.__default = default try: self.__duration: str = InfluxUtils.transform_time_literal(duration, single_vals=False) except ValueError as error: ExceptionUtils.exception_info(error) raise ValueError(f"duration for retention policy {name} is not in the correct time format") try: self.__shard_duration: str = InfluxUtils.transform_time_literal(shard_duration, single_vals=False) except ValueError as error: ExceptionUtils.exception_info(error) raise ValueError(f"shard duration for retention policy {name} is not in the correct time format") def to_dict(self) -> Dict[str, Union[str, int, bool]]: return { 'name': self.name, 'duration': self.duration, 'shardGroupDuration': self.__shard_duration, 'replicaN': self.__replication, 'default': self.default } def __str__(self) -> str: return f"{self.database.name}.{self.name}" def __repr__(self) -> str: return f"Retention Policy: {self.name}" def __eq__(self, o: object) -> bool: if(isinstance(o, RetentionPolicy)): return o.to_dict() == self.to_dict() return False def __hash__(self) -> int: return hash(json.dumps(self.to_dict(), sort_keys=True)) class Table: @property def fields(self) -> Dict[str, Datatype]: return self.__fields @property def tags(self) -> List[str]: return self.__tags @property def time_key(self) -> str: return self.__time_key @property def name(self) -> str: return self.__name @property
Apache License 2.0
yutoyazaki/hass-nature-remo
climate.py
NatureRemoAC.swing_modes
python
def swing_modes(self):
    return self._modes[self._remo_mode]["dir"]
List of available swing modes.
https://github.com/yutoyazaki/hass-nature-remo/blob/3b7771af416ce58df94cfc1dd792d617b73961b0/climate.py#L160-L162
import logging from homeassistant.core import callback from homeassistant.components.climate import ClimateEntity from homeassistant.components.climate.const import ( DEFAULT_MAX_TEMP, DEFAULT_MIN_TEMP, HVAC_MODE_AUTO, HVAC_MODE_COOL, HVAC_MODE_DRY, HVAC_MODE_FAN_ONLY, HVAC_MODE_HEAT, HVAC_MODE_OFF, SUPPORT_FAN_MODE, SUPPORT_SWING_MODE, SUPPORT_TARGET_TEMPERATURE, ) from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS from . import DOMAIN, CONF_COOL_TEMP, CONF_HEAT_TEMP, NatureRemoBase _LOGGER = logging.getLogger(__name__) SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE | SUPPORT_SWING_MODE MODE_HA_TO_REMO = { HVAC_MODE_AUTO: "auto", HVAC_MODE_FAN_ONLY: "blow", HVAC_MODE_COOL: "cool", HVAC_MODE_DRY: "dry", HVAC_MODE_HEAT: "warm", HVAC_MODE_OFF: "power-off", } MODE_REMO_TO_HA = { "auto": HVAC_MODE_AUTO, "blow": HVAC_MODE_FAN_ONLY, "cool": HVAC_MODE_COOL, "dry": HVAC_MODE_DRY, "warm": HVAC_MODE_HEAT, "power-off": HVAC_MODE_OFF, } async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): if discovery_info is None: return _LOGGER.debug("Setting up climate platform.") coordinator = hass.data[DOMAIN]["coordinator"] api = hass.data[DOMAIN]["api"] config = hass.data[DOMAIN]["config"] appliances = coordinator.data["appliances"] async_add_entities( [ NatureRemoAC(coordinator, api, appliance, config) for appliance in appliances.values() if appliance["type"] == "AC" ] ) class NatureRemoAC(NatureRemoBase, ClimateEntity): def __init__(self, coordinator, api, appliance, config): super().__init__(coordinator, appliance) self._api = api self._default_temp = { HVAC_MODE_COOL: config[CONF_COOL_TEMP], HVAC_MODE_HEAT: config[CONF_HEAT_TEMP], } self._modes = appliance["aircon"]["range"]["modes"] self._hvac_mode = None self._current_temperature = None self._target_temperature = None self._remo_mode = None self._fan_mode = None self._swing_mode = None self._last_target_temperature = {v: None for v in MODE_REMO_TO_HA} self._update(appliance["settings"]) @property def supported_features(self): return SUPPORT_FLAGS @property def current_temperature(self): return self._current_temperature @property def temperature_unit(self): return TEMP_CELSIUS @property def min_temp(self): temp_range = self._current_mode_temp_range() if len(temp_range) == 0: return 0 return min(temp_range) @property def max_temp(self): temp_range = self._current_mode_temp_range() if len(temp_range) == 0: return 0 return max(temp_range) @property def target_temperature(self): _LOGGER.debug("Current target temperature: %s", self._target_temperature) return self._target_temperature @property def target_temperature_step(self): temp_range = self._current_mode_temp_range() if len(temp_range) >= 2: step = round(temp_range[1] - temp_range[0], 1) if step in [1.0, 0.5]: return step return 1 @property def hvac_mode(self): return self._hvac_mode @property def hvac_modes(self): remo_modes = list(self._modes.keys()) ha_modes = list(map(lambda mode: MODE_REMO_TO_HA[mode], remo_modes)) ha_modes.append(HVAC_MODE_OFF) return ha_modes @property def fan_mode(self): return self._fan_mode @property def fan_modes(self): return self._modes[self._remo_mode]["vol"] @property def swing_mode(self): return self._swing_mode @property
MIT License
qiskit/qiskit-aqua
qiskit/aqua/algorithms/classifiers/vqc.py
VQC._gradient_function_wrapper
python
def _gradient_function_wrapper(self, theta):
    epsilon = 1e-8
    f_orig = self._loss(theta)
    grad = np.zeros((len(theta),), float)
    for k, _ in enumerate(theta):
        theta[k] += epsilon
        f_new = self._loss(theta)
        grad[k] = (f_new - f_orig) / epsilon
        theta[k] -= epsilon
        if self.is_gradient_really_supported():
            self._batch_index += 1
    return grad
Compute and return the gradient at the point theta.

Args:
    theta (numpy.ndarray): 1-d array

Returns:
    numpy.ndarray: 1-d array with the same shape as theta. The gradient computed
https://github.com/qiskit/qiskit-aqua/blob/5ccf0e20129880e78a57f2f78c59b9a362ebb208/qiskit/aqua/algorithms/classifiers/vqc.py#L363-L382
from typing import Optional, Callable, Dict, Union, Any import warnings import logging import math import numpy as np from sklearn.utils import shuffle from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister from qiskit.circuit import ParameterVector, ParameterExpression from qiskit.providers import BaseBackend from qiskit.providers import Backend from qiskit.aqua import QuantumInstance, AquaError, aqua_globals from qiskit.aqua.utils import map_label_to_class_name from qiskit.aqua.utils import split_dataset_to_data_and_labels from qiskit.aqua.algorithms import VQAlgorithm from qiskit.aqua.components.optimizers import Optimizer from qiskit.aqua.components.feature_maps import FeatureMap from qiskit.aqua.components.variational_forms import VariationalForm from ...deprecation import warn_package logger = logging.getLogger(__name__) class VQC(VQAlgorithm): def __init__( self, optimizer: Optimizer, feature_map: Union[QuantumCircuit, FeatureMap], var_form: Union[QuantumCircuit, VariationalForm], training_dataset: Dict[str, np.ndarray], test_dataset: Optional[Dict[str, np.ndarray]] = None, datapoints: Optional[np.ndarray] = None, max_evals_grouped: int = 1, minibatch_size: int = -1, callback: Optional[Callable[[int, np.ndarray, float, int], None]] = None, use_sigmoid_cross_entropy: bool = False, quantum_instance: Optional[ Union[QuantumInstance, BaseBackend, Backend]] = None) -> None: warn_package('aqua.algorithms.classifiers', 'qiskit_machine_learning.algorithms.classifiers', 'qiskit-machine-learning') if isinstance(var_form, VariationalForm): warnings.warn(""" The {} object as input for the VQC is deprecated as of 0.7.0 and will be removed no earlier than 3 months after the release. You should pass a QuantumCircuit object instead. See also qiskit.circuit.library.n_local for a collection of suitable circuits.""".format(type(feature_map)), DeprecationWarning, stacklevel=2) super().__init__( var_form=var_form, optimizer=optimizer, cost_fn=self._loss, quantum_instance=quantum_instance ) self._batches = None self._label_batches = None self._batch_index = None self._eval_time = None self.batch_num = None self._optimizer.set_max_evals_grouped(max_evals_grouped) self._callback = callback if use_sigmoid_cross_entropy: self.cost_function = cost_estimate_sigmoid else: self.cost_function = cost_estimate if feature_map is None: raise AquaError('Missing feature map.') if training_dataset is None: raise AquaError('Missing training dataset.') self._training_dataset, self._class_to_label = split_dataset_to_data_and_labels( training_dataset) self._label_to_class = {label: class_name for class_name, label in self._class_to_label.items()} self._num_classes = len(list(self._class_to_label.keys())) if test_dataset is not None: self._test_dataset = split_dataset_to_data_and_labels(test_dataset, self._class_to_label) else: self._test_dataset = test_dataset if datapoints is not None and not isinstance(datapoints, np.ndarray): datapoints = np.asarray(datapoints) if len(datapoints) == 0: datapoints = None self._datapoints = datapoints self._minibatch_size = minibatch_size self._eval_count = 0 self._ret = {} self._parameterized_circuits = None self.feature_map = feature_map def construct_circuit(self, x, theta, measurement=False): x_names = [param.name for param in x if isinstance(param, ParameterExpression)] theta_names = [param.name for param in theta if isinstance(param, ParameterExpression)] if any(x_name in theta_names for x_name in x_names): raise AquaError('Variational form and feature map are not 
allowed to share parameters ' 'with the same name!') qr = QuantumRegister(self._num_qubits, name='q') cr = ClassicalRegister(self._num_qubits, name='c') qc = QuantumCircuit(qr, cr) if isinstance(self.feature_map, QuantumCircuit): param_dict = dict(zip(self._feature_map_params, x)) circuit = self._feature_map.assign_parameters(param_dict, inplace=False) qc.append(circuit.to_instruction(), qr) else: qc.append(self._feature_map.construct_circuit(x, qr).to_instruction(), qr) if isinstance(self.var_form, QuantumCircuit): param_dict = dict(zip(self._var_form_params, theta)) circuit = self._var_form.assign_parameters(param_dict, inplace=False) qc.append(circuit.to_instruction(), qr) else: qc += self._var_form.construct_circuit(theta, qr) if measurement: qc.barrier(qr) qc.measure(qr, cr) return qc def _get_prediction(self, data, theta): from qiskit.ml.circuit.library import RawFeatureVector circuits = [] num_theta_sets = len(theta) // self._var_form.num_parameters theta_sets = np.split(theta, num_theta_sets) def _build_parameterized_circuits(): var_form_support = isinstance(self._var_form, QuantumCircuit) or self._var_form.support_parameterized_circuit feat_map_support = isinstance(self._feature_map, QuantumCircuit) or self._feature_map.support_parameterized_circuit if isinstance(self._feature_map, RawFeatureVector): feat_map_support = False if var_form_support and feat_map_support and self._parameterized_circuits is None: parameterized_circuits = self.construct_circuit( self._feature_map_params, self._var_form_params, measurement=not self._quantum_instance.is_statevector) self._parameterized_circuits = self._quantum_instance.transpile(parameterized_circuits)[0] _build_parameterized_circuits() for thet in theta_sets: for datum in data: if self._parameterized_circuits is not None: curr_params = dict(zip(self._feature_map_params, datum)) curr_params.update(dict(zip(self._var_form_params, thet))) circuit = self._parameterized_circuits.assign_parameters(curr_params) else: circuit = self.construct_circuit( datum, thet, measurement=not self._quantum_instance.is_statevector) circuits.append(circuit) results = self._quantum_instance.execute( circuits, had_transpiled=self._parameterized_circuits is not None) circuit_id = 0 predicted_probs = [] predicted_labels = [] for _ in theta_sets: counts = [] for _ in data: if self._quantum_instance.is_statevector: temp = results.get_statevector(circuit_id) outcome_vector = (temp * temp.conj()).real outcome_dict = {} bitstr_size = int(math.log2(len(outcome_vector))) for i, _ in enumerate(outcome_vector): bitstr_i = format(i, '0' + str(bitstr_size) + 'b') outcome_dict[bitstr_i] = outcome_vector[i] else: outcome_dict = results.get_counts(circuit_id) counts.append(outcome_dict) circuit_id += 1 probs = return_probabilities(counts, self._num_classes) predicted_probs.append(probs) predicted_labels.append(np.argmax(probs, axis=1)) if len(predicted_probs) == 1: predicted_probs = predicted_probs[0] if len(predicted_labels) == 1: predicted_labels = predicted_labels[0] return predicted_probs, predicted_labels def batch_data(self, data, labels=None, minibatch_size=-1): label_batches = None if 0 < minibatch_size < len(data): batch_size = min(minibatch_size, len(data)) if labels is not None: shuffled_samples, shuffled_labels = shuffle(data, labels, random_state=aqua_globals.random_seed) label_batches = np.array_split(shuffled_labels, batch_size) else: shuffled_samples = shuffle(data, random_state=aqua_globals.random_seed) batches = np.array_split(shuffled_samples, batch_size) else: 
batches = np.asarray([data]) label_batches = np.asarray([labels]) return batches, label_batches def is_gradient_really_supported(self): return self.optimizer.is_gradient_supported and not self.optimizer.is_gradient_ignored def train(self, data, labels, quantum_instance=None, minibatch_size=-1): self._quantum_instance = self._quantum_instance if quantum_instance is None else quantum_instance minibatch_size = minibatch_size if minibatch_size > 0 else self._minibatch_size self._batches, self._label_batches = self.batch_data(data, labels, minibatch_size) self._batch_index = 0 if self.initial_point is None: self.initial_point = self.random.standard_normal(self._var_form.num_parameters) self._eval_count = 0 grad_fn = None if minibatch_size > 0 and self.is_gradient_really_supported(): grad_fn = self._gradient_function_wrapper result = self.find_minimum(initial_point=self.initial_point, var_form=self.var_form, cost_fn=self._loss, optimizer=self.optimizer, gradient_fn=grad_fn) self._ret = {} self._ret['num_optimizer_evals'] = result.optimizer_evals self._ret['min_val'] = result.optimal_value self._ret['opt_params'] = result.optimal_point self._ret['eval_time'] = result.optimizer_time if self._ret['num_optimizer_evals'] is not None and self._eval_count >= self._ret['num_optimizer_evals']: self._eval_count = self._ret['num_optimizer_evals'] self._eval_time = self._ret['eval_time'] logger.info('Optimization complete in %s seconds.\nFound opt_params %s in %s evals', self._eval_time, self._ret['opt_params'], self._eval_count) self._ret['eval_count'] = self._eval_count del self._batches del self._label_batches del self._batch_index self._ret['training_loss'] = self._ret['min_val']
Apache License 2.0
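Independent of VQC's internals, the wrapper above is a plain forward-difference gradient; the standalone sketch below applies the same scheme to an ordinary numpy loss so the arithmetic can be checked by hand (the quadratic loss is an arbitrary example, not part of the library).

import numpy as np

def finite_difference_gradient(loss, theta, epsilon=1e-8):
    f_orig = loss(theta)
    grad = np.zeros(len(theta))
    for k in range(len(theta)):
        theta[k] += epsilon              # perturb one coordinate
        grad[k] = (loss(theta) - f_orig) / epsilon
        theta[k] -= epsilon              # restore it
    return grad

theta = np.array([0.3, -1.2])
print(finite_difference_gradient(lambda t: np.sum(t ** 2), theta))
# approximately [0.6, -2.4], i.e. the analytic gradient 2 * theta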
plusmultiply/mprm
datasets/Scannet_subcloud.py
ScannetDataset.get_batch_gen
python
def get_batch_gen(self, split, config): if split == 'training': epoch_n = config.epoch_steps * config.batch_num random_pick_n = None elif split == 'validation': epoch_n = config.validation_size * config.batch_num elif split == 'test': epoch_n = config.validation_size * config.batch_num elif split == 'ERF': epoch_n = 1000000 self.batch_limit = 1 np.random.seed(42) else: raise ValueError('Split argument in data generator should be "training", "validation" or "test"') if not hasattr(self, 'potentials'): self.potentials = {} self.min_potentials = {} if not hasattr(self, 'anchors'): self.anchors = [] def get_anchors(points): n_anchors = [] x_max = points[:, 0].max() x_min = points[:, 0].min() y_max = points[:, 1].max() y_min = points[:, 1].min() z_max = points[:, 2].max() z_min = points[:, 2].min() x_step = np.floor((x_max - x_min) / config.in_radius) + 1 y_step = np.floor((y_max - y_min) / config.in_radius) + 1 z_step = np.floor((z_max - z_min) / config.in_radius) + 1 x_num = np.linspace(x_min, x_max, x_step) y_num = np.linspace(y_min, y_max, y_step) z_num = np.linspace(z_min, z_max, z_step) for x in x_num: for y in y_num: for z in z_num: n_anchors.append([x, y, z]) return np.array(n_anchors) if split == 'training': self.anchors = [] self.potentials[split] = [] self.min_potentials[split] = [] data_split = split if split == 'ERF': data_split = 'test' if split == 'training': for i, tree in enumerate(self.input_trees[data_split]): points = np.array(tree.data) anchor = get_anchors(points) self.anchors += [anchor] self.potentials[split] += [np.random.rand(len(anchor)) * 1e-3] self.min_potentials[split] += [float(np.min(self.potentials[split][-1]))] print(len(self.anchors)) print(len(self.potentials[split])) print(len(self.min_potentials[split])) else: for i, tree in enumerate(self.input_trees[data_split]): self.potentials[split] += [np.random.rand(tree.data.shape[0]) * 1e-3] self.min_potentials[split] += [float(np.min(self.potentials[split][-1]))] def get_random_epoch_inds(): all_epoch_inds = np.zeros((2, 0), dtype=np.int32) for cloud_ind, cloud_labels in enumerate(self.input_labels[split]): epoch_indices = np.empty((0,), dtype=np.int32) for label_ind, label in enumerate(self.label_values): if label not in self.ignored_labels: label_indices = np.where(np.equal(cloud_labels, label))[0] if len(label_indices) <= random_pick_n: epoch_indices = np.hstack((epoch_indices, label_indices)) elif len(label_indices) < 50 * random_pick_n: new_randoms = np.random.choice(label_indices, size=random_pick_n, replace=False) epoch_indices = np.hstack((epoch_indices, new_randoms.astype(np.int32))) else: rand_inds = [] while len(rand_inds) < random_pick_n: rand_inds = np.unique(np.random.choice(label_indices, size=5 * random_pick_n, replace=True)) epoch_indices = np.hstack((epoch_indices, rand_inds[:random_pick_n].astype(np.int32))) epoch_indices = np.vstack((np.full(epoch_indices.shape, cloud_ind, dtype=np.int32), epoch_indices)) all_epoch_inds = np.hstack((all_epoch_inds, epoch_indices)) return all_epoch_inds def random_balanced_gen(): if split == 'training': all_epoch_inds = get_random_epoch_inds() elif split == 'validation': all_epoch_inds = get_random_epoch_inds() else: raise ValueError('generator to be defined for test split.') p_list = [] c_list = [] pl_list = [] pi_list = [] ci_list = [] cl_list = [] cla_list = [] batch_n = 0 for i, rand_i in enumerate(np.random.permutation(all_epoch_inds.shape[1])): cloud_ind = all_epoch_inds[0, rand_i] point_ind = all_epoch_inds[1, rand_i] points = 
np.array(self.input_trees[split][cloud_ind].data, copy=False) center_point = points[point_ind, :].reshape(1, -1) noise = np.random.normal(scale=config.in_radius/10, size=center_point.shape) pick_point = center_point + noise.astype(center_point.dtype) input_inds = self.input_trees[split][cloud_ind].query_radius(pick_point, r=config.in_radius)[0] n = input_inds.shape[0] if n > self.batch_limit: input_inds = np.random.choice(input_inds, size=int(self.batch_limit)-1, replace=False) n = input_inds.shape[0] input_points = (points[input_inds] - pick_point).astype(np.float32) input_colors = self.input_colors[split][cloud_ind][input_inds] input_labels = self.input_labels[split][cloud_ind][input_inds] input_labels = np.array([self.label_to_idx[l] for l in input_labels]) cloud_labels_idx = np.unique(input_labels) cloud_labels = np.zeros((1, config.num_classes)) cloud_labels[0][cloud_labels_idx] = 1 cloud_labels_all = np.ones((len(input_labels), config.num_classes)) cloud_labels_all = cloud_labels_all * cloud_labels if batch_n + n > self.batch_limit and batch_n > 0: yield (np.concatenate(p_list, axis=0), np.concatenate(c_list, axis=0), np.concatenate(pl_list, axis=0), np.array([tp.shape[0] for tp in p_list]), np.concatenate(pi_list, axis=0), np.array(ci_list, dtype=np.int32), np.concatenate(cl_list, axis=0), np.concatenate(cla_list, axis=0)) p_list = [] c_list = [] pl_list = [] pi_list = [] ci_list = [] cl_list = [] cla_list = [] batch_n = 0 if n > 0: p_list += [input_points] c_list += [np.hstack((input_colors, input_points + pick_point))] pl_list += [input_labels] pi_list += [input_inds] ci_list += [cloud_ind] cl_list += [cloud_labels] cla_list += [cloud_labels_all] batch_n += n if batch_n > 0: yield (np.concatenate(p_list, axis=0), np.concatenate(c_list, axis=0), np.concatenate(pl_list, axis=0), np.array([tp.shape[0] for tp in p_list]), np.concatenate(pi_list, axis=0), np.array(ci_list, dtype=np.int32), np.concatenate(cl_list, axis=0), np.concatenate(cla_list, axis=0) ) def spatially_regular_gen(): p_list = [] c_list = [] pl_list = [] pi_list = [] ci_list = [] cl_list = [] cla_list = [] batch_n = 0 for i in range(epoch_n): cloud_ind = int(np.argmin(self.min_potentials[split])) point_ind = np.argmin(self.potentials[split][cloud_ind]) points = np.array(self.input_trees[data_split][cloud_ind].data, copy=False) if split=='training': center_point = self.anchors[cloud_ind][point_ind].reshape(1, -1) else: center_point = points[point_ind, :].reshape(1, -1) if split != 'ERF': noise = np.random.normal(scale=config.in_radius/10, size=center_point.shape) pick_point = center_point + noise.astype(center_point.dtype) else: pick_point = center_point input_inds = self.input_trees[data_split][cloud_ind].query_radius(pick_point, r=config.in_radius)[0] n = input_inds.shape[0] if n == 0: self.potentials[split][cloud_ind][point_ind] += 1 self.min_potentials[split][cloud_ind] = float(np.min(self.potentials[split][cloud_ind])) continue if split != 'ERF': dists = np.sum(np.square((points[input_inds] - pick_point).astype(np.float32)), axis=1) tukeys = np.square(1 - dists / np.square(config.in_radius)) tukeys[dists > np.square(config.in_radius)] = 0 if split != 'training': self.potentials[split][cloud_ind][input_inds] += tukeys self.min_potentials[split][cloud_ind] = float(np.min(self.potentials[split][cloud_ind])) else: self.potentials[split][cloud_ind][point_ind] += 0.01 self.min_potentials[split][cloud_ind] = float(np.min(self.potentials[split][cloud_ind])) if n > self.batch_limit: input_inds = np.random.choice(input_inds, 
size=int(self.batch_limit)-1, replace=False) n = input_inds.shape[0] input_points = (points[input_inds] - pick_point).astype(np.float32) input_colors = self.input_colors[data_split][cloud_ind][input_inds] if split in ['test', 'ERF']: input_labels = np.zeros(input_points.shape[0]) else: input_labels = self.input_labels[data_split][cloud_ind][input_inds] input_labels = np.array([self.label_to_idx[l] for l in input_labels]) cloud_labels_idx = np.unique(input_labels) cloud_labels_idx = cloud_labels_idx[cloud_labels_idx!=0].astype('int32') cloud_labels = np.zeros((1, config.num_classes)) cloud_labels[0][cloud_labels_idx-1] = 1 cloud_labels_all = np.ones((len(input_labels), config.num_classes)) cloud_labels_all = cloud_labels_all * cloud_labels if batch_n + n > self.batch_limit and batch_n > 0: yield (np.concatenate(p_list, axis=0), np.concatenate(c_list, axis=0), np.concatenate(pl_list, axis=0), np.array([tp.shape[0] for tp in p_list]), np.concatenate(pi_list, axis=0), np.array(ci_list, dtype=np.int32), np.concatenate(cl_list, axis=0), np.concatenate(cla_list, axis=0)) p_list = [] c_list = [] pl_list = [] pi_list = [] ci_list = [] cl_list = [] cla_list = [] batch_n = 0 if n > 0: p_list += [input_points] c_list += [np.hstack((input_colors, input_points + pick_point))] pl_list += [input_labels] pi_list += [input_inds] ci_list += [cloud_ind] cl_list += [cloud_labels] cla_list += [cloud_labels_all] batch_n += n if batch_n > 0: yield (np.concatenate(p_list, axis=0), np.concatenate(c_list, axis=0), np.concatenate(pl_list, axis=0), np.array([tp.shape[0] for tp in p_list]), np.concatenate(pi_list, axis=0), np.array(ci_list, dtype=np.int32), np.concatenate(cl_list, axis=0), np.concatenate(cla_list, axis=0)) if split == 'training': gen_func = spatially_regular_gen elif split == 'validation': gen_func = spatially_regular_gen elif split in ['test', 'ERF']: gen_func = spatially_regular_gen else: raise ValueError('Split argument in data generator should be "training", "validation" or "test"') gen_types = (tf.float32, tf.float32, tf.int32, tf.int32, tf.int32, tf.int32, tf.float32, tf.float32) gen_shapes = ([None, 3], [None, 6], [None], [None], [None], [None], [None, 20], [None, 20]) return gen_func, gen_types, gen_shapes
A function defining the batch generator for each split. Should return the generator,
the generated types and generated shapes

:param split: string in "training", "validation" or "test"
:param config: configuration file
:return: gen_func, gen_types, gen_shapes
https://github.com/plusmultiply/mprm/blob/9783dc179f0bfca8ca7316b638269769f11027aa/datasets/Scannet_subcloud.py#L510-L915
import json import os import tensorflow as tf import numpy as np import time import pickle from sklearn.neighbors import KDTree from utils.ply import read_ply, write_ply from utils.mesh import rasterize_mesh from os import makedirs, listdir from os.path import exists, join, isfile, isdir from datasets.common import Dataset import cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0): if (features is None) and (labels is None): return cpp_subsampling.compute(points, sampleDl=sampleDl, verbose=verbose) elif (labels is None): return cpp_subsampling.compute(points, features=features, sampleDl=sampleDl, verbose=verbose) elif (features is None): return cpp_subsampling.compute(points, classes=labels, sampleDl=sampleDl, verbose=verbose) else: return cpp_subsampling.compute(points, features=features, classes=labels, sampleDl=sampleDl, verbose=verbose) class ScannetDataset(Dataset): def __init__(self, input_threads=8, load_test=False): Dataset.__init__(self, 'Scannet') self.label_to_names = {0: 'unclassified', 1: 'wall', 2: 'floor', 3: 'cabinet', 4: 'bed', 5: 'chair', 6: 'sofa', 7: 'table', 8: 'door', 9: 'window', 10: 'bookshelf', 11: 'picture', 12: 'counter', 14: 'desk', 16: 'curtain', 24: 'refridgerator', 28: 'shower curtain', 33: 'toilet', 34: 'sink', 36: 'bathtub', 39: 'otherfurniture'} self.init_labels() self.ignored_labels = np.sort([0]) self.network_model = 'cloud_segmentation' self.num_threads = input_threads self.path = '' self.train_path = join(self.path, 'training_points') self.test_path = join(self.path, 'test_points') self.validation_clouds = np.loadtxt(join(self.path, 'scannetv2_val.txt'), dtype=np.str) self.validation_split = 1 self.training_split = 0 self.all_splits = [] self.load_test = load_test def prepare_pointcloud_ply(self): print('\nPreparing ply files') t0 = time.time() paths = [join(self.path, 'scans'), join(self.path, 'scans_test')] new_paths = [self.train_path, self.test_path] mesh_paths = [join(self.path, 'training_meshes'), join(self.path, 'test_meshes')] label_files = join(self.path, 'scannetv2-labels.combined.tsv') with open(label_files, 'r') as f: lines = f.readlines() names1 = [line.split('\t')[1] for line in lines[1:]] IDs = [int(line.split('\t')[4]) for line in lines[1:]] annot_to_nyuID = {n: id for n, id in zip(names1, IDs)} for path, new_path, mesh_path in zip(paths, new_paths, mesh_paths): if not exists(new_path): makedirs(new_path) if not exists(mesh_path): makedirs(mesh_path) scenes = np.sort([f for f in listdir(path)]) N = len(scenes) for i, scene in enumerate(scenes): if exists(join(new_path, scene + '.ply')): continue t1 = time.time() vertex_data, faces = read_ply(join(path, scene, scene + '_vh_clean_2.ply'), triangular_mesh=True) vertices = np.vstack((vertex_data['x'], vertex_data['y'], vertex_data['z'])).T vertices_colors = np.vstack((vertex_data['red'], vertex_data['green'], vertex_data['blue'])).T vertices_labels = np.zeros(vertices.shape[0], dtype=np.int32) if new_path == self.train_path: with open(join(path, scene, scene + '_vh_clean_2.0.010000.segs.json'), 'r') as f: segmentations = json.load(f) segIndices = np.array(segmentations['segIndices']) with open(join(path, scene, scene + '_vh_clean.aggregation.json'), 'r') as f: aggregation = json.load(f) for segGroup in aggregation['segGroups']: c_name = segGroup['label'] if c_name in names1: nyuID = annot_to_nyuID[c_name] if nyuID in self.label_values: for segment in segGroup['segments']: 
vertices_labels[segIndices == segment] = nyuID write_ply(join(mesh_path, scene + '_mesh.ply'), [vertices, vertices_colors, vertices_labels], ['x', 'y', 'z', 'red', 'green', 'blue', 'class'], triangular_faces=faces) else: write_ply(join(mesh_path, scene + '_mesh.ply'), [vertices, vertices_colors], ['x', 'y', 'z', 'red', 'green', 'blue'], triangular_faces=faces) points, associated_vert_inds = rasterize_mesh(vertices, faces, 0.003) sub_points, sub_vert_inds = grid_subsampling(points, labels=associated_vert_inds, sampleDl=0.01) sub_colors = vertices_colors[sub_vert_inds.ravel(), :] if new_path == self.train_path: sub_labels = vertices_labels[sub_vert_inds.ravel()] write_ply(join(new_path, scene + '.ply'), [sub_points, sub_colors, sub_labels, sub_vert_inds], ['x', 'y', 'z', 'red', 'green', 'blue', 'class', 'vert_ind']) else: write_ply(join(new_path, scene + '.ply'), [sub_points, sub_colors, sub_vert_inds], ['x', 'y', 'z', 'red', 'green', 'blue', 'vert_ind']) print('{:s} {:.1f} sec / {:.1f}%'.format(scene, time.time() - t1, 100 * i / N)) print('Done in {:.1f}s'.format(time.time() - t0)) def load_subsampled_clouds(self, subsampling_parameter): if 0 < subsampling_parameter <= 0.01: raise ValueError('subsampling_parameter too low (should be over 1 cm') tree_path = join(self.path, 'input_{:.3f}'.format(subsampling_parameter)) if not exists(tree_path): makedirs(tree_path) self.train_files = np.sort([join(self.train_path, f) for f in listdir(self.train_path) if f[-4:] == '.ply']) self.test_files = np.sort([join(self.test_path, f) for f in listdir(self.test_path) if f[-4:] == '.ply']) files = np.hstack((self.train_files, self.test_files)) self.input_trees = {'training': [], 'validation': [], 'test': []} self.input_colors = {'training': [], 'validation': [], 'test': []} self.input_vert_inds = {'training': [], 'validation': [], 'test': []} self.input_labels = {'training': [], 'validation': []} N = len(files) progress_n = 30 fmt_str = '[{:<' + str(progress_n) + '}] {:5.1f}%' print('\nPreparing KDTree for all scenes, subsampled at {:.3f}'.format(subsampling_parameter)) for i, file_path in enumerate(files): t0 = time.time() cloud_name = file_path.split('/')[-1][:-4] cloud_folder = file_path.split('/')[-2] if 'train' in cloud_folder: if cloud_name in self.validation_clouds: self.all_splits += [1] cloud_split = 'validation' else: self.all_splits += [0] cloud_split = 'training' else: cloud_split = 'test' if (cloud_split != 'test' and self.load_test) or (cloud_split == 'test' and not self.load_test): continue KDTree_file = join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name)) sub_ply_file = join(tree_path, '{:s}.ply'.format(cloud_name)) if isfile(KDTree_file): data = read_ply(sub_ply_file) sub_colors = np.vstack((data['red'], data['green'], data['blue'])).T sub_vert_inds = data['vert_ind'] if cloud_split == 'test': sub_labels = None else: sub_labels = data['class'] with open(KDTree_file, 'rb') as f: search_tree = pickle.load(f) else: data = read_ply(file_path) points = np.vstack((data['x'], data['y'], data['z'])).T colors = np.vstack((data['red'], data['green'], data['blue'])).T if cloud_split == 'test': int_features = data['vert_ind'] else: int_features = np.vstack((data['vert_ind'], data['class'])).T sub_points, sub_colors, sub_int_features = grid_subsampling(points, features=colors, labels=int_features, sampleDl=subsampling_parameter) sub_colors = sub_colors / 255 if cloud_split == 'test': sub_vert_inds = np.squeeze(sub_int_features) sub_labels = None else: sub_vert_inds = sub_int_features[:, 0] sub_labels = 
sub_int_features[:, 1] search_tree = KDTree(sub_points, leaf_size=50) with open(KDTree_file, 'wb') as f: pickle.dump(search_tree, f) if cloud_split == 'test': write_ply(sub_ply_file, [sub_points, sub_colors, sub_vert_inds], ['x', 'y', 'z', 'red', 'green', 'blue', 'vert_ind']) else: write_ply(sub_ply_file, [sub_points, sub_colors, sub_labels, sub_vert_inds], ['x', 'y', 'z', 'red', 'green', 'blue', 'class', 'vert_ind']) self.input_trees[cloud_split] += [search_tree] self.input_colors[cloud_split] += [sub_colors] self.input_vert_inds[cloud_split] += [sub_vert_inds] if cloud_split in ['training', 'validation']: self.input_labels[cloud_split] += [sub_labels] print('', end='\r') print(fmt_str.format('#' * ((i * progress_n) // N), 100 * i / N), end='', flush=True) self.input_trees['validation'] = self.input_trees['training'] self.input_colors['validation'] = self.input_colors['training'] self.input_vert_inds['validation'] = self.input_colors['training'] self.input_labels['validation'] = self.input_labels['training'] self.num_training = len(self.input_trees['training']) self.num_validation = len(self.input_trees['validation']) self.num_test = len(self.input_trees['test']) print('number of training sample:', self.num_training) print('number of validation sample:', self.num_validation) print('number of test sample:', self.num_test) self.validation_proj = [] self.validation_labels = [] self.test_proj = [] self.test_labels = [] i_val = 0 i_test = 0 N = self.num_validation + self.num_test print('', end='\r') print(fmt_str.format('#' * progress_n, 100), flush=True) print('\nPreparing reprojection indices for validation and test') for i, file_path in enumerate(files): cloud_name = file_path.split('/')[-1][:-4] cloud_folder = file_path.split('/')[-2] if (not self.load_test) and 'train' in cloud_folder and cloud_name not in self.validation_clouds: proj_file = join(tree_path, '{:s}_proj_train.pkl'.format(cloud_name)) if isfile(proj_file): with open(proj_file, 'rb') as f: proj_inds, labels = pickle.load(f) else: tree_path = join(self.path, 'input_{:.3f}'.format(subsampling_parameter)) data = read_ply(join(tree_path, '{:s}.ply'.format(cloud_name))) vertices = np.vstack((data['x'], data['y'], data['z'])).T labels = data['class'] proj_inds = np.squeeze(self.input_trees['validation'][i_val].query(vertices, return_distance=False)) proj_inds = proj_inds.astype(np.int32) with open(proj_file, 'wb') as f: pickle.dump([proj_inds, labels], f) self.validation_proj += [proj_inds] self.validation_labels += [labels] i_val += 1 if self.load_test and 'test' in cloud_folder: proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name)) if isfile(proj_file): with open(proj_file, 'rb') as f: proj_inds, labels = pickle.load(f) else: mesh_path = file_path.split('/') mesh_path[-2] = 'test_meshes' mesh_path = '/'.join(mesh_path) vertex_data, faces = read_ply(mesh_path[:-4] + '_mesh.ply', triangular_mesh=True) vertices = np.vstack((vertex_data['x'], vertex_data['y'], vertex_data['z'])).T labels = np.zeros(vertices.shape[0], dtype=np.int32) proj_inds = np.squeeze(self.input_trees['test'][i_test].query(vertices, return_distance=False)) proj_inds = proj_inds.astype(np.int32) with open(proj_file, 'wb') as f: pickle.dump([proj_inds, labels], f) self.test_proj += [proj_inds] self.test_labels += [labels] i_test += 1 print('', end='\r') print(fmt_str.format('#' * (((i_val + i_test) * progress_n) // N), 100 * (i_val + i_test) / N), end='', flush=True) print('\n') return
MIT License
allenporter/python-google-nest-sdm
google_nest_sdm/auth.py
AbstractAuth.post
python
async def post(
    self, url: str, **kwargs: Mapping[str, Any]
) -> aiohttp.ClientResponse:
    try:
        resp = await self.request("post", url, **kwargs)
    except ClientError as err:
        raise ApiException(f"Error connecting to API: {err}") from err
    return await AbstractAuth._raise_for_status(resp)
Make a post request.
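A minimal usage sketch for the method above, assuming a concrete AbstractAuth subclass. The SimpleAuth class, the fixed token, the API host and the command path are hypothetical placeholders; only AbstractAuth, ApiException and the post() call itself come from this module.

import asyncio
import aiohttp
from google_nest_sdm.auth import AbstractAuth
from google_nest_sdm.exceptions import ApiException

class SimpleAuth(AbstractAuth):
    # Hypothetical subclass returning a fixed token; a real implementation
    # would refresh OAuth credentials in async_get_access_token().
    def __init__(self, websession, host, token):
        super().__init__(websession, host)
        self._token = token

    async def async_get_access_token(self) -> str:
        return self._token

async def main():
    async with aiohttp.ClientSession() as session:
        auth = SimpleAuth(session, "https://smartdevicemanagement.googleapis.com/v1", "my-token")
        try:
            # post() prefixes relative URLs with the host and raises
            # ApiException on connection errors.
            resp = await auth.post("enterprises/project-id/devices/device-id:executeCommand", json={})
            print(resp.status)
        except ApiException as err:
            print("request failed:", err)

asyncio.run(main())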
https://github.com/allenporter/python-google-nest-sdm/blob/b3e41cbaa6961f4eda1b4375cebeaba34239ef91/google_nest_sdm/auth.py#L73-L81
import logging from abc import ABC, abstractmethod from typing import Any, List, Mapping, Optional import aiohttp from aiohttp.client_exceptions import ClientError, ClientResponseError from google.auth.credentials import Credentials from google.oauth2.credentials import Credentials as OAuthCredentials from .exceptions import ApiException, AuthException _LOGGER = logging.getLogger(__name__) HTTP_UNAUTHORIZED = 401 AUTHORIZATION_HEADER = "Authorization" ERROR = "error" STATUS = "status" MESSAGE = "message" class AbstractAuth(ABC): def __init__(self, websession: aiohttp.ClientSession, host: str): self._websession = websession self._host = host @abstractmethod async def async_get_access_token(self) -> str: async def async_get_creds(self) -> Credentials: token = await self.async_get_access_token() return OAuthCredentials(token=token) async def request( self, method: str, url: str, **kwargs: Optional[Mapping[str, Any]] ) -> aiohttp.ClientResponse: headers = kwargs.get("headers") if headers is None: headers = {} else: headers = dict(headers) del kwargs["headers"] if AUTHORIZATION_HEADER not in headers: try: access_token = await self.async_get_access_token() except ClientError as err: raise AuthException(f"Access token failure: {err}") from err headers[AUTHORIZATION_HEADER] = f"Bearer {access_token}" if not (url.startswith("http://") or url.startswith("https://")): url = f"{self._host}/{url}" _LOGGER.debug("request[%s]=%s", method, url) if method == "post" and "json" in kwargs: _LOGGER.debug("request[post json]=%s", kwargs["json"]) return await self._websession.request(method, url, **kwargs, headers=headers) async def get( self, url: str, **kwargs: Mapping[str, Any] ) -> aiohttp.ClientResponse: try: resp = await self.request("get", url, **kwargs) except ClientError as err: raise ApiException(f"Error connecting to API: {err}") from err return await AbstractAuth._raise_for_status(resp)
Apache License 2.0
prince-mendiratta/x-tra-telegram
userbot/utils.py
progress
python
async def progress(current, total, event, start, type_of_ps, file_name=None):
    now = time.time()
    diff = now - start
    if round(diff % 10.00) == 0 or current == total:
        percentage = current * 100 / total
        speed = current / diff
        elapsed_time = round(diff) * 1000
        time_to_completion = round((total - current) / speed) * 1000
        estimated_total_time = elapsed_time + time_to_completion
        progress_str = "[{0}{1}]\nProgress: {2}%\n".format(
            ''.join(["█" for i in range(math.floor(percentage / 5))]),
            ''.join(["░" for i in range(20 - math.floor(percentage / 5))]),
            round(percentage, 2))
        tmp = progress_str + "{0} of {1}\nETA: {2}".format(
            humanbytes(current),
            humanbytes(total),
            time_formatter(estimated_total_time)
        )
        if file_name:
            await event.edit("{}\nFile Name: `{}`\n{}".format(
                type_of_ps, file_name, tmp))
        else:
            await event.edit("{}\n{}".format(type_of_ps, tmp))
Generic progress_callback for both upload.py and download.py
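A sketch of how this coroutine is typically handed to Telethon as a progress callback from inside an event handler. The download call, target directory and file name are illustrative, and it is assumed that Telethon invokes the callback as progress_callback(current, total), which is why the remaining arguments are pre-bound with functools.partial.

import time
from functools import partial

async def handler(event):
    # Illustrative: report download progress by editing the triggering message.
    start = time.time()
    await event.client.download_media(
        event.message,
        "downloads/",  # hypothetical target directory
        progress_callback=partial(
            progress,
            event=event,
            start=start,
            type_of_ps="Downloading...",
            file_name="example.bin",  # hypothetical file name
        ),
    )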
https://github.com/prince-mendiratta/x-tra-telegram/blob/6c12599248ef22250ed135d35d524c69da41738e/userbot/utils.py#L263-L288
from userbot import bot from telethon import events from var import Var from pathlib import Path from userbot.uniborgConfig import Config from userbot import LOAD_PLUG from userbot import CMD_LIST import re import logging import inspect def command(**args): args["func"] = lambda e: e.via_bot_id is None stack = inspect.stack() previous_stack_frame = stack[1] file_test = Path(previous_stack_frame.filename) file_test = file_test.stem.replace(".py", "") if 1 == 0: return print("stupidity at its best") else: pattern = args.get("pattern", None) allow_sudo = args.get("allow_sudo", None) allow_edited_updates = args.get('allow_edited_updates', False) args["incoming"] = args.get("incoming", False) args["outgoing"] = True if bool(args["incoming"]): args["outgoing"] = False try: if pattern is not None and not pattern.startswith('(?i)'): args['pattern'] = '(?i)' + pattern except: pass reg = re.compile('(.*)') if not pattern == None: try: cmd = re.search(reg, pattern) try: cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "") except: pass try: CMD_LIST[file_test].append(cmd) except: CMD_LIST.update({file_test: [cmd]}) except: pass if allow_sudo: args["from_users"] = list(Var.SUDO_USERS) args["incoming"] = True del allow_sudo try: del args["allow_sudo"] except: pass if "allow_edited_updates" in args: del args['allow_edited_updates'] def decorator(func): if allow_edited_updates: bot.add_event_handler(func, events.MessageEdited(**args)) bot.add_event_handler(func, events.NewMessage(**args)) try: LOAD_PLUG[file_test].append(func) except: LOAD_PLUG.update({file_test: [func]}) return func return decorator def load_module(shortname): if shortname.startswith("__"): pass elif shortname.endswith("_"): import userbot.utils import sys import importlib from pathlib import Path path = Path(f"userbot/plugins/{shortname}.py") name = "userbot.plugins.{}".format(shortname) spec = importlib.util.spec_from_file_location(name, path) mod = importlib.util.module_from_spec(spec) spec.loader.exec_module(mod) print("Successfully (re)imported "+shortname) else: import userbot.utils import sys import importlib from pathlib import Path path = Path(f"userbot/plugins/{shortname}.py") name = "userbot.plugins.{}".format(shortname) spec = importlib.util.spec_from_file_location(name, path) mod = importlib.util.module_from_spec(spec) mod.bot = bot mod.tgbot = bot.tgbot mod.Var = Var mod.command = command mod.logger = logging.getLogger(shortname) sys.modules["uniborg.util"] = userbot.utils mod.Config = Config mod.borg = bot sys.modules["userbot.events"] = userbot.utils spec.loader.exec_module(mod) sys.modules["userbot.plugins."+shortname] = mod print("Successfully (re)imported "+shortname) def remove_plugin(shortname): try: try: for i in LOAD_PLUG[shortname]: bot.remove_event_handler(i) del LOAD_PLUG[shortname] except: name = f"userbot.plugins.{shortname}" for i in reversed(range(len(bot._event_builders))): ev, cb = bot._event_builders[i] if cb.__module__ == name: del bot._event_builders[i] except: raise ValueError def admin_cmd(pattern=None, **args): args["func"] = lambda e: e.via_bot_id is None stack = inspect.stack() previous_stack_frame = stack[1] file_test = Path(previous_stack_frame.filename) file_test = file_test.stem.replace(".py", "") allow_sudo = args.get("allow_sudo", False) if pattern is not None: if pattern.startswith("\#"): args["pattern"] = re.compile(pattern) else: args["pattern"] = re.compile("\." + pattern) cmd = "." 
+ pattern try: CMD_LIST[file_test].append(cmd) except: CMD_LIST.update({file_test: [cmd]}) args["outgoing"] = True if allow_sudo: args["from_users"] = list(Var.SUDO_USERS) args["incoming"] = True del args["allow_sudo"] elif "incoming" in args and not args["incoming"]: args["outgoing"] = True allow_edited_updates = False if "allow_edited_updates" in args and args["allow_edited_updates"]: allow_edited_updates = args["allow_edited_updates"] del args["allow_edited_updates"] is_message_enabled = True return events.NewMessage(**args) from telethon import events import asyncio from userbot import bot from traceback import format_exc from time import gmtime, strftime import math import subprocess import sys import traceback import datetime def register(**args): args["func"] = lambda e: e.via_bot_id is None stack = inspect.stack() previous_stack_frame = stack[1] file_test = Path(previous_stack_frame.filename) file_test = file_test.stem.replace(".py", "") pattern = args.get('pattern', None) disable_edited = args.get('disable_edited', True) if pattern is not None and not pattern.startswith('(?i)'): args['pattern'] = '(?i)' + pattern if "disable_edited" in args: del args['disable_edited'] reg = re.compile('(.*)') if not pattern == None: try: cmd = re.search(reg, pattern) try: cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "") except: pass try: CMD_LIST[file_test].append(cmd) except: CMD_LIST.update({file_test: [cmd]}) except: pass def decorator(func): if not disable_edited: bot.add_event_handler(func, events.MessageEdited(**args)) bot.add_event_handler(func, events.NewMessage(**args)) try: LOAD_PLUG[file_test].append(func) except Exception as e: LOAD_PLUG.update({file_test: [func]}) return func return decorator def errors_handler(func): async def wrapper(event): try: return await func(event) except Exception: pass return wrapper
Apache License 2.0
python-openxml/python-docx
docx/oxml/styles.py
CT_Styles.get_by_name
python
def get_by_name(self, name):
    xpath = 'w:style[w:name/@w:val="%s"]' % name
    try:
        return self.xpath(xpath)[0]
    except IndexError:
        return None
Return the ``<w:style>`` child element having ``<w:name>`` child element with value *name*, or |None| if not found.
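A short usage sketch. The .docx path and style name are placeholders, and reaching the CT_Styles element through document.styles.element is an assumption about python-docx internals (Styles is an element proxy); get_by_name() itself behaves exactly as documented above.

from docx import Document

document = Document("example.docx")        # hypothetical document
styles_elm = document.styles.element       # assumed access to the <w:styles> (CT_Styles) element
style = styles_elm.get_by_name("Heading 1")
if style is None:
    print("no style named 'Heading 1'")
else:
    print(style.styleId, style.type)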
https://github.com/python-openxml/python-docx/blob/36cac78de080d412e9e50d56c2784e33655cad59/docx/oxml/styles.py#L336-L345
from ..enum.style import WD_STYLE_TYPE from .simpletypes import ST_DecimalNumber, ST_OnOff, ST_String from .xmlchemy import ( BaseOxmlElement, OptionalAttribute, RequiredAttribute, ZeroOrMore, ZeroOrOne ) def styleId_from_name(name): return { 'caption': 'Caption', 'heading 1': 'Heading1', 'heading 2': 'Heading2', 'heading 3': 'Heading3', 'heading 4': 'Heading4', 'heading 5': 'Heading5', 'heading 6': 'Heading6', 'heading 7': 'Heading7', 'heading 8': 'Heading8', 'heading 9': 'Heading9', }.get(name, name.replace(' ', '')) class CT_LatentStyles(BaseOxmlElement): lsdException = ZeroOrMore('w:lsdException', successors=()) count = OptionalAttribute('w:count', ST_DecimalNumber) defLockedState = OptionalAttribute('w:defLockedState', ST_OnOff) defQFormat = OptionalAttribute('w:defQFormat', ST_OnOff) defSemiHidden = OptionalAttribute('w:defSemiHidden', ST_OnOff) defUIPriority = OptionalAttribute('w:defUIPriority', ST_DecimalNumber) defUnhideWhenUsed = OptionalAttribute('w:defUnhideWhenUsed', ST_OnOff) def bool_prop(self, attr_name): value = getattr(self, attr_name) if value is None: return False return value def get_by_name(self, name): found = self.xpath('w:lsdException[@w:name="%s"]' % name) if not found: return None return found[0] def set_bool_prop(self, attr_name, value): setattr(self, attr_name, bool(value)) class CT_LsdException(BaseOxmlElement): locked = OptionalAttribute('w:locked', ST_OnOff) name = RequiredAttribute('w:name', ST_String) qFormat = OptionalAttribute('w:qFormat', ST_OnOff) semiHidden = OptionalAttribute('w:semiHidden', ST_OnOff) uiPriority = OptionalAttribute('w:uiPriority', ST_DecimalNumber) unhideWhenUsed = OptionalAttribute('w:unhideWhenUsed', ST_OnOff) def delete(self): self.getparent().remove(self) def on_off_prop(self, attr_name): return getattr(self, attr_name) def set_on_off_prop(self, attr_name, value): setattr(self, attr_name, value) class CT_Style(BaseOxmlElement): _tag_seq = ( 'w:name', 'w:aliases', 'w:basedOn', 'w:next', 'w:link', 'w:autoRedefine', 'w:hidden', 'w:uiPriority', 'w:semiHidden', 'w:unhideWhenUsed', 'w:qFormat', 'w:locked', 'w:personal', 'w:personalCompose', 'w:personalReply', 'w:rsid', 'w:pPr', 'w:rPr', 'w:tblPr', 'w:trPr', 'w:tcPr', 'w:tblStylePr' ) name = ZeroOrOne('w:name', successors=_tag_seq[1:]) basedOn = ZeroOrOne('w:basedOn', successors=_tag_seq[3:]) next = ZeroOrOne('w:next', successors=_tag_seq[4:]) uiPriority = ZeroOrOne('w:uiPriority', successors=_tag_seq[8:]) semiHidden = ZeroOrOne('w:semiHidden', successors=_tag_seq[9:]) unhideWhenUsed = ZeroOrOne('w:unhideWhenUsed', successors=_tag_seq[10:]) qFormat = ZeroOrOne('w:qFormat', successors=_tag_seq[11:]) locked = ZeroOrOne('w:locked', successors=_tag_seq[12:]) pPr = ZeroOrOne('w:pPr', successors=_tag_seq[17:]) rPr = ZeroOrOne('w:rPr', successors=_tag_seq[18:]) del _tag_seq type = OptionalAttribute('w:type', WD_STYLE_TYPE) styleId = OptionalAttribute('w:styleId', ST_String) default = OptionalAttribute('w:default', ST_OnOff) customStyle = OptionalAttribute('w:customStyle', ST_OnOff) @property def basedOn_val(self): basedOn = self.basedOn if basedOn is None: return None return basedOn.val @basedOn_val.setter def basedOn_val(self, value): if value is None: self._remove_basedOn() else: self.get_or_add_basedOn().val = value @property def base_style(self): basedOn = self.basedOn if basedOn is None: return None styles = self.getparent() base_style = styles.get_by_id(basedOn.val) if base_style is None: return None return base_style def delete(self): self.getparent().remove(self) @property def 
locked_val(self): locked = self.locked if locked is None: return False return locked.val @locked_val.setter def locked_val(self, value): self._remove_locked() if bool(value) is True: locked = self._add_locked() locked.val = value @property def name_val(self): name = self.name if name is None: return None return name.val @name_val.setter def name_val(self, value): self._remove_name() if value is not None: name = self._add_name() name.val = value @property def next_style(self): next = self.next if next is None: return None styles = self.getparent() return styles.get_by_id(next.val) @property def qFormat_val(self): qFormat = self.qFormat if qFormat is None: return False return qFormat.val @qFormat_val.setter def qFormat_val(self, value): self._remove_qFormat() if bool(value): self._add_qFormat() @property def semiHidden_val(self): semiHidden = self.semiHidden if semiHidden is None: return False return semiHidden.val @semiHidden_val.setter def semiHidden_val(self, value): self._remove_semiHidden() if bool(value) is True: semiHidden = self._add_semiHidden() semiHidden.val = value @property def uiPriority_val(self): uiPriority = self.uiPriority if uiPriority is None: return None return uiPriority.val @uiPriority_val.setter def uiPriority_val(self, value): self._remove_uiPriority() if value is not None: uiPriority = self._add_uiPriority() uiPriority.val = value @property def unhideWhenUsed_val(self): unhideWhenUsed = self.unhideWhenUsed if unhideWhenUsed is None: return False return unhideWhenUsed.val @unhideWhenUsed_val.setter def unhideWhenUsed_val(self, value): self._remove_unhideWhenUsed() if bool(value) is True: unhideWhenUsed = self._add_unhideWhenUsed() unhideWhenUsed.val = value class CT_Styles(BaseOxmlElement): _tag_seq = ('w:docDefaults', 'w:latentStyles', 'w:style') latentStyles = ZeroOrOne('w:latentStyles', successors=_tag_seq[2:]) style = ZeroOrMore('w:style', successors=()) del _tag_seq def add_style_of_type(self, name, style_type, builtin): style = self.add_style() style.type = style_type style.customStyle = None if builtin else True style.styleId = styleId_from_name(name) style.name_val = name return style def default_for(self, style_type): default_styles_for_type = [ s for s in self._iter_styles() if s.type == style_type and s.default ] if not default_styles_for_type: return None return default_styles_for_type[-1] def get_by_id(self, styleId): xpath = 'w:style[@w:styleId="%s"]' % styleId try: return self.xpath(xpath)[0] except IndexError: return None
MIT License
make-all/tuya-local
custom_components/tuya_local/generic/climate.py
TuyaLocalClimate.target_temperature_step
python
def target_temperature_step(self):
    dps = self._temperature_dps
    if dps is None:
        dps = self._temp_high_dps
    if dps is None:
        dps = self._temp_low_dps
    if dps is None:
        return 1
    return dps.step(self._device)
Return the supported step of target temperature.
https://github.com/make-all/tuya-local/blob/636d0cd4cb2432676d862d290d2f6deea7328437/custom_components/tuya_local/generic/climate.py#L179-L188
import logging from homeassistant.components.climate import ClimateEntity from homeassistant.components.climate.const import ( ATTR_AUX_HEAT, ATTR_CURRENT_HUMIDITY, ATTR_CURRENT_TEMPERATURE, ATTR_FAN_MODE, ATTR_HUMIDITY, ATTR_HVAC_ACTION, ATTR_HVAC_MODE, ATTR_PRESET_MODE, ATTR_SWING_MODE, ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW, DEFAULT_MAX_HUMIDITY, DEFAULT_MAX_TEMP, DEFAULT_MIN_HUMIDITY, DEFAULT_MIN_TEMP, HVAC_MODE_AUTO, HVAC_MODE_OFF, SUPPORT_AUX_HEAT, SUPPORT_FAN_MODE, SUPPORT_PRESET_MODE, SUPPORT_SWING_MODE, SUPPORT_TARGET_HUMIDITY, SUPPORT_TARGET_TEMPERATURE, SUPPORT_TARGET_TEMPERATURE_RANGE, ) from homeassistant.const import ( ATTR_TEMPERATURE, STATE_UNAVAILABLE, TEMP_CELSIUS, TEMP_FAHRENHEIT, TEMP_KELVIN, ) from ..device import TuyaLocalDevice from ..helpers.device_config import TuyaEntityConfig _LOGGER = logging.getLogger(__name__) class TuyaLocalClimate(ClimateEntity): def __init__(self, device: TuyaLocalDevice, config: TuyaEntityConfig): self._device = device self._config = config self._support_flags = 0 self._attr_dps = [] dps_map = {c.name: c for c in config.dps()} self._aux_heat_dps = dps_map.pop(ATTR_AUX_HEAT, None) self._current_temperature_dps = dps_map.pop(ATTR_CURRENT_TEMPERATURE, None) self._current_humidity_dps = dps_map.pop(ATTR_CURRENT_HUMIDITY, None) self._fan_mode_dps = dps_map.pop(ATTR_FAN_MODE, None) self._humidity_dps = dps_map.pop(ATTR_HUMIDITY, None) self._hvac_mode_dps = dps_map.pop(ATTR_HVAC_MODE, None) self._hvac_action_dps = dps_map.pop(ATTR_HVAC_ACTION, None) self._preset_mode_dps = dps_map.pop(ATTR_PRESET_MODE, None) self._swing_mode_dps = dps_map.pop(ATTR_SWING_MODE, None) self._temperature_dps = dps_map.pop(ATTR_TEMPERATURE, None) self._temp_high_dps = dps_map.pop(ATTR_TARGET_TEMP_HIGH, None) self._temp_low_dps = dps_map.pop(ATTR_TARGET_TEMP_LOW, None) self._unit_dps = dps_map.pop("temperature_unit", None) for d in dps_map.values(): if not d.hidden: self._attr_dps.append(d) if self._aux_heat_dps: self._support_flags |= SUPPORT_AUX_HEAT if self._fan_mode_dps: self._support_flags |= SUPPORT_FAN_MODE if self._humidity_dps: self._support_flags |= SUPPORT_TARGET_HUMIDITY if self._preset_mode_dps: self._support_flags |= SUPPORT_PRESET_MODE if self._swing_mode_dps: self._support_flags |= SUPPORT_SWING_MODE if self._temp_high_dps and self._temp_low_dps: self._support_flags |= SUPPORT_TARGET_TEMPERATURE_RANGE elif self._temperature_dps is not None: self._support_flags |= SUPPORT_TARGET_TEMPERATURE @property def supported_features(self): return self._support_flags @property def should_poll(self): return True @property def name(self): return self._config.name(self._device.name) @property def unique_id(self): return self._config.unique_id(self._device.unique_id) @property def device_info(self): return self._device.device_info @property def icon(self): icon = self._config.icon(self._device) if icon: return icon else: return super().icon @property def temperature_unit(self): if self._unit_dps is not None: unit = self._unit_dps.get_value(self._device) if unit == "C": return TEMP_CELSIUS elif unit == "F": return TEMP_FAHRENHEIT elif unit == "K": return TEMP_KELVIN if self._temperature_dps: unit = self._temperature_dps.unit if unit == "C": return TEMP_CELSIUS elif unit == "F": return TEMP_FAHRENHEIT elif unit == "K": return TEMP_KELVIN return self._device.temperature_unit @property def target_temperature(self): if self._temperature_dps is None: raise NotImplementedError() return self._temperature_dps.get_value(self._device) @property def 
target_temperature_high(self): if self._temp_high_dps is None: raise NotImplementedError() return self._temp_high_dps.get_value(self._device) @property def target_temperature_low(self): if self._temp_low_dps is None: raise NotImplementedError() return self._temp_low_dps.get_value(self._device) @property
MIT License
openstate-sdn/ryu
ryu/contrib/ovs/stream.py
Stream.send
python
def send(self, buf):
    retval = self.connect()
    if retval != 0:
        return -retval
    elif len(buf) == 0:
        return 0

    try:
        return self.socket.send(buf)
    except socket.error, e:
        return -ovs.socket_util.get_exception_errno(e)
Tries to send 'buf' on this stream. If successful, returns the number of bytes sent, between 1 and len(buf). 0 is only a valid return value if len(buf) is 0. On error, returns a negative errno value. Will not block. If no bytes can be immediately accepted for transmission, returns -errno.EAGAIN immediately.
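The return convention described above (bytes sent, or a negative errno such as -errno.EAGAIN) is usually consumed by a small drain loop. The sketch below assumes the rest of the ovs stream/poller API (run(), run_wait(), send_wait(), ovs.poller.Poller), which is not reproduced in this excerpt; treat it as an illustration of the calling pattern rather than verbatim Open vSwitch code.

import errno
import ovs.poller

def send_all(stream, data):
    # Keep calling Stream.send() until the whole buffer is accepted,
    # blocking in a poller whenever the stream reports -EAGAIN.
    while data:
        retval = stream.send(data)
        if retval > 0:
            data = data[retval:]
        elif retval == -errno.EAGAIN:
            poller = ovs.poller.Poller()
            stream.run()
            stream.run_wait(poller)
            stream.send_wait(poller)
            poller.block()
        else:
            return retval  # other negative errno: hard error
    return 0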
https://github.com/openstate-sdn/ryu/blob/b4a7f6c3615a934eaf42894bcb1cc809fce96e93/ryu/contrib/ovs/stream.py#L202-L222
import errno import os import socket import ovs.poller import ovs.socket_util import ovs.vlog vlog = ovs.vlog.Vlog("stream") def stream_or_pstream_needs_probes(name): if PassiveStream.is_valid_name(name) or Stream.is_valid_name(name): return 0 else: return -1 class Stream(object): __S_CONNECTING = 0 __S_CONNECTED = 1 __S_DISCONNECTED = 2 W_CONNECT = 0 W_RECV = 1 W_SEND = 2 _SOCKET_METHODS = {} @staticmethod def register_method(method, cls): Stream._SOCKET_METHODS[method + ":"] = cls @staticmethod def _find_method(name): for method, cls in Stream._SOCKET_METHODS.items(): if name.startswith(method): return cls return None @staticmethod def is_valid_name(name): return bool(Stream._find_method(name)) def __init__(self, socket, name, status): self.socket = socket self.name = name if status == errno.EAGAIN: self.state = Stream.__S_CONNECTING elif status == 0: self.state = Stream.__S_CONNECTED else: self.state = Stream.__S_DISCONNECTED self.error = 0 IPTOS_PREC_INTERNETCONTROL = 0xc0 DSCP_DEFAULT = IPTOS_PREC_INTERNETCONTROL >> 2 @staticmethod def open(name, dscp=DSCP_DEFAULT): cls = Stream._find_method(name) if not cls: return errno.EAFNOSUPPORT, None suffix = name.split(":", 1)[1] error, sock = cls._open(suffix, dscp) if error: return error, None else: status = ovs.socket_util.check_connection_completion(sock) return 0, Stream(sock, name, status) @staticmethod def _open(suffix, dscp): raise NotImplementedError("This method must be overrided by subclass") @staticmethod def open_block((error, stream)): if not error: while True: error = stream.connect() if error != errno.EAGAIN: break stream.run() poller = ovs.poller.Poller() stream.run_wait(poller) stream.connect_wait(poller) poller.block() assert error != errno.EINPROGRESS if error and stream: stream.close() stream = None return error, stream def close(self): self.socket.close() def __scs_connecting(self): retval = ovs.socket_util.check_connection_completion(self.socket) assert retval != errno.EINPROGRESS if retval == 0: self.state = Stream.__S_CONNECTED elif retval != errno.EAGAIN: self.state = Stream.__S_DISCONNECTED self.error = retval def connect(self): if self.state == Stream.__S_CONNECTING: self.__scs_connecting() if self.state == Stream.__S_CONNECTING: return errno.EAGAIN elif self.state == Stream.__S_CONNECTED: return 0 else: assert self.state == Stream.__S_DISCONNECTED return self.error def recv(self, n): retval = self.connect() if retval != 0: return (retval, "") elif n == 0: return (0, "") try: return (0, self.socket.recv(n)) except socket.error, e: return (ovs.socket_util.get_exception_errno(e), "")
Apache License 2.0
docnow/dnflow
summarize.py
url_filename
python
def url_filename(url, include_extension=True):
    parsed_url = urlparse(url)
    fname = parsed_url.path.split('/')[-1]
    if not include_extension:
        fname = fname.split('.')[0]
    return fname
Given a full URL, return just the filename after the last slash.
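Two illustrative calls (the URL is made up) showing the effect of the include_extension flag:

print(url_filename("https://example.com/media/2016/photo_123.jpg"))
# -> photo_123.jpg
print(url_filename("https://example.com/media/2016/photo_123.jpg",
                   include_extension=False))
# -> photo_123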
https://github.com/docnow/dnflow/blob/3e43cb4a0062af2cdaf3d4948725aaddfcbd84f8/summarize.py#L51-L57
import bisect from collections import Counter import csv import hashlib import json import logging import math import os import time import zipfile import tempfile from urllib.parse import urlparse import numpy as np import imagehash from jinja2 import Environment, PackageLoader import luigi from luigi.contrib import redis_store import networkx as nx from PIL import Image from flask.config import Config import requests import twarc import json2csv config = Config(os.path.dirname(__file__)) config.from_pyfile('dnflow.cfg') logging.getLogger().setLevel(logging.WARN) logging.getLogger('').setLevel(logging.WARN) logging.getLogger('luigi-interface').setLevel(logging.WARN) def time_hash(digits=6): hash = hashlib.sha1() hash.update(str(time.time()).encode()) t = time.localtime() dt = '%s%02d%02d%02d%02d' % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min) return '%s-%s' % (dt, hash.hexdigest()[:digits])
MIT License
fabiommendes/sidekick
sidekick-seq/sidekick/seq/lib_augmenting.py
interpose
python
def interpose(elem, seq: Seq) -> Iter:
    return Iter(toolz.interpose(elem, seq))
Introduce element between each pair of elements in seq.

Examples:
    >>> sk.interpose("a", [1, 2, 3])
    sk.iter([1, 'a', 2, 'a', 3])
https://github.com/fabiommendes/sidekick/blob/993ae7b8496347ad9720d3ff11e10ab946c3a800/sidekick-seq/sidekick/seq/lib_augmenting.py#L15-L23
import itertools from collections import deque from .iter import Iter, generator from .lib_basic import uncons from .. import _toolz as toolz from ..functions import fn from ..typing import Seq, TYPE_CHECKING, NOT_GIVEN, Func, T if TYPE_CHECKING: from .. import api as sk @fn.curry(2)
MIT License
markvdw/gpflow-inter-domain
GPflow/param.py
Parameterized._html_table_rows
python
def _html_table_rows(self, name_prefix=''):
    name_prefix += self.name + '.'
    return ''.join([p._html_table_rows(name_prefix) for p in self.sorted_params])
Get the rows of the html table for this object
https://github.com/markvdw/gpflow-inter-domain/blob/0cf621e1896a3e1996f863b586c6cd2f795dd9f0/GPflow/param.py#L917-L923
from __future__ import absolute_import from contextlib import contextmanager from functools import wraps import numpy as np import pandas as pd import tensorflow as tf from . import transforms, session from ._settings import settings from .scoping import NameScoped float_type = settings.dtypes.float_type np_float_type = np.float32 if float_type is tf.float32 else np.float64 recompile_keys = ['prior', 'transform', 'fixed'] class Parentable(object): def __init__(self): self._parent = None @property def highest_parent(self): if self._parent is None: return self else: return self._parent.highest_parent @property def name(self): if self._parent is None: return 'unnamed' if isinstance(self._parent, ParamList): return 'item%i' % self._parent._list.index(self) matches = [key for key, value in self._parent.__dict__.items() if value is self] if len(matches) == 0: raise ValueError("mis-specified parent. This Param's\ _parent does not contain a reference to it.") if len(matches) > 1: raise ValueError("This Param appears to be doubly\ referenced by a parent") return matches[0] @property def long_name(self): if self._parent is None: return self.name return self._parent.long_name + '.' + self.name def __getstate__(self): d = self.__dict__.copy() d.pop('_parent') return d def __setstate__(self, d): self.__dict__.update(d) self._parent = None class Param(Parentable): def __init__(self, array, transform=transforms.Identity()): Parentable.__init__(self) self._array = np.asarray(np.atleast_1d(array), dtype=np_float_type) self.transform = transform self._tf_array = None self._log_jacobian = None self.prior = None self.fixed = False @property def value(self): return self._array.copy() def get_parameter_dict(self, d): d[self.long_name] = self.value def set_parameter_dict(self, d): self._array[...] = d[self.long_name] def get_samples_df(self, samples): if self.fixed: return pd.Series([self.value for _ in range(samples.shape[0])], name=self.long_name) start, _ = self.highest_parent.get_param_index(self) end = start + self.size samples = samples[:, start:end] samples = samples.reshape((samples.shape[0],) + (self.transform.free_state_size(self.shape),)) samples = np.atleast_1d(np.concatenate( [self.transform.forward(s).reshape((1,) + self.shape) for s in samples], 0)) return pd.Series([v for v in samples], name=self.long_name) def make_tf_array(self, free_array): if self.fixed: self._tf_array = tf.placeholder(dtype=float_type, shape=self._array.shape, name=self.name) self._log_jacobian = 0.0 return 0 free_size = self.transform.free_state_size(self.shape) x_free = free_array[:free_size] mapped_array = self.transform.tf_forward(x_free) self._tf_array = tf.reshape(mapped_array, self.shape) self._log_jacobian = self.transform.tf_log_jacobian(x_free) return free_size def get_free_state(self): if self.fixed: return np.empty((0,), np_float_type) return self.transform.backward(self.value.flatten()) def get_feed_dict_keys(self): d = {} if self.fixed: d[self] = self._tf_array return d def update_feed_dict(self, key_dict, feed_dict): if self.fixed: feed_dict[key_dict[self]] = self.value def set_state(self, x): if self.fixed: return 0 free_size = self.transform.free_state_size(self.shape) new_array = self.transform.forward(x[:free_size]).reshape(self.shape) assert new_array.shape == self.shape self._array[...] 
= new_array return free_size def randomize(self, distributions={}, skipfixed=True): if not (skipfixed and self.fixed): if self in distributions.keys(): self._array = distributions[self].sample(self.shape) else: try: self._array = self.prior.sample(self.shape) except AttributeError: randn = np.random.randn( self.transform.free_state_size(self.shape)) self._array = self.transform.forward(randn).reshape(self.shape) def build_prior(self): if self.prior is None: return tf.constant(0.0, float_type) elif self._tf_array is None: raise ValueError("tensorflow array has not been initialized") else: return self.prior.logp(self._tf_array) + self._log_jacobian def __setattr__(self, key, value): object.__setattr__(self, key, value) if key in recompile_keys: self.highest_parent._needs_recompile = True def __str__(self, prepend=''): return prepend + '\033[1m' + self.name + '\033[0m' + ' transform:' + str(self.transform) + ' prior:' + str(self.prior) + (' [FIXED]' if self.fixed else '') + '\n' + str(self.value) @property def size(self): return self._array.size @property def shape(self): return self._array.shape def _html_table_rows(self, name_prefix=''): html = "<tr>" html += "<td>{0}</td>".format(name_prefix + self.name) html += "<td>{0}</td>".format(str(self._array).replace('\n', '</br>')) html += "<td>{0}</td>".format(str(self.prior)) html += "<td>{0}</td>".format('[FIXED]' if self.fixed else str(self.transform)) html += "</tr>" return html def __getstate__(self): d = Parentable.__getstate__(self) for key in ['_tf_array', '_log_jacobian']: d.pop(key, None) return d def __setstate__(self, d): Parentable.__setstate__(self, d) self._log_jacobian = None self.fixed = self.fixed class DataHolder(Parentable): def __init__(self, array, on_shape_change='raise'): Parentable.__init__(self) dt = self._get_type(array) self._array = np.asarray(array, dtype=dt) assert on_shape_change in ['raise', 'pass', 'recompile'] self.on_shape_change = on_shape_change def _get_type(self, array): if any([array.dtype == np.dtype(t) for t in [np.float32, np.float64]]): return np_float_type elif any([array.dtype == np.dtype(t) for t in [np.int16, np.int32, np.int64]]): return np.int32 else: raise NotImplementedError("unknown dtype") def get_feed_dict_keys(self): return {self: self._tf_array} def update_feed_dict(self, key_dict, feed_dict): feed_dict[key_dict[self]] = self._array def __getstate__(self): d = Parentable.__getstate__(self) try: d.pop('_tf_array') except KeyError: pass return d def make_tf_array(self): self._tf_array = tf.placeholder(dtype=self._get_type(self._array), shape=[None] * self._array.ndim, name=self.name) def set_data(self, array): if self.shape == array.shape: self._array[...] = array else: if self.on_shape_change == 'raise': raise ValueError("The shape of this data must not change. 
\ (perhaps make the model again from scratch?)") elif self.on_shape_change == 'recompile': self._array = array.copy() self.highest_parent._needs_recompile = True elif self.on_shape_change == 'pass': self._array = array.copy() else: raise ValueError('invalid option') @property def value(self): return self._array.copy() @property def size(self): return self._array.size @property def shape(self): return self._array.shape def __str__(self, prepend='Data:'): return prepend + '\033[1m' + self.name + '\033[0m' + '\n' + str(self.value) class AutoFlow: def __init__(self, *tf_arg_tuples): self.tf_arg_tuples = tf_arg_tuples def __call__(self, tf_method): @wraps(tf_method) def runnable(instance, *np_args): storage_name = '_' + tf_method.__name__ + '_AF_storage' if hasattr(instance, storage_name): storage = getattr(instance, storage_name) else: storage = {} setattr(instance, storage_name, storage) storage['graph'] = tf.Graph() storage['session'] = session.get_session( graph=storage['graph'], output_file_name=settings.profiling.output_file_name + "_" + tf_method.__name__, output_directory=settings.profiling.output_directory, each_time=settings.profiling.each_time ) with storage['graph'].as_default(): storage['tf_args'] = [tf.placeholder(*a) for a in self.tf_arg_tuples] storage['free_vars'] = tf.placeholder(float_type, [None]) instance.make_tf_array(storage['free_vars']) with instance.tf_mode(): storage['tf_result'] = tf_method(instance, *storage['tf_args']) storage['feed_dict_keys'] = instance.get_feed_dict_keys() feed_dict = {} instance.update_feed_dict(storage['feed_dict_keys'], feed_dict) storage['session'].run(tf.global_variables_initializer(), feed_dict=feed_dict) feed_dict = dict(zip(storage['tf_args'], np_args)) feed_dict[storage['free_vars']] = instance.get_free_state() instance.update_feed_dict(storage['feed_dict_keys'], feed_dict) return storage['session'].run(storage['tf_result'], feed_dict=feed_dict) return runnable class Parameterized(Parentable): def __init__(self): Parentable.__init__(self) self.scoped_keys = [] self._tf_mode = False def get_parameter_dict(self, d=None): if d is None: d = {} for p in self.sorted_params: p.get_parameter_dict(d) return d def set_parameter_dict(self, d): for p in self.sorted_params: p.set_parameter_dict(d) def get_samples_df(self, samples): d = pd.DataFrame() for p in self.sorted_params: d = pd.concat([d, p.get_samples_df(samples)], axis=1) return d def __getattribute__(self, key): o = object.__getattribute__(self, key) try: if not object.__getattribute__(self, '_tf_mode'): return o except AttributeError: return o if isinstance(o, (Param, DataHolder)): return o._tf_array elif key in object.__getattribute__(self, 'scoped_keys'): return NameScoped(self.long_name + '.' + key)(o) return o def __setattr__(self, key, value): if key in self.__dict__.keys(): p = getattr(self, key) if isinstance(p, Param) and isinstance(value, (np.ndarray, float, int)): p._array[...] 
= value return if isinstance(p, (Param, Parameterized)) and isinstance(value, (Param, Parameterized)): p._parent = None if hasattr(self.highest_parent, '_needs_recompile'): self.highest_parent._needs_recompile = True if isinstance(p, DataHolder) and isinstance(value, np.ndarray): p.set_data(value) return if key is not '_parent' and isinstance(value, (Param, Parameterized)): if not hasattr(self, key) or not self.__getattribute__(key) is value: def _raise_for_existing_param(node): if node is value: raise ValueError('The Param(eterized) object {0} is already present in the tree'.format(value)) if isinstance(node, Parameterized): for child in node.sorted_params: _raise_for_existing_param(child) root = self.highest_parent _raise_for_existing_param(root) object.__setattr__(self, key, value) if isinstance(value, Parentable) and key is not '_parent': value._parent = self if key == '_needs_recompile': self._kill_autoflow() def _kill_autoflow(self): for key in list(self.__dict__.keys()): if key[0] == '_' and key[-11:] == '_AF_storage': if 'session' in getattr(self, key): getattr(self, key)['session'].close() delattr(self, key) [p._kill_autoflow() for p in self.sorted_params if isinstance(p, Parameterized)] def __getstate__(self): d = Parentable.__getstate__(self) for key in list(d.keys()): if key[0] == '_' and key[-11:] == '_AF_storage': d.pop(key) return d def make_tf_array(self, X): count = 0 for dh in self.data_holders: dh.make_tf_array() for p in self.sorted_params: count += p.make_tf_array(X[count:]) return count def get_param_index(self, param_to_index): found = False count = 0 for p in self.sorted_params: if isinstance(p, Param): if p is param_to_index: found = True break else: count += p.get_free_state().size elif isinstance(p, Parameterized): extra, found = p.get_param_index(param_to_index) count += extra if found: break return count, found @property def sorted_params(self): params = [child for key, child in self.__dict__.items() if isinstance(child, (Param, Parameterized)) and key is not '_parent'] return sorted(params, key=lambda x: x.long_name) @property def data_holders(self): return [child for key, child in self.__dict__.items() if isinstance(child, DataHolder)] @property def fixed(self): return all(p.fixed for p in self.sorted_params) @fixed.setter def fixed(self, val): for p in self.sorted_params: p.fixed = val def get_free_state(self): return np.hstack([p.get_free_state() for p in self.sorted_params] + [np.empty(0, np_float_type)]) def get_feed_dict_keys(self): d = {} for p in self.sorted_params + self.data_holders: d.update(p.get_feed_dict_keys()) return d def update_feed_dict(self, key_dict, feed_dict): for p in self.sorted_params + self.data_holders: p.update_feed_dict(key_dict, feed_dict) return feed_dict def set_state(self, x): count = 0 for p in self.sorted_params: count += p.set_state(x[count:]) return count @contextmanager def tf_mode(self): self._begin_tf_mode() yield self._end_tf_mode() def _begin_tf_mode(self): [child._begin_tf_mode() for child in self.sorted_params if isinstance(child, Parameterized)] self._tf_mode = True def _end_tf_mode(self): [child._end_tf_mode() for child in self.sorted_params if isinstance(child, Parameterized)] self._tf_mode = False def randomize(self, distributions={}, skipfixed=True): for param in self.sorted_params: param.randomize(distributions, skipfixed) def build_prior(self): return sum([p.build_prior() for p in self.sorted_params]) def __str__(self, prepend=''): prepend += self.name + '.' 
return '\n'.join([p.__str__(prepend) for p in self.sorted_params])
Apache License 2.0
afedynitch/mceq
MCEq/data.py
Decays.get_matrix
python
def get_matrix(self, parent, child):
    info(20, 'entering with', parent, child)
    if child not in self.relations[parent]:
        raise Exception(("trying to get empty matrix {0} -> {1}").format(
            parent, child))

    return self.index_d[(parent, child)]
Returns a ``DIM x DIM`` decay matrix.

Args:
    parent (int): PDG ID of parent particle
    child (int): PDG ID of final state child particle

Returns:
    numpy.array: decay matrix
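A hedged usage sketch: the backend and Decays objects are constructed the same way the surrounding module does, but the chosen parent/child pair ((211, 0) to (14, 0), i.e. a pi+ decaying to a muon neutrino) is only illustrative and must actually be present in the decay table of your local MCEq database.

from MCEq.data import HDF5Backend, Decays

db = HDF5Backend()          # opens the bundled HDF5 database configured in mceq_config
decays = Decays(db)
decays.load()               # default 'full_decays' dataset

parent, child = (211, 0), (14, 0)   # (PDG ID, helicity) tuples; illustrative
if parent in decays and child in decays.children(parent):
    mat = decays.get_matrix(parent, child)
    print(mat.shape)        # DIM x DIM, with DIM the length of the energy grid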
https://github.com/afedynitch/mceq/blob/23108b123bb5173a08761add0e95678e93776ba9/MCEq/data.py#L816-L830
import six import numpy as np import h5py from collections import defaultdict import mceq_config as config from os.path import join, isfile from .misc import normalize_hadronic_model_name, info equivalences = { 'SIBYLL23': { -4132: 4122, -4122: 4122, -3334: -3312, -3322: -2112, -3212: -3122, -413: -411, 113: 211, 221: 211, 111: 211, 310: 130, 413: 411, 3212: 3122, 3334: 3312 }, 'SIBYLL21': { -3322: 2112, -3312: 2212, -3222: 2212, -3212: 2112, -3122: 2112, -3112: 2212, -2212: 2212, -2112: 2112, 310: 130, 111: 211, 3112: 2212, 3122: 2112, 3212: 2112, 3222: 2212, 3312: 2212, 3322: 2112 }, 'QGSJET01': { -4122: 2212, -3322: 2212, -3312: 2212, -3222: 2212, -3122: 2212, -3112: 2212, -2212: 2212, -2112: 2212, -421: 321, -411: 321, -211: 211, -321: 321, 111: 211, 221: 211, 130: 321, 310: 321, 411: 321, 421: 321, 2112: 2212, 3112: 2212, 3122: 2212, 3222: 2212, 3312: 2212, 3322: 2212, 4122: 2212 }, 'QGSJETII': { -3122: -2112, 111: 211, 113: 211, 221: 211, 310: 130, 3122: 2112, }, 'DPMJET': { -4122: -3222, -3334: -3312, -3212: -3122, -431: -321, -421: -321, -413: -321, -411: -321, 310: 130, 113: 211, 221: 211, 111: 211, 411: 321, 413: 321, 421: 321, 431: 321, 3212: 3122, 3334: 3312, 4122: 3222, }, 'EPOSLHC': { -3334: 2212, -3322: -3122, -3312: 2212, -3222: -2212, -3212: -3122, -3112: 2212, 111: 211, 113: 211, 221: 211, 310: 130, 3112: -2212, 3212: 3122, 3222: 2212, 3312: -2212, 3322: 3122, 3334: -2212 }, 'PYTHIA8': { -3122: -2112, -431: -321, -421: -321, -413: -321, -411: -321, 111: 211, 113: 211, 221: 211, 310: 321, 130: 321, 411: 321, 413: 321, 421: 321, 431: 321, 3122: 2112, } } class HDF5Backend(object): def __init__(self): info(2, 'Opening HDF5 file', config.mceq_db_fname) self.had_fname = join(config.data_dir, config.mceq_db_fname) if not isfile(self.had_fname): raise Exception( 'MCEq DB file {0} not found in "data" directory.'.format( config.mceq_db_fname)) self.em_fname = join(config.data_dir, config.em_db_fname) if config.enable_em and not isfile(self.had_fname): raise Exception( 'Electromagnetic DB file {0} not found in "data" directory.'. 
format(config.em_db_fname)) with h5py.File(self.had_fname, 'r') as mceq_db: from MCEq.misc import energy_grid ca = mceq_db['common'].attrs self.version = (mceq_db.attrs['version'] if 'version' in mceq_db.attrs else '1.0.0') self.min_idx, self.max_idx, self._cuts = self._eval_energy_cuts( ca['e_grid']) self._energy_grid = energy_grid( ca['e_grid'][self._cuts], ca['e_bins'][self.min_idx:self.max_idx + 1], ca['widths'][self._cuts], self.max_idx - self.min_idx) self.dim_full = ca['e_dim'] @property def energy_grid(self): return self._energy_grid def _eval_energy_cuts(self, e_centers): min_idx, max_idx = 0, len(e_centers) slice0, slice1 = None, None if config.e_min is not None: min_idx = slice0 = np.argmin(np.abs(e_centers - config.e_min)) if config.e_max is not None: max_idx = slice1 = np.argmin( np.abs(e_centers - config.e_max)) + 1 return min_idx, max_idx, slice(slice0, slice1) def _gen_db_dictionary(self, hdf_root, indptrs, equivalences={}): from scipy.sparse import csr_matrix index_d = {} relations = defaultdict(lambda: []) particle_list = [] if 'description' in hdf_root.attrs: description = hdf_root.attrs['description'] else: description = None mat_data = hdf_root[:, :] indptr_data = indptrs[:] len_data = hdf_root.attrs['len_data'] if hdf_root.attrs['tuple_idcs'].shape[1] == 4: model_particles = sorted( list( set(hdf_root.attrs['tuple_idcs'][:, (0, 2)].flatten().tolist()))) else: model_particles = sorted( list(set(hdf_root.attrs['tuple_idcs'].flatten().tolist()))) exclude = config.adv_set["disabled_particles"] read_idx = 0 available_parents = [ (pdg, parity) for (pdg, parity) in (hdf_root.attrs['tuple_idcs'][:, :2]) ] available_parents = sorted(list(set(available_parents))) eqv_lookup = defaultdict(lambda: []) for k in equivalences: eqv_lookup[(equivalences[k], 0)].append((k, 0)) for tupidx, tup in enumerate(hdf_root.attrs['tuple_idcs']): if len(tup) == 4: parent_pdg, child_pdg = tuple(tup[:2]), tuple(tup[2:]) elif len(tup) == 2: parent_pdg, child_pdg = (tup[0], 0), (tup[1], 0) else: raise Exception('Failed decoding parent-child relation.') if (abs(parent_pdg[0]) in exclude) or (abs( child_pdg[0]) in exclude): read_idx += len_data[tupidx] continue parent_pdg = int(parent_pdg[0]), (parent_pdg[1]) child_pdg = int(child_pdg[0]), (child_pdg[1]) particle_list.append(parent_pdg) particle_list.append(child_pdg) index_d[(parent_pdg, child_pdg)] = (csr_matrix( (mat_data[0, read_idx:read_idx + len_data[tupidx]], mat_data[1, read_idx:read_idx + len_data[tupidx]], indptr_data[tupidx, :]), shape=(self.dim_full, self.dim_full ))[self._cuts, self.min_idx:self.max_idx]).toarray() relations[parent_pdg].append(child_pdg) info(20, 'This parent {0} is used for interactions of'.format( parent_pdg[0]), [p[0] for p in eqv_lookup[parent_pdg]], condition=len(equivalences) > 0) if config.assume_nucleon_interactions_for_exotics: for eqv_parent in eqv_lookup[parent_pdg]: if eqv_parent[0] not in model_particles: info(10, 'No equiv. 
replacement needed of', eqv_parent, 'for', parent_pdg, 'parent.') continue elif eqv_parent in available_parents: info( 10, 'Parent {0} has dedicated simulation.'.format( eqv_parent[0])) continue particle_list.append(eqv_parent) index_d[(eqv_parent, child_pdg)] = index_d[(parent_pdg, child_pdg)] relations[eqv_parent] = relations[parent_pdg] info( 15, 'equivalence of {0} and {1} interactions'.format( eqv_parent[0], parent_pdg[0])) read_idx += len_data[tupidx] return { 'parents': sorted(list(relations)), 'particles': sorted(list(set(particle_list))), 'relations': dict(relations), 'index_d': dict(index_d), 'description': description } def _check_subgroup_exists(self, subgroup, mname): available_models = list(subgroup) if mname not in available_models: info(0, 'Invalid choice/model', mname) info(0, 'Choose from:\n', '\n'.join(available_models)) raise Exception('Unknown selections.') def interaction_db(self, interaction_model_name): mname = normalize_hadronic_model_name(interaction_model_name) info(10, 'Generating interaction db. mname={0}'.format(mname)) with h5py.File(self.had_fname, 'r') as mceq_db: self._check_subgroup_exists(mceq_db['hadronic_interactions'], mname) if 'SIBYLL21' in mname: eqv = equivalences['SIBYLL21'] elif 'SIBYLL23' in mname: eqv = equivalences['SIBYLL23'] elif 'QGSJET01' in mname: eqv = equivalences['QGSJET01'] elif 'QGSJETII' in mname: eqv = equivalences['QGSJETII'] elif 'DPMJET' in mname: eqv = equivalences['DPMJET'] elif 'EPOSLHC' in mname: eqv = equivalences['EPOSLHC'] elif 'PYTHIA8' in mname: eqv = equivalences['PYTHIA8'] int_index = self._gen_db_dictionary( mceq_db['hadronic_interactions'][mname], mceq_db['hadronic_interactions'][mname + '_indptrs'], equivalences=eqv) if config.enable_em: with h5py.File(self.em_fname, 'r') as em_db: info(2, 'Injecting EmCA matrices into interaction_db.') self._check_subgroup_exists(em_db, 'electromagnetic') em_index = self._gen_db_dictionary( em_db['electromagnetic']['emca_mats'], em_db['electromagnetic']['emca_mats' + '_indptrs']) int_index['parents'] = sorted(int_index['parents'] + em_index['parents']) int_index['particles'] = sorted( list(set(int_index['particles'] + em_index['particles']))) int_index['relations'].update(em_index['relations']) int_index['index_d'].update(em_index['index_d']) if int_index['description'] is not None: int_index['description'] += '\nInteraction model name: ' + mname else: int_index['description'] = 'Interaction model name: ' + mname return int_index def decay_db(self, decay_dset_name): info(10, 'Generating decay db. 
dset_name={0}'.format(decay_dset_name)) with h5py.File(self.had_fname, 'r') as mceq_db: self._check_subgroup_exists(mceq_db['decays'], decay_dset_name) dec_index = self._gen_db_dictionary( mceq_db['decays'][decay_dset_name], mceq_db['decays'][decay_dset_name + '_indptrs']) if config.muon_helicity_dependence: info(2, 'Using helicity dependent decays.') custom_index = self._gen_db_dictionary( mceq_db['decays']['custom_decays'], mceq_db['decays']['custom_decays' + '_indptrs']) info(5, 'Replacing decay from custom decay_db.') dec_index['index_d'].update(custom_index['index_d']) _ = dec_index['index_d'].pop(((211, 0), (-13, 0))) _ = dec_index['index_d'].pop(((-211, 0), (13, 0))) _ = dec_index['index_d'].pop(((321, 0), (-13, 0))) _ = dec_index['index_d'].pop(((-321, 0), (13, 0))) dec_index['relations'] = defaultdict(lambda: []) dec_index['particles'] = [] for idx_tup in dec_index['index_d']: parent, child = idx_tup dec_index['relations'][parent].append(child) dec_index['particles'].append(parent) dec_index['particles'].append(child) dec_index['parents'] = sorted(list(dec_index['relations'])) dec_index['particles'] = sorted( list(set(dec_index['particles']))) return dec_index def cs_db(self, interaction_model_name): mname = normalize_hadronic_model_name(interaction_model_name) with h5py.File(self.had_fname, 'r') as mceq_db: self._check_subgroup_exists(mceq_db['cross_sections'], mname) cs_db = mceq_db['cross_sections'][mname] cs_data = cs_db[:] index_d = {} parents = list(cs_db.attrs['projectiles']) for ip, p in enumerate(parents): index_d[p] = cs_data[self._cuts, ip] if config.enable_em: with h5py.File(self.em_fname, 'r') as em_db: info(2, 'Injecting EmCA matrices into interaction_db.') self._check_subgroup_exists(em_db, 'electromagnetic') em_cs = em_db["electromagnetic"]['cs'][:] em_parents = list( em_db["electromagnetic"]['cs'].attrs['projectiles']) for ip, p in enumerate(em_parents): if p in index_d: raise Exception( 'EM cross sections already in database?') index_d[p] = em_cs[ip, self._cuts] parents += em_parents return {'parents': parents, 'index_d': index_d} def continuous_loss_db(self, medium='air'): with h5py.File(self.had_fname, 'r') as mceq_db: self._check_subgroup_exists(mceq_db['continuous_losses'], medium) cl_db = mceq_db['continuous_losses'][medium] index_d = {} for pstr in list(cl_db): for hel in [0, 1, -1]: index_d[(int(pstr), hel)] = cl_db[pstr][self._cuts] if config.enable_em: with h5py.File(self.em_fname, 'r') as em_db: info(2, 'Injecting EmCA matrices into interaction_db.') self._check_subgroup_exists(em_db, 'electromagnetic') for hel in [0, 1, -1]: index_d[(11, hel)] = em_db["electromagnetic"]['dEdX 11'][ self._cuts] index_d[(-11, hel)] = em_db["electromagnetic"]['dEdX -11'][ self._cuts] return {'parents': sorted(list(index_d)), 'index_d': index_d} class Interactions(object): def __init__(self, mceq_hdf_db): from collections import defaultdict self.mceq_db = mceq_hdf_db self.energy_grid = mceq_hdf_db.energy_grid self.parents = None self.particles = None self.relations = None self.index_d = None self.description = None self.iam = None self.mod_pprod = defaultdict(lambda: {}) def load(self, interaction_model, parent_list=None): from MCEq.misc import is_charm_pdgid self.iam = normalize_hadronic_model_name(interaction_model) index = self.mceq_db.interaction_db(self.iam) disabled_particles = config.adv_set['disabled_particles'] self.parents = [p for p in index['parents'] if p[0] not in disabled_particles] self.relations = index['relations'] self.index_d = index['index_d'] 
self.description = index['description'] if parent_list is not None: self.parents = [p for p in self.parents if p in parent_list and p[0] not in disabled_particles] if (config.adv_set['disable_charm_pprod']): self.parents = [ p for p in self.parents if not is_charm_pdgid(p[0]) ] if (config.adv_set['disable_interactions_of_unstable']): self.parents = [ p for p in self.parents if p[0] not in [2212, 2112, -2212, -2112] ] if (config.adv_set['allowed_projectiles']): self.parents = [ p for p in self.parents if p[0] in config.adv_set['allowed_projectiles'] ] self.particles = [] for p in list(self.relations): if p not in self.parents: _ = self.relations.pop(p, None) continue self.particles.append(p) self.particles += [d for d in self.relations[p] if d not in disabled_particles] self.particles = sorted(list(set(self.particles))) if config.adv_set['disable_direct_leptons']: for p in list(self.relations): self.relations[p] = [ c for c in self.relations[p] if not 10 < abs(c[0]) < 20 ] if len(disabled_particles) > 0: for p in list(self.relations): self.relations[p] = [ c for c in self.relations[p] if c[0] not in disabled_particles ] if not self.particles: info(2, 'None of the parent_list particles interact. Returning custom list.') self.particles = parent_list def __getitem__(self, key): return self.get_matrix(*key) def __contains__(self, key): return key in self.parents def _gen_mod_matrix(self, x_func, *args): from MCEq.misc import gen_xmat info(2, 'Generating modification matrix for', x_func.__name__, args) xmat = gen_xmat(self.energy_grid) modmat = x_func(xmat, self.energy_grid.c, *args) modmat[np.tril_indices(self.energy_grid.d, -1)] = 0. return modmat def _set_mod_pprod(self, prim_pdg, sec_pdg, x_func, args): mpli = self.mod_pprod pstup = (prim_pdg, sec_pdg) if config.use_isospin_sym and prim_pdg not in [2212, 2112]: raise Exception('Unsupported primary for isospin symmetries.') if (x_func.__name__, args) in mpli[(pstup)]: info( 5, ' no changes to particle production' + ' modification matrix of {0}/{1} for {2},{3}'.format( prim_pdg, sec_pdg, x_func.__name__, args)) return False for (xf_name, fargs) in list(mpli[pstup]): if (xf_name == x_func.__name__) and (fargs[0] == args[0]): info(1, 'Warning. 
If you modify only the value of a function,', 'unset and re-apply all changes') return False info( 2, 'modifying modify particle production' + ' matrix of {0}/{1} for {2},{3}'.format(prim_pdg, sec_pdg, x_func.__name__, args)) kmat = self._gen_mod_matrix(x_func, *args) mpli[pstup][(x_func.__name__, args)] = kmat info(5, 'modification "strength"', np.sum(kmat) / np.count_nonzero(kmat)) if not config.use_isospin_sym: return True prim_pdg, symm_pdg = 2212, 2112 if prim_pdg == 2112: prim_pdg = 2112 symm_pdg = 2212 if abs(sec_pdg) == 211: mpli[(symm_pdg, -sec_pdg)][('isospin', args)] = kmat if np.any([p in self.parents for p in [221, 223, 333]]): unflv_arg = None if (prim_pdg, -sec_pdg) not in mpli: unflv_arg = (args[0], 0.5 * args[1]) if (prim_pdg, -sec_pdg) in mpli: for arg_name, arg_val in mpli[(prim_pdg, -sec_pdg)]: if arg_name == args[0]: unflv_arg = (args[0], 0.5 * (args[1] + arg_val)) unflmat = self._gen_mod_matrix(x_func, *unflv_arg) for t in [(prim_pdg, 221), (prim_pdg, 223), (prim_pdg, 333), (symm_pdg, 221), (symm_pdg, 223), (symm_pdg, 333)]: mpli[t][('isospin', unflv_arg)] = unflmat elif abs(sec_pdg) == 321: mpli[(symm_pdg, sec_pdg)][('isospin', args)] = kmat k0_arg = (args[0], 0.5 * args[1]) if (prim_pdg, -sec_pdg) in mpli: for arg_name, arg_val in mpli[(prim_pdg, -sec_pdg)]: if arg_name == args[0]: k0_arg = (args[0], 0.5 * (args[1] + arg_val)) k0mat = self._gen_mod_matrix(x_func, *k0_arg) for t in [(prim_pdg, 310), (prim_pdg, 130), (symm_pdg, 310), (symm_pdg, 130)]: mpli[t][('isospin', k0_arg)] = k0mat elif abs(sec_pdg) == 411: ssec = np.sign(sec_pdg) mpli[(prim_pdg, ssec * 421)][('isospin', args)] = kmat mpli[(prim_pdg, ssec * 431)][('isospin', args)] = kmat mpli[(symm_pdg, sec_pdg)][('isospin', args)] = kmat mpli[(symm_pdg, ssec * 421)][('isospin', args)] = kmat mpli[(symm_pdg, ssec * 431)][('isospin', args)] = kmat elif abs(sec_pdg) == prim_pdg: mpli[(symm_pdg, symm_pdg)][('isospin', args)] = kmat elif abs(sec_pdg) == symm_pdg: mpli[(symm_pdg, prim_pdg)][('isospin', args)] = kmat else: raise Exception('No isospin relation found for secondary' + str(sec_pdg)) return True def print_mod_pprod(self): for i, (prim_pdg, sec_pdg) in enumerate(sorted(self.mod_pprod)): for j, (argname, argv) in enumerate(self.mod_pprod[(prim_pdg, sec_pdg)]): info(2, '{0}: {1} -> {2}, func: {3}, arg: {4}'.format( i + j, prim_pdg, sec_pdg, argname, argv), no_caller=True) def get_matrix(self, parent, child): info(10, 'Called for', parent, child) if child not in self.relations[parent]: raise Exception(("trying to get empty matrix {0} -> {1}").format( parent, child)) m = self.index_d[(parent, child)] if config.adv_set['disable_leading_mesons'] and abs(child) < 2000 and (parent, -child) in list(self.index_d): m_anti = self.index_d[(parent, -child)] ie = 50 info(5, 'sum in disable_leading_mesons', (np.sum(m[:, ie - 30:ie]) - np.sum(m_anti[:, ie - 30:ie]))) if (np.sum(m[:, ie - 30:ie]) - np.sum(m_anti[:, ie - 30:ie])) > 0: info(5, 'inverting meson due to leading particle veto.', child, '->', -child) m = m_anti else: info(5, 'no inversion since child not leading', child) else: info(20, 'no meson inversion in leading particle veto.', parent, child) if (parent[0], child[0]) in list(self.mod_pprod): info( 5, 'using modified particle production for {0}/{1}'.format( parent[0], child[0])) i = 0 m = np.copy(m) for args, mmat in six.iteritems(self.mod_pprod[(parent[0], child[0])]): info(10, i, (parent[0], child[0]), args, np.sum(mmat), np.sum(m)) i += 1 m *= mmat return m class Decays(object): def __init__(self, 
mceq_hdf_db, default_decay_dset='full_decays'): self.mceq_db = mceq_hdf_db self.parent_list = [] self._default_decay_dset = default_decay_dset def load(self, parent_list=None, decay_dset=None): if decay_dset is None: decay_dset = self._default_decay_dset index = self.mceq_db.decay_db(decay_dset) self.parents = index['parents'] self.particles = index['particles'] self.relations = index['relations'] self.index_d = index['index_d'] self.description = index['description'] regenerate_index = False if (parent_list): def _follow_decay_chain(p, plist): if p in self.relations: plist.append(p) for d in self.relations[p]: _follow_decay_chain(d, plist) else: return plist plist = [] for p in parent_list: _follow_decay_chain(p, plist) self.parents = sorted(list(set(plist))) regenerate_index = True if regenerate_index: self.particles = [] for p in list(self.relations): if p not in self.parents: _ = self.relations.pop(p, None) continue self.particles.append(p) self.particles += self.relations[p] self.particles = sorted(list(set(self.particles))) def __getitem__(self, key): return self.get_matrix(*key) def __contains__(self, key): return key in self.parents def children(self, parent_pdg): if parent_pdg not in self.relations: raise Exception( 'Parent {0} not in decay database.'.format(parent_pdg)) return self.relations[parent_pdg]
BSD 3-Clause New or Revised License
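The `Decays` container defined at the end of the entry above indexes decay channels by parent PDG code. A minimal usage sketch follows; `mceq_db` is assumed to be an already-open MCEq HDF5 database handle (it is not constructed here), and the PDG codes are illustrative.

```python
# Sketch only: `mceq_db` is assumed to be an MCEq HDF5 database handle.
decays = Decays(mceq_db, default_decay_dset='full_decays')

# Restrict the decay index to a few parents; their decay chains are followed
# recursively and the particle/relation index is regenerated.
decays.load(parent_list=[211, -211, 321])

if 211 in decays:                   # __contains__ checks the parent list
    print(decays.children(211))     # child PDG codes reachable from pi+
```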
l0sg/grouped-ssd-pytorch
lib/utils.py
read_liver_seg_masks_raw
python
def read_liver_seg_masks_raw(masks_dirname, img_shape): label_volume = None rawfile = np.fromfile(masks_dirname, dtype='uint8', sep="") shape_raw = np.array(img_shape) order = [2, 0, 1] shape_raw = shape_raw[order] num_slice = rawfile.shape[0] / shape_raw[1] / shape_raw[2] print(os.path.basename(masks_dirname) + ' slices raw vs dicom: ' + str(num_slice) + ' '+ str(shape_raw[0])) label_volume = rawfile.reshape(shape_raw) label_volume = label_volume.transpose([1, 2, 0]) label_volume = np.flipud(label_volume) return label_volume
read 3d liver segmentation raw file. 0's for background pixels, 1's for liver pixels :param masks_dirname: path to the raw (uint8) segmentation mask file :param img_shape: (rows, columns, num_slices) shape of the matching DICOM volume :return: label volume with shape (rows, columns, num_slices)
https://github.com/l0sg/grouped-ssd-pytorch/blob/27d27e63770f1bf922ccf9df1c80d46ad3bc0eab/lib/utils.py#L43-L67
import pydicom as dicom import os import numpy as np import glob import natsort import scipy import scipy.misc import unittest from PIL import Image IMG_DTYPE = np.float32 SEG_DTYPE = np.uint8 def read_dicom_series(directory, filepattern="P_*"): if not os.path.exists(directory) or not os.path.isdir(directory): raise ValueError("Given directory does not exist or is a file : " + str(directory)) lstFilesDCM = natsort.natsorted(glob.glob(os.path.join(directory, filepattern))) RefDs = dicom.read_file(lstFilesDCM[0]) ConstPixelDims = (int(RefDs.Rows), int(RefDs.Columns), len(lstFilesDCM)) ArrayDicom = np.zeros(ConstPixelDims, dtype=RefDs.pixel_array.dtype) for filenameDCM in lstFilesDCM: ds = dicom.read_file(filenameDCM) ArrayDicom[:, :, lstFilesDCM.index(filenameDCM)] = ds.pixel_array return ArrayDicom
MIT License
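A short, hedged usage sketch for `read_liver_seg_masks_raw`; the file name and volume shape below are made up for illustration, and the raw file is expected to hold uint8 labels (0 = background, 1 = liver) for the same series read by `read_dicom_series`.

```python
import numpy as np

mask_path = "patient01_liver_mask.raw"   # placeholder path to a uint8 raw mask
img_shape = (512, 512, 80)               # (rows, columns, num_slices) of the matching DICOM volume

labels = read_liver_seg_masks_raw(mask_path, img_shape)
print(labels.shape)        # (512, 512, 80): reshaped back to the DICOM layout
print(np.unique(labels))   # e.g. [0 1]
```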
twidi/py-dataql
dataql/resources.py
Filter.__repr__
python
def __repr__(self): result = '.%(name)s' + ('' if self.args is None else '(%(args)s)') return result % { 'name': self.name, 'args': ', '.join(map(str, self.args)) if self.args else '', }
String representation of a ``Filter`` instance. Returns ------- str The string representation of the current ``Filter`` instance. Example ------- >>> Filter('foo') .foo >>> Filter('foo', args=[ ... PosArg(1), ... NamedArg('a', '=', 2) ... ]) .foo(1, a=2)
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/resources.py#L328-L353
__all__ = ('Field', 'List', 'Object', 'Filter', 'NamedArg', 'PosArg') from abc import ABCMeta class WithParent(metaclass=ABCMeta): __slots__ = ( 'parent', ) def __init__(self): self.parent = None def set_parent(self, parent): self.parent = parent def get_level(self): if self.parent: return self.parent.get_level() + 1 return 0 class Resource(WithParent, metaclass=ABCMeta): __slots__ = WithParent.__slots__ + ( 'name', 'is_root', 'filters', ) def __init__(self, name, filters=None, is_root=False): super().__init__() self.name = name self.is_root = is_root self.filters = filters or [] if not self.filters and self.name: self.filters = [Filter(name=self.name)] for filter_ in self.filters: filter_.set_parent(self) def __repr__(self): filters = '' if len(self.filters) > 1 or self.filters and ( self.filters[0].name != self.name or self.filters[0].args): filters = ' ' + ''.join(map(str, self.filters)) result = '%(indent)s<%(cls)s%(name)s%(filters)s />' % { 'cls': self.__class__.__name__, 'name': ('[%s]' % self.name) if self.name else '', 'filters': filters, 'indent': ' ' * self.get_level(), } return result class Field(Resource): pass class MultiResources(Resource, metaclass=ABCMeta): def __init__(self, name, filters=None, is_root=False, resources=None): super().__init__(name, filters, is_root) self.resources = resources or [] for resource in self.resources: resource.set_parent(self) def __repr__(self): parent_repr = super().__repr__() if self.resources: return ( '%(start)s\n' '%(sub)s\n' '%(indent)s</%(cls)s%(name)s>' ) % { 'start': parent_repr[:-3] + '>', 'sub': '\n'.join(map(str, self.resources)), 'cls': self.__class__.__name__, 'name': ('[%s]' % self.name) if self.name else '', 'indent': ' ' * self.get_level(), } else: return parent_repr class List(MultiResources): pass class Object(MultiResources): pass class BaseFilter(WithParent, metaclass=ABCMeta): class Filter(BaseFilter): __slots__ = BaseFilter.__slots__ + ( 'name', 'args', ) def __init__(self, name, args=None): super().__init__() self.name = name self.args = args if self.args: for arg in self.args: arg.set_parent(self)
BSD 2-Clause Simplified License
joshuaavalon/avalonxmlagent.bundle
Contents/Libraries/Shared/mutagen/oggflac.py
OggFLACVComment._inject
python
def _inject(self, fileobj): fileobj.seek(0) page = OggPage(fileobj) while not page.packets[0].startswith(b"\x7FFLAC"): page = OggPage(fileobj) first_page = page while not (page.sequence == 1 and page.serial == first_page.serial): page = OggPage(fileobj) old_pages = [page] while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1): page = OggPage(fileobj) if page.serial == first_page.serial: old_pages.append(page) packets = OggPage.to_packets(old_pages, strict=False) data = self.write() data = packets[0][:1] + struct.pack(">I", len(data))[-3:] + data packets[0] = data new_pages = OggPage.from_packets(packets, old_pages[0].sequence) OggPage.replace(fileobj, old_pages, new_pages)
Write tag data into the FLAC Vorbis comment packet/page.
https://github.com/joshuaavalon/avalonxmlagent.bundle/blob/41b4f1eca7d3db7718372d11b47c795b413c06cc/Contents/Libraries/Shared/mutagen/oggflac.py#L97-L125
__all__ = ["OggFLAC", "Open", "delete"] import struct from ._compat import cBytesIO from mutagen import flac from mutagen.flac import VCFLACDict, StrictFileObject from mutagen.ogg import OggPage, OggFileType, error as OggError class error(OggError): pass class OggFLACHeaderError(error): pass class OggFLACStreamInfo(flac.StreamInfo): packets = 0 serial = 0 def load(self, data): if isinstance(data, StrictFileObject): data = data._fileobj page = OggPage(data) while not page.packets[0].startswith(b"\x7FFLAC"): page = OggPage(data) major, minor, self.packets, flac = struct.unpack( ">BBH4s", page.packets[0][5:13]) if flac != b"fLaC": raise OggFLACHeaderError("invalid FLAC marker (%r)" % flac) elif (major, minor) != (1, 0): raise OggFLACHeaderError( "unknown mapping version: %d.%d" % (major, minor)) self.serial = page.serial stringobj = StrictFileObject(cBytesIO(page.packets[0][17:])) super(OggFLACStreamInfo, self).load(stringobj) def _post_tags(self, fileobj): if self.length: return page = OggPage.find_last(fileobj, self.serial) self.length = page.position / float(self.sample_rate) def pprint(self): return u"Ogg " + super(OggFLACStreamInfo, self).pprint() class OggFLACVComment(VCFLACDict): def load(self, data, info, errors='replace'): pages = [] complete = False while not complete: page = OggPage(data) if page.serial == info.serial: pages.append(page) complete = page.complete or (len(page.packets) > 1) comment = cBytesIO(OggPage.to_packets(pages)[0][4:]) super(OggFLACVComment, self).load(comment, errors=errors)
MIT License
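`_inject` is internal machinery; in normal use it runs when an Ogg FLAC file's tags are saved. The sketch below shows the typical tag-editing flow under the assumption that `song.oga` is an existing Ogg FLAC file; saving is what ultimately rewrites the Vorbis comment packet.

```python
from mutagen.oggflac import OggFLAC

audio = OggFLAC("song.oga")             # placeholder filename
audio["title"] = ["An example title"]   # Vorbis comments are lists of strings
audio.save()                            # rewriting the comment pages goes through _inject()
```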
sassoftware/python-sasctl
src/sasctl/core.py
Session.as_swat
python
def as_swat(self, server=None, **kwargs): server = server or 'cas-shared-default' if swat is None: raise RuntimeError( "The 'swat' package must be installed to create a SWAT connection." ) url = '{}://{}/{}-http/'.format( self._settings['protocol'], self.hostname, server ) kwargs.setdefault('hostname', url) if version.parse(swat.__version__) >= version.parse('1.8'): kwargs['username'] = None kwargs['password'] = self.auth.access_token else: kwargs.setdefault('username', self.username) kwargs.setdefault('password', self._settings['password']) orig_sslreqcert = os.environ.get('SSLREQCERT') if not self.verify: os.environ['SSLREQCERT'] = 'no' try: cas = swat.CAS(**kwargs) cas.setsessopt(messagelevel='warning') finally: if orig_sslreqcert: os.environ['SSLREQCERT'] = orig_sslreqcert return cas
Create a SWAT connection to a CAS server. Uses the authentication information from the session to establish a CAS connection using SWAT. Parameters ---------- server : str, optional The logical name of the CAS server, not the hostname. Defaults to "cas-shared-default". kwargs : any Additional arguments to pass to the `swat.CAS` constructor. Can be used to override this method's default behavior or customize the CAS session. Returns ------- swat.CAS An active SWAT connection Raises ------ RuntimeError If `swat` package is not available. Examples -------- >>> sess = Session('example.sas.com') >>> with sess.as_swat() as conn: ... conn.listnodes() CASResults([('nodelist', Node List name role connected IP Address 0 example.sas.com controller Yes 127.0.0.1)])
https://github.com/sassoftware/python-sasctl/blob/ab6387b86a26f6b0b08fbb36d0c94fe18be59b5f/src/sasctl/core.py#L462-L534
import concurrent.futures import copy import logging import json import netrc import os import re import ssl import sys import warnings from datetime import datetime, timedelta from uuid import UUID, uuid4 import requests import requests.exceptions import yaml from packaging import version from requests.adapters import HTTPAdapter from urllib.parse import urlsplit, urlunsplit from urllib.error import HTTPError try: import swat except ImportError: swat = None try: import kerberos except ImportError: try: import winkerberos as kerberos except ImportError: kerberos = None from .utils.cli import sasctl_command from .utils.misc import versionadded from . import exceptions logger = logging.getLogger(__name__) _session = None def _pformat(text): from pprint import pformat try: return pformat(json.loads(text)) except (TypeError, UnicodeDecodeError, ValueError): try: return pformat(text) except UnicodeDecodeError: return text def _redact(pattern, repl, string): is_bytes = isinstance(string, bytes) try: string = string.decode('utf-8') if is_bytes else string string = re.sub(pattern, repl, string) string = string.encode('utf-8') if is_bytes else string except UnicodeDecodeError: pass return string def _filter_password(r): if hasattr(r, 'body') and r.body is not None: r.body = _redact(r"(?<=&password=)([^&]*)\b", '*****', r.body) r.body = _redact('(?<=client_secret": ")[^"]*', '*****', r.body) return r def _filter_token(r): if hasattr(r, 'headers') and 'Authorization' in r.headers: r.headers['Authorization'] = _redact( r'(?<=Bearer ).*', '[redacted]', r.headers['Authorization'] ) if hasattr(r, 'headers') and 'Authorization' in r.headers: r.headers['Authorization'] = _redact( r'(?<=Basic ).*', '[redacted]', r.headers['Authorization'] ) if hasattr(r, 'headers') and 'X-Consul-Token' in r.headers: r.headers['X-Consul-Token'] = '[redacted]' if hasattr(r, '_content'): r._content = _redact('(?<=access_token":")[^"]*', '[redacted]', r._content) return r DEFAULT_FILTERS = [_filter_password, _filter_token] def current_session(*args, **kwargs): global _session if len(args) == 1 and (isinstance(args[0], Session) or args[0] is None): _session = args[0] elif args: _session = Session(*args, **kwargs) return _session class OAuth2Token(requests.auth.AuthBase): def __init__( self, access_token, refresh_token=None, expiration=None, expires_in=None, **kwargs ): self.access_token = access_token self.refresh_token = refresh_token self.expiration = expiration if expires_in is not None: self.expiration = datetime.now() + timedelta(seconds=expires_in) def __call__(self, r): r.headers['Authorization'] = 'Bearer ' + self.access_token return r @property def is_expired(self): if self.expiration is None: return False return self.expiration < datetime.now() class RestObj(dict): def __getattr__(self, item): if item in self: result = self[item] if isinstance(result, dict): return RestObj(result) return result raise AttributeError( "'%s' object has no attribute '%s'" % (self.__class__.__name__, item) ) def __repr__(self): headers = getattr(self, '_headers', {}) return "%s(headers=%r, data=%s)" % ( self.__class__, headers, super(RestObj, self).__repr__(), ) def __str__(self): if 'name' in self: return str(self['name']) if 'id' in self: return str(self['id']) return repr(self) class SSLContextAdapter(HTTPAdapter): def __init__(self, *args, **kwargs): self.assert_hostname = kwargs.pop('assert_hostname', True) requests.adapters.HTTPAdapter.__init__(self, *args, **kwargs) def init_poolmanager(self, *args, **kwargs): context = 
ssl.create_default_context() context.check_hostname = self.assert_hostname kwargs['ssl_context'] = context kwargs['assert_hostname'] = self.assert_hostname return super(SSLContextAdapter, self).init_poolmanager(*args, **kwargs) class Session(requests.Session): PROFILE_PATH = '~/.sas/viya-api-profiles.yaml' def __init__( self, hostname, username=None, password=None, authinfo=None, protocol=None, port=None, verify_ssl=None, token=None, ): super(Session, self).__init__() if verify_ssl is None: verify_ssl = os.environ.get('SSLREQCERT', 'yes') verify_ssl = str(verify_ssl).lower() not in ('no', 'false') self._id = uuid4().hex self.message_log = logger.getChild('session.%s' % self._id) for k in ['SSLCALISTLOC', 'CAS_CLIENT_SSL_CA_LIST']: if k in os.environ: os.environ['REQUESTS_CA_BUNDLE'] = os.environ[k] break if 'REQUESTS_CA_BUNDLE' not in os.environ: if verify_ssl: try: from urllib3.util.ssl_ import is_ipaddress except ImportError: def is_ipaddress(hst): return re.match(r"^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$", hst) verify_hostname = not is_ipaddress(hostname) adapter = SSLContextAdapter(assert_hostname=verify_hostname) self.mount('https://', adapter) if not verify_ssl: from urllib3.exceptions import InsecureRequestWarning warnings.simplefilter('default', InsecureRequestWarning) self.filters = DEFAULT_FILTERS if swat and isinstance(hostname, swat.CAS): if isinstance( hostname._sw_connection, swat.cas.rest.connection.REST_CASConnection ): import base64 httpAddress = hostname.get_action('builtins.httpAddress') address = httpAddress() domain = address.virtualHost if not domain: domain = hostname._sw_connection._current_hostname protocol = address.protocol port = address.port auth = hostname._sw_connection._auth.decode('utf-8').replace( 'Basic ', '' ) username, password = base64.b64decode(auth).decode('utf-8').split(':') else: raise ValueError( "A 'swat.CAS' session can only be reused " "when it's connected via the REST APIs." ) else: url = urlsplit(hostname) protocol = protocol or url.scheme domain = url.hostname or str(hostname) self._settings = { 'protocol': protocol or 'https', 'domain': domain, 'port': port, 'username': username, 'password': password, } if self._settings['password'] is None: if 'swat' in sys.modules: auth = swat.utils.authinfo.query_authinfo( domain, user=username, path=authinfo ) self._settings['username'] = auth.get('user') self._settings['password'] = auth.get('password') if self._settings['password'] is None: try: parser = netrc.netrc(authinfo) values = parser.authenticators(domain) if values: ( self._settings['username'], _, self._settings['password'], ) = values except (OSError, IOError): pass self.verify = verify_ssl self.auth = self.get_auth( self._settings['username'], self._settings['password'], token ) self._old_session = current_session() current_session(self) def add_logger(self, handler, level=None): level = level or logging.DEBUG if handler.level == logging.NOTSET: handler.setLevel(level) self.message_log.addHandler(handler) if self.message_log.level == logging.NOTSET: self.message_log.setLevel(handler.level) return handler def add_stderr_logger(self, level=None): return self.add_logger(logging.StreamHandler(), level) @versionadded(version='1.5.4')
Apache License 2.0
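A brief sketch of the flow around `as_swat`; the host, credentials, server name, and the CAS action called are placeholders, and a reachable Viya deployment is assumed.

```python
# Sketch only: host, credentials, and server name are placeholders.
sess = Session('example.sas.com', username='sasuser', password='secret')

# Extra keyword arguments are forwarded to swat.CAS; the returned CAS object
# can be used as a context manager so the connection is closed afterwards.
with sess.as_swat(server='cas-shared-default') as conn:
    print(conn.serverstatus())
```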
quay/quay
data/model/modelutil.py
paginate_query
python
def paginate_query(query, limit=50, sort_field_name=None, page_number=None): results = list(query) page_token = None if len(results) > limit: start_index = getattr(results[limit], sort_field_name or "id") is_datetime = False if isinstance(start_index, datetime): start_index = start_index.isoformat() + "Z" is_datetime = True page_token = { "start_index": start_index, "page_number": page_number + 1 if page_number else 1, "is_datetime": is_datetime, } return results[0:limit], page_token
Executes the given query and returns a page's worth of results, as well as the page token for the next page (if any).
https://github.com/quay/quay/blob/f50f37a393fa2273234f8ac0aa9f34a03a77a731/data/model/modelutil.py#L72-L92
import dateutil.parser from datetime import datetime from peewee import SQL def paginate( query, model, descending=False, page_token=None, limit=50, sort_field_alias=None, max_page=None, sort_field_name=None, ): sort_field_name = sort_field_name or "id" sort_field = getattr(model, sort_field_name) if sort_field_alias is not None: sort_field_name = sort_field_alias sort_field = SQL(sort_field_alias) if descending: query = query.order_by(sort_field.desc()) else: query = query.order_by(sort_field) start_index = pagination_start(page_token) if start_index is not None: if descending: query = query.where(sort_field <= start_index) else: query = query.where(sort_field >= start_index) query = query.limit(limit + 1) page_number = (page_token.get("page_number") or None) if page_token else None if page_number is not None and max_page is not None and page_number > max_page: return [], None return paginate_query( query, limit=limit, sort_field_name=sort_field_name, page_number=page_number ) def pagination_start(page_token=None): if page_token is not None: start_index = page_token.get("start_index") if page_token.get("is_datetime"): start_index = dateutil.parser.parse(start_index) return start_index return None
Apache License 2.0
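A usage sketch of the surrounding `paginate` helper, which drives `paginate_query`; `Repository` stands in for any peewee model with an `id` column and is purely hypothetical here.

```python
# First page: up to 50 rows plus a token describing where the next page starts.
query = Repository.select()
results, next_page_token = paginate(query, Repository, limit=50)

# Next page: feeding the token back bounds the query at the last sort value seen.
if next_page_token is not None:
    more_results, next_page_token = paginate(
        query, Repository, limit=50, page_token=next_page_token
    )
```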
tl-system/plato
examples/tempo/tempo_edge.py
Client.process_server_response
python
def process_server_response(self, server_response): super().process_server_response(server_response) if 'local_epoch_num' in server_response: local_epoch_list = server_response['local_epoch_num'] if hasattr(Config().clients, 'simulation') and Config().clients.simulation: index = self.client_id - Config().clients.per_round - 1 else: index = self.client_id - Config().clients.total_clients - 1 local_epoch_num = local_epoch_list[index] Config().trainer = Config().trainer._replace( epochs=local_epoch_num)
Additional client-specific processing on the server response.
https://github.com/tl-system/plato/blob/cbc5ddc04b554b4b05679a85c6ed6e5fb7f70bef/examples/tempo/tempo_edge.py#L12-L27
from plato.config import Config from plato.clients import edge class Client(edge.Client):
Apache License 2.0
buildacell/biocrnpyler
biocrnpyler/chemical_reaction_network.py
ChemicalReactionNetwork.check_crn_validity
python
def check_crn_validity(reactions: List[Reaction], species: List[Species], show_warnings=True) -> Tuple[List[Reaction],List[Species]]: if not all(isinstance(r, Reaction) for r in reactions): raise ValueError("A non-reaction object was used as a reaction!") if not all(isinstance(s, Species) for s in species): raise ValueError("A non-species object was used as a species!") for r in reactions: if reactions.count(r) > 1 and show_warnings: warn(f"Reaction {r} may be duplicated in CRN definitions. " f"Duplicates have NOT been removed.") for s in species: if species.count(s) > 1 and show_warnings: warn(f"Species {s} is duplicated in the CRN definition. " f"Duplicates have NOT been removed.") unique_species = set(species) all_species_in_reactions = set(Species.flatten_list([r.species for r in reactions])) if unique_species != all_species_in_reactions: species_without_reactions = unique_species - all_species_in_reactions if species_without_reactions and show_warnings: warn(f'These Species {list(species_without_reactions)} are not part of any reactions in the CRN!') unlisted_reactions = all_species_in_reactions - unique_species if unlisted_reactions and show_warnings: warn(f'These Species {list(unlisted_reactions)} are not listed in the Species list, but part of the reactions!') return reactions, species
Checks that the given list of reactions and list of species can form a valid CRN. :param reactions: list of reactions :param species: list of species :param show_warnings: whether to show a warning when duplicated reactions/species are found :return: tuple(reactions, species)
https://github.com/buildacell/biocrnpyler/blob/30a7324b77e7bf88664e5a70920fa21bb4354acf/biocrnpyler/chemical_reaction_network.py#L138-L174
import copy import warnings from typing import Dict, List, Tuple, Union from warnings import warn import numbers import libsbml from .reaction import Reaction from .sbmlutil import add_all_reactions, add_all_species, add_all_compartments, create_sbml_model from .species import Species from .utils import process_initial_concentration_dict, parameter_to_value, remove_bindloc from .parameter import ModelParameter, Parameter class ChemicalReactionNetwork(object): def __init__(self, species: List[Species], reactions: List[Reaction], initial_concentration_dict: Dict[Species,Union[numbers.Real, Parameter]] = None, show_warnings=False): self.species = species self.reactions = reactions self.initial_concentration_dict = None self.initial_concentration_dict = initial_concentration_dict ChemicalReactionNetwork.check_crn_validity(self._reactions, self._species, show_warnings=show_warnings) @property def species(self): return copy.deepcopy(self._species) @species.setter def species(self, species): if not hasattr(self, "_species"): self._species = [] self._species_dict = {} self.add_species(species) else: raise AttributeError("The species in a CRN cannot be removed or modified. New Species can be added with CRN.add_species(...).") @property def reactions(self): return copy.deepcopy(self._reactions) @reactions.setter def reactions(self, reactions): if not hasattr(self, "_reactions"): self._reactions = [] self.add_reactions(reactions) else: raise AttributeError("The reactions in a CRN cannot be removed or modified. New reactions can be added with CRN.add_reactions(...).") def add_species(self, species, copy_species = True): if not isinstance(species, list): species = [species] species = Species.flatten_list(species) species = remove_bindloc(species) if copy_species: species = copy.deepcopy(species) for s in species: if not isinstance(s, Species): raise ValueError("A non-species object was used as a species!") if s not in self._species_dict: self._species_dict[s] = True self._species.append(s) def add_reactions(self, reactions: Union[Reaction,List[Reaction]], copy_reactions = True, add_species = True) -> None: if not isinstance(reactions, list): reactions = [reactions] if copy_reactions: reactions = copy.deepcopy(reactions) self._reactions += reactions if add_species: for r in reactions: if not isinstance(r, Reaction): raise ValueError("A non-reaction object was used as a reaction!") reaction_species = list(set([w.species for w in r.inputs + r.outputs])) self.add_species(reaction_species, copy_species = copy_reactions) @property def initial_concentration_dict(self): return self._initial_concentration_dict @initial_concentration_dict.setter def initial_concentration_dict(self, initial_concentration_dict): if initial_concentration_dict is None: self._initial_concentration_dict = {} elif isinstance(initial_concentration_dict, dict): for s in initial_concentration_dict: if s not in self._species_dict: raise ValueError(f"Trying to set the initial concentration of a Species {s} not in the CRN") elif parameter_to_value(initial_concentration_dict[s]) >= 0: self.initial_concentration_dict[s] = initial_concentration_dict[s] else: raise ValueError(f"Trying to set a species {s} to a negative concentration {initial_concentration_dict[s]}") @staticmethod
BSD 3-Clause New or Revised License
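A minimal validity-check sketch, assuming `Species` can be constructed directly from a name string (as elsewhere in biocrnpyler); with no reactions supplied, the species trigger the "not part of any reactions" warning, and duplicated entries are warned about but kept.

```python
A = Species("A")   # assumed Species(name) constructor
B = Species("B")

rxns, sp = ChemicalReactionNetwork.check_crn_validity(
    reactions=[], species=[A, A, B], show_warnings=True
)
```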
rwightman/efficientdet-pytorch
effdet/config/model_config.py
default_detection_model_configs
python
def default_detection_model_configs(): h = OmegaConf.create() h.name = 'tf_efficientdet_d1' h.backbone_name = 'tf_efficientnet_b1' h.backbone_args = None h.backbone_indices = None h.image_size = (640, 640) h.num_classes = 90 h.min_level = 3 h.max_level = 7 h.num_levels = h.max_level - h.min_level + 1 h.num_scales = 3 h.aspect_ratios = [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)] h.anchor_scale = 4.0 h.pad_type = 'same' h.act_type = 'swish' h.norm_layer = None h.norm_kwargs = dict(eps=.001, momentum=.01) h.box_class_repeats = 3 h.fpn_cell_repeats = 3 h.fpn_channels = 88 h.separable_conv = True h.apply_resample_bn = True h.conv_after_downsample = False h.conv_bn_relu_pattern = False h.use_native_resize_op = False h.downsample_type = 'max' h.upsample_type = 'nearest' h.redundant_bias = True h.head_bn_level_first = False h.head_act_type = None h.fpn_name = None h.fpn_config = None h.fpn_drop_path_rate = 0. h.alpha = 0.25 h.gamma = 1.5 h.label_smoothing = 0. h.legacy_focal = False h.jit_loss = False h.delta = 0.1 h.box_loss_weight = 50.0 h.soft_nms = False h.max_detection_points = 5000 h.max_det_per_image = 100 return h
Returns the default detection model configs.
https://github.com/rwightman/efficientdet-pytorch/blob/c5b694aa34900fdee6653210d856ca8320bf7d4e/effdet/config/model_config.py#L12-L79
from omegaconf import OmegaConf from copy import deepcopy
Apache License 2.0
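Since the returned object is an OmegaConf node, individual fields can be overridden after the fact; the values below are arbitrary examples.

```python
cfg = default_detection_model_configs()
cfg.num_classes = 20          # e.g. a 20-class label space
cfg.image_size = (512, 512)
print(cfg.name, cfg.fpn_channels)
```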
colin-b/requests_auth
requests_auth/authentication.py
AzureActiveDirectoryImplicitIdToken.__init__
python
def __init__(self, tenant_id: str, client_id: str, **kwargs): OAuth2Implicit.__init__( self, f"https://login.microsoftonline.com/{tenant_id}/oauth2/authorize", client_id=client_id, response_type=kwargs.pop("response_type", "id_token"), token_field_name=kwargs.pop("token_field_name", "id_token"), nonce=kwargs.pop("nonce", None) or str(uuid.uuid4()), **kwargs, )
:param tenant_id: Microsoft Tenant Identifier (formatted as a Universal Unique Identifier) :param client_id: Microsoft Application Identifier (formatted as a Universal Unique Identifier) :param response_type: Value of the response_type query parameter. id_token by default. :param token_field_name: Name of the expected field containing the token. id_token by default. :param nonce: Refer to http://openid.net/specs/openid-connect-core-1_0.html#IDToken for more details (formatted as a Universal Unique Identifier - UUID). Uses a newly generated UUID by default. :param redirect_uri_endpoint: Custom endpoint that will be used as redirect_uri the following way: http://localhost:<redirect_uri_port>/<redirect_uri_endpoint>. Default value is to redirect on / (root). :param redirect_uri_port: The port on which the server listening for the OAuth 2 token will be started. Listens on port 5000 by default. :param timeout: Maximum number of seconds to wait for a token to be received once requested. Waits for 1 minute by default. :param success_display_time: In case a token is successfully received, this is the maximum number of milliseconds the success page will be displayed in your browser. Displays the page for 1 millisecond by default. :param failure_display_time: In case the received token is not valid, this is the maximum number of milliseconds the failure page will be displayed in your browser. Displays the page for 5 seconds by default. :param header_name: Name of the header field used to send the token. The token will be sent in the Authorization header field by default. :param header_value: Format used to send the token value. "{token}" must be present as it will be replaced by the actual token. The token will be sent as "Bearer {token}" by default. :param kwargs: all additional authorization parameters that should be put as query parameters in the authorization URL. Usual parameters are: * prompt: none to avoid prompting the user if a session is already opened.
https://github.com/colin-b/requests_auth/blob/dc971baf73b8dbc7a1d0465c9717b5a3fe4c80ce/requests_auth/authentication.py#L832-L872
import base64 import os import uuid from hashlib import sha256, sha512 from urllib.parse import parse_qs, urlsplit, urlunsplit, urlencode from typing import Optional import requests import requests.auth import warnings from requests_auth import oauth2_authentication_responses_server, oauth2_tokens from requests_auth.errors import InvalidGrantRequest, GrantNotProvided def _add_parameters(initial_url: str, extra_parameters: dict) -> str: scheme, netloc, path, query_string, fragment = urlsplit(initial_url) query_params = parse_qs(query_string) query_params.update( { parameter_name: [parameter_value] for parameter_name, parameter_value in extra_parameters.items() } ) new_query_string = urlencode(query_params, doseq=True) return urlunsplit((scheme, netloc, path, new_query_string, fragment)) def _pop_parameter(url: str, query_parameter_name: str) -> (str, Optional[str]): scheme, netloc, path, query_string, fragment = urlsplit(url) query_params = parse_qs(query_string) parameter_value = query_params.pop(query_parameter_name, None) new_query_string = urlencode(query_params, doseq=True) return ( urlunsplit((scheme, netloc, path, new_query_string, fragment)), parameter_value, ) def _get_query_parameter(url: str, param_name: str) -> Optional[str]: scheme, netloc, path, query_string, fragment = urlsplit(url) query_params = parse_qs(query_string) all_values = query_params.get(param_name) return all_values[0] if all_values else None def request_new_grant_with_post( url: str, data, grant_name: str, timeout: float, session: requests.Session ) -> (str, int, str): with session: response = session.post(url, data=data, timeout=timeout) if not response: raise InvalidGrantRequest(response) content = response.json() token = content.get(grant_name) if not token: raise GrantNotProvided(grant_name, content) return token, content.get("expires_in"), content.get('refresh_token') class OAuth2: token_cache = oauth2_tokens.TokenMemoryCache() class SupportMultiAuth: def __add__(self, other): if isinstance(other, _MultiAuth): return _MultiAuth(self, *other.authentication_modes) return _MultiAuth(self, other) def __and__(self, other): if isinstance(other, _MultiAuth): return _MultiAuth(self, *other.authentication_modes) return _MultiAuth(self, other) class BrowserAuth: def __init__(self, kwargs): redirect_uri_endpoint = kwargs.pop("redirect_uri_endpoint", None) or "" self.redirect_uri_port = int(kwargs.pop("redirect_uri_port", None) or 5000) self.redirect_uri = ( f"http://localhost:{self.redirect_uri_port}/{redirect_uri_endpoint}" ) self.timeout = float(kwargs.pop("timeout", None) or 60) self.success_display_time = int(kwargs.pop("success_display_time", None) or 1) self.failure_display_time = int( kwargs.pop("failure_display_time", None) or 5000 ) class OAuth2ResourceOwnerPasswordCredentials(requests.auth.AuthBase, SupportMultiAuth): def __init__(self, token_url: str, username: str, password: str, **kwargs): self.token_url = token_url if not self.token_url: raise Exception("Token URL is mandatory.") self.username = username if not self.username: raise Exception("User name is mandatory.") self.password = password if not self.password: raise Exception("Password is mandatory.") self.header_name = kwargs.pop("header_name", None) or "Authorization" self.header_value = kwargs.pop("header_value", None) or "Bearer {token}" if "{token}" not in self.header_value: raise Exception("header_value parameter must contains {token}.") self.token_field_name = kwargs.pop("token_field_name", None) or "access_token" self.timeout = 
int(kwargs.pop("timeout", None) or 60) self.session = kwargs.pop("session", None) or requests.Session() self.session.auth = (self.username, self.password) self.data = { "grant_type": "password", "username": self.username, "password": self.password, } scope = kwargs.pop("scope", None) if scope: self.data["scope"] = " ".join(scope) if isinstance(scope, list) else scope self.data.update(kwargs) self.refresh_data = { "grant_type": "refresh_token" } if scope: self.refresh_data["scope"] = self.data["scope"] self.refresh_data.update(kwargs) all_parameters_in_url = _add_parameters(self.token_url, self.data) self.state = sha512(all_parameters_in_url.encode("unicode_escape")).hexdigest() def __call__(self, r): token = OAuth2.token_cache.get_token( key=self.state, on_missing_token=self.request_new_token, on_expired_token=self.refresh_token, ) r.headers[self.header_name] = self.header_value.format(token=token) return r def request_new_token(self): token, expires_in, refresh_token = request_new_grant_with_post( self.token_url, self.data, self.token_field_name, self.timeout, self.session, ) return (self.state, token, expires_in, refresh_token) if expires_in else (self.state, token) def refresh_token(self, refresh_token: str): self.refresh_data['refresh_token'] = refresh_token token, expires_in, refresh_token = request_new_grant_with_post( self.token_url, self.refresh_data, self.token_field_name, self.timeout, self.session ) return self.state, token, expires_in, refresh_token class OAuth2ClientCredentials(requests.auth.AuthBase, SupportMultiAuth): def __init__(self, token_url: str, client_id: str, client_secret: str, **kwargs): self.token_url = token_url if not self.token_url: raise Exception("Token URL is mandatory.") self.client_id = client_id if not self.client_id: raise Exception("client_id is mandatory.") self.client_secret = client_secret if not self.client_secret: raise Exception("client_secret is mandatory.") self.header_name = kwargs.pop("header_name", None) or "Authorization" self.header_value = kwargs.pop("header_value", None) or "Bearer {token}" if "{token}" not in self.header_value: raise Exception("header_value parameter must contains {token}.") self.token_field_name = kwargs.pop("token_field_name", None) or "access_token" self.timeout = int(kwargs.pop("timeout", None) or 60) self.session = kwargs.pop("session", None) or requests.Session() self.session.auth = (self.client_id, self.client_secret) self.data = {"grant_type": "client_credentials"} scope = kwargs.pop("scope", None) if scope: self.data["scope"] = " ".join(scope) if isinstance(scope, list) else scope self.data.update(kwargs) all_parameters_in_url = _add_parameters(self.token_url, self.data) self.state = sha512(all_parameters_in_url.encode("unicode_escape")).hexdigest() def __call__(self, r): token = OAuth2.token_cache.get_token(self.state, self.request_new_token) r.headers[self.header_name] = self.header_value.format(token=token) return r def request_new_token(self) -> tuple: token, expires_in, _ = request_new_grant_with_post( self.token_url, self.data, self.token_field_name, self.timeout, self.session, ) return (self.state, token, expires_in) if expires_in else (self.state, token) class OAuth2AuthorizationCode(requests.auth.AuthBase, SupportMultiAuth, BrowserAuth): def __init__(self, authorization_url: str, token_url: str, **kwargs): self.authorization_url = authorization_url if not self.authorization_url: raise Exception("Authorization URL is mandatory.") self.token_url = token_url if not self.token_url: raise Exception("Token 
URL is mandatory.") BrowserAuth.__init__(self, kwargs) self.header_name = kwargs.pop("header_name", None) or "Authorization" self.header_value = kwargs.pop("header_value", None) or "Bearer {token}" if "{token}" not in self.header_value: raise Exception("header_value parameter must contains {token}.") self.token_field_name = kwargs.pop("token_field_name", None) or "access_token" username = kwargs.pop("username", None) password = kwargs.pop("password", None) self.auth = (username, password) if username and password else None self.session = kwargs.pop("session", None) or requests.Session() self.session.auth = self.auth code_field_name = kwargs.pop("code_field_name", "code") if _get_query_parameter(self.authorization_url, "response_type"): kwargs.pop("response_type", None) else: kwargs.setdefault("response_type", "code") authorization_url_without_nonce = _add_parameters( self.authorization_url, kwargs ) authorization_url_without_nonce, nonce = _pop_parameter( authorization_url_without_nonce, "nonce" ) self.state = sha512( authorization_url_without_nonce.encode("unicode_escape") ).hexdigest() custom_code_parameters = { "state": self.state, "redirect_uri": self.redirect_uri, } if nonce: custom_code_parameters["nonce"] = nonce code_grant_url = _add_parameters( authorization_url_without_nonce, custom_code_parameters ) self.code_grant_details = oauth2_authentication_responses_server.GrantDetails( code_grant_url, code_field_name, self.timeout, self.success_display_time, self.failure_display_time, self.redirect_uri_port, ) self.token_data = { "grant_type": "authorization_code", "redirect_uri": self.redirect_uri, } self.token_data.update(kwargs) self.refresh_data = { "grant_type": "refresh_token" } self.refresh_data.update(kwargs) def __call__(self, r): token = OAuth2.token_cache.get_token( key=self.state, on_missing_token=self.request_new_token, on_expired_token=self.refresh_token ) r.headers[self.header_name] = self.header_value.format(token=token) return r def request_new_token(self): state, code = oauth2_authentication_responses_server.request_new_grant( self.code_grant_details ) self.token_data["code"] = code token, expires_in, refresh_token = request_new_grant_with_post( self.token_url, self.token_data, self.token_field_name, self.timeout, self.session, ) return (self.state, token, expires_in, refresh_token) if expires_in else (self.state, token) def refresh_token(self, refresh_token: str): self.refresh_data['refresh_token'] = refresh_token token, expires_in, refresh_token = request_new_grant_with_post( self.token_url, self.refresh_data, self.token_field_name, self.timeout, self.session ) return self.state, token, expires_in, refresh_token class OAuth2AuthorizationCodePKCE( requests.auth.AuthBase, SupportMultiAuth, BrowserAuth ): def __init__(self, authorization_url: str, token_url: str, **kwargs): self.authorization_url = authorization_url if not self.authorization_url: raise Exception("Authorization URL is mandatory.") self.token_url = token_url if not self.token_url: raise Exception("Token URL is mandatory.") BrowserAuth.__init__(self, kwargs) self.session = kwargs.pop("session", None) or requests.Session() self.session.timeout = self.timeout self.header_name = kwargs.pop("header_name", None) or "Authorization" self.header_value = kwargs.pop("header_value", None) or "Bearer {token}" if "{token}" not in self.header_value: raise Exception("header_value parameter must contains {token}.") self.token_field_name = kwargs.pop("token_field_name", None) or "access_token" code_field_name = 
kwargs.pop("code_field_name", "code") authorization_url_without_response_type, response_type = _pop_parameter( self.authorization_url, "response_type" ) if response_type: kwargs["response_type"] = response_type else: kwargs.setdefault("response_type", "code") authorization_url_without_nonce = _add_parameters( authorization_url_without_response_type, kwargs ) authorization_url_without_nonce, nonce = _pop_parameter( authorization_url_without_nonce, "nonce" ) self.state = sha512( authorization_url_without_nonce.encode("unicode_escape") ).hexdigest() custom_code_parameters = { "state": self.state, "redirect_uri": self.redirect_uri, } if nonce: custom_code_parameters["nonce"] = nonce code_verifier = self.generate_code_verifier() code_challenge = self.generate_code_challenge(code_verifier) custom_code_parameters["code_challenge"] = code_challenge custom_code_parameters["code_challenge_method"] = "S256" code_grant_url = _add_parameters( authorization_url_without_nonce, custom_code_parameters ) self.code_grant_details = oauth2_authentication_responses_server.GrantDetails( code_grant_url, code_field_name, self.timeout, self.success_display_time, self.failure_display_time, self.redirect_uri_port, ) self.token_data = { "code_verifier": code_verifier, "grant_type": "authorization_code", "redirect_uri": self.redirect_uri, } self.token_data.update(kwargs) self.refresh_data = { "grant_type": "refresh_token" } self.refresh_data.update(kwargs) def __call__(self, r): token = OAuth2.token_cache.get_token( key=self.state, on_missing_token=self.request_new_token, on_expired_token=self.refresh_token ) r.headers[self.header_name] = self.header_value.format(token=token) return r def request_new_token(self) -> tuple: state, code = oauth2_authentication_responses_server.request_new_grant( self.code_grant_details ) self.token_data["code"] = code token, expires_in, refresh_token = request_new_grant_with_post( self.token_url, self.token_data, self.token_field_name, self.timeout, self.session, ) return (self.state, token, expires_in, refresh_token) if expires_in else (self.state, token) def refresh_token(self, refresh_token: str): self.refresh_data['refresh_token'] = refresh_token token, expires_in, refresh_token = request_new_grant_with_post( self.token_url, self.refresh_data, self.token_field_name, self.timeout, self.session ) return self.state, token, expires_in, refresh_token @staticmethod def generate_code_verifier() -> bytes: return base64.urlsafe_b64encode(os.urandom(64)).rstrip(b"=") @staticmethod def generate_code_challenge(verifier: bytes) -> bytes: digest = sha256(verifier).digest() return base64.urlsafe_b64encode(digest).rstrip(b"=") class OAuth2Implicit(requests.auth.AuthBase, SupportMultiAuth, BrowserAuth): def __init__(self, authorization_url: str, **kwargs): self.authorization_url = authorization_url if not self.authorization_url: raise Exception("Authorization URL is mandatory.") BrowserAuth.__init__(self, kwargs) self.header_name = kwargs.pop("header_name", None) or "Authorization" self.header_value = kwargs.pop("header_value", None) or "Bearer {token}" if "{token}" not in self.header_value: raise Exception("header_value parameter must contains {token}.") response_type = _get_query_parameter(self.authorization_url, "response_type") if response_type: kwargs.pop("response_type", None) else: response_type = kwargs.setdefault("response_type", "token") token_field_name = kwargs.pop("token_field_name", None) if not token_field_name: token_field_name = ( "id_token" if "id_token" == response_type else 
"access_token" ) authorization_url_without_nonce = _add_parameters( self.authorization_url, kwargs ) authorization_url_without_nonce, nonce = _pop_parameter( authorization_url_without_nonce, "nonce" ) self.state = sha512( authorization_url_without_nonce.encode("unicode_escape") ).hexdigest() custom_parameters = {"state": self.state, "redirect_uri": self.redirect_uri} if nonce: custom_parameters["nonce"] = nonce grant_url = _add_parameters(authorization_url_without_nonce, custom_parameters) self.grant_details = oauth2_authentication_responses_server.GrantDetails( grant_url, token_field_name, self.timeout, self.success_display_time, self.failure_display_time, self.redirect_uri_port, ) def __call__(self, r): token = OAuth2.token_cache.get_token( self.state, oauth2_authentication_responses_server.request_new_grant, self.grant_details, ) r.headers[self.header_name] = self.header_value.format(token=token) return r class AzureActiveDirectoryImplicit(OAuth2Implicit): def __init__(self, tenant_id: str, client_id: str, **kwargs): OAuth2Implicit.__init__( self, f"https://login.microsoftonline.com/{tenant_id}/oauth2/authorize", client_id=client_id, nonce=kwargs.pop("nonce", None) or str(uuid.uuid4()), **kwargs, ) class AzureActiveDirectoryImplicitIdToken(OAuth2Implicit):
MIT License
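A hedged end-to-end sketch: the tenant id, client id, and API URL are placeholders. On the first request a browser window is opened to collect the id_token, which is then cached and sent as a Bearer header on subsequent calls.

```python
import requests

auth = AzureActiveDirectoryImplicitIdToken(
    tenant_id="00000000-0000-0000-0000-000000000000",   # placeholder tenant
    client_id="11111111-1111-1111-1111-111111111111",   # placeholder application id
)

response = requests.get("https://my-api.example.com/resource", auth=auth)
```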
pyansys/pyaedt
pyaedt/desktop.py
force_close_desktop
python
def force_close_desktop(): Module = sys.modules["__main__"] pid = Module.oDesktop.GetProcessID() if pid > 0: try: projects = Module.oDesktop.GetProjectList() for project in projects: Module.oDesktop.CloseProject(project) except: logger.warning("No Projects. Closing Desktop Connection") try: i = 0 scopeID = 5 while i <= scopeID: Module.COMUtil.ReleaseCOMObjectScope(Module.COMUtil.PInvokeProxyAPI, 0) i += 1 except: logger.warning("No COM UTIL. Closing the Desktop....") try: del Module.pyaedt_initialized except: pass try: os.kill(pid, 9) del Module.oDesktop successfully_closed = True except: Module.oMessenger.add_error_message("Something went wrong in Closing AEDT.") successfully_closed = False finally: log = logging.getLogger(__name__) handlers = log.handlers[:] for handler in handlers: handler.close() log.removeHandler(handler) return successfully_closed
Forcibly close all AEDT projects and shut down AEDT. Returns ------- bool ``True`` when successful, ``False`` when failed.
https://github.com/pyansys/pyaedt/blob/817c7d706a2d10942470ccac959645e16e9ea971/pyaedt/desktop.py#L193-L236
from __future__ import absolute_import import os import sys import traceback import logging import pkgutil import getpass import re import warnings import gc import time import datetime import tempfile if os.name == "posix": try: import subprocessdotnet as subprocess except: warnings.warn("Pythonnet is needed to run pyaedt within Linux") else: import subprocess from pyaedt.application.MessageManager import AEDTMessageManager from pyaedt.misc import list_installed_ansysem from pyaedt import is_ironpython, _pythonver, inside_desktop from . import aedt_logger pathname = os.path.dirname(__file__) if os.path.exists(os.path.join(pathname, "version.txt")): with open(os.path.join(pathname, "version.txt"), "r") as f: pyaedtversion = f.readline().strip() elif os.path.exists(os.path.join(pathname, "..", "version.txt")): with open(os.path.join(pathname, "..", "version.txt"), "r") as f: pyaedtversion = f.readline().strip() else: pyaedtversion = "X" if os.name == "nt": IsWindows = True else: IsWindows = False logger = logging.getLogger(__name__) if is_ironpython: import clr _com = "ironpython" elif IsWindows: import pythoncom modules = [tup[1] for tup in pkgutil.iter_modules()] if "clr" in modules: import clr import win32com.client _com = "pythonnet_v3" elif "win32com" in modules: import win32com.client _com = "pywin32" else: raise Exception("Error. No win32com.client or Pythonnet modules found. Please install them.") def exception_to_desktop(ex_value, tb_data): if "oMessenger" in dir(sys.modules["__main__"]): messenger = sys.modules["__main__"].oMessenger tb_trace = traceback.format_tb(tb_data) tblist = tb_trace[0].split("\n") messenger.add_error_message(str(ex_value), "Global") for el in tblist: messenger.add_error_message(el, "Global") else: tb_trace = traceback.format_tb(tb_data) tblist = tb_trace[0].split("\n") warnings.warn(str(ex_value)) for el in tblist: warnings.warn(el) def _delete_objects(): module = sys.modules["__main__"] if "COMUtil" in dir(module): del module.COMUtil if "Hfss" in dir(module): del module.Hfss if "Edb" in dir(module): del module.Edb if "Q3d" in dir(module): del module.Q3d if "Q2d" in dir(module): del module.Q2d if "Maxwell3d" in dir(module): del module.Maxwell3d if "Maxwell2d" in dir(module): del module.Maxwell2d if "Icepak" in dir(module): del module.Icepak if "Mechanical" in dir(module): del module.Mechanical if "Emit" in dir(module): del module.Emit if "Circuit" in dir(module): del module.Circuit if "Simplorer" in dir(module): del module.Simplorer if "Hfss3dLayout" in dir(module): del module.Hfss3dLayout if "oMessenger" in dir(module): del module.oMessenger if "oDesktop" in dir(module): del module.oDesktop if "pyaedt_initialized" in dir(module): del module.pyaedt_initialized if "aedt_logger" in dir(module): del module.aedt_logger if "_aedt_handler" in dir(module): _global = logging.getLogger('Global') for i in range(len(module._aedt_handler) - 1, -1, -1): _global.removeHandler(module._aedt_handler[i]) gc.collect() def release_desktop(close_projects=True, close_desktop=True): Module = sys.modules["__main__"] if "oDesktop" not in dir(Module): _delete_objects() return False else: desktop = Module.oDesktop if close_projects: projects = desktop.GetProjectList() for project in projects: desktop.CloseProject(project) pid = Module.oDesktop.GetProcessID() if not (is_ironpython and inside_desktop): i = 0 scopeID = 5 while i <= scopeID: Module.COMUtil.ReleaseCOMObjectScope(Module.COMUtil.PInvokeProxyAPI, i) i += 1 _delete_objects() if close_desktop: try: os.kill(pid, 9) 
_delete_objects() return True except: warnings.warn("Something went wrong in Closing AEDT") return False return True
MIT License
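A usage sketch under the assumption that an AEDT session was started earlier in the same Python process (so that `oDesktop` exists in `__main__`); `Hfss()` is used here only as one way to launch that session.

```python
from pyaedt import Hfss
from pyaedt.desktop import force_close_desktop

hfss = Hfss()                    # starts or attaches to an AEDT session
# ... work with the project ...
closed = force_close_desktop()   # close all projects and kill the AEDT process
print("AEDT closed:", closed)
```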
fuelrats/pipsqueak3
src/packages/rescue/rat_rescue.py
Rescue.active
python
def active(self, value: bool) -> None: if isinstance(value, bool): if value: self.status = Status.OPEN else: self.status = Status.INACTIVE else: raise ValueError(f"expected bool, got type {type(value)}")
setter for `Rescue.active` Args: value (bool): state to set `active` to. Returns: None
https://github.com/fuelrats/pipsqueak3/blob/aba1b9ca66115e36faca6258aef4a4650149ab4a/src/packages/rescue/rat_rescue.py#L432-L448
from contextlib import contextmanager import pendulum from io import StringIO from typing import Union, Optional, List, TYPE_CHECKING, Dict, Set from uuid import UUID, uuid4 from dateutil.tz import tzutc from loguru import logger from ..epic import Epic from ..mark_for_deletion import MarkForDeletion from ..quotation import Quotation from ..rat import Rat from ..utils import Platforms, Status, Colors, color, bold if TYPE_CHECKING: from ..board import RatBoard class Rescue: def __init__(self, uuid: UUID = None, client: Optional[str] = None, system: Optional[str] = None, irc_nickname: Optional[str] = None, board: 'RatBoard' = None, created_at: Optional[pendulum.DateTime] = None, updated_at: Optional[pendulum.DateTime] = None, unidentified_rats: Optional[List[str]] = None, active: bool = True, quotes: Optional[List[Quotation]] = None, epic: List[Epic] = None, title: Optional[str] = None, first_limpet: Optional[UUID] = None, board_index: Optional[int] = None, mark_for_deletion: MarkForDeletion = MarkForDeletion(), lang_id: str = "en-US", rats: List[Rat] = None, status: Status = Status.OPEN, code_red=False, platform: Platforms = None): self.modified: Set[str] = set() self._platform: Platforms = platform self.rat_board: 'RatBoard' = board self._rats = rats if rats else {} self._created_at: pendulum.DateTime = created_at if created_at else pendulum.now() self._updated_at: pendulum.DateTime = updated_at if updated_at else pendulum.now() self._api_id: UUID = uuid if uuid else uuid4() self._client: str = client self._irc_nick: str = irc_nickname if irc_nickname else client self._unidentified_rats = unidentified_rats if unidentified_rats else {} self._system: str = system.upper() if system else None self._quotes: list = quotes if quotes else [] self._epic: List[Epic] = epic if epic is not None else [] self._code_red: bool = code_red self._outcome: None = None self._title: Union[str, None] = title self._first_limpet: UUID = first_limpet self._board_index = board_index self._mark_for_deletion = mark_for_deletion self._board_index = board_index self._lang_id = lang_id self._status = status self._hash = None self.active: bool = active def __eq__(self, other) -> bool: if not isinstance(other, Rescue): return NotImplemented return other.api_id == self.api_id def __hash__(self): if self._hash is None: self._hash = hash(self.api_id) return self._hash async def add_rat(self, rat: Rat): if rat.unidentified: self.unidentified_rats[rat.name.casefold()] = rat else: self.rats[rat.name.casefold()] = rat @property def status(self) -> Status: return self._status @status.setter def status(self, value: status): if isinstance(value, Status): self._status = value self.modified.add("status") else: raise TypeError @property def irc_nickname(self) -> str: return self._irc_nick @irc_nickname.setter def irc_nickname(self, value: str) -> None: if isinstance(value, str): self._irc_nick = value self.modified.add("irc_nick") else: raise TypeError @property def lang_id(self) -> str: return self._lang_id @lang_id.setter def lang_id(self, value) -> None: if isinstance(value, str): self._lang_id = value self.modified.add("lang_id") else: raise TypeError @property def platform(self): return self._platform @platform.setter def platform(self, value) -> None: if isinstance(value, Platforms): self._platform = value self.modified.add("platform") else: raise TypeError(f"expected a Platforms, got type {type(value)}") @property def first_limpet(self) -> UUID: return self._first_limpet @first_limpet.setter def first_limpet(self, value: UUID) 
-> None: if isinstance(value, UUID): self._first_limpet = value self.modified.add("first_limpet") else: try: guid = UUID(value) except (ValueError, AttributeError): raise TypeError(f"expected UUID, got type {type(value)}") else: self._first_limpet = guid self.modified.add("first_limpet") @property def board_index(self) -> int or None: return self._board_index @board_index.setter def board_index(self, value: int or None) -> None: if isinstance(value, int) or value is None: if value is None or value >= 0: self._board_index = value self.modified.add("board_index") else: raise ValueError("Value must be greater than or equal to zero," " or None.") else: raise TypeError(f"expected int or None, got {type(value)}") @property def api_id(self) -> UUID: return self._api_id @property def client(self) -> str: return self._client @client.setter def client(self, value: str) -> None: self._client = value self.modified.add("client") @property def created_at(self) -> pendulum.DateTime: return self._created_at @property def system(self) -> Optional[str]: return self._system @system.setter def system(self, value: Optional[str]): if not (value is None or isinstance(value, str)): raise TypeError("value must be of type None or str") if value is None: self._system = None self.modified.add("system") return self._system = value.upper() self.modified.add("system") @property def active(self) -> bool: return self.status != Status.INACTIVE @active.setter
BSD 3-Clause New or Revised License
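A small sketch of how the `active` property and `status` stay in sync; the client and system names are placeholders, and the `Status` enum is assumed to be importable from the package's utils module.

```python
case = Rescue(client="Some CMDR", system="Fuelum")

case.active = False
assert case.status == Status.INACTIVE   # setter maps False -> INACTIVE
assert case.active is False             # getter derives from status

case.active = True
assert case.status == Status.OPEN       # and True -> OPEN
```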
databiosphere/toil
src/toil/bus.py
MessageBus.put
python
def put(self, message: Any) -> None: our_frame = inspect.currentframe() assert our_frame is not None, "Interpreter for Toil must have Python stack frame support" caller_frame = our_frame.f_back assert caller_frame is not None, "MessageBus.put() cannot determine its caller" logger.debug('%s sent: %s', caller_frame.f_code.co_name, message) self.__messages_by_type[type(message)].append(message)
Put a message onto the bus.
https://github.com/databiosphere/toil/blob/eb2ae8365ae2ebdd50132570b20f7d480eb40cac/src/toil/bus.py#L58-L71
import collections import inspect import logging import threading from typing import Any, Dict, Iterator, List, NamedTuple, Type, TypeVar logger = logging.getLogger( __name__ ) class JobUpdatedMessage(NamedTuple): job_id: str result_status: int class MessageBus: def __init__(self) -> None: self.__messages_by_type: Dict[type, List[NamedTuple]] = collections.defaultdict(list)
Apache License 2.0
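A minimal sketch of publishing on the bus with the message type defined above; the job id is a placeholder.

```python
bus = MessageBus()

# The sender's function name and the message are logged at DEBUG level,
# and the message is appended to the per-type list.
bus.put(JobUpdatedMessage(job_id="job-42", result_status=0))
```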
nasa/delta
delta/imagery/disk_folder_cache.py
DiskCache.num_cached
python
def num_cached(self): return len(self._item_list)
Returns ------- int: The number of items currently cached.
https://github.com/nasa/delta/blob/4e4df1d160edc0e8597fc3450f3e13c0ef755a5f/delta/imagery/disk_folder_cache.py#L71-L78
import os class DiskCache: def __init__(self, top_folder: str, limit: int): if limit < 1: raise Exception('Illegal limit passed to Disk Cache: ' + str(limit)) if not os.path.exists(top_folder): try: os.mkdir(top_folder) except Exception as e: raise Exception('Could not create disk cache folder: ' + top_folder) from e self._limit = limit self._folder = top_folder self._item_list = [] self._update_items() def limit(self): return self._limit def folder(self): return self._folder
Apache License 2.0
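A short usage sketch; the cache folder path is a placeholder, and the folder is created on first use if it does not exist.

```python
cache = DiskCache("/tmp/delta_cache", limit=4)

print(cache.folder())       # "/tmp/delta_cache"
print(cache.limit())        # 4
print(cache.num_cached())   # items currently tracked in the cache folder
```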
fraunhoferportugal/tsfel
tsfel/feature_extraction/features.py
calc_std
python
def calc_std(signal): return np.std(signal)
Computes standard deviation (std) of the signal. Feature computational cost: 1 Parameters ---------- signal : nd-array Input from which std is computed Returns ------- float Standard deviation result
https://github.com/fraunhoferportugal/tsfel/blob/4e078301cfbf09f9364c758f72f5fe378f3229c8/tsfel/feature_extraction/features.py#L688-L704
import scipy.signal from tsfel.feature_extraction.features_utils import * @set_domain("domain", "temporal") @set_domain("tag", "inertial") def autocorr(signal): signal = np.array(signal) return float(np.correlate(signal, signal)) @set_domain("domain", "temporal") def calc_centroid(signal, fs): time = compute_time(signal, fs) energy = np.array(signal) ** 2 t_energy = np.dot(np.array(time), np.array(energy)) energy_sum = np.sum(energy) if energy_sum == 0 or t_energy == 0: centroid = 0 else: centroid = t_energy / energy_sum return centroid @set_domain("domain", "temporal") @set_domain("tag", "emg") def negative_turning(signal): diff_sig = np.diff(signal) array_signal = np.arange(len(diff_sig[:-1])) negative_turning_pts = np.where((diff_sig[array_signal] < 0) & (diff_sig[array_signal + 1] > 0))[0] return len(negative_turning_pts) @set_domain("domain", "temporal") @set_domain("tag", "emg") def positive_turning(signal): diff_sig = np.diff(signal) array_signal = np.arange(len(diff_sig[:-1])) positive_turning_pts = np.where((diff_sig[array_signal + 1] < 0) & (diff_sig[array_signal] > 0))[0] return len(positive_turning_pts) @set_domain("domain", "temporal") def mean_abs_diff(signal): return np.mean(np.abs(np.diff(signal))) @set_domain("domain", "temporal") def mean_diff(signal): return np.mean(np.diff(signal)) @set_domain("domain", "temporal") def median_abs_diff(signal): return np.median(np.abs(np.diff(signal))) @set_domain("domain", "temporal") def median_diff(signal): return np.median(np.diff(signal)) @set_domain("domain", "temporal") def distance(signal): diff_sig = np.diff(signal).astype(float) return np.sum([np.sqrt(1 + diff_sig ** 2)]) @set_domain("domain", "temporal") def sum_abs_diff(signal): return np.sum(np.abs(np.diff(signal))) @set_domain("domain", "temporal") @set_domain("tag", ["audio", "emg"]) def zero_cross(signal): return len(np.where(np.diff(np.sign(signal)))[0]) @set_domain("domain", "temporal") @set_domain("tag", "audio") def total_energy(signal, fs): time = compute_time(signal, fs) return np.sum(np.array(signal) ** 2) / (time[-1] - time[0]) @set_domain("domain", "temporal") def slope(signal): t = np.linspace(0, len(signal) - 1, len(signal)) return np.polyfit(t, signal, 1)[0] @set_domain("domain", "temporal") def auc(signal, fs): t = compute_time(signal, fs) return np.sum(0.5 * np.diff(t) * np.abs(np.array(signal[:-1]) + np.array(signal[1:]))) @set_domain("domain", "temporal") @set_domain("tag", "audio") def abs_energy(signal): return np.sum(np.abs(signal) ** 2) @set_domain("domain", "temporal") def pk_pk_distance(signal): return np.abs(np.max(signal) - np.min(signal)) @set_domain("domain", "temporal") @set_domain("tag", "eeg") def entropy(signal, prob='standard'): if prob == 'standard': value, counts = np.unique(signal, return_counts=True) p = counts / counts.sum() elif prob == 'kde': p = kde(signal) elif prob == 'gauss': p = gaussian(signal) if np.sum(p) == 0: return 0.0 p = p[np.where(p != 0)] if np.log2(len(signal)) == 1: return 0.0 elif np.sum(p * np.log2(p)) / np.log2(len(signal)) == 0: return 0.0 else: return - np.sum(p * np.log2(p)) / np.log2(len(signal)) @set_domain("domain", "temporal") def neighbourhood_peaks(signal, n=10): signal = np.array(signal) subsequence = signal[n:-n] peaks = ((subsequence > np.roll(signal, 1)[n:-n]) & (subsequence > np.roll(signal, -1)[n:-n])) for i in range(2, n + 1): peaks &= (subsequence > np.roll(signal, i)[n:-n]) peaks &= (subsequence > np.roll(signal, -i)[n:-n]) return np.sum(peaks) @set_domain("domain", "statistical") def hist(signal, 
nbins=10, r=1): histsig, bin_edges = np.histogram(signal, bins=nbins, range=[-r, r]) return tuple(histsig) @set_domain("domain", "statistical") def interq_range(signal): return np.percentile(signal, 75) - np.percentile(signal, 25) @set_domain("domain", "statistical") def kurtosis(signal): return scipy.stats.kurtosis(signal) @set_domain("domain", "statistical") def skewness(signal): return scipy.stats.skew(signal) @set_domain("domain", "statistical") def calc_max(signal): return np.max(signal) @set_domain("domain", "statistical") def calc_min(signal): return np.min(signal) @set_domain("domain", "statistical") @set_domain("tag", "inertial") def calc_mean(signal): return np.mean(signal) @set_domain("domain", "statistical") def calc_median(signal): return np.median(signal) @set_domain("domain", "statistical") def mean_abs_deviation(signal): return np.mean(np.abs(signal - np.mean(signal, axis=0)), axis=0) @set_domain("domain", "statistical") def median_abs_deviation(signal): return scipy.stats.median_absolute_deviation(signal, scale=1) @set_domain("domain", "statistical") @set_domain("tag", ["inertial", "emg"]) def rms(signal): return np.sqrt(np.sum(np.array(signal) ** 2) / len(signal)) @set_domain("domain", "statistical")
BSD 3-Clause New or Revised License
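A brief usage sketch for the feature functions listed in the context above. It assumes they are importable from tsfel.feature_extraction.features (the module path is inferred, not shown in the excerpt), and the toy signal is made up.

# Hedged usage sketch for the tsfel feature functions above. The import path
# is an assumption inferred from the context; the signal is a made-up example.
import numpy as np
from tsfel.feature_extraction.features import abs_energy, calc_mean, zero_cross

fs = 100.0                              # hypothetical sampling rate in Hz
t = np.arange(0, 1, 1 / fs)
signal = np.sin(2 * np.pi * 5 * t)      # toy 5 Hz sine wave

print(calc_mean(signal))    # mean of the samples (close to 0 here)
print(zero_cross(signal))   # number of sign changes along the signal
print(abs_energy(signal))   # sum of squared sample values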
jfilak/sapcli
sap/adt/core.py
Connection._retrieve
python
def _retrieve(self, session, method, url, params=None, headers=None, body=None):
    req = requests.Request(method.upper(), url, params=params, data=body, headers=headers)
    req = session.prepare_request(req)

    mod_log().info('Executing %s %s', method, url)

    try:
        res = session.send(req, timeout=self._timeout)
    except requests.exceptions.ConnectTimeout as ex:
        raise TimedOutRequestError(req, self._timeout) from ex

    mod_log().debug('Response %s %s:\n++++\n%s\n++++', method, url, res.text)

    return (req, res)
A helper method for easier testing.
https://github.com/jfilak/sapcli/blob/072ab1b8d58ea58e4f4bd67fc4f349a6b0b52fac/sap/adt/core.py#L168-L183
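Since _retrieve is a private helper, a natural way to exercise it is directly from a test with a prepared requests.Session. The host, client, credentials and endpoint below are hypothetical placeholders; a reachable ABAP system would be required for the call to succeed.

# Hedged test-style sketch for Connection._retrieve; host, client, credentials
# and the discovery URL below are hypothetical placeholders.
import requests
from sap.adt.core import Connection

conn = Connection('sap.example.com', '100', 'DEVELOPER', 'secret', ssl=False)

with requests.Session() as session:
    req, res = conn._retrieve(
        session,
        'GET',
        'http://sap.example.com:80/sap/bc/adt/discovery',
        headers={'Accept': 'application/atomsvc+xml'},
    )
    print(res.status_code)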
import os import xml.sax from xml.sax.handler import ContentHandler import requests from requests.auth import HTTPBasicAuth from sap import get_logger, config_get from sap.rest.connection import setup_keepalive from sap.adt.errors import new_adt_error_from_xml from sap.rest.errors import ( HTTPRequestError, UnexpectedResponseContent, UnauthorizedError, TimedOutRequestError ) def mod_log(): return get_logger() class _DiscoveryHandler(ContentHandler): def __init__(self): super().__init__() self.result = {} self._collection = None self._mimetypes = None self._accept = None def startElement(self, name, attrs): if name == 'app:collection': self._collection = attrs['href'] self._mimetypes = [] elif name == 'app:accept': self._accept = '' elif name == 'adtcomp:templateLink': if 'type' in attrs: self.result[attrs['template']] = [attrs['type']] def characters(self, content): if self._accept is None: return self._accept += content def endElement(self, name): if name == 'app:collection': if self._mimetypes: self.result[self._collection] = self._mimetypes self._collection = None self._mimetypes = None elif name == 'app:accept': self._mimetypes.append(self._accept) self._accept = None def _get_collection_accepts(discovery_xml): xml_handler = _DiscoveryHandler() xml.sax.parseString(discovery_xml, xml_handler) return xml_handler.result class Connection: def __init__(self, host, client, user, password, port=None, ssl=True, verify=True): setup_keepalive() if ssl: protocol = 'https' if port is None: port = '443' else: protocol = 'http' if port is None: port = '80' self._ssl_verify = verify self._adt_uri = 'sap/bc/adt' self._base_url = '{protocol}://{host}:{port}/{adt_uri}'.format( protocol=protocol, host=host, port=port, adt_uri=self._adt_uri) self._query_args = 'sap-client={client}&saml2=disabled'.format( client=client) self._user = user self._auth = HTTPBasicAuth(user, password) self._session = None self._collection_types = None self._timeout = config_get('http_timeout') @property def user(self): return self._user @property def uri(self): return self._adt_uri def _build_adt_url(self, adt_uri): return '{base_url}/{adt_uri}?{query_args}'.format( base_url=self._base_url, adt_uri=adt_uri, query_args=self._query_args) def _handle_http_error(self, req, res): if res.headers['content-type'] == 'application/xml': error = new_adt_error_from_xml(res.text) if error is not None: raise error if res.status_code == 401: raise UnauthorizedError(req, res, self._user) raise HTTPRequestError(req, res)
Apache License 2.0
amundsen-io/amundsen
databuilder/databuilder/extractor/dashboard/tableau/tableau_dashboard_utils.py
TableauDashboardUtils.sanitize_workbook_name
python
def sanitize_workbook_name(workbook_name: str) -> str:
    return re.sub(r'(\/|\')', '', workbook_name)
Sanitizes a given string so that it can safely be used as a workbook ID.

Mimics the current behavior of sanitize_table_name for now, but the overlap is purely
coincidental. As more breaking characters/patterns are found, each method should be
updated to reflect the specifics.

Sanitization behaves as follows:
- all forward slash and single quote characters are deleted
https://github.com/amundsen-io/amundsen/blob/dfe0b4f50f040a95cf652221126db6d3dac4c443/databuilder/databuilder/extractor/dashboard/tableau/tableau_dashboard_utils.py#L57-L65
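A minimal usage sketch; the workbook name is a made-up example chosen to show both characters the regex strips.

# Minimal usage sketch; the workbook name is made up to show both stripped characters.
from databuilder.extractor.dashboard.tableau.tableau_dashboard_utils import TableauDashboardUtils

name = "Sales/Q1 'Draft'"
print(TableauDashboardUtils.sanitize_workbook_name(name))  # -> "SalesQ1 Draft"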
import json import re from typing import ( Any, Dict, Iterator, Optional, ) import requests from pyhocon import ConfigTree import databuilder.extractor.dashboard.tableau.tableau_dashboard_constants as const from databuilder.extractor.base_extractor import Extractor from databuilder.extractor.restapi.rest_api_extractor import STATIC_RECORD_DICT class TableauDashboardUtils: @staticmethod def sanitize_schema_name(schema_name: str) -> str: return re.sub(r' ', '_', re.sub(r'\.', '_', re.sub(r'(\[|\]|\(|\)|\-|\&|\?)', '', schema_name))) @staticmethod def sanitize_database_name(database_name: str) -> str: return re.sub(r'-', '', database_name) @staticmethod def sanitize_table_name(table_name: str) -> str: return re.sub(r'(\/|\')', '', table_name) @staticmethod
Apache License 2.0
niconoe/pyinaturalist
examples/observation_photo_metadata.py
get_observation_photo_metadata
python
def get_observation_photo_metadata(observation_id, access_token):
    print(f'Fetching observation {observation_id}')
    obs = get_observation(observation_id)
    photo_ids = [photo['id'] for photo in obs.get('photos', [])]
    photo_urls = [f'{PHOTO_INFO_BASE_URL}/{id}' for id in photo_ids]
    print(f'{len(photo_urls)} photo URL(s) found')
    return [get_photo_metadata(url, access_token) for url in photo_urls]
Attempt to scrape metadata from all photo info pages associated with an observation
https://github.com/niconoe/pyinaturalist/blob/17e2760ba3ca38102d4498fe7d0c92914fb98d2d/examples/observation_photo_metadata.py#L46-L53
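A sketch of driving the example end to end, intended to run inside the example module where get_observation_photo_metadata is defined. The credentials, app keys and observation ID are hypothetical, the keyword names passed to get_access_token are an assumption about its signature, and a network connection is required.

# Hedged driver sketch; credentials, app keys and the observation ID are
# hypothetical, and the get_access_token keyword names are an assumption.
from pprint import pprint
from pyinaturalist import get_access_token

access_token = get_access_token(
    username='my_username',
    password='my_password',
    app_id='my_app_id',
    app_secret='my_app_secret',
)
for photo_metadata in get_observation_photo_metadata(45524803, access_token):
    pprint(photo_metadata)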
from pprint import pprint import requests from bs4 import BeautifulSoup from pyinaturalist import get_access_token, get_observation IGNORE_ATTRIBUTES = ['Associated observations', 'Sizes'] PHOTO_INFO_BASE_URL = 'https://www.inaturalist.org/photos' def get_photo_metadata(photo_url, access_token): print(f'Fetching {photo_url}') photo_page = requests.get(photo_url, headers={'Authorization': f'Bearer {access_token}'}) soup = BeautifulSoup(photo_page.content, 'html.parser') table = soup.find(id='wrapper').find_all('table')[1] metadata = {} for row in table.find_all('tr'): key = row.find('th').text.strip() value = row.find('td').text.strip() if value and key not in IGNORE_ATTRIBUTES: metadata[key] = value return metadata
MIT License
glotzerlab/signac
signac/core/jsondict.py
buffer_reads_writes
python
def buffer_reads_writes(buffer_size=DEFAULT_BUFFER_SIZE, force_write=False):
    global _BUFFERED_MODE
    global _BUFFERED_MODE_FORCE_WRITE
    global _BUFFER_SIZE
    global _BUFFER_LOAD
    assert _BUFFERED_MODE >= 0

    if (
        not isinstance(buffer_size, int)
        or buffer_size is True
        or buffer_size is False
    ):
        raise TypeError("The buffer size must be an integer!")

    if _BUFFERED_MODE_FORCE_WRITE is not None and (
        force_write and not _BUFFERED_MODE_FORCE_WRITE
    ):
        raise BufferException(
            "Unable to enter buffered mode with force write enabled, because "
            "we are already in buffered mode with force write disabled."
        )
    if _BUFFER_SIZE is not None and _BUFFER_SIZE != buffer_size:
        raise BufferException("Buffer size already set, unable to change its size!")

    _BUFFER_SIZE = buffer_size
    _BUFFERED_MODE_FORCE_WRITE = force_write
    _BUFFERED_MODE += 1
    try:
        yield
    finally:
        _BUFFERED_MODE -= 1
        if _BUFFERED_MODE == 0:
            try:
                flush_all()
            finally:
                assert not _JSONDICT_BUFFER
                assert not _JSONDICT_HASHES
                assert not _JSONDICT_META
                _BUFFER_SIZE = None
                _BUFFER_LOAD = 0
                _BUFFERED_MODE_FORCE_WRITE = None
Enter a global buffer mode for all JSONDict instances.

All future write operations are written to the buffer; read operations are
performed from the buffer whenever possible.

All write operations are deferred until the flush_all() function is called,
the buffer overflows, or the buffer mode is exited.

This context may be entered multiple times; however, the buffer size can only
be set *once*. Any subsequent specifications of the buffer size are ignored.

:param buffer_size:
    Specify the maximum size of the read/write buffer. Defaults to
    DEFAULT_BUFFER_SIZE. A negative number indicates to not restrict
    the buffer size.
:type buffer_size: int
https://github.com/glotzerlab/signac/blob/a03a6d05d0f994ac9c4d5353533883e49cf6b386/signac/core/jsondict.py#L178-L240
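buffer_reads_writes is a context manager (the context below shows it is wrapped with @contextmanager), so a sketch of entering buffered mode looks like the following; the JSONDict operations themselves are elided because the class definition is not part of this excerpt.

# Hedged usage sketch: writes made inside the block are buffered and flushed
# when the outermost buffered context exits. JSONDict operations are elided
# because the class definition is not shown in this excerpt.
from signac.core.jsondict import buffer_reads_writes

with buffer_reads_writes(buffer_size=2 * 1024 * 1024):
    # ... read from / write to JSONDict instances here ...
    pass   # all pending writes are flushed via flush_all() on exit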
import errno import hashlib import logging import os import sys import uuid from collections.abc import Mapping from contextlib import contextmanager from copy import copy from tempfile import mkstemp from deprecation import deprecated from ..version import __version__ from . import json from .attrdict import SyncedAttrDict from .errors import Error logger = logging.getLogger(__name__) DEFAULT_BUFFER_SIZE = 32 * 2 ** 20 _BUFFERED_MODE = 0 _BUFFERED_MODE_FORCE_WRITE = None _BUFFER_SIZE = None _BUFFER_LOAD = 0 _JSONDICT_BUFFER = {} _JSONDICT_HASHES = {} _JSONDICT_META = {} class BufferException(Error): pass class BufferedFileError(BufferException): def __init__(self, files): self.files = files def __str__(self): return f"{type(self).__name__}({self.files})" def _hash(blob): if blob is not None: m = hashlib.md5() m.update(blob) return m.hexdigest() def _get_file_metadata(filename): try: return os.path.getsize(filename), os.path.getmtime(filename) except OSError as error: if error.errno != errno.ENOENT: raise def _store_in_buffer(filename, blob, store_hash=False): global _BUFFER_LOAD assert _BUFFERED_MODE > 0 blob_size = sys.getsizeof(blob) if _BUFFER_SIZE > 0: if blob_size > _BUFFER_SIZE: return False elif blob_size + _BUFFER_LOAD > _BUFFER_SIZE: logger.debug("Buffer overflow, flushing...") flush_all() _JSONDICT_BUFFER[filename] = blob _BUFFER_LOAD += blob_size if store_hash: if not _BUFFERED_MODE_FORCE_WRITE: _JSONDICT_META[filename] = _get_file_metadata(filename) _JSONDICT_HASHES[filename] = _hash(blob) return True @deprecated( deprecated_in="1.7", removed_in="2.0", current_version=__version__, ) def flush_all(): global _BUFFER_LOAD logger.debug("Flushing buffer...") issues = {} while _JSONDICT_BUFFER: filename, blob = _JSONDICT_BUFFER.popitem() if not _BUFFERED_MODE_FORCE_WRITE: meta = _JSONDICT_META.pop(filename) if _hash(blob) != _JSONDICT_HASHES.pop(filename): try: if not _BUFFERED_MODE_FORCE_WRITE: if _get_file_metadata(filename) != meta: issues[ filename ] = "File appears to have been externally modified." continue try: fd_tmp, fn_tmp = mkstemp( dir=os.path.dirname(filename), suffix=".json" ) with os.fdopen(fd_tmp, "wb") as file: file.write(blob) except OSError: os.remove(fn_tmp) raise else: os.replace(fn_tmp, filename) except OSError as error: logger.error(str(error)) issues[filename] = error if issues: raise BufferedFileError(issues) _BUFFER_LOAD = 0 @deprecated( deprecated_in="1.7", removed_in="2.0", current_version=__version__, ) def get_buffer_size(): return _BUFFER_SIZE @deprecated( deprecated_in="1.7", removed_in="2.0", current_version=__version__, ) def get_buffer_load(): return _BUFFER_LOAD @deprecated( deprecated_in="1.7", removed_in="2.0", current_version=__version__, ) def in_buffered_mode(): return _BUFFERED_MODE > 0 @deprecated( deprecated_in="1.7", removed_in="2.0", current_version=__version__, ) @contextmanager
BSD 3-Clause New or Revised License
google/citest
citest/json_contract/value_observation_verifier.py
ValueObservationVerifierBuilder.export_to_json_snapshot
python
def export_to_json_snapshot(self, snapshot, entity):
    snapshot.edge_builder.make_control(entity, 'Strict', self.__strict)
    super(ValueObservationVerifierBuilder, self).export_to_json_snapshot(
        snapshot, entity)
Implements JsonSnapshotableEntity interface.
https://github.com/google/citest/blob/eda9171eed35b82ce6f048229bebd898edc25369/citest/json_contract/value_observation_verifier.py#L75-L79
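export_to_json_snapshot is normally invoked by citest's snapshotting machinery rather than called directly, so the sketch below only constructs the builder whose 'Strict' control edge the method records; the snapshot and entity plumbing is omitted because it is not shown in this excerpt.

# Hedged sketch: construct a strict builder; when it is snapshotted,
# export_to_json_snapshot() records the 'Strict' flag as a control edge.
from citest.json_contract.value_observation_verifier import ValueObservationVerifierBuilder

builder = ValueObservationVerifierBuilder('Check observed values', strict=True)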
import logging from citest.json_predicate import ( CardinalityPredicate, PathPredicate, ValuePredicate, AND, CONTAINS, EQUIVALENT, DICT_MATCHES, LIST_MATCHES) from . import observation_predicate as op from . import observation_verifier as ov from . import observation_failure as of class ValueObservationVerifierBuilder(ov.ObservationVerifierBuilder): def __init__(self, title, strict=False): super(ValueObservationVerifierBuilder, self).__init__(title) self.__strict = strict def __eq__(self, builder): return (super(ValueObservationVerifierBuilder, self).__eq__(builder) and self.__strict == builder.__strict) def _do_build_generate(self, dnf_verifiers): return ov.ObservationVerifier( title=self.title, dnf_verifiers=dnf_verifiers)
Apache License 2.0
oseledets/ttpy
tt/core/tools.py
delta
python
def delta(n, d=None, center=0):
    if isinstance(n, six.integer_types):
        n = [n]
    if d is None:
        n0 = _np.asanyarray(n, dtype=_np.int32)
    else:
        n0 = _np.array(n * d, dtype=_np.int32)
    d = n0.size

    if center < 0:
        cind = [0] * d
    else:
        cind = []
        for i in xrange(d):
            cind.append(center % n0[i])
            center //= n0[i]
        if center > 0:
            cind = [0] * d

    cr = []
    for i in xrange(d):
        cur_core = _np.zeros((1, n0[i], 1))
        cur_core[0, cind[i], 0] = 1
        cr.append(cur_core)
    return _vector.vector.from_list(cr)
Create TT-vector for delta-function :math:`\\delta(x - x_0)`.
https://github.com/oseledets/ttpy/blob/c43f49ecbb34ee997a8bad9e459e051304bb8a50/tt/core/tools.py#L712-L736
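A usage sketch for delta. It assumes the function is re-exported at the package level as tt.delta (it is defined in tt.core.tools), together with tt.sum from the same module.

# Hedged usage sketch; the top-level re-exports tt.delta / tt.sum are assumed
# (both functions are defined in tt.core.tools).
import tt

e = tt.delta(2, 4, center=5)   # delta on a 2x2x2x2 grid, unit at flat index 5
print(e.r)                     # all TT-ranks are 1 for a delta vector
print(tt.sum(e))               # 1.0 -- exactly one entry of the tensor is set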
from __future__ import print_function, absolute_import, division import six from six.moves import xrange import numpy as _np import math as _math import copy as _cp from . import tt_f90 as _tt_f90 from . import vector as _vector from . import matrix as _matrix from .utils import ind2sub as _ind2sub from .utils import gcd as _gcd from .utils import my_chop2 as _my_chop2 def matvec(a, b, compression=False): acrs = _vector.vector.to_list(a.tt) bcrs = _vector.vector.to_list(b) ccrs = [] d = b.d def get_core(i): acr = _np.reshape( acrs[i], (a.tt.r[i], a.n[i], a.m[i], a.tt.r[ i + 1]), order='F') acr = acr.transpose([3, 0, 1, 2]) bcr = bcrs[i].transpose([1, 0, 2]) ccr = _np.tensordot(acr, bcr, axes=(3, 0)) ccr = ccr.transpose([1, 3, 2, 0, 4]).reshape( (a.tt.r[i] * b.r[i], a.n[i], a.tt.r[i + 1] * b.r[i + 1]), order='F') return ccr if compression: nrm = _np.array([[1.0]]) v = _np.array([[1.0]]) for i in xrange(d): ccr = get_core(i) ccr = _np.tensordot(v, ccr, (1, 0)) rl, n, rr = ccr.shape if i < d - 1: u, s, v = _np.linalg.svd( ccr.reshape( (rl * n, rr), order='F'), full_matrices=False) newr = min(rl * n, rr) ccr = u[:, :newr].reshape((rl, n, newr), order='F') v = _np.dot(_np.diag(s[:newr]), v[:newr, :]) nrm = _np.tensordot(nrm, ccr, (0, 0)) nrm = _np.tensordot(nrm, _np.conj(ccr), (0, 0)) nrm = nrm.diagonal(axis1=0, axis2=2) nrm = nrm.sum(axis=2) if nrm.size > 1: raise Exception('too many numbers in norm') nrm = _np.sqrt(_np.linalg.norm(nrm)) compression = compression * nrm / _np.sqrt(d - 1) v = _np.array([[1.0]]) for i in xrange(d): ccr = get_core(i) rl, n, rr = ccr.shape if compression: ccr = _np.tensordot(v, ccr, (1, 0)) if i < d - 1: rl = v.shape[0] u, s, v = _np.linalg.svd( ccr.reshape( (rl * n, rr), order='F'), full_matrices=False) ss = _np.cumsum(s[::-1])[::-1] newr = max(min([r for r in range(ss.size) if ss[ r] <= compression] + [min(rl * n, rr)]), 1) ccr = u[:, :newr].reshape((rl, n, newr), order='F') v = _np.dot(_np.diag(s[:newr]), v[:newr, :]) ccrs.append(ccr) result = _vector.vector.from_list(ccrs) if compression: print("Norm actual:", result.norm(), " mean rank:", result.rmean()) return result def col(a, k): if hasattr(a, '__col__'): return a.__col__(k) else: raise ValueError('col is waiting for a TT-vector or a TT-matrix') def kron(a, b): if hasattr(a, '__kron__'): return a.__kron__(b) if a is None: return b else: raise ValueError( 'Kron is waiting for two TT-vectors or two TT-matrices') def dot(a, b): if hasattr(a, '__dot__'): return a.__dot__(b) if a is None: return b else: raise ValueError( 'Dot is waiting for two TT-vectors or two TT- matrices') def diag(a): if hasattr(a, '__diag__'): return a.__diag__() else: raise ValueError('Can be called only on TT-vector or a TT-matrix') def mkron(a, *args): if not isinstance(a, list): a = [a] a = list(a) for i in args: if isinstance(i, list): a.extend(i) else: a.append(i) c = _vector.vector() c.d = 0 c.n = _np.array([], dtype=_np.int32) c.r = _np.array([], dtype=_np.int32) c.core = [] for t in a: thetensor = t.tt if isinstance(t, _matrix.matrix) else t c.d += thetensor.d c.n = _np.concatenate((c.n, thetensor.n)) c.r = _np.concatenate((c.r[:-1], thetensor.r)) c.core = _np.concatenate((c.core, thetensor.core)) c.get_ps() return c def zkron(ttA, ttB): Al = _matrix.matrix.to_list(ttA) Bl = _matrix.matrix.to_list(ttB) Hl = [_np.kron(B, A) for (A, B) in zip(Al, Bl)] return _matrix.matrix.from_list(Hl) def zkronv(ttA, ttB): Al = _vector.vector.to_list(ttA) Bl = _vector.vector.to_list(ttB) Hl = [_np.kron(B, A) for (A, B) in zip(Al, Bl)] return 
_vector.vector.from_list(Hl) def zmeshgrid(d): lin = xfun(2, d) one = ones(2, d) xx = zkronv(lin, one) yy = zkronv(one, lin) return xx, yy def zaffine(c0, c1, c2, d): xx, yy = zmeshgrid(d) Hx, Hy = _vector.vector.to_list(xx), _vector.vector.to_list(yy) Hs = _cp.deepcopy(Hx) Hs[0][:, :, 0] = c1 * Hx[0][:, :, 0] + c2 * Hy[0][:, :, 0] Hs[-1][1, :, :] = c1 * Hx[-1][1, :, :] + (c0 + c2 * Hy[-1][1, :, :]) d = len(Hs) for k in range(1, d - 1): Hs[k][1, :, 0] = c1 * Hx[k][1, :, 0] + c2 * Hy[k][1, :, 0] return _vector.vector.from_list(Hs) def concatenate(*args): tmp = _np.array([[1] + [0] * (len(args) - 1)]) result = kron(_vector.vector(tmp), args[0]) for i in range(1, len(args)): result += kron(_vector.vector(_np.array([[0] * i + [1] + [0] * (len(args) - i - 1)])), args[i]) return result def sum(a, axis=-1): d = a.d crs = _vector.vector.to_list(a.tt if isinstance(a, _matrix.matrix) else a) if axis < 0: axis = range(a.d) elif isinstance(axis, int): axis = [axis] axis = list(axis)[::-1] for ax in axis: crs[ax] = _np.sum(crs[ax], axis=1) rleft, rright = crs[ax].shape if (rleft >= rright or rleft < rright and ax + 1 >= d) and ax > 0: crs[ax - 1] = _np.tensordot(crs[ax - 1], crs[ax], axes=(2, 0)) elif ax + 1 < d: crs[ax + 1] = _np.tensordot(crs[ax], crs[ax + 1], axes=(1, 0)) else: return _np.sum(crs[ax]) crs.pop(ax) d -= 1 return _vector.vector.from_list(crs) def ones(n, d=None): c = _vector.vector() if d is None: c.n = _np.array(n, dtype=_np.int32) c.d = c.n.size else: c.n = _np.array([n] * d, dtype=_np.int32) c.d = d c.r = _np.ones((c.d + 1,), dtype=_np.int32) c.get_ps() c.core = _np.ones(c.ps[c.d] - 1) return c def rand(n, d=None, r=2, samplefunc=_np.random.randn): n0 = _np.asanyarray(n, dtype=_np.int32) r0 = _np.asanyarray(r, dtype=_np.int32) if d is None: d = n.size if n0.size is 1: n0 = _np.ones((d,), dtype=_np.int32) * n0 if r0.size is 1: r0 = _np.ones((d + 1,), dtype=_np.int32) * r0 r0[0] = 1 r0[d] = 1 c = _vector.vector() c.d = d c.n = n0 c.r = r0 c.get_ps() c.core = samplefunc(c.ps[d] - 1) return c def eye(n, d=None): c = _matrix.matrix() c.tt = _vector.vector() if d is None: n0 = _np.asanyarray(n, dtype=_np.int32) c.tt.d = n0.size else: n0 = _np.asanyarray([n] * d, dtype=_np.int32) c.tt.d = d c.n = n0.copy() c.m = n0.copy() c.tt.n = (c.n) * (c.m) c.tt.r = _np.ones((c.tt.d + 1,), dtype=_np.int32) c.tt.get_ps() c.tt.alloc_core() for i in xrange(c.tt.d): c.tt.core[ c.tt.ps[i] - 1:c.tt.ps[ i + 1] - 1] = _np.eye( c.n[i]).flatten() return c def Toeplitz(x, d=None, D=None, kind='F'): def check_kinds(D, kind): if D % len(kind) == 0: kind.extend(kind * (D // len(kind) - 1)) if len(kind) != D: raise ValueError( "Must give proper amount of _matrix kinds (one or D, for example)") kind = list(kind) if not set(kind).issubset(['F', 'C', 'L', 'U']): raise ValueError("Toeplitz _matrix kind must be one of F, C, L, U.") if d is None: if D is None: D = len(kind) if x.d % D: raise ValueError( "x.d must be divisible by D when d is not specified!") if len(kind) == 1: d = _np.array([x.d // D - (1 if kind[0] == 'F' else 0)] * D, dtype=_np.int32) kind = kind * D else: check_kinds(D, kind) if set(kind).issubset(['F']): d = _np.array([x.d // D - 1] * D, dtype=_np.int32) elif set(kind).issubset(['C', 'L', 'U']): d = _np.array([x.d // D] * D, dtype=_np.int32) else: raise ValueError( "Only similar _matrix kinds (only F or only C, L and U) are accepted when d is not specified!") elif d is not None: d = _np.asarray(d, dtype=_np.int32).flatten() if D is None: D = d.size elif d.size == 1: d = _np.array([d[0]] * D, 
dtype=_np.int32) if D != d.size: raise ValueError("D must be equal to len(d)") check_kinds(D, kind) if _np.sum(d) + _np.sum([(1 if knd == 'F' else 0) for knd in kind]) != x.d: raise ValueError( "Dimensions inconsistency: x.d != d_1 + d_2 + ... + d_D") I = [[1, 0], [0, 1]] J = [[0, 1], [0, 0]] JT = [[0, 0], [1, 0]] H = [[0, 1], [1, 0]] S = _np.array([[[0], [1]], [[1], [0]]]).transpose() P = _np.zeros((2, 2, 2, 2)) P[:, :, 0, 0] = I P[:, :, 1, 0] = H P[:, :, 0, 1] = H P[:, :, 1, 1] = I P = _np.transpose(P) Q = _np.zeros((2, 2, 2, 2)) Q[:, :, 0, 0] = I Q[:, :, 1, 0] = JT Q[:, :, 0, 1] = JT Q = _np.transpose(Q) R = _np.zeros((2, 2, 2, 2)) R[:, :, 1, 0] = J R[:, :, 0, 1] = J R[:, :, 1, 1] = I R = _np.transpose(R) W = _np.zeros([2] * 5) W[0, :, :, 0, 0] = W[1, :, :, 1, 1] = I W[0, :, :, 1, 0] = W[0, :, :, 0, 1] = JT W[1, :, :, 1, 0] = W[1, :, :, 0, 1] = J W = _np.transpose(W) V = _np.zeros((2, 2, 2, 2)) V[0, :, :, 0] = I V[0, :, :, 1] = JT V[1, :, :, 1] = J V = _np.transpose(V) crs = [] xcrs = _vector.vector.to_list(x) dp = 0 for j in xrange(D): currd = d[j] xcr = xcrs[dp] cr = _np.tensordot(V, xcr, (0, 1)) cr = cr.transpose(3, 0, 1, 2, 4) cr = cr.reshape((x.r[dp], 2, 2, 2 * x.r[dp + 1]), order='F') dp += 1 crs.append(cr) for i in xrange(1, currd - 1): xcr = xcrs[dp] cr = _np.tensordot(W, xcr, (1, 1)) cr = cr.transpose([0, 4, 1, 2, 3, 5]) cr = cr.reshape((2 * x.r[dp], 2, 2, 2 * x.r[dp + 1]), order='F') dp += 1 crs.append(cr) if kind[j] == 'F': xcr = xcrs[dp] cr = _np.tensordot(W, xcr, (1, 1)).transpose([0, 4, 1, 2, 3, 5]) cr = cr.reshape((2 * x.r[dp], 2, 2, 2 * x.r[dp + 1]), order='F') dp += 1 xcr = xcrs[dp] tmp = _np.tensordot(S, xcr, (1, 1)) tmp = tmp.reshape((2 * x.r[dp], x.r[dp + 1]), order='F') cr = _np.tensordot(cr, tmp, (3, 0)) dp += 1 crs.append(cr) else: dotcore = None if kind[j] == 'C': dotcore = P elif kind[j] == 'L': dotcore = Q elif kind[j] == 'U': dotcore = R xcr = xcrs[dp] cr = _np.tensordot(dotcore, xcr, (1, 1)) cr = cr.transpose([0, 3, 1, 2, 4]) cr = cr.reshape((2 * x.r[dp], 2, 2, x.r[dp + 1]), order='F') dp += 1 crs.append(cr) return _matrix.matrix.from_list(crs) def qlaplace_dd(d): res = _matrix.matrix() d0 = d[::-1] D = len(d0) I = _np.eye(2) J = _np.array([[0, 1], [0, 0]]) cr = [] if D is 1: for k in xrange(1, d0[0] + 1): if k is 1: cur_core = _np.zeros((1, 2, 2, 3)) cur_core[:, :, :, 0] = 2 * I - J - J.T cur_core[:, :, :, 1] = -J cur_core[:, :, :, 2] = -J.T elif k is d0[0]: cur_core = _np.zeros((3, 2, 2, 1)) cur_core[0, :, :, 0] = I cur_core[1, :, :, 0] = J.T cur_core[2, :, :, 0] = J else: cur_core = _np.zeros((3, 2, 2, 3)) cur_core[0, :, :, 0] = I cur_core[1, :, :, 1] = J cur_core[2, :, :, 2] = J.T cur_core[1, :, :, 0] = J.T cur_core[2, :, :, 0] = J cr.append(cur_core) else: for k in xrange(D): for kappa in xrange(1, d0[k] + 1): if kappa is 1: if k is 0: cur_core = _np.zeros((1, 2, 2, 4)) cur_core[:, :, :, 0] = 2 * I - J - J.T cur_core[:, :, :, 1] = -J cur_core[:, :, :, 2] = -J.T cur_core[:, :, :, 3] = I elif k is D - 1: cur_core = _np.zeros((2, 2, 2, 3)) cur_core[0, :, :, 0] = 2 * I - J - J.T cur_core[0, :, :, 1] = -J cur_core[0, :, :, 2] = -J.T cur_core[1, :, :, 0] = I else: cur_core = _np.zeros((2, 2, 2, 4)) cur_core[0, :, :, 0] = 2 * I - J - J.T cur_core[0, :, :, 1] = -J cur_core[0, :, :, 2] = -J.T cur_core[0, :, :, 3] = I cur_core[1, :, :, 0] = I elif kappa is d0[k]: if k is D - 1: cur_core = _np.zeros((3, 2, 2, 1)) cur_core[0, :, :, 0] = I cur_core[1, :, :, 0] = J.T cur_core[2, :, :, 0] = J else: cur_core = _np.zeros((4, 2, 2, 2)) cur_core[3, :, :, 0] = I cur_core[0, 
:, :, 1] = I cur_core[1, :, :, 1] = J.T cur_core[2, :, :, 1] = J else: if k is D - 1: cur_core = _np.zeros((3, 2, 2, 3)) cur_core[0, :, :, 0] = I cur_core[1, :, :, 1] = J cur_core[2, :, :, 2] = J.T cur_core[1, :, :, 0] = J.T cur_core[2, :, :, 0] = J else: cur_core = _np.zeros((4, 2, 2, 4)) cur_core[0, :, :, 0] = I cur_core[1, :, :, 1] = J cur_core[2, :, :, 2] = J.T cur_core[1, :, :, 0] = J.T cur_core[2, :, :, 0] = J cur_core[3, :, :, 3] = I cr.append(cur_core) return _matrix.matrix.from_list(cr) def xfun(n, d=None): if isinstance(n, six.integer_types): n = [n] if d is None: n0 = _np.asanyarray(n, dtype=_np.int32) else: n0 = _np.array(n * d, dtype=_np.int32) d = n0.size if d == 1: return _vector.vector.from_list( [_np.reshape(_np.arange(n0[0]), (1, n0[0], 1))]) cr = [] cur_core = _np.ones((1, n0[0], 2)) cur_core[0, :, 0] = _np.arange(n0[0]) cr.append(cur_core) ni = float(n0[0]) for i in xrange(1, d - 1): cur_core = _np.zeros((2, n0[i], 2)) for j in xrange(n0[i]): cur_core[:, j, :] = _np.eye(2) cur_core[1, :, 0] = ni * _np.arange(n0[i]) ni *= n0[i] cr.append(cur_core) cur_core = _np.ones((2, n0[d - 1], 1)) cur_core[1, :, 0] = ni * _np.arange(n0[d - 1]) cr.append(cur_core) return _vector.vector.from_list(cr) def linspace(n, d=None, a=0.0, b=1.0, right=True, left=True): if isinstance(n, six.integer_types): n = [n] if d is None: n0 = _np.asanyarray(n, dtype=_np.int32) else: n0 = _np.array(n * d, dtype=_np.int32) d = n0.size t = xfun(n0) e = ones(n0) N = _np.prod(n0) if left and right: h = (b - a) * 1.0 / (N - 1) res = a * e + t * h elif left and not right: h = (b - a) * 1.0 / N res = a * e + t * h elif right and not left: h = (b - a) * 1.0 / N res = a * e + (t + e) * h else: h = (b - a) * 1.0 / (N - 1) res = a * e + (t + e) * h return res.round(1e-13) def sin(d, alpha=1.0, phase=0.0): cr = [] cur_core = _np.zeros([1, 2, 2], dtype=_np.float) cur_core[0, 0, :] = [_math.cos(phase), _math.sin(phase)] cur_core[0, 1, :] = [_math.cos(alpha + phase), _math.sin(alpha + phase)] cr.append(cur_core) for i in xrange(1, d - 1): cur_core = _np.zeros([2, 2, 2], dtype=_np.float) cur_core[0, 0, :] = [1.0, 0.0] cur_core[1, 0, :] = [0.0, 1.0] cur_core[ 0, 1, :] = [ _math.cos( alpha * 2 ** i), _math.sin( alpha * 2 ** i)] cur_core[1, 1, :] = [-_math.sin(alpha * 2 ** i), _math.cos(alpha * 2 ** i)] cr.append(cur_core) cur_core = _np.zeros([2, 2, 1], dtype=_np.float) cur_core[0, :, 0] = [0.0, _math.sin(alpha * 2 ** (d - 1))] cur_core[1, :, 0] = [1.0, _math.cos(alpha * 2 ** (d - 1))] cr.append(cur_core) return _vector.vector.from_list(cr) def cos(d, alpha=1.0, phase=0.0): return sin(d, alpha, phase + _math.pi * 0.5)
MIT License
jhuggins/viabel
viabel/approximations.py
ApproximationFamily.dim
python
def dim(self):
    return self._dim
Dimension of the space the distribution is defined on
https://github.com/jhuggins/viabel/blob/a8f67b098d1d3ece0c16dd7607b28820d882f358/viabel/approximations.py#L174-L176
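dim simply reports the dimension the family was constructed with; a small sketch follows, assuming MFGaussian takes the dimension as its first constructor argument (its signature is not shown in this excerpt).

# Hedged sketch; MFGaussian's constructor signature is assumed to be (dim).
from viabel.approximations import MFGaussian

approx = MFGaussian(3)
print(approx.dim)   # -> 3, the dimension of the space the family is defined on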
from abc import ABC, abstractmethod import autograd.numpy as np import autograd.numpy.random as npr import autograd.scipy.stats.multivariate_normal as mvn import autograd.scipy.stats.t as t_dist from autograd import elementwise_grad from autograd.scipy.linalg import sqrtm from paragami import ( FlattenFunctionInput, NumericArrayPattern, NumericVectorPattern, PatternDict, PSDSymmetricMatrixPattern) from ._distributions import multivariate_t_logpdf __all__ = [ 'ApproximationFamily', 'MFGaussian', 'MFStudentT', 'MultivariateT', 'NeuralNet', 'NVPFlow' ] class ApproximationFamily(ABC): def __init__(self, dim, var_param_dim, supports_entropy, supports_kl): self._dim = dim self._var_param_dim = var_param_dim self._supports_entropy = supports_entropy self._supports_kl = supports_kl def init_param(self): return np.zeros(self.var_param_dim) @abstractmethod def sample(self, var_param, n_samples, seed=None): def entropy(self, var_param): if self.supports_entropy: return self._entropy(var_param) raise NotImplementedError() def _entropy(self, var_param): raise NotImplementedError() @property def supports_entropy(self): return self._supports_entropy def kl(self, var_param0, var_param1): if self.supports_kl: return self._kl(var_param0, var_param1) raise NotImplementedError() def _kl(self, var_param): raise NotImplementedError() @property def supports_kl(self): return self._supports_kl @abstractmethod def log_density(self, var_param, x): @abstractmethod def mean_and_cov(self, var_param): def pth_moment(self, var_param, p): if self.supports_pth_moment(p): return self._pth_moment(var_param, p) raise ValueError('p = {} is not a supported moment'.format(p)) @abstractmethod def _pth_moment(self, var_param, p): @abstractmethod def supports_pth_moment(self, p): @property
MIT License
catalyst-cooperative/pudl
src/pudl/analysis/timeseries_cleaning.py
Timeseries.diff
python
def diff(self, shift: int = 1) -> np.ndarray:
    return array_diff(self.x, shift)
Values minus the value of their neighbor.

Args:
    shift: Positions to shift for calculating the difference.
        Positive values select a preceding (left) neighbor.
https://github.com/catalyst-cooperative/pudl/blob/6a75069b90219a2da55262737b92fe0a024c4fb8/src/pudl/analysis/timeseries_cleaning.py#L833-L842
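A small worked example of Timeseries.diff with a made-up two-column series; rows are timesteps and columns are individual series, matching the layout the class expects.

# Worked example with a tiny made-up series (rows = timesteps, columns = series).
import numpy as np
from pudl.analysis.timeseries_cleaning import Timeseries

x = np.array([[1.0, 10.0],
              [2.0, 12.0],
              [4.0, 15.0]])
ts = Timeseries(x)
print(ts.diff())
# [[nan nan]
#  [ 1.  2.]
#  [ 2.  3.]]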
import functools import warnings from typing import Any, Iterable, List, Sequence, Tuple, Union import matplotlib.pyplot as plt import numpy as np import pandas as pd import scipy.stats def slice_axis( x: np.ndarray, start: int = None, end: int = None, step: int = None, axis: int = 0 ) -> Tuple: index = [slice(None)] * np.mod(axis, x.ndim) + [slice(start, end, step)] return tuple(index) def array_diff( x: np.ndarray, periods: int = 1, axis: int = 0, fill: Any = np.nan ) -> np.ndarray: if not periods: return x - x dx = np.empty_like(x) prepend = slice_axis(x, end=periods, axis=axis) append = slice_axis(x, start=periods, axis=axis) if periods > 0: dx[prepend] = fill dx[append] = x[append] - x[slice_axis(x, end=-periods, axis=axis)] else: dx[prepend] = x[prepend] - x[slice_axis(x, start=-periods, axis=axis)] dx[append] = fill return dx def encode_run_length( x: Union[Sequence, np.ndarray] ) -> Tuple[np.ndarray, np.ndarray]: x = np.asarray(x) n = len(x) if not n: return x, np.array([], dtype=int) y = np.array(x[1:] != x[:-1]) i = np.append(np.where(y), n - 1) lengths = np.diff(np.append(-1, i)) return x[i], lengths def insert_run_length( x: Union[Sequence, np.ndarray], values: Union[Sequence, np.ndarray], lengths: Sequence[int], mask: Sequence[bool] = None, padding: int = 0, intersect: bool = False, ) -> np.ndarray: if padding < 0: raise ValueError("Padding must zero or greater") x = np.array(x) if mask is None: run_starts = np.array([0]) run_lengths = np.array([len(x)]) else: mask_values, mask_lengths = encode_run_length(mask) run_starts = np.cumsum(np.append(0, mask_lengths))[:-1][mask_values] run_lengths = mask_lengths[mask_values] if padding: run_ends = run_starts + run_lengths moved = slice(int(run_starts[0] == 0), None) run_starts[moved] += padding moved = slice(None, -1 if run_ends[-1] == len(x) else None) run_ends[moved] -= padding run_lengths = run_ends - run_starts keep = run_lengths > 0 run_starts = run_starts[keep] run_lengths = run_lengths[keep] n_runs = len(run_starts) if not intersect: buffer = np.zeros(len(values), dtype=int) run_starts = np.concatenate((run_starts, buffer)) run_lengths = np.concatenate((run_lengths, buffer)) rng = np.random.default_rng() order = np.argsort(lengths)[::-1] values = np.asarray(values)[order] lengths = np.asarray(lengths)[order] for value, length in zip(values, lengths): if length < 1: raise ValueError("Run length must be greater than zero") choices = np.nonzero(run_lengths[:n_runs] >= length)[0] if not choices.size: raise ValueError(f"Could not find space for run of length {length}") idx = rng.choice(choices) offset = rng.integers(0, run_lengths[idx] - length, endpoint=True) start = run_starts[idx] + offset x[start:start + length] = value if intersect: continue padded_length = length + padding if offset: tail = run_lengths[idx] - offset - padded_length if tail > 0: run_starts[n_runs] = start + padded_length run_lengths[n_runs] = tail n_runs += 1 run_lengths[idx] = offset - padding else: run_starts[idx] += padded_length run_lengths[idx] -= padded_length return x def _mat2ten(matrix: np.ndarray, shape: np.ndarray, mode: int) -> np.ndarray: index = [mode] + [i for i in range(len(shape)) if i != mode] return np.moveaxis( np.reshape(matrix, newshape=shape[index], order='F'), source=0, destination=mode ) def _ten2mat(tensor: np.ndarray, mode: int) -> np.ndarray: return np.reshape( np.moveaxis(tensor, source=mode, destination=0), newshape=(tensor.shape[mode], -1), order='F' ) def _svt_tnn(matrix: np.ndarray, tau: float, theta: int) -> np.ndarray: [m, n] 
= matrix.shape if 2 * m < n: u, s, v = np.linalg.svd(matrix @ matrix.T, full_matrices=0) s = np.sqrt(s) idx = np.sum(s > tau) mid = np.zeros(idx) mid[: theta] = 1 mid[theta:idx] = (s[theta:idx] - tau) / s[theta:idx] return (u[:, :idx] @ np.diag(mid)) @ (u[:, :idx].T @ matrix) if m > 2 * n: return _svt_tnn(matrix.T, tau, theta).T u, s, v = np.linalg.svd(matrix, full_matrices=0) idx = np.sum(s > tau) vec = s[:idx].copy() vec[theta:idx] = s[theta:idx] - tau return u[:, :idx] @ np.diag(vec) @ v[:idx, :] def impute_latc_tnn( tensor: np.ndarray, lags: Sequence[int] = [1], alpha: Sequence[float] = [1 / 3, 1 / 3, 1 / 3], rho0: float = 1e-7, lambda0: float = 2e-7, theta: int = 20, epsilon: float = 1e-7, maxiter: int = 300 ) -> np.ndarray: tensor = np.where(np.isnan(tensor), 0, tensor) dim = np.array(tensor.shape) dim_time = int(np.prod(dim) / dim[0]) d = len(lags) max_lag = np.max(lags) mat = _ten2mat(tensor, mode=0) pos_missing = np.where(mat == 0) x = np.zeros(np.insert(dim, 0, len(dim))) t = np.zeros(np.insert(dim, 0, len(dim))) z = mat.copy() z[pos_missing] = np.mean(mat[mat != 0]) a = 0.001 * np.random.rand(dim[0], d) it = 0 ind = np.zeros((d, dim_time - max_lag), dtype=int) for i in range(d): ind[i, :] = np.arange(max_lag - lags[i], dim_time - lags[i]) last_mat = mat.copy() snorm = np.linalg.norm(mat, 'fro') rho = rho0 while True: rho = min(rho * 1.05, 1e5) for k in range(len(dim)): x[k] = _mat2ten( _svt_tnn( _ten2mat(_mat2ten(z, shape=dim, mode=0) - t[k] / rho, mode=k), tau=alpha[k] / rho, theta=theta ), shape=dim, mode=k ) tensor_hat = np.einsum('k, kmnt -> mnt', alpha, x) mat_hat = _ten2mat(tensor_hat, 0) mat0 = np.zeros((dim[0], dim_time - max_lag)) if lambda0 > 0: for m in range(dim[0]): qm = mat_hat[m, ind].T a[m, :] = np.linalg.pinv(qm) @ z[m, max_lag:] mat0[m, :] = qm @ a[m, :] mat1 = _ten2mat(np.mean(rho * x + t, axis=0), 0) z[pos_missing] = np.append( (mat1[:, :max_lag] / rho), (mat1[:, max_lag:] + lambda0 * mat0) / (rho + lambda0), axis=1 )[pos_missing] else: z[pos_missing] = (_ten2mat(np.mean(x + t / rho, axis=0), 0))[pos_missing] t = t + rho * (x - np.broadcast_to( _mat2ten(z, dim, 0), np.insert(dim, 0, len(dim)) )) tol = np.linalg.norm((mat_hat - last_mat), 'fro') / snorm last_mat = mat_hat.copy() it += 1 print(f"Iteration: {it}", end="\r") if tol < epsilon or it >= maxiter: break print(f"Iteration: {it}") return tensor_hat def _tsvt(tensor: np.ndarray, phi: np.ndarray, tau: float) -> np.ndarray: dim = tensor.shape x = np.zeros(dim) tensor = np.einsum('kt, ijk -> ijt', phi, tensor) for t in range(dim[2]): u, s, v = np.linalg.svd(tensor[:, :, t], full_matrices=False) r = len(np.where(s > tau)[0]) if r >= 1: s = s[:r] s[: r] = s[:r] - tau x[:, :, t] = u[:, :r] @ np.diag(s) @ v[:r, :] return np.einsum('kt, ijt -> ijk', phi, x) def impute_latc_tubal( tensor: np.ndarray, lags: Sequence[int] = [1], rho0: float = 1e-7, lambda0: float = 2e-7, epsilon: float = 1e-7, maxiter: int = 300 ) -> np.ndarray: tensor = np.where(np.isnan(tensor), 0, tensor) dim = np.array(tensor.shape) dim_time = int(np.prod(dim) / dim[0]) d = len(lags) max_lag = np.max(lags) mat = _ten2mat(tensor, 0) pos_missing = np.where(mat == 0) t = np.zeros(dim) z = mat.copy() z[pos_missing] = np.mean(mat[mat != 0]) a = 0.001 * np.random.rand(dim[0], d) it = 0 ind = np.zeros((d, dim_time - max_lag), dtype=np.int_) for i in range(d): ind[i, :] = np.arange(max_lag - lags[i], dim_time - lags[i]) last_mat = mat.copy() snorm = np.linalg.norm(mat, 'fro') rho = rho0 temp1 = _ten2mat(_mat2ten(z, dim, 0), 2) _, phi = 
np.linalg.eig(temp1 @ temp1.T) del temp1 if dim_time > 5e3 and dim_time <= 1e4: sample_rate = 0.2 elif dim_time > 1e4: sample_rate = 0.1 while True: rho = min(rho * 1.05, 1e5) x = _tsvt(_mat2ten(z, dim, 0) - t / rho, phi, 1 / rho) mat_hat = _ten2mat(x, 0) mat0 = np.zeros((dim[0], dim_time - max_lag)) temp2 = _ten2mat(rho * x + t, 0) if lambda0 > 0: if dim_time <= 5e3: for m in range(dim[0]): qm = mat_hat[m, ind].T a[m, :] = np.linalg.pinv(qm) @ z[m, max_lag:] mat0[m, :] = qm @ a[m, :] elif dim_time > 5e3: for m in range(dim[0]): idx = np.arange(0, dim_time - max_lag) np.random.shuffle(idx) idx = idx[: int(sample_rate * (dim_time - max_lag))] qm = mat_hat[m, ind].T a[m, :] = np.linalg.pinv(qm[idx[:], :]) @ z[m, max_lag:][idx[:]] mat0[m, :] = qm @ a[m, :] z[pos_missing] = np.append( (temp2[:, :max_lag] / rho), (temp2[:, max_lag:] + lambda0 * mat0) / (rho + lambda0), axis=1 )[pos_missing] else: z[pos_missing] = temp2[pos_missing] / rho t = t + rho * (x - _mat2ten(z, dim, 0)) tol = np.linalg.norm((mat_hat - last_mat), 'fro') / snorm last_mat = mat_hat.copy() it += 1 if not np.mod(it, 10): temp1 = _ten2mat(_mat2ten(z, dim, 0) - t / rho, 2) _, phi = np.linalg.eig(temp1 @ temp1.T) del temp1 print(f"Iteration: {it}", end="\r") if tol < epsilon or it >= maxiter: break print(f"Iteration: {it}") return x class Timeseries: def __init__(self, x: Union[np.ndarray, pd.DataFrame]) -> None: self.xi: np.ndarray self.index: pd.Index self.columns: pd.Index if isinstance(x, pd.DataFrame): self.xi = x.values self.index = x.index self.columns = x.columns else: self.xi = x self.index = pd.RangeIndex(x.shape[0]) self.columns = pd.RangeIndex(x.shape[1]) self.x: np.ndarray = self.xi.copy() self.flags: np.ndarray = np.empty(self.x.shape, dtype=object) self.flagged: List[str] = [] def to_dataframe(self, array: np.ndarray = None, copy: bool = True) -> pd.DataFrame: x = self.x if array is None else array return pd.DataFrame(x, columns=self.columns, index=self.index, copy=copy) def flag(self, mask: np.ndarray, flag: str) -> None: mask = mask & ~np.isnan(self.x) self.flags[mask] = flag self.flagged.append(flag) self.x[mask] = np.nan for name in dir(self): attr = getattr(self, name) if hasattr(attr, 'cache_clear'): attr.cache_clear() def unflag(self, flags: Iterable[str] = None) -> None: mask = slice(None) if flags is None else np.isin(self.flags, flags) self.flags[mask] = None self.x[mask] = self.xi[mask] self.flagged = [f for f in self.flagged if flags is not None and f not in flags] def flag_negative_or_zero(self) -> None: mask = self.x <= 0 self.flag(mask, "NEGATIVE_OR_ZERO") def flag_identical_run(self, length: int = 3) -> None: if length < 2: raise ValueError("Run length must be 2 or greater") mask = np.ones(self.x.shape, dtype=bool) mask[0] = False for n in range(1, length): mask[n:] &= self.x[n:] == self.x[:-n] self.flag(mask, "IDENTICAL_RUN") def flag_global_outlier(self, medians: float = 9) -> None: median = np.nanmedian(self.x, axis=0) mask = np.abs(self.x - median) > np.abs(median * medians) self.flag(mask, "GLOBAL_OUTLIER") def flag_global_outlier_neighbor(self, neighbors: int = 1) -> None: if "GLOBAL_OUTLIER" not in self.flagged: raise ValueError("Global outliers must be flagged first") mask = np.zeros(self.x.shape, dtype=bool) outliers = self.flags == "GLOBAL_OUTLIER" for shift in range(1, neighbors + 1): mask[:-shift][outliers[shift:]] = True mask[shift:][outliers[:-shift]] = True self.flag(mask, "GLOBAL_OUTLIER_NEIGHBOR") @functools.lru_cache(maxsize=2) def rolling_median(self, window: int = 48) -> 
np.ndarray: df = pd.DataFrame(self.x, copy=False) return df.rolling(window, min_periods=1, center=True).median().values def rolling_median_offset(self, window: int = 48) -> np.ndarray: return self.x - self.rolling_median(window=window) def median_of_rolling_median_offset( self, window: int = 48, shifts: Sequence[int] = range(-240, 241, 24) ) -> np.ndarray: offset = self.rolling_median_offset(window=window) shifted = np.empty([len(shifts), *offset.shape], dtype=float) for i, shift in enumerate(shifts): if shift > 0: shifted[i, :shift] = np.nan shifted[i, shift:] = offset[:-shift] elif shift < 0: shifted[i, shift:] = np.nan shifted[i, :shift] = offset[-shift:] else: shifted[i, :] = offset with warnings.catch_warnings(): warnings.filterwarnings( "ignore", category=RuntimeWarning, message="All-NaN slice encountered" ) return np.nanmedian(shifted, axis=0) def rolling_iqr_of_rolling_median_offset( self, window: int = 48, iqr_window: int = 240 ) -> np.ndarray: offset = self.rolling_median_offset(window=window) df = pd.DataFrame(offset, copy=False) rolling = df.rolling(iqr_window, min_periods=1, center=True) return (rolling.quantile(0.75) - rolling.quantile(0.25)).values def median_prediction( self, window: int = 48, shifts: Sequence[int] = range(-240, 241, 24), long_window: int = 480 ) -> np.ndarray: return self.rolling_median(window=window) * ( 1 + self.median_of_rolling_median_offset(window=window, shifts=shifts) / self.rolling_median(window=long_window) ) def flag_local_outlier( self, window: int = 48, shifts: Sequence[int] = range(-240, 241, 24), long_window: int = 480, iqr_window: int = 240, multiplier: Tuple[float, float] = (3.5, 2.5) ) -> None: prediction = self.median_prediction( window=window, shifts=shifts, long_window=long_window ) iqr = self.rolling_iqr_of_rolling_median_offset( window=window, iqr_window=iqr_window ) mask = self.x > prediction + multiplier[0] * iqr self.flag(mask, "LOCAL_OUTLIER_HIGH") mask = self.x < prediction - multiplier[1] * iqr self.flag(mask, "LOCAL_OUTLIER_LOW")
MIT License
storj/storj-python-sdk
storj/model.py
ShardManager._make_shards
python
def _make_shards(self):
    if self.num_chunks == 0:
        self.num_chunks = self.get_optimal_shard_number()
    self.__logger.debug('number of chunks %d', self.num_chunks)

    index = 0
    try:
        with open(self.filepath, 'rb') as f:
            bname = os.path.split(self.filepath)[1]
            chunk_size = int(float(self.filesize) / float(self.num_chunks))
            self.__logger.debug('chunk_size = %d', chunk_size)

            for x in range(1, self.num_chunks + 1):
                chunk_fn = '%s-%s%s' % (bname, x, self.suffix)

                try:
                    data = f.read(chunk_size)
                    self.__logger.debug('writing file %s', chunk_fn)
                    with open(os.path.join(self.tmp_path, chunk_fn), 'wb') as chunkf:
                        chunkf.write(data)

                    challenges = self._make_challenges(self.nchallenges)
                    self.shards.append(
                        Shard(size=len(data),
                              index=index,
                              hash=ShardManager.hash(data),
                              tree=self._make_tree(challenges, data),
                              challenges=challenges))
                    index += 1

                except (OSError, IOError) as e:
                    self.__logger.error(e)
                    continue

    except (OSError, IOError) as e:
        self.__logger.error(e)
        raise ShardingException(str(e))
Populates the shard manager with shards.
https://github.com/storj/storj-python-sdk/blob/440add68f153a6d5a8e05ea577774559f2ada6ba/storj/model.py#L801-L843
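_make_shards runs as a side effect of assigning the filepath in ShardManager's constructor, so typical use is just constructing the manager; the paths below are hypothetical and the input file must already exist.

# Hedged usage sketch: sharding is triggered by the filepath setter inside the
# constructor. '/tmp/example.bin' is a hypothetical, pre-existing input file.
from storj.model import ShardManager

manager = ShardManager('/tmp/example.bin', num_chunks=4, tmp_path='/tmp')
for shard in manager.shards:
    print(shard.index, shard.size, shard.hash)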
import base58 import base64 import binascii import hashlib import logging import math import io import random import os import six import strict_rfc3339 import types from Crypto.Cipher import AES from datetime import datetime from micropayment_core import keys from os import urandom from pycoin.key.Key import Key from pycoin.serialize import b2h from pycoin.key.BIP32Node import BIP32Node from pycoin.serialize.bitcoin_streamer import stream_bc_string from pycoin.ecdsa import numbertheory, generator_secp256k1 from pycoin.encoding import to_bytes_32, from_bytes_32, double_sha256 from steenzout.object import Object from sys import platform class Bucket(Object): def __init__( self, id=None, name=None, status=None, user=None, created=None, storage=None, transfer=None, pubkeys=None, publicPermissions=None, encryptionKey=None, index=None ): self.id = id self.name = name self.status = status self.user = user self.storage = storage self.transfer = transfer self.pubkeys = pubkeys self.publicPermissions = publicPermissions self.encryptionKey = encryptionKey self.index = index if created is not None: self.created = datetime.fromtimestamp( strict_rfc3339.rfc3339_to_timestamp(created)) else: self.created = None class Contact(Object): def __init__( self, address=None, port=None, nodeID=None, lastSeen=None, protocol=None, userAgent=None, responseTime=None, timeoutRate=None, lastTimeout=None ): self.address = address self.port = port self.nodeID = nodeID self.lastSeen = lastSeen self.protocol = protocol self.userAgent = userAgent self.responseTime = responseTime self.timeoutRate = timeoutRate self.lastTimeout = lastTimeout @property def lastSeen(self): return self._last_seen @lastSeen.setter def lastSeen(self, value): if value is not None: self._last_seen = datetime.fromtimestamp( strict_rfc3339.rfc3339_to_timestamp(value)) else: self._last_seen = None class File(Object): def __init__(self, bucket=None, hash=None, mimetype=None, filename=None, size=None, id=None, frame=None, created=None, hmac=None, erasure=None, index=None): self.bucket = Bucket(id=bucket) self.hash = hash self.mimetype = mimetype self.filename = filename self.size = size self.erasure = erasure self.index = index self.shard_manager = None self.id = id self.frame = Frame(id=frame) self.created = created self.hmac = hmac @property def content_type(self): return self.mimetype @property def name(self): return self.filename class FilePointer(Object): def __init__(self, hash=None, token=None, operation=None, channel=None): self.hash = hash self.token = Token(id=token) self.operation = operation self.channel = channel class Frame(Object): def __init__(self, id=None, created=None, shards=None, locked=None, user=None, size=None, storageSize=None): self.id = id self.locked = locked self.user = user self.size = size self.storageSize = storageSize if created is not None: self.created = datetime.fromtimestamp( strict_rfc3339.rfc3339_to_timestamp(created)) else: self.created = None if shards is None: self.shards = [] else: self.shards = shards class KeyPair(object): def __init__(self, pkey=None, secret=None): if secret is not None: pkey = format( BIP32Node.from_master_secret( secret.encode('utf-8') ).secret_exponent(), "064x") elif pkey is None: try: pkey = format( BIP32Node.from_master_secret( urandom(4096) ).secret_exponent(), '064x') except NotImplementedError as e: raise ValueError('No randomness source found: %s' % e) self.keypair = Key(secret_exponent=int(pkey, 16)) @property def node_id(self): return b2h(self.keypair.hash160()) @property def 
public_key(self): return b2h(self.keypair.sec(use_uncompressed=False)) @property def private_key(self): return format(self.keypair.secret_exponent(), '064x') @property def address(self): return self.keypair.address(use_uncompressed=False) def sign(self, message, compact=True): if compact: fd = io.BytesIO() stream_bc_string(fd, bytearray('Bitcoin Signed Message:\n', 'ascii')) stream_bc_string(fd, bytearray(message, 'utf-8')) mhash = from_bytes_32(double_sha256(fd.getvalue())) G = generator_secp256k1 n = G.order() k = from_bytes_32(os.urandom(32)) p1 = k * G r = p1.x() if r == 0: raise RuntimeError("amazingly unlucky random number r") s = (numbertheory.inverse_mod(k, n) * (mhash + (self.keypair.secret_exponent() * r) % n)) % n if s == 0: raise RuntimeError("amazingly unlucky random number s") y_odd = p1.y() % 2 assert y_odd in (0, 1) first = 27 + y_odd + (4 if not self.keypair._use_uncompressed(False) else 0) sig = binascii.b2a_base64(bytearray([first]) + to_bytes_32(r) + to_bytes_32(s)).strip() if not isinstance(sig, str): sig = str(sig, 'ascii') return sig else: return keys.sign_sha256(self.private_key, message) class IdecdsaCipher(Object): @staticmethod def pad(data): padding = 16 - len(data) % 16 return '%s%s' % (data, padding * chr(padding)) @staticmethod def unpad(data): return data[0:-ord(data[-1])] def decrypt(self, data, key, iv): aes = AES.new(key, AES.MODE_CBC, iv) return self.unpad(aes.decrypt(data)).decode('hex') def encrypt(self, data, key, iv): aes = AES.new(key, AES.MODE_CBC, iv) return aes.encrypt(self.pad(data.encode('hex'))) def EVP_BytesToKey(self, password, key_len, iv_len): m = [] len_m = 0 i = 0 utf8_password = password.encode('utf-8') while len_m < (key_len + iv_len): md5 = hashlib.md5() data = utf8_password if i > 0: data = m[i - 1] + utf8_password md5.update(data) md5_digest = md5.digest() m.append(md5_digest) len_m += len(md5_digest) i += 1 ms = ''.encode('utf-8') for mi in m: ms += mi key = ms[:key_len] iv = ms[key_len:key_len + iv_len] return key, iv def simpleEncrypt(self, passphrase, data): key, iv = self.EVP_BytesToKey(passphrase, 32, 16) return base58.b58encode(self.encrypt(data, key, iv)) def simpleDecrypt(self, passphrase, base58_data): key, iv = self.EVP_BytesToKey(passphrase, 32, 16) return self.decrypt(base58.b58decode(base58_data), key, iv) class Keyring(Object): def __init__(self): self.password = None self.salt = None def generate(self): user_pass = raw_input("Enter your keyring password: ") password = hex(random.getrandbits(512 * 8))[2:-1] salt = hex(random.getrandbits(32 * 8))[2:-1] pbkdf2 = hashlib.pbkdf2_hmac('sha512', password, salt, 25000, 512) key = hashlib.new('sha256', pbkdf2).hexdigest() IV = salt[:16] self.export_keyring(password, salt, user_pass) self.password = password self.salt = salt def get_encryption_key(self, user_pass): password = hex(random.getrandbits(512 * 8))[2:-1] salt = hex(random.getrandbits(32 * 8))[2:-1] pbkdf2 = hashlib.pbkdf2_hmac('sha512', password, salt, 25000, 512) key = hashlib.new('sha256', pbkdf2).hexdigest() IV = salt[:16] self.password = password self.salt = salt return key def export_keyring(self, password, salt, user_pass): plain = self.pad('{"pass" : "%s", \n"salt" : "%s"\n}' % (password, salt)) IV = hex(random.getrandbits(8 * 8))[2:-1] aes = AES.new(self.pad(user_pass), AES.MODE_CBC, IV) with open('key.b64', 'wb') as f: f.write(base64.b64encode(IV + aes.encrypt(plain))) def import_keyring(self, filepath): with open(filepath, 'rb') as f: keyb64 = f.read() user_pass = raw_input('Enter your keyring password: ') 
key_enc = base64.b64decode(keyb64) IV = key_enc[:16] key = AES.new(self.pad(user_pass), AES.MODE_CBC, IV) creds = eval(key.decrypt(key_enc[16:])[:-4]) self.password = creds['pass'] self.salt = creds['salt'] return creds class MerkleTree(Object): def __init__(self, leaves, prehashed=True): self.prehashed = prehashed self.leaves = leaves self.count = 0 self._rows = [] self._generate() @property def depth(self): pow = 0 while (2 ** pow) < len(self._leaves): pow += 1 return pow @property def leaves(self): return self._leaves @leaves.setter def leaves(self, value): if value is None: raise ValueError('Leaves should be a list.') elif not isinstance(value, list) and not isinstance(value, types.GeneratorType): raise ValueError('Leaves should be a list or a generator (%s).' % type(value)) if self.prehashed: self._leaves = list(value) else: self._leaves = [ShardManager.hash(leaf) for leaf in value] if not len(self._leaves) > 0: raise ValueError('Leaves must contain at least one entry.') for leaf in self._leaves: if not isinstance(leaf, six.string_types): raise ValueError('Leaves should only contain strings.') def _generate(self): self._rows = [[] for _ in range(self.depth + 1)] while len(self._leaves) < (2 ** self.depth): self._leaves.append(ShardManager.hash('')) leaf_row = self.depth next_branches = self.depth - 1 self._rows[leaf_row] = self._leaves if not self.prehashed: self.count += len(self._leaves) while next_branches >= 0: self._rows[next_branches] = self._make_row(next_branches) self.count += len(self._rows[next_branches]) next_branches -= 1 def _make_row(self, depth): row = [] prior = self._rows[depth + 1] for i in range(0, len(prior), 2): entry = ShardManager.hash('%s%s' % (prior[i], prior[i + 1])) row.append(entry) return row def get_root(self): return self._rows[0][0] def get_level(self, depth): return self._rows[depth] class Mirror(Object): def __init__(self, hash=None, mirrors=None, status=None): self.hash = hash self.mirrors = mirrors self.status = status class FileMirrors(Object): def __init__(self, available=None, established=None): self.established = established self.available = available class Shard(Object): def __init__(self, id=None, hash=None, size=None, index=None, challenges=None, tree=None, exclude=None): self.id = id self.hash = hash self.size = size self.index = index self.challenges = challenges self.tree = tree self.exclude = exclude if challenges is not None: self.challenges = challenges else: self.challenges = [] if tree is not None: self.tree = tree else: self.tree = [] if exclude is not None: self.exclude = exclude else: self.exclude = [] def all(self): return_string = 'Shard{index=%s, hash=%s, ' % ( self.index, self.hash) return_string += 'size=%s, tree={%s}, challenges={%s}' % ( self.size, ', '.join(self.tree), ', '.join(self.challenges)) return return_string def add_challenge(self, challenge): self.challenges.append(challenge) def add_tree(self, tree): self.tree.append(tree) def get_public_record(self): pass def get_private_record(self): pass class ShardingException(Exception): def __init__(self, value): self.value = value def __str__(self): return str(self.value) class ShardManager(Object): __logger = logging.getLogger('%s.ShardManager' % __name__) MAX_SHARD_SIZE = 4294967296 SHARD_MULTIPLES_BACK = 4 SHARD_SIZE = 8 * (1024 * 1024) def __init__(self, filepath, num_chunks=0, tmp_path=None, nchallenges=2, suffix=''): self.num_chunks = num_chunks self.nchallenges = nchallenges self.shards = [] self.suffix = suffix self.tmp_path = tmp_path self.filepath = filepath 
@property def filepath(self): return self._filepath @filepath.setter def filepath(self, value): if not isinstance(value, six.string_types): raise ValueError('%s must be a string' % value) elif not os.path.exists(value): raise ValueError('%s must exist' % value) elif not os.path.isfile(value): raise ValueError('%s must be a file' % value) self._filepath = value self._filesize = os.path.getsize(value) self._make_shards() @property def filesize(self): return self._filesize @property def tmp_path(self): return self._tmp_path @tmp_path.setter def tmp_path(self, value): if value is not None: self._tmp_path = value elif platform == 'linux' or platform == 'linux2': self._tmp_path = '/tmp' elif platform == 'darwin': self._tmp_path = '/tmp' elif platform == 'win32': self._tmp_path = 'C://Windows/temp' self.__logger.debug('self.tmp_path=%s', self._tmp_path) def get_optimal_shard_number(self): shard_size = self.determine_shard_size() if shard_size == 0: shard_size = self.filesize shard_count = int(math.ceil(float(self.filesize) / float(shard_size))) self.__logger.debug( 'shard_size = %d, shard_count = %d, file_size = %d', shard_size, shard_count, self.filesize) return shard_count def determine_shard_size(self, accumulator=0): self.__logger.debug('determine_shard_size(%d)', accumulator) if self.filesize <= 0: return self.filesize if accumulator - ShardManager.SHARD_MULTIPLES_BACK < 0: hops = 0 else: hops = accumulator - ShardManager.SHARD_MULTIPLES_BACK byte_multiple = ShardManager.SHARD_SIZE * pow(2, accumulator) check = float(self.filesize) / float(byte_multiple) self.__logger.debug( 'hops=%d acumulator = %d check = %.2f file_size = %d byte_multiple = %d', hops, accumulator, check, self.filesize, byte_multiple) if 0 < check <= 1: while hops > 0 and ShardManager.SHARD_SIZE * pow(2, hops) > ShardManager.MAX_SHARD_SIZE: if hops - 1 <= 0: hops = 0 else: hops -= 1 self.__logger.debug( 'hops=%d acumulator = %d check = %.2f file_size = %d byte_multiple = %d', hops, accumulator, check, self.filesize, byte_multiple) return ShardManager.SHARD_SIZE * pow(2, hops) if accumulator > 41: return 0 accumulator += 1 return self.determine_shard_size(accumulator)
MIT License
decile-team/cords
cords/utils/config_utils.py
load_config_data
python
def load_config_data(filepath): filename = osp.abspath(osp.expanduser(filepath)) check_file_exist(filename) fileExtname = osp.splitext(filename)[1] if fileExtname not in ['.py', '.yaml', '.yml']: raise IOError('Only py/yml/yaml type are supported now!') """ Parsing Config file """ if filename.endswith(('.yaml', '.yml')): with open(filename, 'r') as config_file: configdata = yaml.load(config_file, Loader=yaml.FullLoader) elif filename.endswith('.py'): spec = importlib.util.spec_from_file_location("config", filename) mod = importlib.util.module_from_spec(spec) spec.loader.exec_module(mod) configdata = copy.deepcopy(mod.config) return DotMap(configdata)
Parse a .py, .yaml, or .yml configuration file and return its contents as a DotMap.
https://github.com/decile-team/cords/blob/e10de177355a10e6931743401e80debd20e2f240/cords/utils/config_utils.py#L54-L71
import os import os.path as osp from pathlib import Path import ast import yaml import importlib.util import copy import os from dotmap import DotMap def is_str(x): return isinstance(x, str) def is_filepath(x): return is_str(x) or isinstance(x, Path) def fopen(filepath, *args, **kwargs): if is_str(filepath): return open(filepath, *args, **kwargs) elif isinstance(filepath, Path): return filepath.open(*args, **kwargs) raise ValueError('`filepath` should be a string or a Path') def check_file_exist(filename, msg_tmpl='file "{}" does not exist'): if not osp.isfile(filename): raise FileNotFoundError(msg_tmpl.format(filename)) def mkdir_or_exist(dir_name, mode=0o777): if dir_name == '': return else: dir_name = osp.expanduser(dir_name) os.makedirs(dir_name, mode=mode, exist_ok=True) def _validate_py_syntax(filename): with open(filename, 'r') as f: content = f.read() try: ast.parse(content) except SyntaxError as e: raise SyntaxError('There are syntax errors in config ' f'file {filename}: {e}')
MIT License
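A short usage sketch for load_config_data (the import path follows the file location in this record; the config file path is hypothetical):

from cords.utils.config_utils import load_config_data

cfg = load_config_data('configs/SL/config_glister.py')   # hypothetical path
# The returned DotMap supports attribute-style access to nested keys,
# e.g. cfg.dataset.name or cfg.train_args.num_epochs, if the file defines them.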
eivindeb/pyfly
pyfly/pyfly.py
AttitudeQuaternion.__init__
python
def __init__(self): self.quaternion = None self.euler_angles = {"roll": None, "pitch": None, "yaw": None} self.history = None
Quaternion attitude representation used by PyFly.
https://github.com/eivindeb/pyfly/blob/8451687b8df4a7bc6f78f3a1cdcfd503b76bf596/pyfly/pyfly.py#L552-L558
import numpy as np import math import scipy.integrate import scipy.io import os.path as osp import json import matplotlib.pyplot as plt import matplotlib.gridspec class ConstraintException(Exception): def __init__(self, variable, value, limit): self.message = "Constraint on {} violated ({}/{})".format(variable, value, limit) self.variable = variable class Variable: def __init__(self, name, value_min=None, value_max=None, init_min=None, init_max=None, constraint_min=None, constraint_max=None, convert_to_radians=False, unit=None, label=None, wrap=False): self.value_min = value_min self.value_max = value_max self.init_min = init_min if init_min is not None else value_min self.init_max = init_max if init_max is not None else value_max self.constraint_min = constraint_min self.constraint_max = constraint_max if convert_to_radians: for attr_name, val in self.__dict__.items(): if val is not None: setattr(self, attr_name, np.radians(val)) self.name = name self.value = None self.wrap = wrap self.unit = unit self.label = label if label is not None else self.name self.lines = {"self": None} self.target_lines = {"self": None} self.target_bounds = {"self": None} self.np_random = None self.seed() self.history = None def reset(self, value=None): self.history = [] if value is None: try: value = self.np_random.uniform(self.init_min, self.init_max) except TypeError: raise Exception("Variable init_min and init_max can not be None if no value is provided on reset") else: value = self.apply_conditions(value) self.value = value self.history.append(value) def seed(self, seed=None): self.np_random = np.random.RandomState(seed) def apply_conditions(self, value): if self.constraint_min is not None and value < self.constraint_min: raise ConstraintException(self.name, value, self.constraint_min) if self.constraint_max is not None and value > self.constraint_max: raise ConstraintException(self.name, value, self.constraint_max) if self.value_min is not None or self.value_max is not None: value = np.clip(value, self.value_min, self.value_max) if self.wrap and np.abs(value) > np.pi: value = np.sign(value) * (np.abs(value) % np.pi - np.pi) return value def set_value(self, value, save=True): value = self.apply_conditions(value) if save: self.history.append(value) self.value = value def plot(self, axis=None, y_unit=None, target=None, plot_id=None, **plot_kw): def linear_scaling(val, old_min, old_max, new_min, new_max): return (new_max - np.sign(old_min) * (- new_min)) / (old_max - old_min) * ( np.array(val) - old_max) + new_max if y_unit is None: y_unit = self.unit if y_unit is None else y_unit x, y = self._get_plot_x_y_data() if "degrees" in y_unit: y = np.degrees(y) if target is not None: target["data"] = np.degrees(target["data"]) if "bound" in target: target["bound"] = np.degrees(target["bound"]) elif y_unit == "%": y = linear_scaling(y, self.value_min, self.value_max, -100, 100) if target is not None: target["data"] = linear_scaling(target["data"], self.value_min, self.value_max, -100, 100) if "bound" in target: target["bound"] = linear_scaling(target["bound"], self.value_min, self.value_max, -100, 100) else: y = y plot_object = axis if axis is None: plot_object = plt plot_id = "self" fig_kw = {"title": self.name, "ylabel": y_unit} if self.lines.get(plot_id, None) is None: line, = plot_object.plot(x, y, label=self.label, **plot_kw) self.lines[plot_id] = line if target is not None: tar_line, = plot_object.plot(x, target["data"], color=self.lines[plot_id].get_color(), linestyle="dashed", marker="x", markevery=0.2) if 
"bound" in target: tar_bound = plot_object.fill_between(np.arange(target["bound"].shape[0]), target["data"] + target["bound"], target["data"] - target["bound"], alpha=0.15, facecolor=self.lines[plot_id].get_color() ) self.target_bounds[plot_id] = tar_bound self.target_lines[plot_id] = tar_line else: self.lines[plot_id].set_data(x, y) if target is not None: self.target_lines[plot_id].set_data(x, target) if "bound" in target: self.target_bounds[plot_id].set_data(np.arange(target["bound"].shape[0]), target["data"] + target["bound"], target["data"] - target["bound"]) if axis is None: for k, v in fig_kw.items(): getattr(plot_object, format(k))(v) plt.show() def close_plot(self, plot_id="self"): self.lines[plot_id] = None self.target_lines[plot_id] = None self.target_bounds[plot_id] = None def _get_plot_x_y_data(self): x = list(range(len(self.history))) y = self.history return x, y class ControlVariable(Variable): def __init__(self, order=None, tau=None, omega_0=None, zeta=None, dot_max=None, disabled=False, **kwargs): assert (disabled or (order == 1 or order == 2)) super().__init__(**kwargs) self.order = order self.tau = tau self.omega_0 = omega_0 self.zeta = zeta self.dot_max = dot_max if order == 1: assert (tau is not None) self.coefs = [[-1 / self.tau, 0, 1 / self.tau], [0, 0, 0]] elif order == 2: assert (omega_0 is not None and zeta is not None) self.coefs = [[0, 1, 0], [-self.omega_0 ** 2, -2 * self.zeta * self.omega_0, self.omega_0 ** 2]] self.dot = None self.command = None self.disabled = disabled if self.disabled: self.value = 0 self.plot_quantity = "value" def apply_conditions(self, values): try: value, dot = values except: value, dot = values, 0 value = super().apply_conditions(value) if self.dot_max is not None: dot = np.clip(dot, -self.dot_max, self.dot_max) return [value, dot] def set_command(self, command): command = super().apply_conditions(command) self.command = command self.history["command"].append(command) def reset(self, value=None): self.history = {"value": [], "dot": [], "command": []} if not self.disabled: if value is None: value = self.np_random.uniform(self.init_min, self.init_max), 0 else: value = self.apply_conditions(value) self.value = value[0] self.dot = value[1] command = None self.command = command else: value, dot, command = 0, 0, None self.value = value self.dot = dot self.command = command self.history["value"].append(self.value) self.history["dot"].append(self.dot) def set_value(self, value, save=True): value, dot = self.apply_conditions(value) self.value = value self.dot = dot if save: self.history["value"].append(value) self.history["dot"].append(dot) def _get_plot_x_y_data(self): y = self.history[self.plot_quantity] x = list(range(len(y))) return x, y def get_coeffs(self): if self.order == 1: return else: return [] class EnergyVariable(Variable): def __init__(self, mass=None, inertia_matrix=None, gravity=None, **kwargs): super().__init__(**kwargs) self.required_variables = [] self.variables = {} if self.name == "energy_potential" or self.name == "energy_total": assert(mass is not None and gravity is not None) self.mass = mass self.gravity = gravity self.required_variables.append("position_d") if self.name == "energy_kinetic" or self.name == "energy_total": assert (mass is not None and inertia_matrix is not None) self.mass = mass self.inertia_matrix = inertia_matrix self.required_variables.extend(["Va", "omega_p", "omega_q", "omega_r"]) if self.name == "energy_kinetic_rotational": assert(inertia_matrix is not None) self.inertia_matrix = inertia_matrix 
self.required_variables.extend(["omega_p", "omega_q", "omega_r"]) if self.name == "energy_kinetic_translational": assert(mass is not None) self.mass = mass self.required_variables.append("Va") def add_requirement(self, name, variable): self.variables[name] = variable def calculate_value(self): val = 0 if self.name == "energy_potential" or self.name == "energy_total": val += self.mass * self.gravity * (-self.variables["position_d"].value) if self.name == "energy_kinetic_rotational" or self.name == "energy_kinetic" or self.name == "energy_total": for i, axis in enumerate(["omega_p", "omega_q", "omega_r"]): m_i = self.inertia_matrix[i, i] val += 1 / 2 * m_i * self.variables[axis].value ** 2 if self.name == "energy_kinetic_translational" or self.name == "energy_kinetic" or self.name == "energy_total": val += 1 / 2 * self.mass * self.variables["Va"].value ** 2 return val class Actuation: def __init__(self, model_inputs, actuator_inputs, dynamics): self.states = {} self.coefficients = [[np.array([]) for _ in range(3)] for __ in range(2)] self.elevon_dynamics = False self.dynamics = dynamics self.inputs = actuator_inputs self.model_inputs = model_inputs self.input_indices = {s: i for i, s in enumerate(actuator_inputs)} self.dynamics_indices = {s: i for i, s in enumerate(dynamics)} def set_states(self, values, save=True): for i, state in enumerate(self.dynamics): self.states[state].set_value((values[i], values[len(self.dynamics) + i]), save=save) if self.elevon_dynamics: elevator, aileron = self._map_elevon_to_elevail(er=self.states["elevon_right"].value, el=self.states["elevon_left"].value) self.states["aileron"].set_value((aileron, 0), save=save) self.states["elevator"].set_value((elevator, 0), save=save) def add_state(self, state): self.states[state.name] = state if state.name in self.dynamics: for i in range(2): for j in range(3): self.coefficients[i][j] = np.append(self.coefficients[i][j], state.coefs[i][j]) def get_values(self): return [self.states[state].value for state in self.dynamics] + [self.states[state].dot for state in self.dynamics] def rhs(self, setpoints=None): if setpoints is None: setpoints = [self.states[state].command for state in self.dynamics] states = [self.states[state].value for state in self.dynamics] dots = [self.states[state].dot for state in self.dynamics] dot = np.multiply(states, self.coefficients[0][0]) + np.multiply(setpoints, self.coefficients[0][2]) + np.multiply(dots, self.coefficients[0][1]) ddot = np.multiply(states, self.coefficients[1][0]) + np.multiply(setpoints, self.coefficients[1][2]) + np.multiply(dots, self.coefficients[1][1]) return np.concatenate((dot, ddot)) def set_and_constrain_commands(self, commands): dynamics_commands = {} if self.elevon_dynamics and "elevator" and "aileron" in self.inputs: elev_c, ail_c = commands[self.input_indices["elevator"]], commands[self.input_indices["aileron"]] elevon_r_c, elevon_l_c = self._map_elevail_to_elevon(elev=elev_c, ail=ail_c) dynamics_commands = {"elevon_right": elevon_r_c, "elevon_left": elevon_l_c} for state in self.dynamics: if state in self.input_indices: state_c = commands[self.input_indices[state]] else: state_c = dynamics_commands[state] self.states[state].set_command(state_c) dynamics_commands[state] = self.states[state].command if self.elevon_dynamics: elev_c, ail_c = self._map_elevon_to_elevail(er=dynamics_commands["elevon_right"], el=dynamics_commands["elevon_left"]) self.states["elevator"].set_command(elev_c) self.states["aileron"].set_command(ail_c) for state, i in self.input_indices.items(): 
commands[i] = self.states[state].command return commands def finalize(self): if "elevon_left" in self.dynamics or "elevon_right" in self.dynamics: assert("elevon_left" in self.dynamics and "elevon_right" in self.dynamics and not ("aileron" in self.dynamics or "elevator" in self.dynamics)) assert ("elevon_left" in self.states and "elevon_right" in self.states) self.elevon_dynamics = True if "elevator" in self.states: elev_min, _ = self._map_elevon_to_elevail(er=self.states["elevon_right"].value_min, el=self.states["elevon_left"].value_min) elev_max, _ = self._map_elevon_to_elevail(er=self.states["elevon_right"].value_max, el=self.states["elevon_left"].value_max) self.states["elevator"].value_min = elev_min self.states["elevator"].value_max = elev_max if "aileron" in self.states: _, ail_min = self._map_elevon_to_elevail(er=self.states["elevon_right"].value_max, el=self.states["elevon_left"].value_min) _, ail_max = self._map_elevon_to_elevail(er=self.states["elevon_right"].value_min, el=self.states["elevon_left"].value_max) self.states["aileron"].value_min = ail_min self.states["aileron"].value_max = ail_max def reset(self, state_init=None): for state in self.dynamics: init = None if state_init is not None and state in state_init: init = state_init[state] self.states[state].reset(value=init) if self.elevon_dynamics: elev, ail = self._map_elevon_to_elevail(er=self.states["elevon_right"].value, el=self.states["elevon_left"].value) self.states["elevator"].reset(value=elev) self.states["aileron"].reset(value=ail) def _map_elevail_to_elevon(self, elev, ail): er = -1 * ail + elev el = ail + elev return er, el def _map_elevon_to_elevail(self, er, el): ail = (-er + el) / 2 elev = (er + el) / 2 return elev, ail class AttitudeQuaternion:
MIT License
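A small sketch of the constructor's behaviour (the import path is assumed from the record's file location; PyFly populates these fields elsewhere during simulation):

from pyfly.pyfly import AttitudeQuaternion   # assumed import path

att = AttitudeQuaternion()
assert att.quaternion is None
assert att.euler_angles == {"roll": None, "pitch": None, "yaw": None}
assert att.history is None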
mrknow/filmkodi
plugin.video.mrknow/mylib/pydevd_attach_to_process/winappdbg/module.py
Module.clear
python
def clear(self): try: self.set_process(None) finally: self.close_handle()
Clears the resources held by this object.
https://github.com/mrknow/filmkodi/blob/0162cde9ae25ddbf4a69330948714833ff2f78c9/plugin.video.mrknow/mylib/pydevd_attach_to_process/winappdbg/module.py#L437-L444
from __future__ import with_statement __revision__ = "$Id$" __all__ = ['Module', 'DebugSymbolsWarning'] import sys from winappdbg import win32 from winappdbg import compat from winappdbg.textio import HexInput, HexDump from winappdbg.util import PathOperations Process = None import os import warnings import traceback class DebugSymbolsWarning (UserWarning): class Module (object): unknown = '<unknown>' class _SymbolEnumerator (object): def __init__(self, undecorate = False): self.symbols = list() self.undecorate = undecorate def __call__(self, SymbolName, SymbolAddress, SymbolSize, UserContext): if self.undecorate: try: SymbolName = win32.UnDecorateSymbolName(SymbolName) except Exception: pass self.symbols.append( (SymbolName, SymbolAddress, SymbolSize) ) return win32.TRUE def __init__(self, lpBaseOfDll, hFile = None, fileName = None, SizeOfImage = None, EntryPoint = None, process = None): self.lpBaseOfDll = lpBaseOfDll self.fileName = fileName self.SizeOfImage = SizeOfImage self.EntryPoint = EntryPoint self.__symbols = list() self.set_handle(hFile) self.set_process(process) def get_handle(self): return self.__hFile def set_handle(self, hFile): if hFile == win32.INVALID_HANDLE_VALUE: hFile = None self.__hFile = hFile hFile = property(get_handle, set_handle, doc="") def get_process(self): return self.__process def set_process(self, process = None): if process is None: self.__process = None else: global Process if Process is None: from winappdbg.process import Process if not isinstance(process, Process): msg = "Parent process must be a Process instance, " msg += "got %s instead" % type(process) raise TypeError(msg) self.__process = process process = property(get_process, set_process, doc="") def get_pid(self): process = self.get_process() if process is not None: return process.get_pid() def get_base(self): return self.lpBaseOfDll def get_size(self): if not self.SizeOfImage: self.__get_size_and_entry_point() return self.SizeOfImage def get_entry_point(self): if not self.EntryPoint: self.__get_size_and_entry_point() return self.EntryPoint def __get_size_and_entry_point(self): process = self.get_process() if process: try: handle = process.get_handle( win32.PROCESS_VM_READ | win32.PROCESS_QUERY_INFORMATION ) base = self.get_base() mi = win32.GetModuleInformation(handle, base) self.SizeOfImage = mi.SizeOfImage self.EntryPoint = mi.EntryPoint except WindowsError: e = sys.exc_info()[1] warnings.warn( "Cannot get size and entry point of module %s, reason: %s" % (self.get_name(), e.strerror), RuntimeWarning) def get_filename(self): if self.fileName is None: if self.hFile not in (None, win32.INVALID_HANDLE_VALUE): fileName = self.hFile.get_filename() if fileName: fileName = PathOperations.native_to_win32_pathname(fileName) self.fileName = fileName return self.fileName def __filename_to_modname(self, pathname): filename = PathOperations.pathname_to_filename(pathname) if filename: filename = filename.lower() filepart, extpart = PathOperations.split_extension(filename) if filepart and extpart: modName = filepart else: modName = filename else: modName = pathname return modName def get_name(self): pathname = self.get_filename() if pathname: modName = self.__filename_to_modname(pathname) if isinstance(modName, compat.unicode): try: modName = modName.encode('cp1252') except UnicodeEncodeError: e = sys.exc_info()[1] warnings.warn(str(e)) else: modName = "0x%x" % self.get_base() return modName def match_name(self, name): my_name = self.get_name().lower() if name.lower() == my_name: return True try: base = 
HexInput.integer(name) except ValueError: base = None if base is not None and base == self.get_base(): return True modName = self.__filename_to_modname(name) if modName.lower() == my_name: return True return False def open_handle(self): if not self.get_filename(): msg = "Cannot retrieve filename for module at %s" msg = msg % HexDump.address( self.get_base() ) raise Exception(msg) hFile = win32.CreateFile(self.get_filename(), dwShareMode = win32.FILE_SHARE_READ, dwCreationDisposition = win32.OPEN_EXISTING) if not hasattr(self.hFile, '__del__'): self.close_handle() self.hFile = hFile def close_handle(self): try: if hasattr(self.hFile, 'close'): self.hFile.close() elif self.hFile not in (None, win32.INVALID_HANDLE_VALUE): win32.CloseHandle(self.hFile) finally: self.hFile = None def get_handle(self): if self.hFile in (None, win32.INVALID_HANDLE_VALUE): self.open_handle() return self.hFile
Apache License 2.0
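A hedged sketch of the clear() contract shown above (Windows-only in practice, since winappdbg wraps Win32 APIs; the base address is an arbitrary illustrative value and the import path is assumed):

from winappdbg.module import Module   # assumed import path

mod = Module(lpBaseOfDll=0x10000000)   # no file handle, no parent process
mod.clear()                            # drops the process reference, then closes the handle
assert mod.get_process() is None
assert mod.get_base() == 0x10000000    # the base address itself is untouched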
scalyr/scalyr-agent-2
scalyr_agent/configuration.py
Configuration.print_useful_settings
python
def print_useful_settings(self, other_config=None): options = [ "verify_server_certificate", "ca_cert_path", "compression_type", "compression_level", "pipeline_threshold", "max_send_rate_enforcement", "disable_max_send_rate_enforcement_overrides", "min_allowed_request_size", "max_allowed_request_size", "min_request_spacing_interval", "max_request_spacing_interval", "read_page_size", "max_line_size", "internal_parse_max_line_size", "line_completion_wait_time", "max_log_offset_size", "max_existing_log_offset_size", "json_library", "use_multiprocess_workers", "default_sessions_per_worker", "default_worker_session_status_message_interval", "enable_worker_session_process_metrics_gather", "sanitized_worker_configs", ] other_options = None if other_config is not None: other_options = {} for option in options: other_options[option] = getattr(other_config, option, None) first = True for option in options: value = getattr(self, option, None) print_value = False if other_config is None: print_value = True elif ( other_options is not None and option in other_options and other_options[option] != value ): print_value = True if option == "json_library" and value == "auto": json_lib = scalyr_util.get_json_lib() value = "%s (%s)" % (value, json_lib) if print_value: if first: self.__logger.info("Configuration settings") first = False if isinstance(value, (list, dict)): value = six.text_type(value).replace("u'", "'") self.__logger.info("\t%s: %s" % (option, value)) win32_max_open_fds_previous_value = getattr( other_config, "win32_max_open_fds", None ) win32_max_open_fds_current_value = getattr(self, "win32_max_open_fds", None) if ( sys.platform.startswith("win") and win32file and ( win32_max_open_fds_current_value != win32_max_open_fds_previous_value or other_config is None ) ): try: win32_max_open_fds_actual_value = win32file._getmaxstdio() except Exception: win32_max_open_fds_actual_value = "unknown" if first: self.__logger.info("Configuration settings") self.__logger.info( "\twin32_max_open_fds(maxstdio): %s (%s)" % (win32_max_open_fds_current_value, win32_max_open_fds_actual_value) ) if self.debug_level >= 5: try: raw_config = self.__get_sanitized_raw_config() self.__logger.info("Raw config value: %s" % (json.dumps(raw_config))) except Exception: pass
Prints various useful configuration settings to the agent log, so there is a record in the log of the settings currently in use. @param other_config: Another Configuration object. If not None, this function only prints the configuration options whose values differ between the two objects.
https://github.com/scalyr/scalyr-agent-2/blob/6d32b861889078f044c9ab3f1f7157f2c89ba04a/scalyr_agent/configuration.py#L651-L763
from __future__ import unicode_literals from __future__ import absolute_import __author__ = "[email protected]" if False: from typing import Tuple from typing import Dict from typing import List import os import re import sys import socket import time import logging import copy import json import stat import platform import six import six.moves.urllib.parse from six.moves import range try: import win32file except ImportError: win32file = None import scalyr_agent.util as scalyr_util from scalyr_agent.json_lib import JsonConversionException, JsonMissingFieldException from scalyr_agent.json_lib.objects import ( JsonObject, JsonArray, ArrayOfStrings, SpaceAndCommaSeparatedArrayOfStrings, ) from scalyr_agent.monitor_utils.blocking_rate_limiter import BlockingRateLimiter from scalyr_agent.config_util import BadConfiguration, get_config_from_env from scalyr_agent.__scalyr__ import get_install_root from scalyr_agent.compat import os_environ_unicode from scalyr_agent import compat FILE_WRONG_OWNER_ERROR_MSG = """ File \"%s\" is not readable by the current user (%s). You need to make sure that the file is owned by the same account which is used to run the agent. Original error: %s """.strip() MASKED_CONFIG_ITEM_VALUE = "********** MASKED **********" AGENT_WORKER_SESSION_LOG_NAME_PREFIX = "agent-worker-session-" class Configuration(object): DEFAULT_K8S_IGNORE_NAMESPACES = ["kube-system"] DEFAULT_K8S_INCLUDE_NAMESPACES = ["*"] def __init__( self, file_path, default_paths, logger, extra_config_dir=None, log_warnings=True ): self._environment_aware_map = {} self.__file_path = os.path.abspath(file_path) self.__additional_paths = [] self.__config = None self.__read_time = None self.__last_error = None self.__log_configs = [] self.__journald_log_configs = [] self.__k8s_log_configs = [] self.__monitor_configs = [] self.__worker_configs = [] self.__default_paths = default_paths self.max_retry_time = 15 * 60 self.max_allowed_checkpoint_age = 15 * 60 self.__extra_config_directory = extra_config_dir self.__log_warnings = log_warnings self.__logger = logger def parse(self): self.__read_time = time.time() try: try: self.__config = scalyr_util.read_config_file_as_json(self.__file_path) except Exception as e: msg = str(e).lower() if ( "file is not readable" in msg or "error reading" in msg or "failed while reading" ): from scalyr_agent.platform_controller import PlatformController platform_controller = PlatformController.new_platform() current_user = platform_controller.get_current_user() msg = FILE_WRONG_OWNER_ERROR_MSG % ( self.__file_path, current_user, six.text_type(e), ) raise BadConfiguration(msg, None, "fileParseError") raise BadConfiguration(six.text_type(e), None, "fileParseError") self.__perform_substitutions(self.__config) self._check_config_file_permissions_and_warn(self.__file_path) already_seen = {} for k in self.__config.keys(): already_seen[k] = self.__file_path self.__verify_main_config_and_apply_defaults( self.__config, self.__file_path ) api_key, api_config_file = self.__check_field( "api_key", self.__config, self.__file_path ) scalyr_server, scalyr_server_config_file = self.__check_field( "scalyr_server", self.__config, self.__file_path ) self.__verify_logs_and_monitors_configs_and_apply_defaults( self.__config, self.__file_path ) allowed_multiple_keys = ( "import_vars", "logs", "journald_logs", "k8s_logs", "monitors", "server_attributes", "workers", ) allowed_multiple_keys_deprecated_synonyms = {"workers": ["api_keys"]} extra_config = self.__list_files(self.config_directory) 
extra_config.extend(self.__list_files(self.extra_config_directory)) for fp in extra_config: self.__additional_paths.append(fp) content = scalyr_util.read_config_file_as_json(fp) if not isinstance(content, (dict, JsonObject)): raise BadConfiguration( 'Invalid content inside configuration fragment file "%s". ' "Expected JsonObject (dictionary), got %s." % (fp, type(content).__name__), "multiple", "invalidConfigFragmentType", ) for k, v in list(content.items()): for ( key, key_synonyms, ) in allowed_multiple_keys_deprecated_synonyms.items(): if k in key_synonyms: content[key] = v del content[k] break for k in content.keys(): if k not in allowed_multiple_keys: if k in already_seen: self.__last_error = BadConfiguration( 'Configuration fragment file "%s" redefines the config key "%s", first seen in "%s". ' "The only config items that can be defined in multiple config files are: %s." % (fp, k, already_seen[k], allowed_multiple_keys), k, "multipleKeys", ) raise self.__last_error else: already_seen[k] = fp self._check_config_file_permissions_and_warn(fp) self.__perform_substitutions(content) self.__verify_main_config(content, self.__file_path) self.__verify_logs_and_monitors_configs_and_apply_defaults(content, fp) for (key, value) in six.iteritems(content): if key not in allowed_multiple_keys: self.__config.put(key, value) self.__add_elements_from_array("logs", content, self.__config) self.__add_elements_from_array("journald_logs", content, self.__config) self.__add_elements_from_array("k8s_logs", content, self.__config) self.__add_elements_from_array("monitors", content, self.__config) self.__add_elements_from_array( "workers", content, self.__config, deprecated_names=["api_keys"] ) self.__merge_server_attributes(fp, content, self.__config) self.__set_api_key(self.__config, api_key) if scalyr_server is not None: self.__config.put("scalyr_server", scalyr_server) self.__verify_or_set_optional_string( self.__config, "scalyr_server", "https://agent.scalyr.com", "configuration file %s" % self.__file_path, env_name="SCALYR_SERVER", ) self.__config["raw_scalyr_server"] = self.__config["scalyr_server"] if not self.__config["allow_http"]: server = self.__config["scalyr_server"].strip() https_server = server parts = six.moves.urllib.parse.urlparse(server) scheme = parts[0] if not scheme: https_server = "https://" + server elif scheme == "http": https_server = re.sub("^http://", "https://", server) if https_server != server: self.__config["scalyr_server"] = https_server if ( not self.__config["disable_max_send_rate_enforcement_overrides"] and not self.__config["max_send_rate_enforcement"] == "legacy" ): self._warn_of_override_due_to_rate_enforcement( "max_allowed_request_size", 1024 * 1024 ) self._warn_of_override_due_to_rate_enforcement( "pipeline_threshold", 1.1 ) self._warn_of_override_due_to_rate_enforcement( "min_request_spacing_interval", 1.0 ) self._warn_of_override_due_to_rate_enforcement( "max_request_spacing_interval", 5.0 ) self._warn_of_override_due_to_rate_enforcement( "max_log_offset_size", 5 * 1024 * 1024 ) self._warn_of_override_due_to_rate_enforcement( "max_existing_log_offset_size", 100 * 1024 * 1024 ) self.__config["max_allowed_request_size"] = 5900000 self.__config["pipeline_threshold"] = 0 self.__config["min_request_spacing_interval"] = 0.0 self.__config["max_request_spacing_interval"] = 5.0 self.__config["max_log_offset_size"] = 200000000 self.__config["max_existing_log_offset_size"] = 200000000 if ( self.__config["max_send_rate_enforcement"] != "unlimited" and 
self.__config["max_send_rate_enforcement"] != "legacy" ): try: self.__config[ "parsed_max_send_rate_enforcement" ] = scalyr_util.parse_data_rate_string( self.__config["max_send_rate_enforcement"] ) except ValueError as e: raise BadConfiguration( six.text_type(e), "max_send_rate_enforcement", "notDataRate" ) if "serverHost" not in self.server_attributes: self.__config["server_attributes"][ "serverHost" ] = self.__get_default_hostname() agent_log = None worker_session_agent_logs = None if self.implicit_agent_log_collection: config = JsonObject( path="agent.log", parser="scalyrAgentLog", ) worker_session_logs_config = JsonObject( path="%s*.log" % AGENT_WORKER_SESSION_LOG_NAME_PREFIX, exclude=JsonArray( "*%s*_debug.log" % AGENT_WORKER_SESSION_LOG_NAME_PREFIX ), parser="scalyrAgentLog", ) self.__verify_log_entry_and_set_defaults( config, description="implicit rule" ) self.__verify_log_entry_and_set_defaults( worker_session_logs_config, description="implicit rule" ) agent_log = config worker_session_agent_logs = worker_session_logs_config self.__log_configs = list(self.__config.get_json_array("logs")) if agent_log is not None: self.__log_configs.append(agent_log) if worker_session_agent_logs is not None: self.__log_configs.append(worker_session_agent_logs) self.__journald_log_configs = list( self.__config.get_json_array("journald_logs") ) self.__k8s_log_configs = list(self.__config.get_json_array("k8s_logs")) if self.enable_profiling: profile_config = JsonObject( path=self.profile_log_name, copy_from_start=True, staleness_threshold_secs=20 * 60, parser="scalyrAgentProfiling", ) self.__verify_log_entry_and_set_defaults( profile_config, description="CPU profile log config" ) self.__log_configs.append(profile_config) self.__monitor_configs = list(self.__config.get_json_array("monitors")) self._check_k8s_logs_config_option_and_warn() self.__verify_workers() self.__verify_and_match_workers_in_logs() self.__worker_configs = list(self.__config.get_json_array("workers")) except BadConfiguration as e: self.__last_error = e raise e def __verify_workers(self): workers = list(self.__config.get_json_array("workers")) unique_worker_ids = {} for i, worker_entry in enumerate(workers): self.__verify_workers_entry_and_set_defaults(worker_entry, entry_index=i) worker_id = worker_entry["id"] if worker_id in unique_worker_ids: raise BadConfiguration( "There are multiple workers with the same '%s' id. Worker id's must remain unique." % worker_id, "workers", "workerIdDuplication", ) else: unique_worker_ids[worker_id] = worker_entry default_worker_entry = unique_worker_ids.get("default") if default_worker_entry is None: default_worker_entry = JsonObject(api_key=self.api_key, id="default") self.__verify_workers_entry_and_set_defaults(default_worker_entry) workers.insert(0, default_worker_entry) self.__config.put("workers", JsonArray(*workers)) def __verify_and_match_workers_in_logs(self): worker_ids = set() for worker_config in self.__config.get_json_array("workers"): worker_ids.add(worker_config["id"]) log_config_lists = [ self.__log_configs, self.__k8s_log_configs, self.__journald_log_configs, ] for log_config_list in log_config_lists: for log_file_config in log_config_list: worker_id = log_file_config.get("worker_id", none_if_missing=True) if worker_id is None: log_file_config["worker_id"] = "default" else: if worker_id not in worker_ids: valid_worker_ids = ", ".join(sorted(worker_ids)) raise BadConfiguration( "The log entry '%s' refers to a non-existing worker with id '%s'. Valid worker ids: %s." 
% ( six.text_type(log_file_config), worker_id, valid_worker_ids, ), "logs", "invalidWorkerReference", ) def _check_config_file_permissions_and_warn(self, file_path): if not self.__log_warnings: return None if not os.path.isfile(file_path) or not self.__logger: return None st_mode = os.stat(file_path).st_mode if bool(st_mode & stat.S_IROTH) or bool(st_mode & stat.S_IWOTH): file_permissions = str(oct(st_mode)[4:]) if file_permissions.startswith("0") and len(file_permissions) == 4: file_permissions = file_permissions[1:] limit_key = "config-permissions-warn-%s" % (file_path) self.__logger.warn( "Config file %s is readable or writable by others (permissions=%s). Config " "files can " "contain secrets so you are strongly encouraged to change the config " "file permissions so it's not readable by others." % (file_path, file_permissions), limit_once_per_x_secs=86400, limit_key=limit_key, ) def _check_k8s_logs_config_option_and_warn(self): if ( self.__k8s_log_configs and not self._is_kubernetes_monitor_configured() and self.__log_warnings ): self.__logger.warn( '"k8s_logs" config options is defined, but Kubernetes monitor is ' "not configured / enabled. That config option applies to " "Kubernetes monitor so for it to have an affect, Kubernetes " "monitor needs to be enabled and configured", limit_once_per_x_secs=86400, limit_key="k8s_logs_k8s_monitor_not_enabled", ) def _is_kubernetes_monitor_configured(self): monitor_configs = self.monitor_configs or [] for monitor_config in monitor_configs: if ( monitor_config.get("module", "") == "scalyr_agent.builtin_monitors.kubernetes_monitor" ): return True return False def _warn_of_override_due_to_rate_enforcement(self, config_option, default): if self.__log_warnings and self.__config[config_option] != default: self.__logger.warn( "Configured option %s is being overridden due to max_send_rate_enforcement setting." % config_option, limit_once_per_x_secs=86400, limit_key="max_send_rate_enforcement_override", ) def apply_config(self): if not self.__config: return json_library = self.json_library current_json_library = scalyr_util.get_json_lib() if json_library != "auto" and json_library != current_json_library: self.__logger.debug( 'Changing JSON library from "%s" to "%s"' % (current_json_library, json_library) ) scalyr_util.set_json_lib(json_library) self.__apply_win32_global_config_options() def __apply_win32_global_config_options(self): if not sys.platform.startswith("win"): return if not win32file: return None max_open_fds = self.win32_max_open_fds current_max_open_fds = win32file._getmaxstdio() if (max_open_fds and current_max_open_fds) and ( max_open_fds != current_max_open_fds ): self.__logger.debug( 'Changing limit for max open fds (maxstdio) from "%s" to "%s"' % (current_max_open_fds, max_open_fds) ) try: win32file._setmaxstdio(max_open_fds) except Exception: self.__logger.exception("Failed to change the value of maxstdio")
Apache License 2.0
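A usage sketch for print_useful_settings (config and previous_config are hypothetical Configuration instances built elsewhere by the agent):

config.print_useful_settings()                                # log the full list of settings once
config.print_useful_settings(other_config=previous_config)   # log only the settings that changed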
microsoft/applicationinsights-python
applicationinsights/channel/SynchronousQueue.py
SynchronousQueue.flush
python
def flush(self): local_sender = self.sender if not local_sender: return while True: data = [] while len(data) < local_sender.send_buffer_size: item = self.get() if not item: break data.append(item) if len(data) == 0: break local_sender.send(data)
Flushes the current queue by calling :func:`sender`'s :func:`send` method.
https://github.com/microsoft/applicationinsights-python/blob/4b6eb85f314b0a0d9ee41fbaa6b8dcd9f569a64d/applicationinsights/channel/SynchronousQueue.py#L15-L31
from .QueueBase import QueueBase class SynchronousQueue(QueueBase):
MIT License
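A hedged sketch of flush() in isolation, using a stand-in sender that provides only the two members flush() touches (send_buffer_size and a send() method). This assumes QueueBase accepts any such object; in the real SDK the queue is paired with one of its own sender classes:

from applicationinsights.channel import SynchronousQueue

class EchoSender(object):
    send_buffer_size = 2            # flush() batches items in groups of this size
    def send(self, items):
        print('sending %d item(s)' % len(items))

queue = SynchronousQueue(EchoSender())
for item in ('t-1', 't-2', 't-3'):
    queue.put(item)
queue.flush()   # drains the queue: two batches, of 2 and 1 items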
helixyte/everest
everest/traversal.py
DataTraversalProxy.get_attribute_proxy
python
def get_attribute_proxy(self, attribute): attr_val = self._get_relation_attribute_value(attribute) if attr_val is None: prx = None else: if not self._accessor is None: acc = self._accessor.get_root_aggregate(attribute.attr_type) else: acc = None reg = get_current_registry() prx_fac = reg.getUtility(IDataTraversalProxyFactory) prx = prx_fac.make_proxy(attr_val, acc, self.relationship_direction, self.relation_operation, options= self._get_proxy_options(attribute)) return prx
Returns a traversal proxy (cardinality ONE) or an iterable sequence data traversal proxy (cardinality MANY) for the specified relation attribute value of the proxied data. :raises ValueError: If :param:`attribute` is a terminal attribute.
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/traversal.py#L163-L188
from collections import MutableSequence from collections import MutableSet from logging import getLogger as get_logger from pyramid.compat import itervalues_ from pyramid.threadlocal import get_current_registry from pyramid.traversal import ResourceTreeTraverser from everest.attributes import get_attribute_cardinality from everest.attributes import is_terminal_attribute from everest.constants import CARDINALITY_CONSTANTS from everest.constants import RELATIONSHIP_DIRECTIONS from everest.constants import RELATION_OPERATIONS from everest.constants import RESOURCE_ATTRIBUTE_KINDS from everest.constants import RESOURCE_KINDS from everest.interfaces import IDataTraversalProxyAdapter from everest.interfaces import IDataTraversalProxyFactory from everest.resources.interfaces import IResource from everest.traversalpath import TraversalPath from zope.interface import implementer __docformat__ = 'reStructuredText en' __all__ = ['ConvertingDataTraversalProxyMixin', 'DataSequenceTraversalProxy', 'DataTraversalProxy', 'DataTraversalProxyAdapter', 'DataTraversalProxyFactory', 'SourceTargetDataTreeTraverser', 'SuffixResourceTraverser', ] class SuffixResourceTraverser(ResourceTreeTraverser): def __call__(self, request): system = ResourceTreeTraverser.__call__(self, request) context = system['context'] view_name = system['view_name'] if IResource.providedBy(context) and '.' in view_name: rc_name, repr_name = view_name.split('.') try: child_rc = context[rc_name] except KeyError: pass else: if IResource.providedBy(child_rc): system['context'] = child_rc system['view_name'] = repr_name return system class DataSequenceTraversalProxy(object): proxy_for = RESOURCE_KINDS.COLLECTION def __init__(self, proxies, relation_operation): self.__proxies = proxies self.relation_operation = relation_operation def __iter__(self): return iter(self.__proxies) class DataTraversalProxy(object): proxy_for = RESOURCE_KINDS.MEMBER def __init__(self, data, accessor, relationship_direction, relation_operation): super(DataTraversalProxy, self).__init__() self.relationship_direction = relationship_direction self.relation_operation = relation_operation self._data = data self._accessor = accessor def get_relationship_attributes(self): for attr in self._attribute_iterator(): if not is_terminal_attribute(attr): yield attr def get_matching(self, source_id): value = self._accessor.get_by_id(source_id) if not value is None: reg = get_current_registry() prx_fac = reg.getUtility(IDataTraversalProxyFactory) prx = prx_fac.make_proxy(value, self._accessor, self.relationship_direction, self.relation_operation) else: prx = None return prx @property def update_attribute_value_items(self): for attr in self._attribute_iterator(): if attr.kind != RESOURCE_ATTRIBUTE_KINDS.COLLECTION: try: attr_val = self._get_proxied_attribute_value(attr) except AttributeError: continue else: yield (attr, attr_val)
MIT License
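A heavily hedged fragment showing how get_attribute_proxy might be driven (prx stands for an already-constructed DataTraversalProxy; building one requires everest's registry and accessor setup, which is not shown here):

# `prx` is assumed to exist: a DataTraversalProxy over some source data.
for rel_attr in prx.get_relationship_attributes():
    nested = prx.get_attribute_proxy(rel_attr)
    if nested is None:
        continue    # the relation is not set on the proxied data
    # `nested` is another proxy (or a DataSequenceTraversalProxy for
    # cardinality MANY) and can be walked recursively in the same way.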
workos-inc/workos-python
workos/utils/request.py
RequestHelper.set_base_api_url
python
def set_base_api_url(self, base_api_url): self.base_api_url = "{}{{}}".format(base_api_url)
Creates an accessible template for constructing the URL for an API request. Args: base_api_url (str): Base URL for API requests (should end with a /)
https://github.com/workos-inc/workos-python/blob/722a468a7deb2999c341a01f430afe8ac6dd26f8/workos/utils/request.py#L27-L33
import requests import workos from workos.exceptions import ( AuthorizationException, AuthenticationException, BadRequestException, ServerException, ) BASE_HEADERS = { "User-Agent": "WorkOS Python/{}".format(workos.__version__), } RESPONSE_TYPE_CODE = "code" REQUEST_METHOD_DELETE = "delete" REQUEST_METHOD_GET = "get" REQUEST_METHOD_POST = "post" REQUEST_METHOD_PUT = "put" class RequestHelper(object): def __init__(self): self.set_base_api_url(workos.base_api_url)
MIT License
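A short usage sketch for set_base_api_url (the endpoint path below is only illustrative):

from workos.utils.request import RequestHelper

request_helper = RequestHelper()
request_helper.set_base_api_url('https://api.workos.com/')    # note the trailing slash
url = request_helper.base_api_url.format('sso/authorize')     # -> 'https://api.workos.com/sso/authorize'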