repository_name (string, 7-107 chars) | function_path (string, 4-190 chars) | function_identifier (string, 1-236 chars) | language (string, 1 class) | function (string, 9-647k chars) | docstring (string, 5-488k chars) | function_url (string, 71-285 chars) | context (string, 0-2.51M chars) | license (string, 5 classes)
---|---|---|---|---|---|---|---|---
lostindarkmath/pedantic-python-decorators | pedantic/type_checking_logic/check_types.py | _get_base_generic | python | def _get_base_generic(cls: Any) -> Any:
origin = cls.__origin__ if hasattr(cls, '__origin__') else None
name = cls._name if hasattr(cls, '_name') else None
if name is not None:
return getattr(typing, name)
elif origin is not None:
return origin
return cls | >>> from typing import List, Union, Tuple, Callable, Dict, Set
>>> _get_base_generic(List)
typing.List
>>> _get_base_generic(List[float])
typing.List
>>> _get_base_generic(List[List[float]])
typing.List
>>> _get_base_generic(List[Union[int, float]])
typing.List
>>> _get_base_generic(Tuple)
typing.Tuple
>>> _get_base_generic(Tuple[float, int])
typing.Tuple
>>> _get_base_generic(Tuple[Union[int, float], str])
typing.Tuple
>>> _get_base_generic(Callable[..., int])
typing.Callable
>>> _get_base_generic(Callable[[Union[int, str], float], int])
typing.Callable
>>> _get_base_generic(Dict)
typing.Dict
>>> _get_base_generic(Dict[str, str])
typing.Dict
>>> _get_base_generic(Union)
typing.Union
>>> _get_base_generic(Union[float, int, str])
typing.Union
>>> _get_base_generic(Set)
typing.Set
>>> _get_base_generic(Set[int])
typing.Set | https://github.com/lostindarkmath/pedantic-python-decorators/blob/66865a958a36440b48e790f22ea42d2beb725b16/pedantic/type_checking_logic/check_types.py#L413-L455 | import inspect
import typing
from io import BytesIO, StringIO, BufferedWriter, TextIOWrapper
from typing import Any, Dict, Iterable, ItemsView, Callable, Union, Optional, Tuple, Mapping, TypeVar, NewType
import collections
import sys
from pedantic.constants import TypeVar as TypeVar_
from pedantic.exceptions import PedanticTypeCheckException, PedanticTypeVarMismatchException, PedanticException
def _assert_value_matches_type(value: Any,
type_: Any,
err: str,
type_vars: Dict[TypeVar_, Any],
key: Optional[str] = None,
msg: Optional[str] = None
) -> None:
if not _check_type(value=value, type_=type_, err=err, type_vars=type_vars):
t = type(value)
value = f'{key}={value}' if key is not None else str(value)
if not msg:
msg = f'{err}Type hint is incorrect: Argument {value} of type {t} does not match expected type {type_}.'
raise PedanticTypeCheckException(msg)
def _check_type(value: Any, type_: Any, err: str, type_vars: Dict[TypeVar_, Any]) -> bool:
if type_ is None:
return value == type_
elif isinstance(type_, str):
class_name = value.__class__.__name__
base_class_name = value.__class__.__base__.__name__
return class_name == type_ or base_class_name == type_
if isinstance(type_, tuple):
raise PedanticTypeCheckException(f'{err}Use "Tuple[]" instead of "{type_}" as type hint.')
if isinstance(type_, list):
raise PedanticTypeCheckException(f'{err}Use "List[]" instead of "{type_}" as type hint.')
if type_ is tuple:
raise PedanticTypeCheckException(f'{err}Use "Tuple[]" instead of "tuple" as type hint.')
if type_ is list:
raise PedanticTypeCheckException(f'{err}Use "List[]" instead of "list" as type hint.')
if type_ is dict:
raise PedanticTypeCheckException(f'{err}Use "Dict[]" instead of "dict" as type hint.')
if type_ is set:
raise PedanticTypeCheckException(f'{err}Use "Set[]" instead of "set" as type hint.')
if type_ is frozenset:
raise PedanticTypeCheckException(f'{err}Use "FrozenSet[]" instead of "frozenset" as type hint.')
if type_ is type:
raise PedanticTypeCheckException(f'{err}Use "Type[]" instead of "type" as type hint.')
try:
return _is_instance(obj=value, type_=type_, type_vars=type_vars)
except PedanticTypeCheckException as ex:
raise PedanticTypeCheckException(f'{err} {ex}')
except PedanticTypeVarMismatchException as ex:
raise PedanticTypeVarMismatchException(f'{err} {ex}')
except (AttributeError, Exception) as ex:
raise PedanticTypeCheckException(
f'{err}An error occurred during type hint checking. Value: {value} Annotation: '
f'{type_} Mostly this is caused by an incorrect type annotation. Details: {ex} ')
def _is_instance(obj: Any, type_: Any, type_vars: Dict[TypeVar_, Any]) -> bool:
if not _has_required_type_arguments(type_):
raise PedanticTypeCheckException(
f'The type annotation "{type_}" misses some type arguments e.g. '
f'"typing.Tuple[Any, ...]" or "typing.Callable[..., str]".')
if type_.__module__ == 'typing':
if _is_generic(type_):
origin = _get_base_generic(type_)
else:
origin = type_
name = _get_name(origin)
if name in _SPECIAL_INSTANCE_CHECKERS:
validator = _SPECIAL_INSTANCE_CHECKERS[name]
return validator(obj, type_, type_vars)
if type_ == typing.BinaryIO:
return isinstance(obj, (BytesIO, BufferedWriter))
elif type_ == typing.TextIO:
return isinstance(obj, (StringIO, TextIOWrapper))
if _is_generic(type_):
python_type = type_.__origin__
if not isinstance(obj, python_type):
return False
base = _get_base_generic(type_)
type_args = _get_type_arguments(cls=type_)
if base in _ORIGIN_TYPE_CHECKERS:
validator = _ORIGIN_TYPE_CHECKERS[base]
return validator(obj, type_args, type_vars)
assert base.__base__ == typing.Generic, f'Unknown base: {base}'
return isinstance(obj, base)
if isinstance(type_, TypeVar):
constraints = type_.__constraints__
if len(constraints) > 0 and type(obj) not in constraints:
return False
if _is_forward_ref(type_=type_.__bound__):
return type(obj).__name__ == type_.__bound__.__forward_arg__
if type_.__bound__ is not None and not isinstance(obj, type_.__bound__):
return False
if type_ in type_vars:
other = type_vars[type_]
if type_.__contravariant__:
if not _is_subtype(sub_type=other, super_type=obj.__class__):
raise PedanticTypeVarMismatchException(
f'For TypeVar {type_} exists a type conflict: value {obj} has type {type(obj)} but TypeVar {type_} '
f'was previously matched to type {other}')
else:
if not _is_instance(obj=obj, type_=other, type_vars=type_vars):
raise PedanticTypeVarMismatchException(
f'For TypeVar {type_} exists a type conflict: value {obj} has type {type(obj)} but TypeVar {type_} '
f'was previously matched to type {other}')
type_vars[type_] = type(obj)
return True
if _is_forward_ref(type_=type_):
return type(obj).__name__ == type_.__forward_arg__
if _is_type_new_type(type_):
return isinstance(obj, type_.__supertype__)
if hasattr(obj, '_asdict'):
if hasattr(type_, '_field_types'):
field_types = type_._field_types
elif hasattr(type_, '__annotations__'):
field_types = type_.__annotations__
else:
return False
if not obj._asdict().keys() == field_types.keys():
return False
return all([_is_instance(obj=obj._asdict()[k], type_=v, type_vars=type_vars) for k, v in field_types.items()])
return isinstance(obj, type_)
def _is_forward_ref(type_: Any) -> bool:
return hasattr(typing, 'ForwardRef') and isinstance(type_, typing.ForwardRef) or hasattr(typing, '_ForwardRef') and isinstance(type_, typing._ForwardRef)
def _is_type_new_type(type_: Any) -> bool:
return type_.__qualname__ == NewType('name', int).__qualname__
def _get_name(cls: Any) -> str:
if hasattr(cls, '_name'):
return cls._name
elif hasattr(cls, '__name__'):
return cls.__name__
else:
return type(cls).__name__[1:]
def _is_generic(cls: Any) -> bool:
if hasattr(typing, '_SpecialGenericAlias') and isinstance(cls, typing._SpecialGenericAlias):
return True
elif hasattr(typing, '_GenericAlias'):
if isinstance(cls, typing._GenericAlias):
return True
if isinstance(cls, typing._SpecialForm):
return cls not in {Any}
elif isinstance(cls, (typing.GenericMeta, typing._Union, typing._Optional, typing._ClassVar)):
return True
return False
def _has_required_type_arguments(cls: Any) -> bool:
base: str = _get_name(cls=cls)
num_type_args = len(_get_type_arguments(cls=cls))
if base in NUM_OF_REQUIRED_TYPE_ARGS_EXACT:
return NUM_OF_REQUIRED_TYPE_ARGS_EXACT[base] == num_type_args
elif base in NUM_OF_REQUIRED_TYPE_ARGS_MIN:
return NUM_OF_REQUIRED_TYPE_ARGS_MIN[base] <= num_type_args
return True
def _get_type_arguments(cls: Any) -> Tuple[Any, ...]:
result = ()
if hasattr(cls, '__args__'):
result = cls.__args__
origin = _get_base_generic(cls=cls)
if origin != cls and ((origin is Callable) or (origin is collections.abc.Callable)) and result[0] is not Ellipsis:
result = (list(result[:-1]), result[-1])
result = result or ()
return result if '[' in str(cls) else () | Apache License 2.0 |
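For comparison, a minimal sketch (assuming Python 3.8 or newer; not taken from the row above) of the standard-library introspection helpers that cover similar ground: `typing.get_origin` resolves to the concrete runtime class, whereas `_get_base_generic` above resolves to the subscriptable `typing` alias such as `typing.List`.

```python
from typing import List, Union, get_origin, get_args

print(get_origin(List[float]))        # <class 'list'>, the runtime class
print(get_args(List[float]))          # (<class 'float'>,)
print(get_origin(Union[int, float]))  # typing.Union
# _get_base_generic(List[float]) above instead returns typing.List,
# the subscriptable alias rather than the concrete builtin.
```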
seung-lab/chunkflow | chunkflow/chunk/base.py | Chunk.ndoffset | python | def ndoffset(self) -> tuple:
if self.ndim == 4:
return (0, *self.voxel_offset)
else:
return self.voxel_offset | Make the voxel offset have the same dimension as the array. | https://github.com/seung-lab/chunkflow/blob/0e032cdf4f2ba104af4f7809ac11df17352384ed/chunkflow/chunk/base.py#L395-L402 | from typing import Union
import os
from numbers import Number
import h5py
import numpy as np
import nrrd
from numpy.core.numerictypes import issubdtype
from numpy.lib.mixins import NDArrayOperatorsMixin
from scipy.ndimage import gaussian_filter
import tifffile
import cc3d
from cloudvolume.lib import yellow, Bbox
from chunkflow.lib.bounding_boxes import BoundingBox
from .validate import validate_by_template_matching
class Chunk(NDArrayOperatorsMixin):
def __init__(self, array: np.ndarray, voxel_offset: tuple = None, voxel_size: tuple = None):
assert isinstance(array, np.ndarray) or isinstance(array, Chunk)
self.array = array
if voxel_offset is None:
if isinstance(array, Chunk):
self.array = array.array
voxel_offset = array.voxel_offset
else:
voxel_offset = (0, 0, 0)
if voxel_offset is not None:
if len(voxel_offset) == 4:
assert voxel_offset[0] == 0
voxel_offset = voxel_offset[1:]
assert len(voxel_offset) == 3
self.voxel_offset = voxel_offset
self.voxel_size = voxel_size
if voxel_size is not None:
assert len(voxel_size) == 3
assert np.alltrue([vs > 0 for vs in voxel_size])
assert array.ndim >= 3 and array.ndim <= 4
_HANDLED_TYPES = (np.ndarray, Number)
@classmethod
def from_array(cls, array: np.ndarray, bbox: BoundingBox, voxel_size: tuple = None):
return cls(array, voxel_offset=bbox.minpt, voxel_size=voxel_size)
@classmethod
def from_bbox(cls, bbox: BoundingBox, dtype: type = np.uint8,
voxel_size: tuple=None, all_zero: bool=False):
assert isinstance(bbox, BoundingBox)
size = bbox.maxpt - bbox.minpt
return cls.create(size=size, dtype=dtype, voxel_offset=bbox.minpt,
voxel_size=voxel_size, all_zero=all_zero)
@classmethod
def create(cls, size: tuple = (64, 64, 64),
dtype: type = np.uint8, voxel_offset: tuple = (0, 0, 0),
voxel_size: tuple = None,
all_zero: bool = False):
if isinstance(dtype, str):
dtype = np.dtype(dtype)
if all_zero:
chunk = np.zeros(size, dtype=dtype)
else:
ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for
n in size[-3:]], indexing='ij')
chunk = np.abs(np.sin(4 * (ix + iy + iz)))
if len(size) == 4:
chunk = np.expand_dims(chunk, axis=0)
chunk = np.repeat(chunk, size[0], axis=0)
if np.dtype(dtype) == np.uint8:
chunk = (chunk * 255).astype( dtype )
elif np.dtype(dtype) == np.uint32:
chunk = (chunk>0.5).astype(dtype)
elif np.issubdtype(dtype, np.floating):
chunk = chunk.astype(dtype)
else:
raise NotImplementedError()
return cls(chunk, voxel_offset=voxel_offset, voxel_size=voxel_size)
def clone(self):
return Chunk(self.array.copy(),
voxel_offset=self.voxel_offset, voxel_size=self.voxel_size)
@classmethod
def from_nrrd(cls, file_name: str, voxel_offset: tuple=None, dtype: str = None,
voxel_size: tuple=None):
arr, _ = nrrd.read(file_name)
if dtype:
arr = arr.astype(dtype)
return cls(arr, voxel_offset=voxel_offset, voxel_size=voxel_size)
def to_nrrd(self, file_name: str=None):
if file_name is None:
file_name = f'{self.bbox.to_filename()}.nrrd'
elif not file_name.endswith('.nrrd'):
file_name += f'_{self.bbox.to_filename()}.nrrd'
print('write chunk to file: ', file_name)
nrrd.write(file_name, self.array)
@classmethod
def from_tif(cls, file_name: str, voxel_offset: tuple=None, dtype: str = None,
voxel_size: tuple=None):
arr = tifffile.imread(file_name)
if dtype:
arr = arr.astype(dtype)
print(f'read tif chunk with size of {arr.shape}, voxel offset: {voxel_offset}, voxel size: {voxel_size}')
return cls(arr, voxel_offset=voxel_offset, voxel_size=voxel_size)
def to_tif(self, file_name: str=None):
if file_name is None:
file_name = f'{self.bbox.to_filename()}.tif'
print('write chunk to file: ', file_name)
if self.array.dtype==np.float32:
print(yellow('transforming data type from float32 to uint8'))
img = self.array*255
img = img.astype( np.uint8 )
else:
img = self.array
tifffile.imwrite(file_name, data=img)
@classmethod
def from_h5(cls, file_name: str,
voxel_offset: tuple=None,
dataset_path: str = None,
voxel_size: tuple = None,
cutout_start: tuple = None,
cutout_stop: tuple = None,
cutout_size: tuple = None,
zero_filling: bool = False,
dtype: str = None):
assert os.path.exists(file_name)
if cutout_start is not None and cutout_size is not None:
cutout_stop = tuple(t+s for t, s in zip(cutout_start, cutout_size))
if not h5py.is_hdf5(file_name):
assert cutout_start is not None
assert cutout_stop is not None
bbox = BoundingBox.from_list([*cutout_start, *cutout_stop])
file_name += f'{bbox.to_filename()}.h5'
if not os.path.exists(file_name) and zero_filling:
assert dtype is not None
print(f'file do not exist, will fill with zero: {file_name}')
return cls.from_bbox(bbox, dtype=dtype, voxel_size=voxel_size, all_zero=True)
with h5py.File(file_name, 'r') as f:
if dataset_path is None:
for key in f.keys():
if 'offset' not in key and 'unique' not in key:
dataset_path = key
break
dset = f[dataset_path]
if voxel_offset is None:
if 'voxel_offset' in f:
voxel_offset = tuple(f['voxel_offset'])
else:
voxel_offset = (0, 0, 0)
if voxel_size is None:
if 'voxel_size' in f:
voxel_size = tuple(f['voxel_size'])
else:
voxel_size = (1, 1, 1)
if cutout_start is None:
cutout_start = voxel_offset
if cutout_size is None:
cutout_size = dset.shape[-3:]
if cutout_stop is None:
cutout_stop = tuple(t+s for t, s in zip(cutout_start, cutout_size))
for c, v in zip(cutout_start, voxel_offset):
assert c >= v, "can only cutout after the global voxel offset."
assert len(cutout_start) == 3
assert len(cutout_stop) == 3
dset = dset[...,
cutout_start[0]-voxel_offset[0]:cutout_stop[0]-voxel_offset[0],
cutout_start[1]-voxel_offset[1]:cutout_stop[1]-voxel_offset[1],
cutout_start[2]-voxel_offset[2]:cutout_stop[2]-voxel_offset[2],
]
print(f"""read from HDF5 file: {file_name} and start with {cutout_start}, \
ends with {cutout_stop}, size is {cutout_size}, voxel size is {voxel_size}.""")
arr = np.asarray(dset)
if arr.dtype == np.dtype('<f4'):
arr = arr.astype('float32')
elif arr.dtype == np.dtype('<f8'):
arr = arr.astype('float64')
print('new chunk voxel offset: {}'.format(cutout_start))
return cls(arr, voxel_offset=cutout_start, voxel_size=voxel_size)
def to_h5(self, file_name: str, with_offset: bool=True,
chunk_size: tuple=(64,64,64),
with_unique: bool= True,
compression="gzip",
voxel_size: tuple = None):
if chunk_size:
assert len(chunk_size) == 3
if not file_name.endswith('.h5'):
file_name += self.bbox.to_filename() + '.h5'
print('write chunk to file: ', file_name)
if os.path.exists(file_name):
print(yellow(f'deleting existing file: {file_name}'))
os.remove(file_name)
with h5py.File(file_name, 'w') as f:
f.create_dataset('/main', data=self.array, chunks=chunk_size, compression=compression)
if voxel_size is None and self.voxel_size is not None:
voxel_size = self.voxel_size
if voxel_size is not None:
f.create_dataset('/voxel_size', data=voxel_size)
if with_offset and self.voxel_offset is not None:
f.create_dataset('/voxel_offset', data=self.voxel_offset)
if with_unique and self.is_segmentation:
unique = np.unique(self.array)
if unique[0]:
unique = unique[1:]
f.create_dataset('/unique_nonzeros', data = unique)
return file_name
def __array__(self):
return self.array
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.get('out', ())
for x in inputs + out:
if not isinstance(x, self._HANDLED_TYPES + (Chunk,)):
return NotImplemented
inputs = tuple(x.array if isinstance(x, Chunk) else x
for x in inputs)
if out:
kwargs['out'] = tuple(
x.array if isinstance(x, Chunk) else x
for x in out)
result = getattr(ufunc, method)(*inputs, **kwargs)
if type(result) is tuple:
return tuple(type(self)(x, voxel_offset=self.voxel_offset, voxel_size=self.voxel_size) for x in result)
elif method == 'at':
return None
elif isinstance(result, Number):
return result
elif isinstance(result, np.ndarray):
return type(self)(result, voxel_offset=self.voxel_offset, voxel_size=self.voxel_size)
else:
return result
def __getitem__(self, index):
return self.array[index]
def __setitem__(self, key, value):
self.array[key] = value
def __repr__(self):
return f'array: {self.array}\n voxel offset: {self.voxel_offset} \n voxel size: {self.voxel_size}'
def __eq__(self, value):
if isinstance(value, type(self)):
return np.array_equal(self.array, value.array) and np.array_equal(
self.voxel_offset, value.voxel_offset)
elif isinstance(value, Number):
return np.all(self.array==value)
elif isinstance(value, np.ndarray):
return np.all(self.array == value)
else:
raise NotImplementedError
def set_properties(self, properties: dict):
if 'voxel_offset' in properties:
self.voxel_offset = properties['voxel_offset']
if 'voxel_size' in properties:
self.voxel_size = properties['voxel_size']
@property
def properties(self) -> dict:
props = dict()
if self.voxel_offset is not None or self.voxel_offset != (0, 0, 0):
props['voxel_offset'] = self.voxel_offset
if self.voxel_size is not None or self.voxel_size != (1, 1, 1):
props['voxel_size'] = self.voxel_size
return props
@property
def slices(self) -> tuple:
return tuple(
slice(o, o + s) for o, s in zip(self.ndoffset, self.shape))
@property
def is_image(self) -> bool:
return self.array.ndim == 3 and self.array.dtype == np.uint8
@property
def is_segmentation(self) -> bool:
return self.array.ndim == 3 and (np.issubdtype(
self.array.dtype, np.integer) or np.issubdtype(
self.dtype, np.bool8)) and self.array.dtype != np.uint8
@property
def is_affinity_map(self) -> bool:
return self.array.ndim == 4 and self.shape[0] == 3 and self.array.dtype == np.float32
@property
def is_probability_map(self) -> bool:
return self.array.ndim == 4 and self.array.dtype == np.float32
@property | Apache License 2.0 |
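A minimal usage sketch for the `ndoffset` property above. The import path is inferred from the row's function_path, and the behaviour shown simply follows the property's own branches (a leading 0 is prepended for the channel axis of 4D arrays), so treat it as an illustration rather than verified chunkflow API.

```python
import numpy as np
from chunkflow.chunk.base import Chunk  # import path inferred from function_path

c3 = Chunk(np.zeros((4, 4, 4), dtype=np.uint8), voxel_offset=(10, 20, 30))
print(c3.ndoffset)   # (10, 20, 30), unchanged for a 3D array

c4 = Chunk(np.zeros((3, 4, 4, 4), dtype=np.float32), voxel_offset=(10, 20, 30))
print(c4.ndoffset)   # (0, 10, 20, 30), zero offset added for the channel axis
```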
twisted/axiom | axiom/tags.py | Catalog.tagNames | python | def tagNames(self):
return self.store.query(_TagName, _TagName.catalog == self).getColumn("name") | Return an iterator of unicode strings - the unique tag names which have
been applied to objects in this catalog. | https://github.com/twisted/axiom/blob/28191ede99287e9a87c1ff561b831f7d80aaa2fe/axiom/tags.py#L83-L88 | from epsilon.extime import Time
from axiom.item import Item
from axiom.attributes import text, reference, integer, AND, timestamp
class Tag(Item):
typeName = 'tag'
schemaVersion = 1
name = text(doc="""
The short string which is being applied as a tag to an Item.
""")
created = timestamp(doc="""
When this tag was applied to the Item to which it applies.
""")
object = reference(doc="""
The Item to which this tag applies.
""")
catalog = reference(doc="""
The L{Catalog} item in which this tag was created.
""")
tagger = reference(doc="""
An optional reference to the Item which is responsible for this tag's
existence.
""")
class _TagName(Item):
typeName = 'tagname'
name = text(doc="""
The short string which uniquely represents this tag.
""", indexed=True)
catalog = reference(doc="""
The L{Catalog} item in which this tag exists.
""")
class Catalog(Item):
typeName = 'tag_catalog'
schemaVersion = 2
tagCount = integer(default=0)
def tag(self, obj, tagName, tagger=None):
if self.store.findFirst(Tag,
AND(Tag.object == obj,
Tag.name == tagName,
Tag.catalog == self)):
return
self.store.findOrCreate(_TagName, name=tagName, catalog=self)
self.tagCount += 1
Tag(store=self.store, object=obj,
name=tagName, catalog=self,
created=Time(), tagger=tagger) | MIT License |
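A usage sketch for the tagging API above. The `Photo` item and its field are invented for illustration; `Store`, `Item`, `text`, `Catalog`, `tag` and `tagNames` are used as they appear in the row, but the snippet has not been run against axiom.

```python
from axiom.store import Store
from axiom.item import Item
from axiom.attributes import text
from axiom.tags import Catalog

class Photo(Item):            # hypothetical item type to be tagged
    title = text()

store = Store()               # a path-less Store is in-memory
catalog = Catalog(store=store)
photo = Photo(store=store, title='beach.jpg')
catalog.tag(photo, 'vacation')
catalog.tag(photo, 'beach')
print(sorted(catalog.tagNames()))   # ['beach', 'vacation']
```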
fredhutch/proxmox-tools | prox/cmdprox.py | ssh_exec | python | def ssh_exec(user, pwd, commands, host):
if not isinstance(commands, list):
print('commands parameter in ssh_exec needs to be a list')
return False
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(
paramiko.AutoAddPolicy())
ssh.connect(host, username=user, password=pwd)
for command in commands:
stdin, stdout, stderr = ssh.exec_command(command)
for line in stdout.readlines():
print(line.strip()) | execute list of commands via ssh | https://github.com/fredhutch/proxmox-tools/blob/cfd4d7333969d3ad8af80f15be56d0d5052fee4e/prox/cmdprox.py#L949-L961 | import sys, os, subprocess, re, platform, getpass, argparse, logging, hostlist
import time, warnings, functools, random, json, requests, paramiko, socket
try:
import easygui
except:
pass
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
try:
from .pyproxmox import *
except:
from pyproxmox import *
logging.basicConfig(level=logging.WARNING)
__app__ = 'Proxmox command line deployment tool'
PROXHOST = os.getenv('PPROXHOST', 'proxa1.fhcrc.org')
REALM = os.getenv('PREALM', 'FHCRC.ORG')
LXCIMAGE = os.getenv('PLXCIMAGE', 'proxnfs:vztmpl/ubuntu-16.04-standard_16.04-1_amd64.tar.gz')
STORLOC = os.getenv('PSTORLOC', 'proxZFS')
STORNET = os.getenv('PSTORNET', 'proxnfs')
USERDB = os.getenv('PUSERDB', 'https://toolbox.fhcrc.org/json/sc_users.json')
EXCLUDEHOSTS = ['proxa5']
CHEFVERSION = '12.19.36'
homedir = os.path.expanduser("~")
def parse_arguments():
parser = argparse.ArgumentParser(prog='prox ',
description='a tool for deploying resources from proxmox ' + '(LXC containers or VMs)')
parser.add_argument( '--debug', '-g', dest='debug', action='store_true', default=False,
help="verbose output for all commands")
subparsers = parser.add_subparsers(dest="subcommand", help='sub-command help')
parser_ssh = subparsers.add_parser('assist', aliases=['gui'],
help='navigate application via GUI (experimental)')
parser_ssh = subparsers.add_parser('ssh', aliases=['connect'],
help='connect to first host via ssh')
parser_ssh.add_argument('hosts', action='store', default=[], nargs='*',
help='hostname(s) of VM/containers (separated by space), ' +
' example: prox ssh host1 host2 host3')
parser_list = subparsers.add_parser('list', aliases=['ls', 'show'],
help='list hosts(s) with status, size and contact (optional)')
parser_list.add_argument( '--all', '-a', dest='all', action='store_true', default=False,
help="show all hosts (LXC and KVM)")
parser_list.add_argument( '--contacts', '-c', dest='contacts', action='store_true', default=False,
help="show the technical contact / owner of the machine")
parser_list.add_argument( '--snapshots', '-s', dest='listsnap', action='store_true', default=False,
help="list machine snapshots that can be rolled back")
parser_list.add_argument('hosts', action='store', default=[], nargs='*',
help='hostname(s) of VM/containers (separated by space), ' +
' example: prox modify host1 host2 host3')
parser_start = subparsers.add_parser('start', aliases=['run'],
help='start the host(s)')
parser_start.add_argument('hosts', action='store', default=[], nargs='*',
help='hostname(s) of VM/containers (separated by space), ' +
' example: prox start host1 host2 host3')
parser_stop = subparsers.add_parser('stop', aliases=['shutdown'],
help='stop the host(s)')
parser_stop.add_argument('hosts', action='store', default=[], nargs='*',
help='hostname(s) of VM/containers (separated by space), ' +
' example: prox stop host1 host2 host3')
parser_destroy = subparsers.add_parser('destroy', aliases=['delete', 'rm'],
help='delete the hosts(s) from disk')
parser_destroy.add_argument('hosts', action='store', default=[], nargs='*',
help='hostname(s) of VM/containers (separated by space), ' +
' example: prox destroy host1 host2 host3')
parser_modify = subparsers.add_parser('modify', aliases=['mod'],
help='modify the config of one or more hosts')
parser_modify.add_argument('--mem', '-m', dest='mem', action='store', default='0',
help='Memory allocation for the machine, e.g. 4G or 512')
parser_modify.add_argument('--disk', '-d', dest='disk', action='store', default='0',
help='disk storage allocated to the machine.')
parser_modify.add_argument('--cores', '-c', dest='cores', action='store', default='0',
help='Number of cores to be allocated for the machine.')
parser_modify.add_argument('hosts', action='store', default=[], nargs='*',
help='hostname(s) of VM/containers (separated by space), ' +
' example: prox modify host1 host2 host3')
parser_snap = subparsers.add_parser('snap', aliases=['snapshot'],
help='take a snapshot of the host')
parser_snap.add_argument('--description', '-d', dest='snapdesc', action='store', default='',
help='description of the snapshot')
parser_snap.add_argument('snapname', action='store',
help='name of the snapshot')
parser_snap.add_argument('hosts', action='store', default=[], nargs='*',
help='hostname(s) of VM/containers (separated by space), ' +
' example: prox snap host1 host2 host3')
parser_rollback = subparsers.add_parser('rollback', aliases=['rb'],
help='roll back a snapshot')
parser_rollback.add_argument('snapname', action='store',
help='name of the snapshot')
parser_rollback.add_argument('hosts', action='store', default=[], nargs='*',
help='hostname(s) of VM/containers (separated by space), ' +
' example: prox snap host1 host2 host3')
parser_new = subparsers.add_parser('new', aliases=['create'],
help='create one or more new hosts')
parser_new.add_argument('--runlist', '-r', dest='runlist', action='store', default='',
help='a local shell script file or a command to execute after install')
parser_new.add_argument('--node', '-N', dest='node', action='store', default='',
help='Hostname of Proxmox node that will be used for install')
parser_new.add_argument('--mem', '-m', dest='mem', action='store', default='512',
help='Memory allocation for the machine, e.g. 4G or 512 Default: 512')
parser_new.add_argument('--disk', '-d', dest='disk', action='store', default='4',
help='disk storage allocated to the machine. Default: 4')
parser_new.add_argument('--cores', '-c', dest='cores', action='store', default='2',
help='Number of cores to be allocated for the machine. Default: 2')
parser_new.add_argument('--ubuntu', '-u', dest='ubuntu', action='store', default='',
help='Ubuntu version: 14.04, 16.04, 17.10 or 18.04')
parser_new.add_argument( '--store-net', '-s', dest='stornet', action='store_true', default=False,
help="use networked storage with backup (nfs, ceph) instead of local storage")
parser_new.add_argument( '--docker', '-o', dest='docker', action='store_true', default=False,
help="install latest docker-ce on new machine")
parser_new.add_argument( '--bootstrap', '-b', dest='bootstrap', action='store_true', default=False,
help="auto-configure the system using Chef.")
parser_new.add_argument( '--no-bootstrap', '-n', dest='nobootstrap', action='store_true', default=False,
help="do not auto-configure the system using Chef.")
parser_new.add_argument('hosts', action='store', default=[], nargs='*',
help='hostname(s) of VM/containers (separated by space), ' +
' example: prox new host1 host2 host3')
return parser.parse_args()
def main():
uselxc = True
usegui = False
user = getpass.getuser()
if not args.subcommand:
print('usage: prox <command> [options] host1 host2 host3')
print(' Please run "prox --help"')
return False
if args.subcommand == 'assist':
if 'DISPLAY' in os.environ.keys() or sys.platform == 'win32':
usegui = True
if args.debug:
print('Debugging ....')
print(args, l)
if args.subcommand in ['straaange', 'oppptions']:
prn("This feature is not yet implemented.", usegui)
return False
check_ssh_agent()
check_ssh_auth(user)
pwd = os.getenv('proxpw', '')
if pwd == '':
pwd = os.getenv('PROXPW', '')
if pwd == '':
pwd = getpwd("Password for '%s':" % user, usegui)
if pwd == '':
return False
loginname = user + '@' + REALM
if user == 'root':
loginname = user + '@pam'
if args.subcommand in ['ssh', 'connect']:
ret = subprocess.run("ssh -i %s/.ssh/id_rsa_prox %s"
% (homedir, args.hosts[0]), shell=True)
return True
a = prox_auth(PROXHOST, loginname, pwd, True)
if a.ticket is None:
prn('Could not get an authentication ticket. Wrong password?', usegui)
return False
p = pyproxmox(a)
pool = p.getPools()['data'][0]['poolid']
nodelist = p.getNodes()['data']
nodes = []
hosttempl = {}
templlist = []
ourmachines = {}
oursnaps = {}
if args.subcommand in ['list', 'ls', 'show']:
if args.contacts or args.listsnap:
prn("please wait ...")
for n in nodelist:
node = n['node']
if node in EXCLUDEHOSTS:
continue
nodes.append(node)
try:
conts = p.getContainers(node)['data']
except:
continue
for c in conts:
descr = ''
if args.subcommand in ['list', 'ls', 'show']:
if args.contacts:
descr = parse_contact(p,node,c['vmid'])
if args.listsnap:
shots = p.getContainerSnapshots(node,c['vmid'])['data']
oursnaps[int(c['vmid'])] = shots
ourmachines[int(c['vmid'])] = [c['vmid'], c[
'name'], c['type'], c['status'], node, int(c['maxmem'])/
1024/1024/1024, c['cpus'], int(c['maxdisk'])/1024/1024/1024,
descr]
if args.subcommand in ['list', 'ls', 'show']:
if args.all == True:
vms = p.getNodeVirtualIndex(node)['data']
for v in vms:
if args.contacts:
descr = parse_contact_vm(p,node,v['vmid'])
if v['template'] == 1:
hosttempl[v['name']] = [node, v['vmid']]
templlist.append(v['name'])
else:
ourmachines[int(v['vmid'])] = [v['vmid'], v[
'name'], 'kvm', v['status'], node, '', '', 0, descr]
vmids = []
if args.hosts != []:
vmids = getvmids(ourmachines, args.hosts)
print('')
if args.subcommand in ['list', 'ls', 'show'] or (
args.subcommand in [
'start', 'stop', 'destroy', 'modify', 'mod'] and not vmids):
prn(' {0: <5} {1: <20} {2: <5} {3: <9} {4: <8} {5: <5} {6: <3} {7: <5} {8: <10}'.format(
'vmid', 'name', 'type', 'status', 'node' , 'mem', 'cpu', 'disk', ''))
prn(' {0: <5} {1: <20} {2: <5} {3: <9} {4: <8} {5: <5} {6: <3} {7: <5} {8: <10}'.format(
'----', '--------------------', '----', '--------', '-------', '-----', '---', '-----', ''))
recip = []
for k, v in sorted(ourmachines.items()):
prn(' {0: <5} {1: <20.20} {2: <5} {3: <9} {4: <8} {5: <5} {6: <3} {7: <5.0f} {8: <10}'.format(*v))
recip.append(v[-1])
if args.subcommand in ['list', 'ls', 'show']:
if args.listsnap and k in oursnaps.keys():
for snap in oursnaps[k]:
sparent = ''
sdescription = ''
if 'parent' in snap.keys():
sparent = snap['parent']
if 'description' in snap.keys():
sdescription = snap['description']
sdescription = sdescription.replace('\n', ' ')
if snap['name'] != 'current':
prn(' snapshot: {:<15} parent: {:<15} descr: {:<25} {:<10}'.format(
snap['name'] , sparent, sdescription, ''))
if args.subcommand in ['list', 'ls', 'show']:
if args.contacts:
recip = filter(None,uniq(recip))
prn("\nContact list: " + '; '.join(recip))
if args.subcommand in ['assist', 'gui']:
if not usegui:
print('running "prox assist" command which will guide you '
'through a number of choices, however no GUI is available')
return False
chce = []
msg = ("Running 'prox assist'! Please select from the list "
"below or 'Cancel' and run 'prox --help' for other options. "
"Example: 'prox new mybox1 mybox2 mybox3' will create "
"3 Linux machines.")
chce = easygui.choicebox(msg, __app__,['New linux machine',
'New docker host', 'New virtual machine', 'List machines',
'Start machine', 'Stop machine', 'Modify machine',
'Destroy machine'])
if not chce:
return False
if chce.startswith('New '):
args.subcommand = 'new'
if chce != "New linux machine":
uselxc = False
else:
msg = ("Please select the size of your machine. "
"Memory sizes are in MB, unless you add G "
"(e.g. 1G). Disk sizes are always in GB\n."
"Please start small, you can always resize."
)
title = "Configuring Machine Size"
fieldNames = ["Memory", "# Cores", "Disk Size"]
fieldValues = ['512M', '2', '4G']
fieldValues = easygui.multenterbox(msg, title,
fieldNames, fieldValues)
if fieldValues:
args.mem, args.cores, args.disk = fieldValues
else:
return False
elif chce.startswith('List '):
args.subcommand = 'list'
elif chce.startswith('Start '):
args.subcommand = 'start'
elif chce.startswith('Stop '):
args.subcommand = 'stop'
elif chce.startswith('Modify '):
args.subcommand = 'modify'
elif chce.startswith('Destroy '):
args.subcommand = 'destroy'
else:
args.subcommand = 'assist'
if args.subcommand in ['new', 'create', 'modify', 'mod', 'assist', 'gui']:
lxccores = re.sub("[^0-9^.]", "", args.cores)
lxcdisk = int(re.sub("[^0-9^.]", "", args.disk))
lxcmem = int(re.sub("[^0-9^.]", "", args.mem))
if "G" in args.mem.upper() or lxcmem <= 64:
lxcmem = lxcmem*1024
if args.subcommand in ['start', 'run']:
if not vmids:
vmids.append(input('\nenter vmid to start:'))
if vmids[-1] == '':
prn('vmid is required', usegui)
return False
start_machines(p, ourmachines, vmids, usegui=False)
pingwait(ourmachines[vmids[0]][1],1)
if args.subcommand in ['stop', 'shutdown']:
if not vmids:
vmids.append(input('\nnot found, enter vmid to stop:'))
if vmids[-1] == '':
prn("no vmid entered", usegui)
return False
for vmid in vmids:
machine = ourmachines[vmid]
if machine[3] == 'stopped':
prn('Machine "%s" is already stopped!' % machine[1], usegui)
continue
if machine[2] == 'kvm':
ret = p.stopVirtualMachine(machine[4], vmid)['data']
if ret:
print(ret)
else:
prn("host with id %s not yet stopped!" % vmid, usegui)
for i in range(15):
time.sleep(1)
ret = p.getVirtualStatus(machine[4], vmid)['data']
prn(
'Machine {0: <4}: {1}, cpu: {2:.0%} '.format(
vmid, ret['status'], ret['cpu']))
if ret['status'] == 'stopped':
break
else:
ret = p.stopLXCContainer(machine[4], vmid)['data']
print(ret)
if args.subcommand in ['modify', 'mod']:
if not vmids:
vmids.append(int(input('\nnot found, enter vmid to modify:')))
if vmids[-1] == '':
prn("no vmid entered", usegui)
return False
for vmid in vmids:
machine = ourmachines[vmid]
if machine[2] == 'kvm':
prn("currently cannot modify virtual machines.", usegui)
else:
ccfg = p.getContainerConfig(machine[4], vmid)['data']
rootstr=ccfg['rootfs']
post_data = {}
post_data2 = {}
if ccfg['cpulimit'] != lxccores and lxccores != '0':
post_data['cpulimit'] = lxccores
if ccfg['memory'] != lxcmem and lxcmem > 0:
post_data['memory'] = lxcmem
if machine[3] == 'stopped':
if lxcdisk > 0:
post_data['rootfs'] = re.sub(r",size=[0-9]+G", ",size=%sG"
% lxcdisk, rootstr)
else:
post_data2 = {}
if lxcdisk > 0:
post_data2['disk'] = 'rootfs'
post_data2['size'] = '%sG' % lxcdisk
ret = p.resizeLXCContainer(machine[4], vmid,
post_data2)['data']
if iserr(ret,400):
prn ('Error 40X, could not resize disk. ' 'You may need to shutdown the machine to resize a disk', usegui)
elif iserr(ret,500):
prn ('Error 50X, could not resize disk', usegui)
else:
pass
if post_data != {}:
ret = p.setLXCContainerOptions(machine[4], vmid,
post_data)['data']
if iserr(ret,400):
prn ('Error 40X, could not set machine options', usegui)
elif iserr(ret,500):
prn ('Error 50X, could not set machine options', usegui)
if post_data != {} or post_data2 != {}:
ret = p.getContainerConfig(machine[4], vmid)['data']
print ('Machine reconfigured. New settings '
'cores: %s, mem: %s MB, rootfs: %s '
% (ret['cpulimit'], ret['memory'],
ret['rootfs'])
)
else:
prn('No changes made', usegui)
if args.subcommand in ['destroy', 'delete']:
if not vmids:
vmids.append(input('\nnot found, enter vmid to destroy:'))
if vmids[-1] == '':
return False
for vmid in vmids:
if not int(vmid) in ourmachines:
prn('machine with id %s does not exist' % vmid)
return False
machine = ourmachines[vmid]
if machine[3] != 'stopped':
print(
'Machine "%s" needs to be stopped before it can be destroyed!' %
machine[1])
continue
if machine[2] == 'kvm':
ret = p.deleteVirtualMachine(machine[4], vmid)['data']
print(ret)
else:
ret = p.deleteLXCContainer(machine[4], vmid)['data']
print(ret)
hip = '127.0.0.1'
try:
hip = socket.gethostbyname(machine[1])
except:
pass
ret = subprocess.run("ssh-keygen -R %s,%s > /dev/null 2>&1"
% (machine[1], hip), shell=True)
if args.subcommand in ['snap', 'snapshot']:
if not vmids:
vmids.append(input('\nnot found, enter vmid to snapshot:'))
if vmids[-1] == '':
return False
for vmid in vmids:
if not int(vmid) in ourmachines:
prn('machine with id %s does not exist' % vmid)
return False
machine = ourmachines[vmid]
if machine[2] == 'kvm':
print('KVM machines are currently not supported')
continue
else:
post_data = {
'description': args.snapdesc,
'snapname': args.snapname}
ret = p.snapshotLXCContainer(machine[4],vmid,post_data)['data']
print(ret)
if args.subcommand in ['rollback', 'rb']:
if not vmids:
vmids.append(input('\nnot found, enter vmid to snapshot:'))
if vmids[-1] == '':
return False
for vmid in vmids:
if not int(vmid) in ourmachines:
prn('machine with id %s does not exist' % vmid)
return False
machine = ourmachines[vmid]
if machine[2] == 'kvm':
print('KVM machines are currently not supported')
continue
else:
post_data = {
'snapname': args.snapname}
ret = p.rollbackSnapshotLXCContainer(machine[4],vmid,args.snapname)['data']
print(ret)
if args.subcommand in ['new', 'create', 'make']:
myhosts = hostdedupe(ourmachines, args.hosts)
if len(myhosts) == 0:
msg=("enter the hostname(s) you want to deploy (separated by "
"space, no domain name): ")
myhosts = def_input(msg, usegui)
myhosts = myhosts.split(' ')
if not myhosts or myhosts == '':
prn('hostname(s) are required', usegui)
return False
desc = 'testing'
if len(args.hosts) == 0:
msg=("What is the description/purpose of the system(s)? (e.g. "
"testing, development, other")
desc = def_input(msg, 'testing', usegui)
storage = STORLOC
if len(args.hosts) == 0:
if yn_choice(
"Do you want to use local storage on host (for better performance) ?") == 'n':
storage = STORNET
if args.stornet:
storage = STORNET
newhostids = []
if uselxc:
newcontid = 0
for h in myhosts:
if hostexists(h):
if not yn_choice('Host "%s" already exists in DNS. ' 'This hostname will not be used. Do you still ' 'want to continue?' % h, default='n'):
return False
if args.node == '':
mynode = random.choice(nodes)
else:
mynode = args.node
print('installing container on node "%s" !!! ' % mynode)
oldcontid = newcontid
for i in range(10):
newcontid = p.getClusterVmNextId()['data']
if oldcontid != newcontid:
break
time.sleep(1)
prn(
'creating host %s with ID %s in pool %s' %
(h, newcontid, pool))
try:
mydummy = LXCIMAGE
except:
LXCIMAGE = 'proxnfs:vztmpl/ubuntu-16.04-standard_16.04-1_amd64.tar.gz'
if args.ubuntu == '14.04':
LXCIMAGE = 'proxnfs:vztmpl/ubuntu-14.04-standard_14.04-1_amd64.tar.gz'
if args.ubuntu == '16.04':
LXCIMAGE = 'proxnfs:vztmpl/ubuntu-16.04-standard_16.04-1_amd64.tar.gz'
elif args.ubuntu == '17.10':
LXCIMAGE = 'proxnfs:vztmpl/ubuntu-17.10-standard_17.10-1_amd64.tar.gz'
elif args.ubuntu == '18.04':
LXCIMAGE = 'proxnfs:vztmpl/ubuntu-18.04-standard_18.04-1_amd64.tar.gz'
post_data = {
'ostemplate': LXCIMAGE,
'cpulimit': lxccores,
'memory': lxcmem,
'rootfs': lxcdisk,
'vmid': newcontid,
'description': build_notes(user, pool, desc),
'hostname': h,
'password': pwd,
'storage': storage,
'pool': pool,
'net0': 'name=eth0,bridge=vmbr0,ip=dhcp'}
ret = p.createLXCContainer(mynode, post_data)['data']
print(' ...%s' % ret)
newhostids.append(int(newcontid))
ourmachines[int(newcontid)] = [newcontid, h, 'lxc',
'stopped', mynode]
start_machines(p, ourmachines, newhostids, usegui=False)
pingwait(myhosts[-1],1)
idrsapub = ''
if os.path.exists('%s/.ssh/id_rsa_prox.pub' % homedir):
idrsapub = '%s/.ssh/id_rsa_prox.pub' % homedir
for h in myhosts:
if idrsapub != '':
ssh_exec('root', pwd, ['mkdir -p .ssh',], h)
sftp_put('root', pwd, idrsapub, '.ssh/id_rsa_prox.pub', h)
ssh_exec('root', pwd, ['cat .ssh/id_rsa_prox.pub >> .ssh/authorized_keys',], h)
ssh_exec('root', pwd, ['echo "session required pam_mkhomedir.so skel=/etc/skel/ umask=0022" >> /etc/pam.d/common-account',], h)
ssh_exec('root', pwd, ['echo "%s ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers.d/zz_%s'
% (user, user), 'chmod 440 /etc/sudoers.d/%s' % user], h)
hip = '127.0.0.1'
try:
hip = socket.gethostbyname(h)
except:
pass
ret = subprocess.run("ssh-keygen -R %s,%s > /dev/null 2>&1"
% (h, hip), shell=True)
ret = subprocess.run("ssh-keyscan -t rsa %s >> %s/.ssh/known_hosts 2>/dev/null"
% (h, homedir), shell=True)
if args.docker:
print('\ninstalling docker....')
install_docker(pwd, h)
print ('\nfixing docker and restarting services...')
fixcmds = ['sed -i "s/^ExecStartPre=\/sbin\/modprobe overlay/ExecStartPre=-\/sbin\/modprobe overlay/" /lib/systemd/system/containerd.service']
fixcmds.append('systemctl daemon-reload')
fixcmds.append('systemctl restart containerd docker')
ssh_exec('root', pwd, fixcmds, h)
loginuser='root@'
dobootstrap = False
if args.bootstrap:
dobootstrap = True
elif args.nobootstrap:
dobootstrap = False
else:
if yn_choice("\nDo you want to install the SciComp base config (e.g. user login) ?"):
dobootstrap = True
if dobootstrap:
loginuser=''
if os.path.exists('%s/.chef' % homedir):
ret = easy_par(run_chef_knife, myhosts)
else:
func = functools.partial(run_chef_client, pwd)
ret = easy_par(func, myhosts)
if idrsapub != '':
for h in myhosts:
ssh_exec(user, pwd, ['mkdir -p .ssh',], h)
sftp_put(user, pwd, idrsapub, '.ssh/id_rsa_prox.pub', h)
ssh_exec(user, pwd, ['cat .ssh/id_rsa_prox.pub >> .ssh/authorized_keys',], h)
else:
run_chef_knife('hostname')
if args.runlist != '':
func = functools.partial(runlist_exec, pwd)
ret = easy_par(func, myhosts)
prn("**** login: ssh %s%s" % (loginuser,myhosts[0]))
ret = subprocess.run("ssh %s%s"
% (loginuser, myhosts[0]), shell=True)
else:
myimage = args.image
if myimage == '':
if not usegui:
msg="Please enter a template name"
myimage = def_input(msg, ','.join(templlist))
else:
msg=("Please enter a template name or just hit enter "
"to select from a list:")
myimage = easygui.choicebox(msg, __app__,
','.join(templlist))
if myimage == ','.join(templlist) and usegui:
myimage = easygui.choicebox(
'You must select a image or template name', __app__, templlist)
if not myimage or myimage == ','.join(templlist) or myimage == '':
prn('image is required')
return False
notes = build_notes(user, pool)
for h in myhosts:
newvmid = p.getClusterVmNextId()['data']
prn(
'creating host %s with VM ID %s in pool %s' %
(h, newvmid, pool))
post_data = {
'newid': newvmid,
'name': h,
'description': notes,
'pool': pool
}
ret = p.cloneVirtualMachine(
hosttempl[myimage][0],
hosttempl[myimage][1],
post_data)['data']
print(' ...' + ret)
newhostids.append(newvmid)
if yn_choice("Do you want to start the machine(s) now?"):
for n in newhostids:
print('Starting host %s ..' % n)
ret = p.startVirtualMachine(
hosttempl[myimage][0], n)['data']
print(' ...' + ret)
pingwait(myhosts[0],7)
else:
prn('Please start the host with "prox start <hostname>"', usegui)
print('')
def parse_contact(p,node,vmid):
found = ''
cfg = p.getContainerConfig(node,vmid)['data']
if 'description' in cfg.keys() :
m = re.search('technical_contact: (.+?)@', cfg['description'])
if m:
found = m.group(1)
return found
def parse_contact_vm(p,node,vmid):
found = ''
cfg = p.getVirtualConfig(node,vmid)['data']
if 'description' in cfg.keys() :
m = re.search('technical_contact: (.+?)@', cfg['description'])
if m:
found = m.group(1)
return found
def start_machines(p, ourmachines, vmids, usegui=False):
for vmid in vmids:
machine = ourmachines[vmid]
ret = None
sleeptime = 1
if machine[3] == 'running':
prn('Machine "%s" is already running!' % machine[1], usegui)
continue
print('Starting host %s ..' % vmid)
if machine[2] == 'kvm':
ret = p.startVirtualMachine(machine[4], vmid)['data']
print('...%s' % ret)
for i in range(25):
time.sleep(sleeptime)
ret = p.getVirtualStatus(machine[4], vmid)['data']
print('Machine {0: <4}: {1}, cpu: {2:.0%} '.format(
vmid, ret['status'], ret['cpu']))
if ret['cpu'] > 0.2:
break
else:
ret = None
for i in range(15):
ret = p.startLXCContainer(machine[4], vmid)['data']
if isinstance(ret, str):
print(' ...%s' % ret)
break
time.sleep(sleeptime)
sleeptime+=1
print('starting host %s, re-try %s' % (vmid, i))
if not isinstance(ret, str):
print("Failed starting host id %s !" % vmid)
continue
sleeptime = 1
for i in range(15):
time.sleep(sleeptime)
sleeptime+=1
ret = p.getContainerStatus(machine[4], vmid)['data']
if not isinstance(ret, int):
prn(
'Machine {0: <4}: {1}, cpu: {2:.0%} '.format(
vmid, ret['status'], ret['cpu']))
if ret['status'] == 'running':
break
else:
print(' ...Error %s' % ret)
if isinstance(ret, int):
prn("Failed starting host id %s !" % vmid)
continue
def run_chef_knife(host):
knife = "knife bootstrap --no-host-key-verify " "--ssh-user root --ssh-identity-file %s/.ssh/id_rsa_prox " "--environment scicomp_prod " "--bootstrap-version %s " '--server-url "https://chef.fhcrc.org/organizations/cit" ' "--run-list 'role[cit-base]','role[scicomp_base]' " "--node-name %s " "%s" % (homedir,CHEFVERSION,host,host)
if host == 'hostname':
print('you can also execute this knife command manually:')
print('************************************')
print(knife)
print('************************************')
else:
if os.path.exists('%s/.chef' % homedir):
print('*** executing knife command:')
print(knife)
ret = subprocess.run(knife, shell=True)
else:
print ('chef/knife config dir %s/.chef does not exist.' % homedir)
def run_chef_client(pwd, host):
chefclient = "chef-client --environment scicomp_prod " "--validation_key /root/.chef/cit-validator.pem " "--runlist role[cit-base],role[scicomp_base] "
print ('\nbootstrapping chef-client configs on %s ... please wait a few minutes ... !!!\n' % host)
cmdlist = ['dpkg -i /opt/chef/tmp/chef_amd64.deb', chefclient]
ssh_exec('root', pwd, cmdlist, host)
def check_ssh_auth(user):
if os.path.exists('%s/.ssh/id_rsa_prox' % homedir):
return True
else:
ret = subprocess.run("ssh-keygen -q -t rsa -f %s/.ssh/id_rsa_prox -C prox-%s -N ''"
% (homedir, user), shell=True)
def check_ssh_agent():
SSH_AUTH_SOCK = os.getenv('SSH_AUTH_SOCK', '')
if SSH_AUTH_SOCK == '':
print("\nYou don't have ssh-agent running, please execute this command:")
if os.path.exists('%s/.ssh/id_rsa' % homedir):
print("eval $(ssh-agent -s); ssh-add\n")
else:
print("eval $(ssh-agent -s)\n")
else:
if os.path.exists('%s/.ssh/id_rsa_prox' % homedir):
ret = subprocess.run("ssh-add %s/.ssh/id_rsa_prox > /dev/null 2>&1"
% homedir, shell=True)
def runlist_exec(pwd, myhost):
prn('***** Executing run list %s on host %s........' % (args.runlist, myhost))
rlist = os.path.expanduser(args.runlist.strip())
if os.path.exists(rlist):
with open(rlist) as f:
commands = f.read().splitlines()
prn('*** Running commands %s' % commands)
ssh_exec('root', pwd, commands, myhost)
else:
ssh_exec('root', pwd, [args.runlist.strip(),], myhost)
def install_docker(pwd, myhost):
cmd = []
cmd.append('apt-get update')
cmd.append('apt-get install -y apt-transport-https ca-certificates curl software-properties-common')
cmd.append('apt-get install -y gpg-agent')
cmd.append('curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -')
cmd.append('add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"')
cmd.append('apt-get update')
cmd.append('apt-get install -y docker-ce')
ssh_exec('root', pwd, cmd, myhost) | Apache License 2.0 |
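A usage sketch for `ssh_exec` above; the host, password and command list are placeholders, and the import path is inferred from the row's function_path. Each call opens one paramiko session and runs the commands in order, printing their stdout.

```python
from prox.cmdprox import ssh_exec  # import path inferred from function_path

ssh_exec(
    user='root',
    pwd='not-a-real-password',          # placeholder credential
    commands=['hostname', 'uptime'],    # must be a list, not a bare string
    host='testbox1.example.org',        # placeholder host
)
```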
derfies/panda3d-editor | src/pandaEditor/ui/mainFrame.py | MainFrame.OnFileSave | python | def OnFileSave(self, evt, saveAs=False):
if self.base.doc.file_path is None or saveAs:
filePath = self._GetSavePath()
if filePath:
self.base.doc.file_path = filePath
else:
return
self.base.doc.save() | Save the document. | https://github.com/derfies/panda3d-editor/blob/a50939bd4bfa5c22d27a9ddee090717e8d95f404/src/pandaEditor/ui/mainFrame.py#L248-L262 | import os
import sys
import wx
import wx.aui
import wx.propgrid as wxpg
from pubsub import pub
import panda3d.core as pm
import p3d
from direct.showbase.PythonUtil import getBase as get_base
from wxExtra import utils as wxUtils, ActionItem
from wxExtra.logpanel import LogPanel
from wxExtra import AuiManagerConfig, CustomAuiToolBar, CustomMenu
from pandaEditor import commands as cmds
from pandaEditor.constants import MODEL_EXTENSIONS
from pandaEditor.ui.viewport import Viewport
from pandaEditor.ui.resourcesPanel import ResourcesPanel
from pandaEditor.ui.sceneGraphPanel import SceneGraphPanel
from pandaEditor.ui.propertiesPanel import PropertiesPanel
from pandaEditor.ui.preferenceseditor import PreferencesEditor
from pandaEditor.ui.createdialog import CreateDialog
FRAME_TITLE = 'Panda Editor 0.1'
TBAR_ICON_SIZE = (24, 24)
WILDCARD_SCENE = '.xml|*.xml'
WILDCARD_P3D = '.p3d|*.p3d'
ID_FILE_NEW = wx.NewId()
ID_FILE_OPEN = wx.NewId()
ID_FILE_SAVE = wx.NewId()
ID_FILE_SAVE_AS = wx.NewId()
ID_FILE_IMPORT = wx.NewId()
ID_FILE_PROJ = wx.NewId()
ID_PROJ_NEW = wx.NewId()
ID_PROJ_SET = wx.NewId()
ID_PROJ_BUILD = wx.NewId()
ID_EDIT_UNDO = wx.NewId()
ID_EDIT_REDO = wx.NewId()
ID_EDIT_GROUP = wx.NewId()
ID_EDIT_UNGROUP = wx.NewId()
ID_EDIT_PARENT = wx.NewId()
ID_EDIT_UNPARENT = wx.NewId()
ID_EDIT_DUPLICATE = wx.NewId()
ID_EDIT_WRITE_BAM_FILE = wx.NewId()
ID_EDIT_EXPORT_OBJ = wx.NewId()
ID_MODIFY_PHYSICS = wx.NewId()
ID_XFORM_SEL = wx.NewId()
ID_XFORM_POS = wx.NewId()
ID_XFORM_ROT = wx.NewId()
ID_XFORM_SCL = wx.NewId()
ID_XFORM_WORLD = wx.NewId()
ID_VIEW_GRID = wx.NewId()
ID_VIEW_TOP = wx.NewId()
ID_VIEW_BOTTOM = wx.NewId()
ID_VIEW_FRONT = wx.NewId()
ID_VIEW_BACK = wx.NewId()
ID_VIEW_RIGHT = wx.NewId()
ID_VIEW_LEFT = wx.NewId()
ID_CREATE_PREFAB = wx.NewId()
ID_LAYOUT_GAME = wx.NewId()
ID_LAYOUT_EDITOR = wx.NewId()
ID_LAYOUT_BOTH = wx.NewId()
ID_WIND_PANEL = wx.NewId()
ID_WIND_FILE_TOOLBAR = wx.NewId()
ID_WIND_EDIT_TOOLBAR = wx.NewId()
ID_WIND_MODIFY_TOOLBAR = wx.NewId()
ID_WIND_XFORM_TOOLBAR = wx.NewId()
ID_WIND_LAYOUT_TOOLBAR = wx.NewId()
ID_WIND_VIEWPORT = wx.NewId()
ID_WIND_SCENE_GRAPH = wx.NewId()
ID_WIND_LIGHT_LINKER = wx.NewId()
ID_WIND_PROPERTIES = wx.NewId()
ID_WIND_RESOURCES = wx.NewId()
ID_WIND_LOG = wx.NewId()
ID_WIND_PREFERENCES = wx.NewId()
ID_PLAY = wx.NewId()
ID_PAUSE = wx.NewId()
class MainFrame(wx.Frame):
def __init__(self, base, *args, **kwargs):
super().__init__(*args, **kwargs)
self.base = base
self.preMaxPos = None
self.preMaxSize = None
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.Bind(wx.EVT_KEY_UP, p3d.wxPanda.OnKeyUp)
self.Bind(wx.EVT_KEY_DOWN, p3d.wxPanda.OnKeyDown)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_MOVE, self.OnMove)
pub.subscribe(self.OnUpdate, 'Update')
self.cfg = wx.Config('pandaEditor')
self.BuildFileActions()
self.BuildEditActions()
self.BuildModifyActions()
self.BuildXformActions()
self.BuildLayoutActions()
self.pnlViewport = Viewport(self.base, self)
self.pnlSceneGraph = SceneGraphPanel(self)
self.pnlProps = PropertiesPanel(self)
self.pnlRsrcs = ResourcesPanel(self)
self.pnlLog = LogPanel(self)
self.BuildAuiManager()
self.mb = wx.MenuBar()
self.BuildViewMenu()
self.BuildCreateMenu()
self.BuildWindowMenu()
self.BuildMenuBar()
self.RebuildPanelMenu()
self.OnUpdateWindowMenu(None)
def _GetSavePath(self):
defaultDir = ''
defaultFile = ''
if self.base.doc.file_path is not None:
defaultDir, defaultFile = os.path.split(self.base.doc.file_path)
elif self.base.project.path is not None:
defaultDir = self.base.project.GetScenesDirectory()
filePath = wxUtils.file_save_dialog('Save Scene As', WILDCARD_SCENE, defaultDir=defaultDir, defaultFile=defaultFile)
if filePath and os.path.exists(filePath):
msg = ''.join(['The file "', filePath, '" already exists.\nDo you want to replace it?'])
if wxUtils.YesNoDialog(msg, 'Replace File?', wx.ICON_WARNING) == wx.ID_NO:
return False
return filePath
def _CheckForSave(self):
if self.base.doc.dirty:
msg = ''.join(['The document "', self.base.doc.title, '" was modified after last save.\nSave changes before continuing?'])
result = wxUtils.YesNoCancelDialog(msg, 'Save Changes?', wx.ICON_WARNING)
if result == wx.ID_YES:
self.OnFileSave(None)
elif result == wx.ID_CANCEL:
return False
return True
def OnClose(self, evt):
if not self._CheckForSave():
evt.Veto()
return
self.auiCfg.Save()
if self.preMaxPos is not None:
self.auiCfg.SavePosition(*self.preMaxPos)
if self.preMaxSize is not None:
self.auiCfg.SaveSize(*self.preMaxSize)
if self.base.project.path is not None:
self.cfg.Write('projDirPath', self.base.project.path)
self.Show(False)
try:
base
except NameError:
sys.exit()
base.userExit()
def OnFileNew(self, evt):
if not self._CheckForSave():
return
self.base.CreateScene()
self.base.doc.on_refresh()
def OnFileOpen(self, evt, filePath=None):
if not self._CheckForSave():
return
if filePath is None:
scnsDirPath = self.base.project.GetScenesDirectory()
if scnsDirPath is None:
scnsDirPath = os.getcwd()
filePath = wxUtils.file_open_dialog('Open Scene', WILDCARD_SCENE,
defaultDir=scnsDirPath)
if filePath:
self.base.CreateScene(filePath)
self.base.doc.load() | MIT License |
obi-wan3/ob13-cogs | mentionhelp/mentionhelp.py | MentionHelp._mention_help | python | async def _mention_help(self, ctx: commands.Context): | Send a message when a user mentions the bot (with no other text). | https://github.com/obi-wan3/ob13-cogs/blob/716527f8581e0345802ea2626d43324f87edf941/mentionhelp/mentionhelp.py#L79-L80 | import re
import discord
from redbot.core import commands, Config
class MentionHelp(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, 14000605, force_registration=True)
default_guild = {
"toggle": True
}
default_global = {
"toggle": True,
"message": None,
"embed": False
}
self.config.register_guild(**default_guild)
self.config.register_global(**default_global)
@commands.Cog.listener("on_message_without_command")
async def _message_listener(self, message: discord.Message):
if (
message.author.bot or
not await self.config.toggle()
):
return
if message.guild and (
await self.bot.cog_disabled_in_guild(self, message.guild) or
not await self.config.guild(message.guild).toggle()
):
return
mention = re.compile(rf"<@!?{self.bot.user.id}>")
destination = message.channel if message.guild else message.author
if message.guild and not destination.permissions_for(message.guild.me).send_messages:
return
to_send = await self.config.message()
if mention.fullmatch(message.content.strip()) and self.bot.user.id in [u.id for u in message.mentions] and to_send:
if (await self.config.embed()) and ((not message.guild) or destination.permissions_for(message.guild.me).embed_links):
return await destination.send(embed=discord.Embed(description=to_send, color=await self.bot.get_embed_color(destination)))
return await destination.send(to_send)
@commands.group(name="mentionhelp") | MIT License |
medtagger/medtagger | backend/medtagger/repositories/label_tags.py | enable | python | def enable(label_tag_key: str) -> None:
enabling_query = LabelTag.query.filter(LabelTag.key == label_tag_key)
updated = enabling_query.update({'disabled': False}, synchronize_session='fetch')
if not updated:
raise InternalErrorException(f'Label Tag "{label_tag_key}" was not enabled due to unknown database error.') | Enable existing Label Tag. | https://github.com/medtagger/medtagger/blob/8b7575e55764a95d2040f3b9bcd23b6ff846ecaa/backend/medtagger/repositories/label_tags.py#L75-L80 | from typing import List
from medtagger.database import db_transaction_session
from medtagger.database.models import LabelTag
from medtagger.definitions import LabelTool
from medtagger.exceptions import InternalErrorException
from medtagger.types import TaskID
def get_all_tags(include_disabled: bool = False) -> List[LabelTag]:
query = LabelTag.query
if not include_disabled:
query = query.filter(~LabelTag.disabled)
return query.order_by(LabelTag.key).all()
def get_label_tag_by_key(label_tag_key: str) -> LabelTag:
return LabelTag.query.filter(LabelTag.key == label_tag_key).one()
def add_new_tag(key: str, name: str, tools: List[LabelTool], task_id: TaskID) -> LabelTag:
label_tag = LabelTag(key, name, tools)
label_tag.task_id = task_id
with db_transaction_session() as session:
session.add(label_tag)
return label_tag
def delete_tag_by_key(key: str) -> None:
with db_transaction_session() as session:
session.query(LabelTag).filter(LabelTag.key == key).delete()
def update(key: str, name: str = None, tools: List[LabelTool] = None, task_id: TaskID = None) -> LabelTag:
label_tag = get_label_tag_by_key(key)
if name:
label_tag.name = name
if tools:
label_tag.tools = tools
if task_id:
label_tag.task_id = task_id
with db_transaction_session() as session:
session.add(label_tag)
return label_tag
def disable(label_tag_key: str) -> None:
disabling_query = LabelTag.query.filter(LabelTag.key == label_tag_key)
updated = disabling_query.update({'disabled': True}, synchronize_session='fetch')
if not updated:
raise InternalErrorException(f'Label Tag "{label_tag_key}" was not disabled due to unknown database error.') | Apache License 2.0 |
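A usage sketch for the repository functions above, assuming a configured database session and an already existing tag key (the key shown is a placeholder); the import path follows the row's function_path.

```python
from medtagger.repositories import label_tags

label_tags.disable('LEFT_KIDNEY')   # placeholder tag key
assert all(tag.key != 'LEFT_KIDNEY' for tag in label_tags.get_all_tags())

label_tags.enable('LEFT_KIDNEY')
assert any(tag.key == 'LEFT_KIDNEY' for tag in label_tags.get_all_tags())
```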
linmx0130/ya_mxdet | train_faster_rcnn.py | train_dataset | python | def train_dataset():
train_dataset = VOCDataset(annotation_dir=cfg.annotation_dir,
img_dir=cfg.img_dir,
dataset_index=cfg.dataset_index,
transform=train_transformation,
resize_func=img_resize)
return train_dataset | prepare a custom dataset
return: train_dataset | https://github.com/linmx0130/ya_mxdet/blob/eaa6de7faf819f3720d8dac64c57a42dec38eed7/train_faster_rcnn.py#L37-L47 | from faster_rcnn.config import cfg
from VOCDataset import VOCDataset
from faster_rcnn.faster_rcnn import FasterRCNN
import mxnet as mx
from faster_rcnn.utils import random_flip, imagenetNormalize, img_resize, random_square_crop, select_class_generator, bbox_inverse_transform, softmax_celoss_with_ignore
from faster_rcnn.rpn_gt_opr import rpn_gt_opr
from faster_rcnn.rpn_proposal import proposal_train
import os
import argparse
import logging
import time
def logging_system():
global args
logger = logging.getLogger("training")
logger.setLevel(logging.INFO)
fh = logging.FileHandler(os.path.join(args.save_path, args.logger), 'w')
formatter = logging.Formatter(
'[%(asctime)s - %(name)s - %(filename)s:%(lineno)d - %(levelname)s] %(message)s'
)
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
logger.addHandler(ch)
return logger
def train_transformation(data, label):
data, label = random_flip(data, label)
data = imagenetNormalize(data)
return data, label | MIT License |
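A hypothetical way to exercise train_dataset() above; it assumes cfg points at a local PASCAL VOC copy and that VOCDataset follows the usual __len__/__getitem__ protocol of returning one (image, label) pair, which is not shown in this snippet.

dataset = train_dataset()
print(len(dataset))              # number of annotated images in the split
data, label = dataset[0]         # transformed by train_transformation
print(data.shape, label.shape)   # e.g. a (3, H, W) image tensor and (N, 5) boxes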
usc-isi-i2/rltk | rltk/record.py | remove_raw_object | python | def remove_raw_object(cls):
cls._remove_raw_object = True
return cls | Decorator for Record class.
If a Record class is decorated, raw_object will be removed once all marked properties are cached. | https://github.com/usc-isi-i2/rltk/blob/aee10ed5dd561583e60db3373ed82fe1208da1e9/rltk/record.py#L75-L81 | import re
from typing import Callable
re_record_id = re.compile(r'^[^*]{1,255}$')
re_valid_property_name = re.compile(r'^[A-Za-z_]{1}[\w]*$')
class Record(object):
_remove_raw_object = False
def __init__(self, raw_object):
self.raw_object = raw_object
@property
def id(self):
raise NotImplementedError
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.id == other.id
class cached_property(property):
def __init__(self, func):
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
cached_name = self.func.__name__
if cached_name not in obj.__dict__:
obj.__dict__[cached_name] = self.func(obj)
value = obj.__dict__.get(cached_name)
return value
def __reduce__(self):
return cached_property.__new__, (cached_property,), {'func': self.func} | MIT License |
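A small sketch showing the decorator and cached_property together, assuming the definitions above are importable from rltk.record; PersonRecord and its fields are made up for the example.

@remove_raw_object
class PersonRecord(Record):
    @property
    def id(self):
        return self.raw_object['id']

    @cached_property
    def name(self):
        return self.raw_object['name'].strip().lower()

r = PersonRecord({'id': '1', 'name': '  Alice '})
print(r.name)                            # computed once, then stored on the instance
print(r.name)                            # served from the cache on the second access
print(PersonRecord._remove_raw_object)   # True, set by the class decorator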
google-research/long-range-arena | lra_benchmarks/models/reformer/reformer.py | ReformerDualEncoder.apply | python | def apply(self,
inputs1,
inputs2,
vocab_size=None,
inputs1_positions=None,
inputs2_positions=None,
inputs1_segmentation=None,
inputs2_segmentation=None,
use_bfloat16=False,
emb_dim=512,
num_heads=8,
num_layers=6,
qkv_dim=512,
mlp_dim=2048,
max_len=2048,
train=False,
dropout_rate=0.1,
attention_dropout_rate=0.1,
classifier=True,
classifier_pool='CLS',
num_classes=2,
interaction=None):
encoder = ReformerEncoder.shared(
inputs_positions=inputs1_positions,
inputs_segmentation=inputs1_segmentation,
vocab_size=vocab_size,
use_bfloat16=use_bfloat16,
emb_dim=emb_dim,
num_heads=num_heads,
num_layers=num_layers,
qkv_dim=qkv_dim,
mlp_dim=mlp_dim,
max_len=max_len,
train=train,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate,
name='encoder')
inputs1_encoded = encoder(inputs1)
inputs2_encoded = encoder(inputs2)
encoded = common_layers.classifier_head_dual(
inputs1_encoded,
inputs2_encoded,
num_classes,
mlp_dim,
pooling_mode=classifier_pool,
interaction=interaction)
return encoded | Applies Transformer model on text similarity.
A deliberate choice to distinguish this from NLI because
we may want to do different things to the model later. Dual Encoding
mode enforces that we do not do cross attention between pairs.
Args:
inputs1: input data.
inputs2: target data.
vocab_size: size of the input vocabulary.
inputs1_positions: input subsequence positions for packed examples.
inputs2_positions: target subsequence positions for packed examples.
inputs1_segmentation: input segmentation info for packed examples.
inputs2_segmentation: target segmentation info for packed examples.
use_bfloat16: bool: whether use bfloat16.
emb_dim: dimension of embedding.
num_heads: number of heads.
num_layers: number of layers.
qkv_dim: dimension of the query/key/value.
mlp_dim: dimension of the mlp on top of attention block.
max_len: maximum length.
train: whether it is training.
dropout_rate: dropout rate.
attention_dropout_rate: dropout rate for attention weights.
classifier: boolean, to use classifier.
classifier_pool: str, supports "MEAN", "MAX" pooling.
num_classes: int, number of classification classes.
interaction: str
Returns:
output of the dual-encoder classifier head. | https://github.com/google-research/long-range-arena/blob/09c2916c3f33a07347dcc70c8839957d3c9d4062/lra_benchmarks/models/reformer/reformer.py#L204-L284 | from flax import nn
import jax.numpy as jnp
from lra_benchmarks.models.layers import common_layers
from lra_benchmarks.models.reformer import reformer_attention
class ReformerBlock(nn.Module):
def apply(self,
inputs,
qkv_dim,
mlp_dim,
num_heads,
dtype=jnp.float32,
causal_mask=False,
inputs_segmentation=None,
padding_mask=None,
dropout_rate=0.1,
attention_dropout_rate=0.1,
deterministic=False,
cache=None):
assert inputs.ndim == 3
x = nn.LayerNorm(inputs)
x = reformer_attention.ReformerSelfAttention(
x,
num_heads=num_heads,
qkv_features=qkv_dim,
causal_mask=causal_mask,
padding_mask=padding_mask,
kernel_init=nn.initializers.xavier_uniform(),
bias_init=nn.initializers.normal(stddev=1e-6),
bias=False,
broadcast_dropout=False,
dropout_rate=attention_dropout_rate,
deterministic=deterministic,
cache=cache)
x = nn.dropout(x, rate=dropout_rate, deterministic=deterministic)
x = x + inputs
y = nn.LayerNorm(x)
y = common_layers.MlpBlock(
y,
mlp_dim=mlp_dim,
dropout_rate=dropout_rate,
deterministic=deterministic)
return x + y
class ReformerEncoder(nn.Module):
def apply(self,
inputs,
vocab_size,
inputs_positions=None,
inputs_segmentation=None,
shared_embedding=None,
use_bfloat16=False,
emb_dim=512,
num_heads=8,
dtype=jnp.float32,
num_layers=6,
qkv_dim=512,
mlp_dim=2048,
max_len=512,
train=True,
dropout_rate=0.1,
attention_dropout_rate=0.1,
learn_pos_emb=False,
classifier=False,
classifier_pool='CLS',
num_classes=10):
assert inputs.ndim == 2
src_padding_mask = (inputs > 0)[..., None]
if shared_embedding is None:
input_embed = nn.Embed.partial(
num_embeddings=vocab_size,
features=emb_dim,
embedding_init=nn.initializers.normal(stddev=1.0))
else:
input_embed = shared_embedding
x = inputs.astype('int32')
x = input_embed(x)
if classifier and classifier_pool == 'CLS':
cls = self.param('cls', (1, 1, emb_dim), nn.initializers.zeros)
cls = jnp.tile(cls, [x.shape[0], 1, 1])
x = jnp.concatenate([cls, x], axis=1)
max_len += 1
src_padding_mask = jnp.concatenate(
[src_padding_mask[:, :1], src_padding_mask], axis=1)
pe_init = nn.initializers.normal(stddev=0.02) if learn_pos_emb else None
x = common_layers.AddPositionEmbs(
x,
inputs_positions=inputs_positions,
posemb_init=pe_init,
max_len=max_len,
name='posembed_input')
x = nn.dropout(x, rate=dropout_rate, deterministic=not train)
if use_bfloat16:
x = x.astype(jnp.bfloat16)
dtype = jnp.bfloat16
else:
dtype = jnp.float32
for lyr in range(num_layers):
x = ReformerBlock(
x,
qkv_dim=qkv_dim,
mlp_dim=mlp_dim,
num_heads=num_heads,
dtype=dtype,
padding_mask=src_padding_mask,
inputs_segmentation=inputs_segmentation,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate,
deterministic=not train,
name=f'encoderblock_{lyr}')
encoded = nn.LayerNorm(x, dtype=dtype, name='encoder_norm')
if classifier:
encoded = common_layers.classifier_head(
encoded, num_classes, mlp_dim, pooling_mode=classifier_pool)
return encoded
class ReformerDualEncoder(nn.Module): | Apache License 2.0 |
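A NumPy-only illustration of two preprocessing steps the shared encoder above performs (padding-mask construction and the CLS-slot prepend used when classifier_pool='CLS'); this is a simplification for inspection, not the JAX/flax code.

import numpy as np

inputs = np.array([[5, 9, 2, 0, 0]])            # token ids, 0 = padding
src_padding_mask = (inputs > 0)[..., None]      # (batch, length, 1)

# prepend one slot for the CLS token and reuse the first position's mask for it,
# as in the classifier_pool == 'CLS' branch above
src_padding_mask = np.concatenate(
    [src_padding_mask[:, :1], src_padding_mask], axis=1)

print(src_padding_mask.squeeze(-1))   # [[ True  True  True  True False False]]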
beartype/beartype | beartype/_decor/_code/_pep/pepcode.py | _unmemoize_pep_code | python | def _unmemoize_pep_code(
data: BeartypeData,
func_wrapper_code: str,
pith_repr: str,
hint_forwardrefs_class_basename: tuple,
) -> str:
assert data.__class__ is BeartypeData, f'{repr(data)} not @beartype data.'
assert isinstance(func_wrapper_code, str), (
f'{repr(func_wrapper_code)} not string.')
assert isinstance(pith_repr, str), f'{repr(pith_repr)} not string.'
assert isinstance(hint_forwardrefs_class_basename, Iterable), (
f'{repr(hint_forwardrefs_class_basename)} not iterable.')
func_wrapper_code = replace_str_substrs(
text=func_wrapper_code,
old=PEP_CODE_PITH_ROOT_PARAM_NAME_PLACEHOLDER,
new=pith_repr,
)
if hint_forwardrefs_class_basename:
data.func_wrapper_locals[ARG_NAME_TYPISTRY] = bear_typistry
for hint_forwardref_class_basename in hint_forwardrefs_class_basename:
func_wrapper_code = replace_str_substrs(
text=func_wrapper_code,
old=(
f'{PEP_CODE_HINT_FORWARDREF_UNQUALIFIED_PLACEHOLDER_PREFIX}'
f'{hint_forwardref_class_basename}'
f'{PEP_CODE_HINT_FORWARDREF_UNQUALIFIED_PLACEHOLDER_SUFFIX}'
),
new=register_typistry_forwardref(
get_hint_pep484585_forwardref_classname_relative_to_object(
hint=hint_forwardref_class_basename, obj=data.func)
),
)
return func_wrapper_code | Convert the passed memoized code snippet type-checking any parameter or
return of the decorated callable into a memoized code snippet type-checking
a specific parameter or return of that callable.
Specifically, this function (in order):
#. Globally replaces all references to the
:data:`PEP_CODE_PITH_ROOT_PARAM_NAME_PLACEHOLDER` placeholder substring
cached into this code with the passed ``pith_repr`` parameter.
#. Unmemoizes this code by globally replacing all relative forward
reference placeholder substrings cached into this code with Python
expressions evaluating to the classes referred to by those substrings
relative to that callable when accessed via the private
``__beartypistry`` parameter.
Parameters
----------
data : BeartypeData
Decorated callable to be type-checked.
func_wrapper_code : str
Memoized callable-agnostic code snippet type-checking any parameter or
return of the decorated callable.
pith_repr : str
Machine-readable representation of the name of this parameter or
return.
hint_forwardrefs_class_basename : tuple
Tuple of the unqualified classnames referred to by all relative forward
reference type hints visitable from the current root type hint.
Returns
----------
str
This memoized code unmemoized by globally resolving all relative
forward reference placeholder substrings cached into this code relative
to the currently decorated callable. | https://github.com/beartype/beartype/blob/9da0bbebe408d281d5bfb6cc203dc6969e241aa4/beartype/_decor/_code/_pep/pepcode.py#L237-L331 | from beartype.roar import BeartypeDecorHintPepException
from beartype._decor._cache.cachetype import (
bear_typistry,
register_typistry_forwardref,
)
from beartype._decor._code.codesnip import ARG_NAME_TYPISTRY
from beartype._decor._code._pep._pephint import pep_code_check_hint
from beartype._decor._code._pep._pepsnip import (
PARAM_KIND_TO_PEP_CODE_LOCALIZE,
PEP_CODE_CHECK_RETURN_PREFIX,
PEP_CODE_CHECK_RETURN_SUFFIX,
PEP_CODE_HINT_FORWARDREF_UNQUALIFIED_PLACEHOLDER_PREFIX,
PEP_CODE_HINT_FORWARDREF_UNQUALIFIED_PLACEHOLDER_SUFFIX,
PEP_CODE_PITH_ROOT_PARAM_NAME_PLACEHOLDER,
)
from beartype._decor._data import BeartypeData
from beartype._util.cache.utilcacheerror import reraise_exception_cached
from beartype._util.hint.pep.proposal.pep484585.utilpep484585ref import (
get_hint_pep484585_forwardref_classname_relative_to_object)
from beartype._util.kind.utilkinddict import update_mapping
from beartype._util.text.utiltextlabel import (
prefix_callable_decorated_param,
prefix_callable_decorated_return,
)
from beartype._util.text.utiltextmunge import replace_str_substrs
from collections.abc import Iterable
from inspect import Parameter
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
_RETURN_REPR = repr('return')
def pep_code_check_param(
data: BeartypeData,
hint: object,
param: Parameter,
param_index: int,
) -> str:
assert data.__class__ is BeartypeData, f'{repr(data)} not @beartype data.'
assert isinstance(param, Parameter), (
f'{repr(param)} not parameter metadata.')
assert isinstance(param_index, int), (
f'{repr(param_index)} not integer.')
PARAM_LOCALIZE_TEMPLATE = PARAM_KIND_TO_PEP_CODE_LOCALIZE.get(
param.kind, None)
if PARAM_LOCALIZE_TEMPLATE is None:
exception_prefix = prefix_callable_decorated_param(
func=data.func, param_name=param.name)
raise BeartypeDecorHintPepException(
f'{exception_prefix}kind {repr(param.kind)} '
f'currently unsupported by @beartype.'
)
try:
(
code_param_check_pith,
func_wrapper_locals,
hint_forwardrefs_class_basename,
) = pep_code_check_hint(hint)
update_mapping(data.func_wrapper_locals, func_wrapper_locals)
code_param_check = _unmemoize_pep_code(
data=data,
func_wrapper_code=code_param_check_pith,
pith_repr=repr(param.name),
hint_forwardrefs_class_basename=hint_forwardrefs_class_basename,
)
except Exception as exception:
reraise_exception_cached(
exception=exception,
target_str=prefix_callable_decorated_param(
func=data.func, param_name=param.name),
)
code_param_localize = PARAM_LOCALIZE_TEMPLATE.format(
arg_name=param.name, arg_index=param_index)
return f'{code_param_localize}{code_param_check}'
def pep_code_check_return(data: BeartypeData, hint: object) -> str:
assert data.__class__ is BeartypeData, f'{repr(data)} not @beartype data.'
hint_forwardrefs_class_basename = ()
try:
(
code_return_check_pith,
func_wrapper_locals,
hint_forwardrefs_class_basename,
) = pep_code_check_hint(hint)
update_mapping(data.func_wrapper_locals, func_wrapper_locals)
code_return_check_prefix = PEP_CODE_CHECK_RETURN_PREFIX.format(
func_call_prefix=data.func_wrapper_code_call_prefix)
code_return_check_memoized = (
f'{code_return_check_prefix}'
f'{code_return_check_pith}'
f'{PEP_CODE_CHECK_RETURN_SUFFIX}'
)
code_return_check = _unmemoize_pep_code(
data=data,
func_wrapper_code=code_return_check_memoized,
pith_repr=_RETURN_REPR,
hint_forwardrefs_class_basename=hint_forwardrefs_class_basename,
)
except Exception as exception:
reraise_exception_cached(
exception=exception,
target_str=prefix_callable_decorated_return(data.func),
)
return code_return_check | MIT License |
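A toy, self-contained illustration of the unmemoization idea the docstring above describes: a cached, parameter-agnostic snippet carries placeholder substrings that are later rewritten for one concrete parameter. The placeholder name and snippet are invented for the example and are not beartype's real constants; beartype also uses a checked replace_str_substrs() helper rather than plain str.replace().

PITH_PLACEHOLDER = '<PITH_NAME>'

memoized_snippet = (
    "if not isinstance(pith, MuhClass):\n"
    "    raise TypeError('" + PITH_PLACEHOLDER + " is not a MuhClass')\n"
)

def unmemoize(snippet: str, pith_repr: str) -> str:
    # swap the cached placeholder for the concrete parameter name
    return snippet.replace(PITH_PLACEHOLDER, pith_repr)

print(unmemoize(memoized_snippet, repr('muh_arg')))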
visualcomputinginstitute/3d-semantic-segmentation | tools/lazy_decorator.py | lazy_property | python | def lazy_property(function):
attribute = '_cache_' + function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator | caches the output of the property and returns the cached value on subsequent calls
:param function: property to be cached
:return: cached output of property | https://github.com/visualcomputinginstitute/3d-semantic-segmentation/blob/1dfc010b370a346902ad29460c9ad969c1892a97/tools/lazy_decorator.py#L10-L25 | import functools | MIT License |
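A usage sketch for the decorator above, assuming lazy_property is importable from tools.lazy_decorator; the Mesh class is made up for the example.

class Mesh:
    def __init__(self, points):
        self.points = points

    @lazy_property
    def bounding_box(self):
        print('computing bounding box')           # runs only on the first access
        return (min(self.points), max(self.points))

m = Mesh([3, 1, 7])
print(m.bounding_box)   # prints the message, then (1, 7)
print(m.bounding_box)   # cached under _cache_bounding_box: no message this time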
nuagenetworks/vspk-python | vspk/v5_0/nuvirtualip.py | NUVirtualIP.associated_floating_ip_id | python | def associated_floating_ip_id(self):
return self._associated_floating_ip_id | Get associated_floating_ip_id value.
Notes:
ID of the Floating IP address associated with this virtual IP
This attribute is named `associatedFloatingIPID` in VSD API. | https://github.com/nuagenetworks/vspk-python/blob/375cce10ae144ad6017104e57fcd3630898cc2a6/vspk/v5_0/nuvirtualip.py#L253-L263 | from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUEventLogsFetcher
from bambou import NURESTObject
class NUVirtualIP(NURESTObject):
__rest_name__ = "virtualip"
__resource_name__ = "virtualips"
CONST_IP_TYPE_IPV6 = "IPV6"
CONST_IP_TYPE_IPV4 = "IPV4"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
super(NUVirtualIP, self).__init__()
self._mac = None
self._ip_type = None
self._last_updated_by = None
self._virtual_ip = None
self._entity_scope = None
self._associated_floating_ip_id = None
self._subnet_id = None
self._external_id = None
self.expose_attribute(local_name="mac", remote_name="MAC", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="ip_type", remote_name="IPType", attribute_type=str, is_required=False, is_unique=False, choices=[u'IPV4', u'IPV6'])
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="virtual_ip", remote_name="virtualIP", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="associated_floating_ip_id", remote_name="associatedFloatingIPID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="subnet_id", remote_name="subnetID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
@property
def mac(self):
return self._mac
@mac.setter
def mac(self, value):
self._mac = value
@property
def ip_type(self):
return self._ip_type
@ip_type.setter
def ip_type(self, value):
self._ip_type = value
@property
def last_updated_by(self):
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
self._last_updated_by = value
@property
def virtual_ip(self):
return self._virtual_ip
@virtual_ip.setter
def virtual_ip(self, value):
self._virtual_ip = value
@property
def entity_scope(self):
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
self._entity_scope = value
@property | BSD 3-Clause New or Revised License |
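A minimal sketch of reading the property documented above; it only constructs the object locally, assumes the bambou dependency is installed and that keyword construction via _compute_args sets exposed attributes as usual, and involves no live VSD session.

vip = NUVirtualIP(virtual_ip='10.0.0.50', ip_type='IPV4')
print(vip.virtual_ip, vip.ip_type)        # 10.0.0.50 IPV4
print(vip.associated_floating_ip_id)      # None until the VSD fills it in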
v7labs/darwin-py | darwin/dataset/remote_dataset.py | RemoteDataset.push | python | def push(
self,
files_to_upload: Optional[List[Union[PathLike, LocalFile]]],
*,
blocking: bool = True,
multi_threaded: bool = True,
fps: int = 0,
as_frames: bool = False,
files_to_exclude: Optional[List[PathLike]] = None,
path: Optional[str] = None,
preserve_folders: bool = False,
progress_callback: Optional[ProgressCallback] = None,
file_upload_callback: Optional[FileUploadCallback] = None,
):
if files_to_exclude is None:
files_to_exclude = []
if files_to_upload is None:
raise ValueError("No files or directory specified.")
uploading_files = [item for item in files_to_upload if isinstance(item, LocalFile)]
search_files = [item for item in files_to_upload if not isinstance(item, LocalFile)]
generic_parameters_specified = path is not None or fps != 0 or as_frames is not False
if uploading_files and generic_parameters_specified:
raise ValueError("Cannot specify a path when uploading a LocalFile object.")
for found_file in find_files(search_files, files_to_exclude=files_to_exclude):
local_path = path
if preserve_folders:
source_files = [source_file for source_file in search_files if is_relative_to(found_file, source_file)]
if source_files:
local_path = str(found_file.relative_to(source_files[0]).parent)
uploading_files.append(LocalFile(found_file, fps=fps, as_frames=as_frames, path=local_path))
if not uploading_files:
raise ValueError("No files to upload, check your path, exclusion filters and resume flag")
handler = UploadHandler(self, uploading_files)
if blocking:
handler.upload(
multi_threaded=multi_threaded,
progress_callback=progress_callback,
file_upload_callback=file_upload_callback,
)
else:
handler.prepare_upload()
return handler | Uploads a local dataset (images ONLY) in the datasets directory.
Parameters
----------
files_to_upload : Optional[List[Union[PathLike, LocalFile]]]
List of files to upload. Those can be folders.
blocking : bool
If False, the dataset is not uploaded and a generator function is returned instead.
multi_threaded : bool
Uses multiprocessing to upload the dataset in parallel.
If blocking is False this has no effect.
files_to_exclude : Optional[List[PathLike]]
Optional list of files to exclude from the file scan. Those can be folders.
fps : int
When the uploading file is a video, specify its framerate.
as_frames: bool
When the uploading file is a video, specify whether it's going to be uploaded as a list of frames.
path: Optional[str]
Optional path to store the files in.
preserve_folders : bool
Specify whether or not to preserve folder paths when uploading
progress_callback: Optional[ProgressCallback]
Optional callback, called every time the progress of an uploading files is reported.
file_upload_callback: Optional[FileUploadCallback]
Optional callback, called every time a file chunk is uploaded.
Returns
-------
handler : UploadHandler
Class for handling uploads, progress and error messages | https://github.com/v7labs/darwin-py/blob/694253ec520ec32d791eb4a2d0b8acc9ad686b33/darwin/dataset/remote_dataset.py#L88-L168 | import json
import shutil
import tempfile
import zipfile
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterator, List, Optional, Union
from urllib import parse
from darwin.dataset.download_manager import download_all_images_from_annotations
from darwin.dataset.identifier import DatasetIdentifier
from darwin.dataset.release import Release
from darwin.dataset.split_manager import split_dataset
from darwin.dataset.upload_manager import (
FileUploadCallback,
LocalFile,
ProgressCallback,
UploadHandler,
)
from darwin.dataset.utils import (
exhaust_generator,
get_annotations,
get_classes,
is_relative_to,
is_unix_like_os,
make_class_lists,
sanitize_filename,
)
from darwin.datatypes import AnnotationClass
from darwin.exceptions import NotFound, UnsupportedExportFormat
from darwin.exporter.formats.darwin import build_image_annotation
from darwin.item import DatasetItem, parse_dataset_item
from darwin.item_sorter import ItemSorter
from darwin.types import PathLike
from darwin.utils import find_files, parse_darwin_json, split_video_annotation, urljoin
from darwin.validators import name_taken, validation_error
from rich.console import Console
if TYPE_CHECKING:
from darwin.client import Client
class RemoteDataset:
def __init__(
self,
*,
client: "Client",
team: str,
name: str,
slug: str,
dataset_id: int,
image_count: int = 0,
progress: float = 0,
):
self.team = team
self.name = name
self.slug = slug or name
self.dataset_id = dataset_id
self.image_count = image_count
self.progress = progress
self.client = client
self.annotation_types = None
self.console: Console = Console() | MIT License |
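A hypothetical call using only parameters documented in the push docstring above; it assumes `dataset` is a RemoteDataset obtained from an authenticated darwin.client.Client, and the local paths are placeholders.

handler = dataset.push(
    files_to_upload=["./images"],          # folders are scanned with find_files()
    files_to_exclude=["./images/raw"],
    preserve_folders=True,                 # keep the local folder layout as remote paths
    blocking=True,
    multi_threaded=True,
)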
prajdabre/yanmtt | transformers/src/transformers/models/t5/modeling_tf_t5.py | TFT5Attention.compute_bias | python | def compute_bias(self, query_length, key_length):
context_position = tf.range(query_length)[:, None]
memory_position = tf.range(key_length)[None, :]
relative_position = memory_position - context_position
relative_position_bucket = self._relative_position_bucket(
relative_position,
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
)
values = self.relative_attention_bias(relative_position_bucket)
values = tf.expand_dims(
tf.transpose(values, [2, 0, 1]), axis=0
)
return values | Compute binned relative position bias | https://github.com/prajdabre/yanmtt/blob/4d329c3bcb81ca432d5947bb4673897086ee7f32/transformers/src/transformers/models/t5/modeling_tf_t5.py#L226-L240 | import copy
import itertools
import math
import warnings
from typing import Tuple
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFBaseModelOutputWithPast,
TFSeq2SeqLMOutput,
TFSeq2SeqModelOutput,
)
from ...modeling_tf_utils import (
TFCausalLanguageModelingLoss,
TFPreTrainedModel,
TFSharedEmbeddings,
TFWrappedEmbeddings,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_t5 import T5Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
_TOKENIZER_FOR_DOC = "T5Tokenizer"
TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST = [
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
]
class TFT5LayerNorm(tf.keras.layers.Layer):
def __init__(self, epsilon=1e-6, **kwargs):
super().__init__(**kwargs)
self.variance_epsilon = epsilon
def build(self, input_shape):
self.weight = self.add_weight("weight", shape=(input_shape[-1],), initializer="ones")
super().build(input_shape)
def call(self, hidden_states):
variance = tf.math.reduce_mean(tf.math.square(hidden_states), axis=-1, keepdims=True)
hidden_states = hidden_states * tf.math.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states
class TFT5DenseReluDense(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.wi = tf.keras.layers.Dense(config.d_ff, use_bias=False, name="wi")
self.wo = tf.keras.layers.Dense(config.d_model, use_bias=False, name="wo")
self.dropout = tf.keras.layers.Dropout(config.dropout_rate)
self.act = tf.keras.activations.relu
def call(self, hidden_states, training=False):
hidden_states = self.wi(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.wo(hidden_states)
return hidden_states
class TFT5GatedGeluDense(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.wi_0 = tf.keras.layers.Dense(config.d_ff, use_bias=False, name="wi_0")
self.wi_1 = tf.keras.layers.Dense(config.d_ff, use_bias=False, name="wi_1")
self.wo = tf.keras.layers.Dense(config.d_model, use_bias=False, name="wo")
self.dropout = tf.keras.layers.Dropout(config.dropout_rate)
self.act = get_tf_activation("gelu_new")
def call(self, hidden_states, training=False):
hidden_gelu = self.act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.wo(hidden_states)
return hidden_states
class TFT5LayerFF(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.feed_forward_proj == "relu":
self.DenseReluDense = TFT5DenseReluDense(config, name="DenseReluDense")
elif config.feed_forward_proj == "gated-gelu":
self.DenseReluDense = TFT5GatedGeluDense(config, name="DenseReluDense")
else:
raise ValueError(
f"{self.config.feed_forward_proj} is not supported. Choose between `relu` and `gated-gelu`"
)
self.layer_norm = TFT5LayerNorm(epsilon=config.layer_norm_epsilon, name="layer_norm")
self.dropout = tf.keras.layers.Dropout(config.dropout_rate)
def call(self, hidden_states, training=False):
normed_hidden_states = self.layer_norm(hidden_states)
dense_output = self.DenseReluDense(normed_hidden_states, training=training)
hidden_states = hidden_states + self.dropout(dense_output, training=training)
return hidden_states
class TFT5Attention(tf.keras.layers.Layer):
NEW_ID = itertools.count()
def __init__(self, config, has_relative_attention_bias=False, **kwargs):
super().__init__(**kwargs)
self.layer_id = next(TFT5Attention.NEW_ID)
self.is_decoder = config.is_decoder
self.use_cache = config.use_cache
self.has_relative_attention_bias = has_relative_attention_bias
self.output_attentions = config.output_attentions
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.inner_dim = self.n_heads * self.key_value_proj_dim
self.q = tf.keras.layers.Dense(self.inner_dim, use_bias=False, name="q")
self.k = tf.keras.layers.Dense(self.inner_dim, use_bias=False, name="k")
self.v = tf.keras.layers.Dense(self.inner_dim, use_bias=False, name="v")
self.o = tf.keras.layers.Dense(self.d_model, use_bias=False, name="o")
self.dropout = tf.keras.layers.Dropout(config.dropout_rate)
if self.has_relative_attention_bias:
self.relative_attention_bias = tf.keras.layers.Embedding(
self.relative_attention_num_buckets,
self.n_heads,
name="relative_attention_bias",
)
self.pruned_heads = set()
def prune_heads(self, heads):
raise NotImplementedError
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += tf.dtypes.cast(tf.math.greater(relative_position, 0), tf.int32) * num_buckets
relative_position = tf.math.abs(relative_position)
else:
relative_position = -tf.math.minimum(relative_position, 0)
max_exact = num_buckets // 2
is_small = tf.math.less(relative_position, max_exact)
relative_position_if_large = max_exact + tf.dtypes.cast(
tf.math.log(tf.dtypes.cast(relative_position, tf.float32) / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact),
tf.int32,
)
relative_position_if_large = tf.math.minimum(relative_position_if_large, num_buckets - 1)
relative_buckets += tf.where(is_small, relative_position, relative_position_if_large)
return relative_buckets | MIT License |
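A NumPy sketch of the relative-position bucketing that compute_bias above feeds into the relative_attention_bias embedding; it mirrors _relative_position_bucket for the bidirectional (encoder) case and is meant for inspection, not as the TensorFlow implementation.

import numpy as np

def relative_position_bucket(relative_position, num_buckets=32, max_distance=128):
    # Bidirectional variant of the bucketing used by compute_bias above.
    num_buckets //= 2
    buckets = (relative_position > 0).astype(np.int32) * num_buckets
    rp = np.abs(relative_position)
    max_exact = num_buckets // 2
    is_small = rp < max_exact
    # np.maximum(rp, 1) only guards log(0); those positions take the small branch anyway
    large = max_exact + (
        np.log(np.maximum(rp, 1) / max_exact)
        / np.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).astype(np.int32)
    large = np.minimum(large, num_buckets - 1)
    return buckets + np.where(is_small, rp, large)

context = np.arange(4)[:, None]
memory = np.arange(4)[None, :]
print(relative_position_bucket(memory - context))   # 4x4 array of bucket ids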
asteroid-team/asteroid | asteroid/dsp/overlap_add.py | LambdaOverlapAdd.ola_forward | python | def ola_forward(self, x):
assert x.ndim == 3
batch, channels, n_frames = x.size()
unfolded = torch.nn.functional.unfold(
x.unsqueeze(-1),
kernel_size=(self.window_size, 1),
padding=(self.window_size, 0),
stride=(self.hop_size, 1),
)
out = []
n_chunks = unfolded.shape[-1]
for frame_idx in range(n_chunks):
frame = self.nnet(unfolded[..., frame_idx])
if frame_idx == 0:
assert frame.ndim == 3, "nnet should return (batch, n_src, time)"
if self.n_src is not None:
assert frame.shape[1] == self.n_src, "nnet should return (batch, n_src, time)"
n_src = frame.shape[1]
frame = frame.reshape(batch * n_src, -1)
if frame_idx != 0 and self.reorder_chunks:
frame = _reorder_sources(frame, out[-1], n_src, self.window_size, self.hop_size)
if self.use_window:
frame = frame * self.window
else:
frame = frame / (self.window_size / self.hop_size)
out.append(frame)
out = torch.stack(out).reshape(n_chunks, batch * n_src, self.window_size)
out = out.permute(1, 2, 0)
out = torch.nn.functional.fold(
out,
(n_frames, 1),
kernel_size=(self.window_size, 1),
padding=(self.window_size, 0),
stride=(self.hop_size, 1),
)
return out.squeeze(-1).reshape(batch, n_src, -1) | Heart of the class: segment signal, apply func, combine with OLA. | https://github.com/asteroid-team/asteroid/blob/64e10e9de840ada77719ff4fa280be42a19aa51c/asteroid/dsp/overlap_add.py#L84-L131 | import torch
from torch import nn
from ..losses.pit_wrapper import PITReorder
class LambdaOverlapAdd(torch.nn.Module):
def __init__(
self,
nnet,
n_src,
window_size,
hop_size=None,
window="hanning",
reorder_chunks=True,
enable_grad=False,
):
super().__init__()
assert window_size % 2 == 0, "Window size must be even"
self.nnet = nnet
self.window_size = window_size
self.hop_size = hop_size if hop_size is not None else window_size // 2
self.n_src = n_src
self.in_channels = getattr(nnet, "in_channels", None)
if window:
from scipy.signal import get_window
window = get_window(window, self.window_size).astype("float32")
window = torch.from_numpy(window)
self.use_window = True
else:
self.use_window = False
self.register_buffer("window", window)
self.reorder_chunks = reorder_chunks
self.enable_grad = enable_grad | MIT License |
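A usage sketch for the class above, wrapping a trivial identity "separator" so the chunked forward can be run end to end; it assumes LambdaOverlapAdd is importable from asteroid.dsp.overlap_add and that calling ola_forward directly is acceptable for a quick shape check.

import torch

class Identity(torch.nn.Module):
    def forward(self, x):
        return x.unsqueeze(1)        # pretend to emit n_src=1 source: (batch, 1, time)

ola = LambdaOverlapAdd(
    nnet=Identity(),
    n_src=1,
    window_size=128,
    hop_size=64,
    window="hann",
    reorder_chunks=False,            # no permutation tracking needed for one source
)
wav = torch.randn(2, 1, 8000)        # (batch, channels, time)
out = ola.ola_forward(wav)
print(out.shape)                     # torch.Size([2, 1, 8000])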
conchylicultor/musicgenerator | deepmusic/modulemanager.py | ModuleManager.save | python | def save(self, config_group):
config_group[self.name] = ' '.join([self.module_name] + self.module_parameters) | Save the current module parameters
Args:
config_group (dict): dictionary where to write the configuration | https://github.com/conchylicultor/musicgenerator/blob/adea76dccaba923b7d3807082ec6f5b512d16bb9/deepmusic/modulemanager.py#L111-L117 | from collections import OrderedDict
class ModuleManager:
def __init__(self, name):
self.name = name
self.modules = OrderedDict()
self.module_instance = None
self.module_name = ''
self.module_parameters = []
def register(self, module):
assert not module.get_module_id() in self.modules
self.modules[module.get_module_id()] = module
def get_modules_ids(self):
return self.modules.keys()
def get_chosen_name(self):
return self.module_name
def get_module(self):
assert self.module_instance is not None
return self.module_instance
def build_module(self, args):
assert self.module_instance is None
module_args = getattr(args, self.name)
self.module_name = module_args[0]
self.module_parameters = module_args[1:]
self.module_instance = self.modules[self.module_name](args, *self.module_parameters)
return self.module_instance
def add_argparse(self, group_args, comment):
assert len(self.modules.keys())
keys = list(self.modules.keys())
group_args.add_argument(
'--{}'.format(self.name),
type=str,
nargs='+',
default=[keys[0]],
help=comment + ' Choices available: {}'.format(', '.join(keys))
) | Apache License 2.0 |
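A toy end-to-end illustration of the register / build_module / save flow above; the module class and the argparse namespace stand-in are invented for the example.

class LinearBatchBuilder:
    @staticmethod
    def get_module_id():
        return 'linear'

    def __init__(self, args, *params):
        self.params = params

class FakeArgs:
    batch_builder = ['linear', '32']      # what argparse would have produced

manager = ModuleManager('batch_builder')
manager.register(LinearBatchBuilder)
manager.build_module(FakeArgs())

config = {}
manager.save(config)
print(config)                             # {'batch_builder': 'linear 32'}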
markblundeberg/openswap | lib/util.py | bh2u | python | def bh2u(x):
return hfu(x).decode('ascii') | str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020a'
:param x: bytes
:rtype: str | https://github.com/markblundeberg/openswap/blob/7de04aa80dab79bebe4b64483011dad70a48694c/lib/util.py#L356-L367 | import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
import decimal
from decimal import Decimal
import traceback
import threading
import hmac
import stat
from .i18n import _
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'BCH':8, 'mBCH':5, 'cash':2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
class NotEnoughFunds(Exception): pass
class ExcessiveFee(Exception): pass
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
class FileImportFailed(Exception):
def __str__(self):
return _("Failed to import file.")
class FileImportFailedEncrypted(FileImportFailed):
def __str__(self):
return (_('Failed to import file.') + ' ' +
_('Perhaps it is encrypted...') + '\n' +
_('Importing encrypted files is not supported.'))
class UserCancelled(Exception):
pass
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
return super(MyEncoder, self).default(obj)
class PrintError(object):
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_stderr(self, *msg):
print_stderr("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
def run(self):
pass
class DebugMem(ThreadJob):
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
try:
import jnius
jnius.detach()
self.print_error("jnius detach")
except ImportError:
pass
self.print_error("stopped")
is_verbose = True
def set_verbosity(b):
global is_verbose
is_verbose = b
class cachedproperty(object):
def __init__(self, f):
self.f = f
def __get__(self, obj, type):
obj = obj or type
value = self.f(obj)
setattr(obj, self.f.__name__, value)
return value
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
def constant_time_compare(val1, val2):
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
def android_ext_dir():
try:
import jnius
env = jnius.autoclass('android.os.Environment')
except ImportError:
from android.os import Environment as env
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
try:
import jnius
context = jnius.autoclass('org.kivy.android.PythonActivity').mActivity
except ImportError:
from com.chaquo.python import Python
context = Python.getPlatform().getApplication()
return context.getFilesDir().getPath() + '/data'
def android_headers_dir():
try:
import jnius
d = android_ext_dir() + '/org.electron.electron'
if not os.path.exists(d):
os.mkdir(d)
return d
except ImportError:
return android_data_dir()
def ensure_sparse_file(filename):
if os.name == "nt":
try:
os.system("fsutil sparse setFlag \""+ filename +"\" 1")
except:
pass
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_datadir_available(config_path):
path = config_path
if os.path.exists(path):
return
else:
raise FileNotFoundError(
'Electron Cash datadir does not exist. Was it deleted while running?' + '\n' +
'Should be at {}'.format(path))
def assert_file_in_datadir_available(path, config_path):
if os.path.exists(path):
return
else:
assert_datadir_available(config_path)
raise FileNotFoundError(
'Cannot find file but datadir is there.' + '\n' +
'Should be at {}'.format(path))
def assert_bytes(*args):
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify | MIT License |
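A quick round trip with the helpers above (bh2u plus the module's bfh = bytes.fromhex alias), runnable once this util module is importable.

payload = bytes((1, 2, 10))
hex_str = bh2u(payload)
print(hex_str)                  # '01020a' - binascii.hexlify emits lowercase
assert bfh(hex_str) == payload  # bfh is bytes.fromhex, so the round trip is exact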
spilchen/yahoo_fantasy_api | yahoo_fantasy_api/league.py | League.edit_date | python | def edit_date(self):
if self.edit_date_cache is None:
json = self.yhandler.get_settings_raw(self.league_id)
t = objectpath.Tree(json)
edit_key = t.execute('$..edit_key[0]')
self.edit_date_cache = datetime.datetime.strptime(edit_key, '%Y-%m-%d').date()
return self.edit_date_cache | Return the next day that you can edit the lineups.
:return: edit date
:rtype: :class: datetime.date | https://github.com/spilchen/yahoo_fantasy_api/blob/867444eecffe46541c9c099f4ffc06ab5c178bd2/yahoo_fantasy_api/league.py#L579-L591 | import yahoo_fantasy_api as yfa
from yahoo_fantasy_api import yhandler
import objectpath
import datetime
import re
class League:
def __init__(self, sc, league_id):
self.sc = sc
self.league_id = league_id
self.yhandler = yhandler.YHandler(sc)
self.current_week_cache = None
self.end_week_cache = None
self.week_date_range_cache = {}
self.free_agent_cache = {}
self.waivers_cache = None
self.taken_players_cache = None
self.stat_categories_cache = None
self.settings_cache = None
self.edit_date_cache = None
self.positions_cache = None
self.stats_id_map = None
self.player_details_cache = {}
def inject_yhandler(self, yhandler):
self.yhandler = yhandler
def to_team(self, team_key):
tm = yfa.Team(self.sc, team_key)
tm.inject_yhandler(self.yhandler)
return tm
def standings(self):
json = self.yhandler.get_standings_raw(self.league_id)
t = objectpath.Tree(json)
num_teams = int(t.execute('$..count[0]'))
standings = []
for i in range(num_teams):
team = {}
for e in t.execute('$..teams.."{}".team[0]'.format(i)):
if isinstance(e, list):
for td in e:
if "team_key" in td or 'name' in td:
self._merge_dicts(team, td, [])
elif "team_standings" in e:
self._merge_dicts(team, e['team_standings'], [])
standings.append(team)
return standings
def teams(self):
json = self.yhandler.get_standings_raw(self.league_id)
t = objectpath.Tree(json)
num_teams = int(t.execute('$..count[0]'))
teams = {}
for i in range(num_teams):
team = {}
key = None
for e in t.execute('$..teams.."{}".team[0][0]'.format(i)):
if "team_key" in e:
key = e['team_key']
if isinstance(e, dict):
self._merge_dicts(team, e, [])
teams[key] = team
return teams
def matchups(self, week=None):
json = self.yhandler.get_scoreboard_raw(self.league_id, week=week)
return json
def settings(self):
if self.settings_cache is None:
json = self.yhandler.get_settings_raw(self.league_id)
data = {}
if "fantasy_content" in json:
content = json["fantasy_content"]
if "league" in content:
self._merge_dicts(data, content["league"][0], [])
self._merge_dicts(data,
content["league"][1]["settings"][0],
["roster_positions", "stat_categories"])
self.settings_cache = data
return self.settings_cache
def stat_categories(self):
if self.stat_categories_cache is None:
t = objectpath.Tree(self.yhandler.get_settings_raw(self.league_id))
json = t.execute('$..stat_categories..stat')
simple_stat = []
for s in json:
if 'is_only_display_stat' not in s:
simple_stat.append({"display_name": s["display_name"],
"position_type": s["position_type"]})
self.stat_categories_cache = simple_stat
return self.stat_categories_cache
def team_key(self):
t = objectpath.Tree(self.yhandler.get_teams_raw())
json = t.execute('$..(team_key)')
for t in json:
if t['team_key'].startswith(self.league_id):
return t['team_key']
def current_week(self):
if self.current_week_cache is None:
t = objectpath.Tree(self.yhandler.get_scoreboard_raw(
self.league_id))
self.current_week_cache = int(t.execute('$..current_week[0]'))
return self.current_week_cache
def end_week(self):
if self.end_week_cache is None:
t = objectpath.Tree(
self.yhandler.get_scoreboard_raw(self.league_id))
self.end_week_cache = int(t.execute('$..end_week[0]'))
return self.end_week_cache
def week_date_range(self, week):
if week <= self.current_week() or week == 1:
return self._date_range_of_played_or_current_week(week)
elif week == self.current_week() + 1:
(cur_st, cur_end) = self._date_range_of_played_or_current_week(
week - 1)
req_st = cur_end + datetime.timedelta(days=1)
req_end = cur_end + datetime.timedelta(days=7)
return (req_st, req_end)
else:
raise RuntimeError("Cannot request date range more than one week "
"past the current week. The requested week is "
"{}, but current week is {}.".format(
week, self.current_week()))
def free_agents(self, position):
if position not in self.free_agent_cache:
self.free_agent_cache[position] = self._fetch_players(
'FA', position=position)
return self.free_agent_cache[position]
def waivers(self):
if not self.waivers_cache:
self.waivers_cache = self._fetch_players('W')
return self.waivers_cache
def taken_players(self):
if not self.taken_players_cache:
self.taken_players_cache = self._fetch_players('T')
return self.taken_players_cache
def _fetch_players(self, status, position=None):
PLAYERS_PER_PAGE = 25
plyrs = []
plyrIndex = 0
while plyrIndex % PLAYERS_PER_PAGE == 0:
j = self.yhandler.get_players_raw(self.league_id, plyrIndex,
status, position=position)
(num_plyrs_on_pg, fa_on_pg) = self._players_from_page(j)
if len(fa_on_pg) == 0:
break
plyrs += fa_on_pg
plyrIndex += num_plyrs_on_pg
return plyrs
def _players_from_page(self, page):
fa = []
if len(page['fantasy_content']['league'][1]['players']) == 0:
return (0, fa)
t = objectpath.Tree(page)
pct_owns = self._pct_owned_from_page(iter(list(t.execute(
'$..percent_owned.(coverage_type,value)'))))
for i, pct_own in zip(range(0, t.execute('$..players.count[0]')*2, 2),
pct_owns):
path = '$..players..player[{}].'.format(i) + "(name,player_id,position_type,status,eligible_positions)"
obj = list(t.execute(path))
plyr = {}
for ele in obj:
for k in ele.keys():
plyr[k] = ele[k]
plyr['player_id'] = int(plyr['player_id'])
plyr['name'] = plyr['name']['full']
plyr['eligible_positions'] = [e['position'] for e in
plyr['eligible_positions']]
plyr['percent_owned'] = pct_own
if "status" not in plyr:
plyr["status"] = ""
if plyr["status"] != "NA":
fa.append(plyr)
return (i/2 + 1, fa)
def _pct_owned_from_page(self, po_it):
po = []
i = 0
try:
while True:
ele = next(po_it)
if "coverage_type" in ele:
po.append(0)
i += 1
if "value" in ele:
po[i-1] = ele['value']
except StopIteration:
pass
return po
def _date_range_of_played_or_current_week(self, week):
if week not in self.week_date_range_cache:
t = objectpath.Tree(self.yhandler.get_scoreboard_raw(
self.league_id, week))
j = t.execute('$..(week_start,week_end)[0]')
self.week_date_range_cache[week] = (
datetime.datetime.strptime(j['week_start'], "%Y-%m-%d").date(),
datetime.datetime.strptime(j['week_end'], "%Y-%m-%d").date())
return self.week_date_range_cache[week]
def player_details(self, player):
if isinstance(player, int):
player = [player]
self._cache_player_details(player)
players = []
if isinstance(player, list):
for p in player:
players.append(self.player_details_cache[p])
elif player in self.player_details_cache:
assert(isinstance(self.player_details_cache[player], list))
players = self.player_details_cache[player]
return players
def percent_owned(self, player_ids):
t = objectpath.Tree(self.yhandler.get_percent_owned_raw(
self.league_id, player_ids))
player_ids = t.execute("$..player_id")
it = t.execute("$..(player_id,full,value)")
po = []
try:
while True:
plyr = {"player_id": int(next(it)["player_id"]),
"name": next(it)["full"],
"percent_owned": next(it)["value"]}
po.append(plyr)
except StopIteration:
pass
return po
def ownership(self, player_ids):
t = objectpath.Tree(self.yhandler.get_player_ownership_raw(self.league_id, player_ids))
owner_details = t.execute("$..(player_id,ownership_type,owner_team_name)")
ownership = {}
try:
while True:
player_id = next(owner_details)['player_id']
ownership_details = next(owner_details)
ownership[player_id] = ownership_details
except StopIteration:
pass
return ownership | MIT License |
iristyle/chocolateypackages | EthanBrown.SublimeText2.WebPackages/tools/PackageCache/SublimeLinter/sublimelinter/modules/libs/pyflakes/checker.py | Checker._runDeferred | python | def _runDeferred(self, deferred):
for handler, scope in deferred:
self.scopeStack = scope
handler() | Run the callables in C{deferred} using their associated scope stack. | https://github.com/iristyle/chocolateypackages/blob/8c9833710577de6db6e8b1db5d9196e19e19d117/EthanBrown.SublimeText2.WebPackages/tools/PackageCache/SublimeLinter/sublimelinter/modules/libs/pyflakes/checker.py#L229-L235 | import __builtin__
import os.path
import _ast
from pyflakes import messages
try:
import ast
iter_child_nodes = ast.iter_child_nodes
except (ImportError, AttributeError):
def iter_child_nodes(node, astcls=_ast.AST):
for name in node._fields:
field = getattr(node, name, None)
if isinstance(field, astcls):
yield field
elif isinstance(field, list):
for item in field:
yield item
class Binding(object):
def __init__(self, name, source):
self.name = name
self.source = source
self.used = False
def __str__(self):
return self.name
def __repr__(self):
return '<%s object %r from line %r at 0x%x>' % (self.__class__.__name__,
self.name,
self.source.lineno,
id(self))
class UnBinding(Binding):
class Importation(Binding):
def __init__(self, name, source):
self.fullName = name
name = name.split('.')[0]
super(Importation, self).__init__(name, source)
class Argument(Binding):
class Assignment(Binding):
class FunctionDefinition(Binding):
pass
class ExportBinding(Binding):
def names(self):
names = []
if isinstance(self.source, _ast.List):
for node in self.source.elts:
if isinstance(node, _ast.Str):
names.append(node.s)
return names
class Scope(dict):
importStarred = False
def __repr__(self):
return '<%s at 0x%x %s>' % (self.__class__.__name__, id(self), dict.__repr__(self))
def __init__(self):
super(Scope, self).__init__()
class ClassScope(Scope):
pass
class FunctionScope(Scope):
def __init__(self):
super(FunctionScope, self).__init__()
self.globals = {}
class ModuleScope(Scope):
pass
_MAGIC_GLOBALS = ['__file__', '__builtins__']
class Checker(object):
nodeDepth = 0
traceTree = False
def __init__(self, tree, filename='(none)'):
self._deferredFunctions = []
self._deferredAssignments = []
self.dead_scopes = []
self.messages = []
self.filename = filename
self.scopeStack = [ModuleScope()]
self.futuresAllowed = True
self.handleChildren(tree)
self._runDeferred(self._deferredFunctions)
self._deferredFunctions = None
self._runDeferred(self._deferredAssignments)
self._deferredAssignments = None
del self.scopeStack[1:]
self.popScope()
self.check_dead_scopes()
def deferFunction(self, callable):
self._deferredFunctions.append((callable, self.scopeStack[:]))
def deferAssignment(self, callable):
self._deferredAssignments.append((callable, self.scopeStack[:])) | MIT License |
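A stand-alone sketch of the "defer now, run later with the captured scope stack" pattern that deferFunction and _runDeferred above implement; this is not pyflakes itself.

class MiniChecker:
    def __init__(self):
        self.scopeStack = [{'kind': 'module'}]
        self._deferred = []

    def defer(self, handler):
        self._deferred.append((handler, self.scopeStack[:]))   # snapshot the stack

    def run_deferred(self):
        for handler, scope in self._deferred:
            self.scopeStack = scope        # restore the snapshot before running
            handler()

checker = MiniChecker()
checker.scopeStack.append({'kind': 'function', 'name': 'f'})
checker.defer(lambda: print('handler sees', len(checker.scopeStack), 'scopes'))
checker.scopeStack = checker.scopeStack[:1]    # leave the function scope
checker.run_deferred()                         # -> handler sees 2 scopes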
artyompal/tpu_models | models/official/detection/evaluation/coco_utils.py | generate_annotation_file | python | def generate_annotation_file(groundtruth_generator,
annotation_file):
groundtruths = {}
tf.logging.info('Loading groundtruth annotations from dataset to memory...')
for groundtruth in groundtruth_generator():
for k, v in six.iteritems(groundtruth):
if k not in groundtruths:
groundtruths[k] = [v]
else:
groundtruths[k].append(v)
gt_dataset = convert_groundtruths_to_coco_dataset(groundtruths)
tf.logging.info('Saving groundtruth annotations to the JSON file...')
with tf.gfile.Open(annotation_file, 'w') as f:
f.write(json.dumps(gt_dataset))
tf.logging.info('Done saving the JSON file...') | Generates a COCO-style annotation JSON file given a groundtruth generator. | https://github.com/artyompal/tpu_models/blob/639306f30e085bb1cdb5b1118a4c96a2dbe14e3e/models/official/detection/evaluation/coco_utils.py#L345-L361 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import numpy as np
from PIL import Image
from pycocotools import coco
from pycocotools import mask as mask_utils
import six
import tensorflow as tf
from dataloader import tf_example_decoder
from utils import box_utils
class COCOWrapper(coco.COCO):
def __init__(self, eval_type='box', annotation_file=None, gt_dataset=None):
if ((annotation_file and gt_dataset) or
((not annotation_file) and (not gt_dataset))):
raise ValueError('One and only one of `annotation_file` and `gt_dataset` '
'needs to be specified.')
if eval_type not in ['box', 'mask']:
raise ValueError('The `eval_type` can only be either `box` or `mask`.')
coco.COCO.__init__(self, annotation_file=annotation_file)
self._eval_type = eval_type
if gt_dataset:
self.dataset = gt_dataset
self.createIndex()
def loadRes(self, predictions):
res = coco.COCO()
res.dataset['images'] = copy.deepcopy(self.dataset['images'])
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
image_ids = [ann['image_id'] for ann in predictions]
if set(image_ids) != (set(image_ids) & set(self.getImgIds())):
raise ValueError('Results do not correspond to the current dataset!')
for ann in predictions:
x1, x2, y1, y2 = [ann['bbox'][0], ann['bbox'][0] + ann['bbox'][2],
ann['bbox'][1], ann['bbox'][1] + ann['bbox'][3]]
if self._eval_type == 'box':
ann['area'] = ann['bbox'][2] * ann['bbox'][3]
ann['segmentation'] = [
[x1, y1, x1, y2, x2, y2, x2, y1]]
elif self._eval_type == 'mask':
ann['bbox'] = mask_utils.toBbox(ann['segmentation'])
ann['area'] = mask_utils.area(ann['segmentation'])
res.dataset['annotations'] = copy.deepcopy(predictions)
res.createIndex()
return res
def convert_predictions_to_coco_annotations(predictions):
coco_predictions = []
num_batches = len(predictions['source_id'])
batch_size = predictions['source_id'][0].shape[0]
max_num_detections = predictions['detection_classes'][0].shape[1]
for i in range(num_batches):
for j in range(batch_size):
for k in range(max_num_detections):
ann = {}
ann['image_id'] = predictions['source_id'][i][j]
ann['category_id'] = predictions['detection_classes'][i][j, k]
boxes = predictions['detection_boxes'][i]
ann['bbox'] = [
boxes[j, k, 1],
boxes[j, k, 0],
boxes[j, k, 3] - boxes[j, k, 1],
boxes[j, k, 2] - boxes[j, k, 0]]
ann['score'] = predictions['detection_scores'][i][j, k]
if 'detection_masks' in predictions:
encoded_mask = mask_utils.encode(
np.asfortranarray(
predictions['detection_masks'][i][j, k].astype(np.uint8)))
ann['segmentation'] = encoded_mask
coco_predictions.append(ann)
for i, ann in enumerate(coco_predictions):
ann['id'] = i + 1
return coco_predictions
def convert_groundtruths_to_coco_dataset(groundtruths, label_map=None):
source_ids = np.concatenate(groundtruths['source_id'], axis=0)
heights = np.concatenate(groundtruths['height'], axis=0)
widths = np.concatenate(groundtruths['width'], axis=0)
gt_images = [{'id': int(i), 'height': int(h), 'width': int(w)} for i, h, w
in zip(source_ids, heights, widths)]
gt_annotations = []
num_batches = len(groundtruths['source_id'])
batch_size = groundtruths['source_id'][0].shape[0]
for i in range(num_batches):
for j in range(batch_size):
num_instances = groundtruths['num_detections'][i][j]
for k in range(num_instances):
ann = {}
ann['image_id'] = int(groundtruths['source_id'][i][j])
if 'is_crowds' in groundtruths:
ann['iscrowd'] = int(groundtruths['is_crowds'][i][j, k])
else:
ann['iscrowd'] = 0
ann['category_id'] = int(groundtruths['classes'][i][j, k])
boxes = groundtruths['boxes'][i]
ann['bbox'] = [
float(boxes[j, k, 1]),
float(boxes[j, k, 0]),
float(boxes[j, k, 3] - boxes[j, k, 1]),
float(boxes[j, k, 2] - boxes[j, k, 0])]
if 'areas' in groundtruths:
ann['area'] = float(groundtruths['areas'][i][j, k])
else:
ann['area'] = float(
(boxes[j, k, 3] - boxes[j, k, 1]) *
(boxes[j, k, 2] - boxes[j, k, 0]))
if 'masks' in groundtruths:
mask = Image.open(six.StringIO(groundtruths['masks'][i][j, k]))
width, height = mask.size
np_mask = (
np.array(mask.getdata()).reshape(height, width).astype(np.uint8))
np_mask[np_mask > 0] = 255
encoded_mask = mask_utils.encode(np.asfortranarray(np_mask))
ann['segmentation'] = encoded_mask
if 'areas' not in groundtruths:
ann['area'] = mask_utils.area(encoded_mask)
gt_annotations.append(ann)
for i, ann in enumerate(gt_annotations):
ann['id'] = i + 1
if label_map:
gt_categories = [{'id': i, 'name': label_map[i]} for i in label_map]
else:
category_ids = [gt['category_id'] for gt in gt_annotations]
gt_categories = [{'id': i} for i in set(category_ids)]
gt_dataset = {
'images': gt_images,
'categories': gt_categories,
'annotations': copy.deepcopy(gt_annotations),
}
return gt_dataset
class COCOGroundtruthGenerator(object):
def __init__(self, file_pattern, num_examples, include_mask):
self._file_pattern = file_pattern
self._num_examples = num_examples
self._include_mask = include_mask
self._dataset_fn = tf.data.TFRecordDataset
def _parse_single_example(self, example):
decoder = tf_example_decoder.TfExampleDecoder(
include_mask=self._include_mask)
decoded_tensors = decoder.decode(example)
image = decoded_tensors['image']
image_size = tf.shape(image)[0:2]
boxes = box_utils.denormalize_boxes(
decoded_tensors['groundtruth_boxes'], image_size)
groundtruths = {
'source_id': tf.string_to_number(
decoded_tensors['source_id'], out_type=tf.int64),
'height': decoded_tensors['height'],
'width': decoded_tensors['width'],
'num_detections': tf.shape(decoded_tensors['groundtruth_classes'])[0],
'boxes': boxes,
'classes': decoded_tensors['groundtruth_classes'],
'is_crowds': decoded_tensors['groundtruth_is_crowd'],
'areas': decoded_tensors['groundtruth_area'],
}
if self._include_mask:
groundtruths.update({
'masks': decoded_tensors['groundtruth_instance_masks_png'],
})
return groundtruths
def _build_pipeline(self):
dataset = tf.data.Dataset.list_files(self._file_pattern, shuffle=False)
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
lambda filename: self._dataset_fn(filename).prefetch(1),
cycle_length=32,
sloppy=False))
dataset = dataset.map(self._parse_single_example, num_parallel_calls=64)
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
dataset = dataset.batch(1, drop_remainder=False)
return dataset
def __call__(self):
with tf.Graph().as_default():
dataset = self._build_pipeline()
groundtruth = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
for _ in range(self._num_examples):
groundtruth_result = sess.run(groundtruth)
yield groundtruth_result
def scan_and_generator_annotation_file(file_pattern,
num_samples,
include_mask,
annotation_file):
groundtruth_generator = COCOGroundtruthGenerator(
file_pattern, num_samples, include_mask)
generate_annotation_file(groundtruth_generator, annotation_file) | Apache License 2.0 |
e-loue/pyke | pyke/target_pkg.py | target_pkg.reset | python | def reset(self, check_sources = True):
if debug: print >> sys.stderr, "target_pkg.reset"
self.dirty = False
self.check_sources = check_sources
self.source_packages = {}
self.compiled_targets = set()
self.rb_names = set() | This should be called once by engine.__init__ prior to calling
add_source_package. | https://github.com/e-loue/pyke/blob/cfe95d8aaa06de123264f9b7f5bea20eb5924ecd/pyke/target_pkg.py#L180-L192 | from __future__ import with_statement
import os, os.path
import time
import sys
import re
import pyke
debug = False
Name_test = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*$')
class target_pkg(object):
def __init__(self, module_name, filename = None,
pyke_version = pyke.version,
loader = None, sources = None, compiler_version = 0):
self.package_name = module_name.rsplit('.', 1)[0]
if sources is None:
try:
target_package_dir = os.path.dirname(import_(self.package_name).__file__)
except ImportError:
if debug:
print >> sys.stderr, "target_pkg: no target package", self.package_name
last_dot = self.package_name.rfind('.')
if last_dot < 0:
assert filename is not None
package_parent_dir = os.path.dirname(os.path.dirname(filename))
else:
package_parent_dir = os.path.dirname(
import_(self.package_name[:last_dot]).__file__)
if filename is not None:
assert os.path.normpath(
os.path.abspath(package_parent_dir)) == os.path.normpath(
os.path.dirname(os.path.dirname(filename))), "Internal error: %r != %r" % (
os.path.normpath(
os.path.abspath(package_parent_dir)),
os.path.normpath(
os.path.dirname(os.path.dirname(filename))))
if debug:
print >> sys.stderr, "target_pkg package_parent_dir:", package_parent_dir
target_package_dir = os.path.join(package_parent_dir,
self.package_name[last_dot + 1:])
if debug:
print >> sys.stderr, "target_pkg target_package_dir:", target_package_dir
if not os.path.lexists(target_package_dir):
if debug:
print >> sys.stderr, "target_pkg: mkdir", target_package_dir
os.mkdir(target_package_dir)
init_filepath = os.path.join(target_package_dir, '__init__.py')
if debug:
print >> sys.stderr, "target_pkg init_filepath:", init_filepath
if not os.path.lexists(init_filepath):
if debug:
print >> sys.stderr, "target_pkg: creating", init_filepath
open(init_filepath, 'w').close()
filename = os.path.join(target_package_dir,
'compiled_pyke_files.py')
if filename.endswith('.py'):
self.filename = filename
else:
self.filename = filename[:-1]
self.directory = os.path.dirname(self.filename)
if debug:
print >> sys.stderr, "target_pkg:", self.package_name, self.filename
self.loader = loader
if compiler_version == pyke.compiler_version:
self.sources = sources if sources is not None else {}
elif self.loader is None:
self.sources = {}
else:
raise AssertionError("%s: wrong version of pyke, "
"running %s, compiled for %s" %
(module_name, pyke.version, pyke_version)) | MIT License |
zomux/deepy | deepy/trainers/base.py | NeuralTrainer.load_params | python | def load_params(self, path, exclude_free_params=False):
self.network.load_params(path, exclude_free_params=exclude_free_params)
self.best_params = self.copy_params()
if self.network.train_logger.progress() > 0 or self.network.train_logger.epoch() > 0:
self.skip(self.network.train_logger.progress(), self.network.train_logger.epoch() - 1) | Load parameters for the training.
This method can load free parameters and resume the training progress. | https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/deepy/trainers/base.py#L144-L153 | import sys
import time
import numpy as np
import theano
from ..conf import TrainerConfig
from ..core import env, runtime
from ..utils import Timer
from ..dataset import Dataset
from controllers import TrainingController
from abc import ABCMeta, abstractmethod
from logging import getLogger
logging = getLogger("trainer")
class NeuralTrainer(object):
__metaclass__ = ABCMeta
def __init__(self, network, config=None, validator=None, annealer=None):
super(NeuralTrainer, self).__init__()
self.config = None
if isinstance(config, TrainerConfig):
self.config = config
elif isinstance(config, dict):
self.config = TrainerConfig(config)
else:
self.config = TrainerConfig()
if type(self.config.learning_rate) == float:
self.config.learning_rate = np.array(self.config.learning_rate, dtype=env.FLOATX)
self.model = self.network = network
self.network.prepare_training()
self._setup_costs()
self.evaluation_func = None
self.validation_frequency = self.config.validation_frequency
self.min_improvement = self.config.min_improvement
self.patience = self.config.patience
self._iter_controllers = []
self._epoch_controllers = []
if annealer:
annealer.bind(self)
self._epoch_controllers.append(annealer)
if validator:
validator.bind(self)
self._iter_controllers.append(validator)
self.best_cost = 1e100
self.best_epoch = 0
self.best_params = self.copy_params()
self._skip_batches = 0
self._skip_epochs = 0
self._progress = 0
self.last_cost = 0
self.last_run_costs = None
self._report_time = True
self._epoch = 0
self._current_train_set = None
self._current_valid_set = None
self._current_test_set = None
self._ended = False
def _compile_evaluation_func(self):
if not self.evaluation_func:
logging.info("compile evaluation function")
self.evaluation_func = theano.function(
self.network.input_variables + self.network.target_variables,
self.evaluation_variables,
updates=self.network.updates,
allow_input_downcast=True, mode=self.config.get("theano_mode", None))
def skip(self, n_batches, n_epochs=0):
logging.info("skip %d epochs and %d batches" % (n_epochs, n_batches))
self._skip_batches = n_batches
self._skip_epochs = n_epochs
def epoch(self):
return self._epoch
def _setup_costs(self):
self.cost = self._add_regularization(self.network.cost)
self.test_cost = self._add_regularization(self.network.test_cost)
self.training_variables = [self.cost]
self.training_names = ['J']
for name, monitor in self.network.training_monitors:
self.training_names.append(name)
self.training_variables.append(monitor)
logging.info("monitor list: %s" % ",".join(self.training_names))
self.evaluation_variables = [self.test_cost]
self.evaluation_names = ['J']
for name, monitor in self.network.testing_monitors:
self.evaluation_names.append(name)
self.evaluation_variables.append(monitor)
def _add_regularization(self, cost):
if self.config.weight_l1 > 0:
logging.info("L1 weight regularization: %f" % self.config.weight_l1)
cost += self.config.weight_l1 * sum(abs(w).sum() for w in self.network.parameters)
if self.config.hidden_l1 > 0:
logging.info("L1 hidden unit regularization: %f" % self.config.hidden_l1)
cost += self.config.hidden_l1 * sum(abs(h).mean(axis=0).sum() for h in self.network._hidden_outputs)
if self.config.hidden_l2 > 0:
logging.info("L2 hidden unit regularization: %f" % self.config.hidden_l2)
cost += self.config.hidden_l2 * sum((h * h).mean(axis=0).sum() for h in self.network._hidden_outputs)
return cost
def set_params(self, targets, free_params=None):
for param, target in zip(self.network.parameters, targets):
param.set_value(target)
if free_params:
for param, param_value in zip(self.network.free_parameters, free_params):
param.set_value(param_value)
def save_params(self, path):
self.set_params(*self.best_params)
self.network.save_params(path) | MIT License |
neuropycon/graphpype | graphpype/labeled_mask.py | compute_ROI_nii_from_ROI_coords_files | python | def compute_ROI_nii_from_ROI_coords_files(
ref_img_file, MNI_coords_file, labels_file, neighbourhood=1):
ref_image = nib.load(ref_img_file)
ref_image_data = ref_image.get_data()
ref_image_data_shape = ref_image_data.shape
ref_image_data_sform = ref_image.get_sform()
ROI_MNI_coords_list = np.array(np.loadtxt(
MNI_coords_file), dtype='int').tolist()
ROI_labels = [lign.strip() for lign in open(labels_file)]
mni_sform_inv = np.linalg.inv(ref_image_data_sform)
ROI_coords = np.array([_coord_transform(x, y, z, mni_sform_inv)
for x, y, z in ROI_MNI_coords_list], dtype="int64")
for i, ROI_coord in enumerate(ROI_coords):
ROI_coords_labelled_mask = np.zeros(
shape=ref_image_data_shape, dtype='int64')
neigh_range = list(range(-neighbourhood, neighbourhood+1))
for relative_coord in iter.product(neigh_range, repeat=3):
neigh_x, neigh_y, neigh_z = ROI_coord + relative_coord
print(neigh_x, neigh_y, neigh_z)
if check_np_dimension(ROI_coords_labelled_mask.shape,
np.array([neigh_x, neigh_y, neigh_z],
dtype='int64')):
ROI_coords_labelled_mask[neigh_x, neigh_y, neigh_z] = 1
print(ROI_coords_labelled_mask)
path, fname, ext = split_f(MNI_coords_file)
ROI_coords_labelled_mask_file = os.path.join(
path, "ROI_{}-neigh_{}_2.nii".format(ROI_labels[i],
str(neighbourhood)))
nib.save(nib.Nifti1Image(
ROI_coords_labelled_mask, ref_image.affine,
ref_image.header), ROI_coords_labelled_mask_file)
return ROI_coords_labelled_mask_file | Export single file VOI binary nii image | https://github.com/neuropycon/graphpype/blob/409a370e7d293c3fcff0d733bf7af50850dfa9e4/graphpype/labeled_mask.py#L256-L309 | import nipype.interfaces.spm as spm
from nipype.utils.filemanip import split_filename as split_f
from graphpype.utils import check_np_dimension
import itertools as iter
import numpy as np
import nibabel as nib
import glob
import os
from scipy import ndimage as ndimg
from scipy.spatial.distance import cdist
def _coord_transform(x, y, z, affine):
coords = np.c_[np.atleast_1d(x).flat,
np.atleast_1d(y).flat,
np.atleast_1d(z).flat,
np.ones_like(np.atleast_1d(z).flat)].T
x, y, z, _ = np.dot(affine, coords)
return x.squeeze(), y.squeeze(), z.squeeze()
def create_indexed_mask(ref_img_file, MNI_coords_list, ROI_dir,
ROI_mask_prefix="def", ROI_shape="cube", ROI_size=10):
np_coord = np.array(MNI_coords_list)
if len(np_coord.shape) > 1:
dist = cdist(np_coord, np_coord, metric='euclidean')
assert np.all(dist[np.triu_indices(dist.shape[0], k=1)]
> ROI_size), "Error, distance < {}".format(ROI_size)
ref_img = nib.load(ref_img_file)
ref_img_shape = ref_img.get_data().shape
if len(ref_img_shape) == 4:
print("using 4D image for computing 3D mask, reducing shape")
ref_img_shape = ref_img_shape[:-1]
print(ref_img_shape)
ref_img_affine = ref_img.affine
inv_affine = np.linalg.inv(ref_img_affine)
ref_img_hd = ref_img.header
pixdims = ref_img_hd['pixdim'][1:4]
indexed_mask_data = np.zeros(shape=ref_img_shape) - 1
if ROI_shape not in ["sphere", "cube"]:
print("Warning, could not determine shape {}, using cube instead"
.format(ROI_shape))
ROI_shape = "cube"
if ROI_shape == "cube":
print("ROI_shape = cube")
vox_dims = list(map(int, float(ROI_size)/pixdims))
print(vox_dims)
neigh_range = []
for vox_dim in vox_dims:
vox_neigh = vox_dim/2
if vox_dim % 2 == 1:
cur_range = np.arange(-vox_neigh, vox_neigh+1)
elif vox_dim % 2 == 0:
cur_range = np.arange(-vox_neigh+1, vox_neigh+1)
neigh_range.append(cur_range)
ROI_coords = []
for index_mask, MNI_coords in enumerate(MNI_coords_list):
ijk_coord = _coord_transform(MNI_coords[0], MNI_coords[1],
MNI_coords[2], inv_affine)
neigh_coords = np.array(
[list(i) for i in iter.product(*neigh_range)], dtype=int)
cur_coords = np.array([list(map(int, ijk_coord + neigh_coord))
for neigh_coord in neigh_coords])
max_i, max_j, max_k = indexed_mask_data.shape
keep = (0 <= cur_coords[:, 0]) & (cur_coords[:, 0] < max_i) & (0 <= cur_coords[:, 1]) & (cur_coords[:, 1] < max_j) & (0 <= cur_coords[:, 2]) & (cur_coords[:, 2] < max_k)
if np.all(keep is False):
continue
indexed_mask_data[cur_coords[keep, 0], cur_coords[keep, 1],
cur_coords[keep, 2]] = index_mask
print(np.sum(indexed_mask_data == index_mask))
ROI_coords.append(ijk_coord)
elif ROI_shape == "sphere":
print("building spheres of {} mm".format(ROI_size))
radius = ROI_size/2.0
print(radius)
vox_dims = list(map(int, float(radius)/pixdims))
print(vox_dims)
r2_dim = []
neigh_range = []
for i, vox_dim in enumerate(vox_dims):
pixdim = pixdims[i]
cur_range = np.arange(-vox_dim, (vox_dim+1))
print(cur_range)
cur_r2 = (cur_range*pixdim)**2
print(cur_r2)
neigh_range.append(cur_range.tolist())
r2_dim.append(cur_r2)
print(neigh_range)
neigh_coords = np.array(
[list(i) for i in iter.product(*neigh_range)], dtype=int)
neigh_dist = np.array([np.sum(i) for i in iter.product(*r2_dim)])
neigh_range = neigh_coords[neigh_dist < radius**2]
ROI_coords = []
for index_mask, MNI_coords in enumerate(MNI_coords_list):
ijk_coord = np.dot(inv_affine, np.array(
MNI_coords + [1], dtype='int'))[:-1]
ROI_coords.append(ijk_coord)
cur_coords = np.array([list(map(int, ijk_coord + neigh_coord))
for neigh_coord in neigh_range.tolist()])
indexed_mask_data[cur_coords[:, 0],
cur_coords[:, 1], cur_coords[:, 2]] = index_mask
print(np.sum(indexed_mask_data == index_mask))
try:
os.makedirs(ROI_dir)
except OSError:
print("directory already created")
indexed_mask_file = os.path.join(
ROI_dir, "indexed_mask-" + ROI_mask_prefix + ".nii")
nib.save(nib.Nifti1Image(indexed_mask_data,
ref_img_affine), indexed_mask_file)
ROI_coords_file = os.path.join(
ROI_dir, "ROI_coords-" + ROI_mask_prefix + ".txt")
np.savetxt(ROI_coords_file, np.array(ROI_coords, dtype=int), fmt="%d")
return indexed_mask_file | BSD 3-Clause New or Revised License |
sanic-org/sanic | sanic/server/socket.py | remove_unix_socket | python | def remove_unix_socket(path: Optional[str]) -> None:
if not path:
return
try:
if stat.S_ISSOCK(os.stat(path, follow_symlinks=False).st_mode):
with socket.socket(socket.AF_UNIX) as testsock:
try:
testsock.connect(path)
except ConnectionRefusedError:
os.unlink(path)
except FileNotFoundError:
pass | Remove dead unix socket during server exit. | https://github.com/sanic-org/sanic/blob/3262878ebd41aa2230ef15d4475bbcf223b2356b/sanic/server/socket.py#L74-L87 | from __future__ import annotations
import os
import secrets
import socket
import stat
from ipaddress import ip_address
from typing import Optional
def bind_socket(host: str, port: int, *, backlog=100) -> socket.socket:
try:
ip = ip_address(host)
host = str(ip)
sock = socket.socket(
socket.AF_INET6 if ip.version == 6 else socket.AF_INET
)
except ValueError:
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(backlog)
return sock
def bind_unix_socket(path: str, *, mode=0o666, backlog=100) -> socket.socket:
path = os.path.abspath(path)
folder = os.path.dirname(path)
if not os.path.isdir(folder):
raise FileNotFoundError(f"Socket folder does not exist: {folder}")
try:
if not stat.S_ISSOCK(os.stat(path, follow_symlinks=False).st_mode):
raise FileExistsError(f"Existing file is not a socket: {path}")
except FileNotFoundError:
pass
tmp_path = f"{path}.{secrets.token_urlsafe()}"
sock = socket.socket(socket.AF_UNIX)
try:
sock.bind(tmp_path)
try:
os.chmod(tmp_path, mode)
sock.listen(backlog)
os.rename(tmp_path, path)
except:
try:
os.unlink(tmp_path)
finally:
raise
except:
try:
sock.close()
finally:
raise
return sock | MIT License |
Dataset Summary
Scotch is a dataset of about 19 million functions collected from open-source GitHub repositories with permissive licenses. Each function has its corresponding code context, and about 4 million functions have corresponding docstrings.
Languages
The dataset includes functions written in the programming languages Python, Java, JavaScript, and Go.
Statistics
Split
The functions with docstrings are split into train, valid, and test sets of 3,200,626, 400,077, and 400,080 functions, respectively.
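A minimal sketch of loading these splits with the Hugging Face `datasets` library is shown below; the Hub identifier and split names are assumptions used for illustration and should be replaced with the values shown on this dataset page.

```python
# Sketch: loading the Scotch splits with the `datasets` library.
# NOTE: "Samip/Scotch" and the split names are assumed here for illustration;
# use the identifier and split names shown on this dataset page.
from datasets import load_dataset

scotch = load_dataset("Samip/Scotch")  # expected to return a DatasetDict

for split_name, split in scotch.items():
    # Card-reported sizes: train 3,200,626 / valid 400,077 / test 400,080
    print(split_name, len(split))
```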
Features
Each function consists of the following features; a short sketch after this list shows how to read them from a loaded example:
- repository_name: Name of the repository the function belongs to.
- function_path: Path of the function within the repository.
- function_identifier: Function name/identifier.
- language: Programming language the function is written in.
- function: Function string.
- docstring: Function docstring.
- function_url: URL to the function code.
- context: Code context.
- license: License info of the repository (includes only repositories with permissive licenses).
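As a hedged illustration of these fields, the sketch below streams a single example and prints each feature. It assumes the same Hub identifier as above and that the field names match this list exactly.

```python
# Sketch: inspecting the listed features on one streamed example.
from datasets import load_dataset

# Streaming avoids downloading ~19M functions just to look at one row.
ds = load_dataset("Samip/Scotch", split="train", streaming=True)  # assumed identifier
example = next(iter(ds))

for field in ("repository_name", "function_path", "function_identifier",
              "language", "docstring", "function_url", "license"):
    print(f"{field}: {example.get(field)}")

# `function` and `context` hold code and can be long, so show only their sizes.
print("function length (chars):", len(example.get("function", "")))
print("context length (chars):", len(example.get("context", "")))
```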
Data Collection
The dataset is collected from GitHub repositories of the respective languages with 5 or more stars. Such repositories are listed using SEART. Functions are parsed using a lightweight parser built on top of the function parser from the CodeSearchNet dataset, and repositories were collected with the help of github-downloader from EleutherAI.
Data Processing
All code without a permissive license is removed, and deduplication is performed on the remaining set of functions. Afterwards, all functions consisting of a single line of code, as well as functions whose docstring contains non-English characters, are removed. Files containing multiple identical functions are excluded. This results in about 19M functions. To obtain a dataset of NL-code pairs, functions with no docstring, or with a docstring of fewer than 3 whitespace-separated tokens, are excluded. Following CodeSearchNet, functions with the 'test' keyword in their name are excluded.
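The NL-code pair filters described above can be approximated with a small predicate. The sketch below is not the authors' processing code; the tokenization and the non-ASCII proxy for "non-English characters" are assumptions.

```python
# Sketch of the described NL-code pair filters (approximation, not the original pipeline).
def keep_nl_code_pair(function_name: str, function_code: str, docstring: str) -> bool:
    # Exclude functions with no docstring or fewer than 3 whitespace-separated tokens.
    if not docstring or len(docstring.split()) < 3:
        return False
    # Exclude docstrings with non-English characters (approximated as non-ASCII).
    if not docstring.isascii():
        return False
    # Exclude functions with only a single line of code.
    if len([line for line in function_code.splitlines() if line.strip()]) <= 1:
        return False
    # Following CodeSearchNet, exclude functions with 'test' in their name.
    if "test" in function_name.lower():
        return False
    return True
```

A function is kept only if it clears every filter; applying this predicate to the deduplicated set would yield the NL-code subset described above.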
License
This dataset is under the MIT License. However, the repositories the functions are collected from may carry several different permissive licenses, including the MIT License, Apache License 2.0, BSD 3-Clause "New" or "Revised" License, BSD 2-Clause "Simplified" License, and ISC License.